[Erp5-report] r8851 - /erp5/trunk/utils/erp5mechanize/runBenchmarks.py

nobody at svn.erp5.org
Fri Jul 28 13:23:26 CEST 2006


Author: vincent
Date: Fri Jul 28 13:23:25 2006
New Revision: 8851

URL: http://svn.erp5.org?rev=8851&view=rev
Log:
Save the number of steps run by each test to result-type output files.

Modified:
    erp5/trunk/utils/erp5mechanize/runBenchmarks.py

Modified: erp5/trunk/utils/erp5mechanize/runBenchmarks.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5mechanize/runBenchmarks.py?rev=8851&r1=8850&r2=8851&view=diff
==============================================================================
--- erp5/trunk/utils/erp5mechanize/runBenchmarks.py (original)
+++ erp5/trunk/utils/erp5mechanize/runBenchmarks.py Fri Jul 28 13:23:25 2006
@@ -97,7 +97,6 @@
      Waits a random (configurable) amount of time.
     """
     for test in self.test_list:
-      sleep(randint(10,20) * self.lazyness)
       if MAIN_IS_ALIVE == 0:
         return
       try:
@@ -105,6 +104,7 @@
         self.test_result_list.append({'id': test['id'], 'step_list': result})
      except: # Don't let a server error kill the whole test list - but record that this test went wrong
         self.test_result_list.append({'id': test['id'], 'step_list': []})
+      sleep(randint(10,20) * self.lazyness)
 
 def sig_handler(signal_number, stack):
   """
@@ -214,7 +214,7 @@
             duration = 0
             for test_step in step_list:
               duration += test_step.getDuration()
-            results_file.append(render_csv(thread.getName(), test_result['id'], duration))
+            results_file.append(render_csv(thread.getName(), test_result['id'], duration, len(step_list)))
           # Detailed profiling : store an entry containing start & stop timestamps for each step
           if dresults_file is not None:
             for test_step in step_list: # Feed step-level output file

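The last hunk threads len(step_list) through to render_csv, so each row of
the result-type output file also records how many steps the test actually
ran. Since a failed test is stored with an empty step_list, the new column
presumably lets a reader tell a test that aborted (0 steps, duration 0)
apart from one that merely ran quickly. render_csv itself is not shown in
the diff, so the sketch below only illustrates the extended signature; the
field order and separator are assumptions:

  def render_csv(thread_name, test_id, duration, step_count):
      # Hypothetical rendering matching the call site in the diff:
      # render_csv(thread.getName(), test_result['id'], duration,
      #            len(step_list))
      return '%s,%s,%s,%s' % (thread_name, test_id, duration, step_count)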