[Erp5-report] r8949 - /erp5/trunk/utils/erp5mechanize/runBenchmarks.py

nobody at svn.erp5.org nobody at svn.erp5.org
Tue Aug 1 11:02:45 CEST 2006


Author: vincent
Date: Tue Aug  1 11:02:42 2006
New Revision: 8949

URL: http://svn.erp5.org?rev=8949&view=rev
Log:
Wordwrap at 80 chars.
Fix bugs introduced in the parameters-from-config-file feature.

Modified:
    erp5/trunk/utils/erp5mechanize/runBenchmarks.py

Modified: erp5/trunk/utils/erp5mechanize/runBenchmarks.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5mechanize/runBenchmarks.py?rev=8949&r1=8948&r2=8949&view=diff
==============================================================================
--- erp5/trunk/utils/erp5mechanize/runBenchmarks.py (original)
+++ erp5/trunk/utils/erp5mechanize/runBenchmarks.py Tue Aug  1 11:02:42 2006
@@ -37,7 +37,8 @@
 
 class LogFile:
   """
-    Handles log writing in a way to ensure that logs are safely saved even if test crashes, to allow logging for long periods without fearing a crash.
+    Handles log writing in a way to ensure that logs are safely saved even if
+    test crashes, to allow logging for long periods without fearing a crash.
   """
   def __init__(self, filename):
     self.filename = filename
@@ -100,9 +101,14 @@
       if MAIN_IS_ALIVE == 0:
         return
       try:
-        result = test['function'](url=self.url, name=self.name, password=self.password, lazyness=self.lazyness, **test.get('kw', {}))
+        result = test['function'](url=self.url,
+                                  name=self.name,
+                                  password=self.password,
+                                  lazyness=self.lazyness,
+                                  **test.get('kw', {}))
         self.test_result_list.append({'id': test['id'], 'step_list': result})
-      except: # Don't let server error kill the whole test list - but notify that this test went wrong
+      except: # Don't let server error kill the whole test list - but notify
+              # that this test went wrong
         self.test_result_list.append({'id': test['id'], 'step_list': []})
       sleep(randint(10,20) * self.lazyness)
 
@@ -138,7 +144,8 @@
     values[pos].replace('.', ',')
   return ';'.join(values)
 
-def main(instance_list, test_list, dresults=None, results=None, load=None, Lazyness=None, usercount=None, Repeat=1, zopecount=None):
+def main(instance_list, test_list, dresults=None, results=None, load=None,
+         Lazyness=None, usercount=None, Repeat=1, zopecount=None):
   """
     Create one thread per (instance, user).
     Start threads.
@@ -157,11 +164,13 @@
     load_file = LogFile(load)
   del load
   temp_list = []
-  for repetition in xrange(int(Repeat)): # 0 mean only one execution, 1 means two executions, etc.
+  for repetition in xrange(int(Repeat)): # 0 means only one execution, 1 means
+                                         # two executions, etc.
     temp_list.extend(test_list)
   test_list = temp_list
   del temp_list
-  for signal_number in (signal.SIGHUP, signal.SIGSEGV, signal.SIGTERM): # All signal who could lead to 'soft' program termination
+  for signal_number in (signal.SIGHUP, signal.SIGSEGV, signal.SIGTERM):
+    # All signals which could lead to 'soft' program termination
     signal.signal(signal_number, sig_handler)
   if zopecount is not None:
     zopecount = int(zopecount)
@@ -185,7 +194,9 @@
       except IndexError:
         user = None
       if user is not None:
-        thread = BenchmarkThread(url=instance['url'], name=user['name'], password=user['password'], test_list=test_list)
+        thread = BenchmarkThread(url=instance['url'], name=user['name'],
+                                 password=user['password'],
+                                 test_list=test_list)
         thread.setName('%s@%s' % (user['name'], instance['url']))
         if Lazyness is not None:
           thread.setLazyness(float(Lazyness))
@@ -198,73 +209,94 @@
 
   while len(thread_list):
     sleep(5) # Sleep to leave more cpu time for actual tests.
-    for pos in xrange(len(thread_list)-1, -1, -1): # From last to first, to be able to delete objects while looping.
+    for pos in xrange(len(thread_list)-1, -1, -1):
+      # From last to first, to be able to delete objects while looping.
       thread = thread_list[pos]
       thread_name = thread.getName()
       thread_url = thread.getUrl()
       test_result = thread.getResult()
       while test_result is not None:
-        sys.stdout.write('%i: Thread %s, test %s : ' % (time(), thread_name, test_result['id']))
-        if len(test_result['step_list']) > 0: # Thread produced exploitable results (the test did not fail).
+        sys.stdout.write('%i: Thread %s, test %s : ' % (time(),
+                                              thread_name, test_result['id']))
+        if len(test_result['step_list']) > 0:
+          # Thread produced exploitable results (the test did not fail).
           print 'Success'
           test_result['step_list'].sort()
           step_list = test_result['step_list']
-          # Simple profiling : store an entry containing start & stop timestamps for each test
+          # Simple profiling : store an entry containing start & stop
+          # timestamps for each test
           if results_file is not None:
             duration = 0
             for test_step in step_list:
               duration += test_step.getDuration()
-            results_file.append(render_csv(thread.getName(), test_result['id'], duration, len(step_list)))
-          # Detailed profiling : store an entry containing start & stop timestamps for each step
+            results_file.append(render_csv(thread.getName(), test_result['id'],
+                                duration, len(step_list)))
+          # Detailed profiling : store an entry containing start & stop
+          # timestamps for each step
           if dresults_file is not None:
             for test_step in step_list: # Feed step-level output file
-              dresults_file.append(render_csv(thread.getName(), test_result['id'], test_step.getId(), test_step.getStart(), test_step.getStop()))
-          # Load computation : store 1 at each step start timestamp, and -1 at each step stop timestamp, cumulating with concurent steps if any.
+              dresults_file.append(render_csv(thread.getName(),
+                                              test_result['id'],
+                                              test_step.getId(),
+                                              test_step.getStart(),
+                                              test_step.getStop()))
+          # Load computation : store 1 at each step start timestamp, and -1 at
+          # each step stop timestamp, cumulating with concurrent steps if any.
           if load_file is not None:
             for test_step in step_list:
               if not server_load_list.has_key(thread_url):
                 server_load_list[thread_url] = {}
               cause = (thread_url, test_step.getId())
-              for timestamp, delta in ((test_step.getStart(), 1), (test_step.getStop(), -1)):
+              for timestamp, delta in ((test_step.getStart(), 1),
+                                       (test_step.getStop(), -1)):
                 if not server_load_list[thread_url].has_key(timestamp):
-                  server_load_list[thread_url][timestamp] = {'delta': delta, 'cause_list': [cause]}
+                  server_load_list[thread_url][timestamp] = \
+                                       {'delta': delta, 'cause_list': [cause]}
                 else:
                   server_load_list[thread_url][timestamp]['delta'] += delta
-                  server_load_list[thread_url][timestamp]['cause_list'].append(cause)
+                  server_load_list[thread_url][timestamp]['cause_list'].\
+                                                                 append(cause)
         else: # Test did fail.
           print 'Failed'
           if results_file is not None:
-            results_file.append(render_csv(thread.getName(), test_result['id'], 'FAILED'))
+            results_file.append(render_csv(thread.getName(),
+                                           test_result['id'], 'FAILED'))
           if dresults_file is not None:
-            dresults_file.append(render_csv(thread.getName(), test_result['id'], 'FAILED'))
+            dresults_file.append(render_csv(thread.getName(),
+                                            test_result['id'], 'FAILED'))
         test_result = thread.getResult() # Get next test result
       if not thread.isAlive():
         del thread_list[pos] # This thread won't move anymore, throw it.
 
-  # Post processing of load computation : as threads can produce results in any order, we must wait for everything to be finished to compute the load.
+  # Post processing of load computation : as threads can produce results in
+  # any order, we must wait for everything to be finished to compute the load.
   for server, server_load in server_load_list.items():
     current_load = 0
-    event_list=[{'timestamp': timestamp, 'delta': delta['delta'], 'cause_list': delta['cause_list']} for timestamp, delta in server_load.items()]
+    event_list=[{'timestamp': timestamp, 'delta': delta['delta'],
+                 'cause_list': delta['cause_list']} for timestamp, delta in \
+                 server_load.items()]
     event_list.sort(sort_event_list)
     for event in event_list:
       current_load += event['delta']
-      event_csv = '%s;%s;%s' % (server, str(event['timestamp']).replace('.', ','), current_load) # XXX: 'replace' for french-style decimal separator
-      load_file.append(event_csv)
+      load_file.append(render_csv(server, event['timestamp'], current_load))
 
 if __name__ == '__main__':
   import getopt
-  possible_arg_list = ('config', 'dresults', 'results', 'load', 'Lazyness', 'usercount', 'Repeat', 'zopecount')
+  possible_arg_list = ('config', 'dresults', 'results', 'load', 'Lazyness',
+                       'usercount', 'Repeat', 'zopecount')
   possible_opt_list = (, )
   kw = {}
   args = []
   try:
-    opt_list, arg_list = getopt.getopt(sys.argv[1:], '%s%s' % ( ''.join(['%s:' % (v[0], ) for v in possible_arg_list])
-                                                              , ''.join([v[0] for v in possible_opt_list]))
-                                                   , ['%s=' % (v, ) for v in possible_arg_list] + list(possible_opt_list))
+    opt_list, arg_list = getopt.getopt(sys.argv[1:],
+             '%s%s' % ( ''.join(['%s:' % (v[0], ) for v in possible_arg_list])
+                      , ''.join([v[0] for v in possible_opt_list])),
+             ['%s=' % (v, ) for v in possible_arg_list] + list(possible_opt_list))
   except getopt.error, msg:
     print msg
     sys.exit(2)
-  combined_arg_list = dict([('-%s' % (v[0], ), v) for v in possible_arg_list] + [('--%s' % (v, ), v) for v in possible_arg_list])
+  combined_arg_list = dict([('-%s' % (v[0], ), v) for v in possible_arg_list] + \
+                           [('--%s' % (v, ), v) for v in possible_arg_list])
   for o, a in opt_list:
     if o in combined_arg_list:
       kw['%s' % combined_arg_list[o]] = a
@@ -272,32 +304,48 @@
     import imp
     import os
     # XXX This is a workaround for the case where PWD != path of the script
-    # It can happend when calling script using a symlink, in which case the actual PWD isn't prepended to path.
-    sys.path.insert(0, os.environ['PWD']) # Modify sys.path to make sure imports from inside the tests suite will also have a correct path. 
+    # It can happen when calling the script using a symlink, in which case the
+    # actual PWD isn't prepended to path.
+    sys.path.insert(0, os.environ['PWD']) # Modify sys.path to make sure
+                                          # imports from inside the tests
+                                          # suite will also have a correct
+                                          # path.
     file, path, description = imp.find_module(kw['config'], sys.path)
     module = imp.load_module(kw['config'], file, path, description)
     file.close()
     del kw['config'] # It should not be passed to main
-    if module.hasattr('runBenchmarks_parameter_list'):
+    if 'runBenchmarks_parameter_list' in dir(module):
+      # This part handles parameters stored in the configuration file.
+      # If the parameter value is callable, it's called and gets one argument
+      # which is the dictionary of parameters actually given to the program,
+      # i.e. without any parameter coming from the parameter list given in the
+      # file. This is done to make sure the way parameters are found doesn't
+      # impact generation.
       initial_kw = kw
       for k in possible_arg_list:
-        if k != 'config' and module.runBenchmarks_parameter_list.has_key[k] and not kw.has_key[k]:
+        if k != 'config' and module.runBenchmarks_parameter_list.has_key(k) \
+                                                        and not kw.has_key(k):
           if callable(module.runBenchmarks_parameter_list[k]):
-            module.runBenchmarks_parameter_list[k] = module.runBenchmarks_parameter_list[k](initial_kw)
+            module.runBenchmarks_parameter_list[k] = \
+                            module.runBenchmarks_parameter_list[k](initial_kw)
           kw[k] = module.runBenchmarks_parameter_list[k]
     args.append(module.instance_list)
     args.append(module.test_list)
   else:
-    print "'config' argument is required. Configuration file must be a python module and define :"
-    print "  instance_list ex: [{'url': 'http://127.0.0.1/erp5',"
-    print "                      'user_list': [{'name': 'testuser1', 'password': 'secret'}]}]"
-    print "  test_list     ex: [{'id': 'theFirstTest',"
-    print "                      'function': someStandaloneTestFunction,"
-    print "                      'kw': {'some_parameter': 'some_value_used_in_test_function'}}]"
-    print " Optionally, it can also contain :"
-    print "  runBenchmarks_parameter_list ex: {'Repeat': '3', 'Lazyness': '1'}"
-    print "The test function has the following prototype :"
-    print "  someStandaloneTestFunction(url, name, password) -> [TimeResult[, TimeResult[, ...]]]"
+    print """\
+'config' argument is required.
+Configuration file must be a python module and must define :
+  instance_list ex: [{'url': 'http://127.0.0.1/erp5',
+                      'user_list': [{'name': 'testuser1', 
+                                     'password': 'secret'}]}]
+  test_list     ex: [{'id': 'theFirstTest',
+                      'function': someStandaloneTestFunction,
+                      'kw': {'some_parameter': 'some_value'}}]
+ Optionally, it can also contain :
+  runBenchmarks_parameter_list ex: {'Repeat': '3', 'Lazyness': '1'}
+The test function has the following prototype :
+  someStandaloneTestFunction(url, name, password) -> [TimeResult[, ...]]
+"""
   try:
     main(*args, **kw)
   except:




More information about the Erp5-report mailing list