[Erp5-report] r8353 - in /erp5/trunk/utils/erp5mechanize: ERP5Mechanize.py runBenchmarks.py

nobody at svn.erp5.org
Mon Jul 10 13:16:22 CEST 2006


Author: vincent
Date: Mon Jul 10 13:16:20 2006
New Revision: 8353

URL: http://svn.erp5.org?rev=8353&view=rev
Log:
Clean up docstrings.
Implement new class TimeResult.
Use TimeResult to return execution times.
Remove presentation-related functions from the ERP5Mechanize file.
Make the user number limit apply to all servers.

Modified:
    erp5/trunk/utils/erp5mechanize/ERP5Mechanize.py
    erp5/trunk/utils/erp5mechanize/runBenchmarks.py

Modified: erp5/trunk/utils/erp5mechanize/ERP5Mechanize.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5mechanize/ERP5Mechanize.py?rev=8353&r1=8352&r2=8353&view=diff
==============================================================================
--- erp5/trunk/utils/erp5mechanize/ERP5Mechanize.py (original)
+++ erp5/trunk/utils/erp5mechanize/ERP5Mechanize.py Mon Jul 10 13:16:20 2006
@@ -68,7 +68,7 @@
 
   def timeOpen(self, request):
     """
-      Returns the number or seconds spent in this call.
+      Returns begin and end timestamps.
     """
     start = time()
     self.last_page = self.browser.open(request)
@@ -77,7 +77,7 @@
 
   def timeSubmit(self):
     """
-      Returns the number or seconds spent in this call.
+      Returns begin and end timestamps.
     """
     start = time()
     self.last_page = self.browser.submit()
@@ -317,76 +317,81 @@
     """
     return self.doAction(link_target=value)
     
-
-# Listbox specific stuff. XXX: to be implemented
-#  def getListboxDataLineCount(self, id):
-#    """
-#      Returns the number of table data rows of the given listbox.
-#      Statistic & title lines are exclued from this count.
-#    """
-#
-#  def getListboxDataColumnCount(self, id):
-#    """
-#      Returns the number of table data columns of the given listbox.
-#      Columns like the one containing checkboxes is exclued from this count.
-#    """
-#
-#  def followListboxLink(self, id, row, column=0):
-#    """
-#      Follow the link in listbox id, on given row number and given column number.
-#      The column and line number follows the restrictions of getListboxDataLineCount and getListboxDataColumnCount.
-#    """
-#
-#  def (self):
-
-def format(duration, max_action_length, start=None, stop=None):
-  """
-    Returns a human-friendly string representation of a result set.
-    duration          : Tupple of 2 timestamps : (start, stop).
-    max_action_length : If action took more than this amount of time, print an additionnal warning.
-    start, stop       : Optionnal timestamps designed to describe an overhead around the measured time. Allows to profile the profiler, at a certain extent.
-  """
-  action_length = duration[1]-duration[0]
-  res = '%.03fs' % (action_length, )
-  if None not in (start, stop):
-    first_delay = duration[0]-start
-    second_delay = stop-duration[1]
-    res = 'Duration = %s, Overhead = %.03fs (%.02f%%)' % (res, first_delay+second_delay, (first_delay+second_delay)/(action_length+first_delay+second_delay)*100)
-  if action_length > max_action_length:
-    res = '%s TIME EXCEEDED (limit=%s)' % (res, max_action_length)
-  return res
-
-def timeExecution(id, function, params={}, measure_overhead=0):
-  """
-    Esecute function with given parameters.
-    Return a dict containing :
-    id    : The given id. Dezsigned to identify the test step among other steps.
-    start : Timestamp taken right before starting the http request. Must be returned by the called function as the first value of a 2 item tuple.
-    stop  : Timestamp taken right after the http request returned. Must be returned by the called function as the second value of a 2 item tuple.
-
-    If measure_overhead if true, there are 2 additionnal fields in the returned dict :
-    test_start : Timestamp taken right before calling the function.
-    test_stop  : Timestamp taken right after the function returned.
-    This allows the measure the ammount of time "wasted" by the test system before and after the http request.
-  """
-  if measure_overhead:
-    start = time()
-    duration = function(**params)
-    stop = time()
-    return {'id': title, 'start': duration[0], 'stop': duration[1], 'test_start': start, 'test_stop': stop}
-  else:
-    duration = function(**params)
-    return {'id': title, 'start': duration[0], 'stop': duration[1]}
-
-def renderResult(actor_id, test_id, start, stop):
-  """
-    Renders a CSV of given values.
-  """
-  return '%s, %s, %s, %d\n' % (actor_id, test_id, start, stop)
-
-def renderDetailedResult(actor_id, test_id, step_id, start, stop):
-  """
-    Renders a CSV of given values.
-  """
-  return '%s, %s, %s, %s, %s\n' % (actor_id, test_id, step_id, start, stop)
-
+class TimeResult:
+  """
+    Contains details about a test's execution:
+      - test id
+      - request begin timestamp
+      - request end timestamp
+    And optionally:
+      - test begin timestamp
+      - test end timestamp
+    Those last two values make it possible to measure the time spent handling the request before and after sending it.
+  """
+
+  def __init__(self, id, start, stop, test_start=None, test_stop=None):
+    self.id = id
+    self.start = start
+    self.stop = stop
+    self.test_start = test_start
+    self.test_stop = test_stop
+
+  def __cmp__(self, other):
+    """
+      cmp-style comparison, so list.sort() orders TimeResult instances by
+      start timestamp, then by stop timestamp, both ascending.
+    """
+    o_start = other.getStart()
+    if self.start < o_start:
+      return -1
+    if self.start > o_start:
+      return 1
+    o_stop = other.getStop()
+    if self.stop < o_stop:
+      return -1
+    if self.stop > o_stop:
+      return 1
+    return 0
+
+  def getId(self):
+    """ Return the test id """
+    return self.id
+
+  def getStart(self):
+    """ Return the request begin timestamp """
+    return self.start
+
+  def getStop(self):
+    """ Return the request end timestamp """
+    return self.stop
+
+  def getTestStart(self):
+    """ Return the test begin timestamp """
+    return self.test_start
+
+  def getTestStop(self):
+    """ Return the test end timestamp """
+    return self.test_stop
+
+  def getDuration(self):
+    """ Return the request duration """
+    return self.stop - self.start
+
+  def getTestDuration(self):
+    """ Return the test duration (including time spent in the request) """
+    return self.test_stop - self.test_start
+
+  def getOverhead(self):
+    """ Return the test overhead duration (ie. test time spent outside of request time) """
+    return self.getTestDuration() - self.getDuration()
+
+def timeExecution(id, function, args=[], kw={}):
+  """
+    Execute function with given parameters.
+    Returns a TimeResult instance.
+  """
+  start = time()
+  duration = function(*args, **kw)
+  stop = time()
+  return TimeResult(id, duration[0], duration[1], start, stop)
+
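
A minimal usage sketch of the new API (only timeExecution and the
TimeResult accessors come from the code above; the test step function is
hypothetical):

  from time import time
  from ERP5Mechanize import timeExecution

  def openHomePage():
    # A timed step must return a (start, stop) timestamp pair, as
    # timeOpen and timeSubmit above do.
    start = time()
    # ... issue the HTTP request here ...
    stop = time()
    return (start, stop)

  result = timeExecution('openHomePage', openHomePage)
  print '%s took %.03fs (overhead %.03fs)' % (
    result.getId(), result.getDuration(), result.getOverhead())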

Modified: erp5/trunk/utils/erp5mechanize/runBenchmarks.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5mechanize/runBenchmarks.py?rev=8353&r1=8352&r2=8353&view=diff
==============================================================================
--- erp5/trunk/utils/erp5mechanize/runBenchmarks.py (original)
+++ erp5/trunk/utils/erp5mechanize/runBenchmarks.py Mon Jul 10 13:16:20 2006
@@ -32,11 +32,10 @@
 from time import time, sleep
 import sys
 import signal
-from ERP5Mechanize import renderResult, renderDetailedResult
 
 MAIN_IS_ALIVE = 1
 
-class LogFile():
+class LogFile:
   """
    Handles log writing so that entries are safely saved even if the test crashes, allowing logging over long periods without fear of losing results.
   """
@@ -45,12 +44,17 @@
     file = open(filename, "w") # Empty / create file.
     file.close()
 
-  def append(data):
-    file = open(filename, "a")
+  def append(self, data):
+    """
+      Appends data to LogFile.
+      Also appends a trailing newline.
+    """
+    file = open(self.filename, "a")
     file.write(data)
+    file.write('\n')
     file.close()
 
-class Benchmark(Thread):
+class BenchmarkThread(Thread):
   """
     Benchmarking thread.
     Runs tests as a given user on a given ERP5 instance.
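
The LogFile pattern above, as a standalone sketch (the filename is
hypothetical, and the constructor is assumed to store it as
self.filename): each append opens, writes and closes the file, so a crash
of the test process loses at most the line being written.

  log = LogFile('results.csv')
  log.append('first,csv,line')  # flushed and closed immediately
  log.append('second,csv,line')
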
@@ -126,7 +130,16 @@
     return 1
   return 0
 
-def main(instance_list, test_list, dresults=None, results=None, load=None, minsleep=None, Maxsleep=None, usercount=None):
+def render_csv(*args, **kw):
+  """
+    Return a CSV-rendered version of arguments.
+
+    TODO: add escapes
+  """
+  # Keyword values are rendered in dict order, after positional arguments.
+  values = [repr(v) for v in args] + [repr(v) for v in kw.itervalues()]
+  return ','.join(values)
+
+def main(instance_list, test_list, dresults=None, results=None, load=None, minsleep=None, Maxsleep=None, usercount=None, verbose=0):
   """
     Create one thread per (instance, user).
     Start threads.
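
For illustration, render_csv above renders each value with repr and joins
them with commas (a hypothetical call):

  render_csv('user1', 'theFirstTest', 1152530000.5, 1152530003.25)
  # -> "'user1','theFirstTest',1152530000.5,1152530003.25"
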
@@ -146,12 +159,11 @@
   del load
  for signal_number in (signal.SIGHUP, signal.SIGSEGV, signal.SIGTERM): # All signals that could lead to 'soft' program termination
     signal.signal(signal_number, sig_handler)
-  if usercount is not None:
-    instance_list = instance_list[0:int(usercount)]
-  del usercount
+  if usercount is None:
+    usercount = sys.maxint # No limit given: use every configured user.
+  else:
+    usercount = int(usercount)
   for instance in instance_list:
-    for user in instance['user_list']:
-      thread = Benchmark(url=instance['url'], name=user['name'], password=user['password'], test_list=test_list)
+    for pos in xrange(min(usercount, len(instance['user_list']))):
+      user = instance['user_list'][pos]
+      thread = BenchmarkThread(url=instance['url'], name=user['name'], password=user['password'], test_list=test_list)
       thread.setName('%s@%s' % (user['name'], instance['url']))
       if minsleep is not None:
         thread.setMinSleep(int(minsleep))
@@ -170,28 +182,30 @@
       test_result = thread.getResult()
       while test_result is not None:
         if len(test_result['step_list']) > 0: # Thread produced exploitable results (the test did not fail).
-          print '%s: Thread %s produced results.' % (time(), thread_name)
+          print '%s: Thread %s finished test %s.' % (time(), thread_name, test_result['id'])
+          test_result['step_list'].sort()
+          step_list = test_result['step_list']
           # Simple profiling : store an entry containing start & stop timestamps for each test
           if results_file is not None:
-            results_file.append(renderResult(thread.getName(), test_result['id'], test_result['step_list'][0]['start'], test_result['step_list'][-1]['stop']))
+            results_file.append(render_csv(thread.getName(), test_result['id'], step_list[0].getStart(), step_list[-1].getStop()))
           # Detailed profiling : store an entry containing start & stop timestamps for each step
           if dresults_file is not None:
-            for test_step in test_result['step_list']: # Feed step-level output file
-              dresults_file.append(renderDetailedResult(thread.getName(), test_result['id'], test_step['id'], test_step['start'], test_step['stop']))
+            for test_step in step_list: # Feed step-level output file
+              dresults_file.append(render_csv(thread.getName(), test_result['id'], test_step.getId(), test_step.getStart(), test_step.getStop()))
           # Load computation : store 1 at each step start timestamp and -1 at each step stop timestamp, accumulating across concurrent steps if any.
           if load_file is not None:
-            for test_step in test_result['step_list']:
+            for test_step in step_list:
               if not server_load_list.has_key(thread_url):
                 server_load_list[thread_url] = {}
-              cause = (thread_url, test_step['id'])
-              for timestamp, delta in ((test_step['start'], 1), (test_step['stop'], -1)):
+              cause = (thread_url, test_step.getId())
+              for timestamp, delta in ((test_step.getStart(), 1), (test_step.getStop(), -1)):
                 if not server_load_list[thread_url].has_key(timestamp):
                   server_load_list[thread_url][timestamp] = {'delta': delta, 'cause_list': [cause]}
                 else:
                   server_load_list[thread_url][timestamp]['delta'] += delta
                   server_load_list[thread_url][timestamp]['cause_list'].append(cause)
         else: # Test did fail.
-          print '%s: Thread %s failed to execute activity %s.' % (time(), thread_name, test_result['id'])
+          print '%s: Thread %s failed to execute test %s.' % (time(), thread_name, test_result['id'])
         test_result = thread.getResult() # Get next test result
       if not thread.isAlive():
         print '%s: Test thread %s ended.' % (time(), thread.getName(), )
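
The load computation above is a sweep over events: +1 at each step start,
-1 at each step stop, so the running sum at any timestamp is the number of
concurrent requests. The core idea, stripped of the per-server and
per-cause bookkeeping, as a standalone sketch (the step list is made up):

  steps = [(1.0, 4.0), (2.0, 3.0), (2.5, 6.0)] # (start, stop) pairs
  events = []
  for start, stop in steps:
    events.append((start, 1))
    events.append((stop, -1))
  events.sort()
  load = 0
  for timestamp, delta in events:
    load += delta # successive loads: 1, 2, 3, 2, 1, 0
    print '%s;%s' % (timestamp, load)
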
@@ -205,13 +219,13 @@
     event_list.sort(sort_event_list)
     for event in event_list:
       current_load += event['delta']
-      event_csv = '%s; %s; %s\n' % (server, str(event['timestamp']).replace('.', ','), current_load) # 'replace' for french-style decimal separator
+      event_csv = '%s;%s;%s' % (server, str(event['timestamp']).replace('.', ','), current_load) # XXX: 'replace' for French-style decimal separator
       load_file.append(event_csv)
 
 if __name__ == '__main__':
   import getopt
   possible_arg_list = ('conf', 'dresults', 'results', 'load', 'minsleep', 'Maxsleep', 'usercount')
-  possible_opt_list = ()
+  possible_opt_list = ('verbose', )
   kw = {}
   args = []
   try:
@@ -227,12 +241,24 @@
       kw['%s' % combined_arg_list[o]] = a
   if kw.has_key('conf'):
     import imp
+    import os
+    # XXX This is a workaround for the case where PWD != path of the script.
+    # It can happen when calling the script through a symlink, in which case the script's actual directory isn't prepended to the path.
+    sys.path.insert(0, os.environ['PWD']) # Modify sys.path to make sure imports from inside the test suite also resolve correctly.
     file, path, description = imp.find_module(kw['conf'], sys.path)
     module = imp.load_module(kw['conf'], file, path, description)
     file.close()
     del kw['conf']
     args.append(module.instance_list)
     args.append(module.test_list)
+  else:
+    print "The 'conf' argument is required. The configuration file must be a Python module defining:"
+    print "  instance_list ex: [{'url': 'http://127.0.0.1/erp5',"
+    print "                      'user_list': [{'name': 'testuser1', 'password': 'secret'}]}]"
+    print "  test_list     ex: [{'id': 'theFirstTest',"
+    print "                      'function': someStandaloneTestFunction}]"
+    print "The test function has the following prototype:"
+    print "  someStandaloneTestFunction(url, name, password) -> [TimeResult[, TimeResult[, ...]]]"
+    sys.exit(1) # Abort: main() cannot run without instance_list and test_list.
   try:
     main(*args, **kw)
   except:
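
A configuration module of the shape described by the help text above might
look like this (every name and value is an example):

  # conf.py - passed as --conf=conf
  from time import time
  from ERP5Mechanize import timeExecution

  def someStandaloneTestFunction(url, name, password):
    # Log in at `url` as `name` and run the timed steps, returning one
    # TimeResult per step. Real steps would drive ERP5Mechanize instead
    # of this placeholder.
    def step():
      start = time()
      # ... perform one request against url ...
      stop = time()
      return (start, stop)
    return [timeExecution('step_1', step)]

  instance_list = [{'url': 'http://127.0.0.1/erp5',
                    'user_list': [{'name': 'testuser1',
                                   'password': 'secret'}]}]
  test_list = [{'id': 'theFirstTest',
                'function': someStandaloneTestFunction}]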




More information about the Erp5-report mailing list