[Erp5-report] r45502 arnaud.fontaine - in /erp5/trunk/utils/erp5.utils.benchmark: ./ src/ s...
nobody at svn.erp5.org
Fri Apr 15 14:41:33 CEST 2011
Author: arnaud.fontaine
Date: Fri Apr 15 14:41:33 2011
New Revision: 45502
URL: http://svn.erp5.org?rev=45502&view=rev
Log:
Add erp5.utils.benchmark
Added:
erp5/trunk/utils/erp5.utils.benchmark/
erp5/trunk/utils/erp5.utils.benchmark/setup.py (with props)
erp5/trunk/utils/erp5.utils.benchmark/src/
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/__init__.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/__init__.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/__init__.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py (with props)
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py (with props)
Added: erp5/trunk/utils/erp5.utils.benchmark/setup.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/setup.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/setup.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/setup.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from setuptools import setup, find_packages
+
+name = 'erp5.utils.benchmark'
+version = '0.1'
+
+setup(
+    name=name,
+    version=version,
+    description="Performance benchmarks for ERP5 with erp5.utils.test_browser",
+    author='Arnaud Fontaine',
+    author_email='arnaud.fontaine@nexedi.com',
+    license='GPL version 2',
+    install_requires=['erp5.utils.test_browser', 'argparse', 'matplotlib', 'numpy'],
+    package_dir={'': 'src'},
+    packages=find_packages('src'),
+    namespace_packages=['erp5', 'erp5.utils'],
+    entry_points={
+        'console_scripts': [
+            'runBenchmark = erp5.utils.benchmark.runBenchmark:runBenchmark',
+            'generateReport = erp5.utils.benchmark.generateReport:generateReport']
+    },
+    include_package_data=True,
+    classifiers=[
+        'Environment :: Web Environment',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU General Public License (GPL)',
+        'Programming Language :: Python',
+        'Topic :: Internet :: WWW/HTTP',
+        'Topic :: Software Development :: Testing',
+    ],
+    url='http://www.erp5.org/',
+)
Propchange: erp5/trunk/utils/erp5.utils.benchmark/setup.py
------------------------------------------------------------------------------
svn:executable = *
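Once installed, the 'console_scripts' entry points above become the two
commands used below. A plausible session (host, port, report directory and
suite name are invented for illustration):

    $ runBenchmark --repeat 100 --report-directory /tmp/results \
          http://localhost:8080/erp5 1,10 createPerson
    $ generateReport --output-filename results.pdf /tmp/results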
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/__init__.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/__init__.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/__init__.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/__init__.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,7 @@
+# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
+try:
+  __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+  from pkgutil import extend_path
+  __path__ = extend_path(__path__, __name__)
+
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/__init__.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/__init__.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/__init__.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/__init__.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,7 @@
+# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
+try:
+  __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+  from pkgutil import extend_path
+  __path__ = extend_path(__path__, __name__)
+
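Both erp5/__init__.py and erp5/utils/__init__.py carry the standard
setuptools namespace-package boilerplate, so this distribution can share the
'erp5.utils' namespace with erp5.utils.test_browser (declared as a dependency
in setup.py). A minimal sanity check, assuming both distributions are
installed:

    import erp5.utils.benchmark
    import erp5.utils.test_browser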
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/__init__.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/__init__.py?rev=45502&view=auto
==============================================================================
(empty)
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,361 @@
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+##############################################################################
+
+import argparse
+import os
+
+class ArgumentType(object):
+  @classmethod
+  def directoryType(cls, path):
+    if not (os.path.isdir(path) and os.access(path, os.W_OK)):
+      raise argparse.ArgumentTypeError("'%s' is not a valid directory or is "
+                                       "not writable" % path)
+
+    return path
+
+  @classmethod
+  def objectFromModule(cls, module_name, object_name=None, callable_object=False):
+    if module_name.endswith('.py'):
+      module_name = module_name[:-3]
+
+    if not object_name:
+      object_name = module_name
+
+    import sys
+    sys.path.append(os.getcwd())
+
+    try:
+      module = __import__(module_name, globals(), locals(), [object_name], -1)
+    except Exception, e:
+      raise argparse.ArgumentTypeError("Cannot import '%s.%s': %s" %
+                                       (module_name, object_name, str(e)))
+
+    try:
+      obj = getattr(module, object_name)
+    except AttributeError:
+      raise argparse.ArgumentTypeError("Could not get '%s' in '%s'" %
+                                       (object_name, module_name))
+
+    if callable_object and not callable(obj):
+      raise argparse.ArgumentTypeError(
+        "'%s.%s' is not callable" % (module_name, object_name))
+
+    return obj
+
+  @classmethod
+  def strictlyPositiveIntType(cls, value):
+    try:
+      converted_value = int(value)
+    except ValueError:
+      pass
+    else:
+      if converted_value > 0:
+        return converted_value
+
+    raise argparse.ArgumentTypeError('expects a strictly positive integer')
+
+  @classmethod
+  def strictlyPositiveIntOrRangeType(cls, value):
+    try:
+      return cls.strictlyPositiveIntType(value)
+    except argparse.ArgumentTypeError:
+      min_max_list = value.split(',')
+      if len(min_max_list) == 2:
+        minimum = cls.strictlyPositiveIntType(min_max_list[0])
+        maximum = cls.strictlyPositiveIntType(min_max_list[1])
+
+        if minimum >= maximum:
+          raise argparse.ArgumentTypeError('%d >= %d' % (minimum, maximum))
+
+        return (minimum, maximum)
+
+    raise argparse.ArgumentTypeError(
+      'expects either a strictly positive integer or a range of strictly '
+      'positive integers separated by a comma')
+
+  @classmethod
+  def ERP5UrlType(cls, url):
+    if url[-1] == '/':
+      url_list = url.rsplit('/', 2)[:-1]
+    else:
+      url_list = url.rsplit('/', 1)
+
+    url_list[0] = url_list[0] + '/'
+    if len(url_list) != 2:
+      raise argparse.ArgumentTypeError("Invalid URL given")
+
+    return url_list
+
+import sys
+import math
+
+class BenchmarkResultStatistic(object):
+  def __init__(self, suite, label):
+    self.suite = suite
+    self.label = label
+
+    self.full_label = '%s: %s' % (self.suite, self.label)
+
+    self.minimum = sys.maxint
+    self.maximum = -1
+    self.n = 0
+    self.error_sum = 0
+
+    # For calculating the mean
+    self._value_sum = 0
+
+    # For calculating the standard deviation
+    self._variance_sum = 0
+    self._mean = 0
+
+  def add(self, value):
+    # A zero value means that the operation failed: count it as an error
+    # rather than as a timing sample
+    if value == 0:
+      self.error_sum += 1
+      return
+
+    if value < self.minimum:
+      self.minimum = value
+    if value > self.maximum:
+      self.maximum = value
+
+    self._value_sum += value
+    self.n += 1
+
+    # Welford's online algorithm: update the running mean and variance
+    # sum in a single pass
+    delta = value - self._mean
+    self._mean += delta / self.n
+    self._variance_sum += delta * (value - self._mean)
+
+  @property
+  def mean(self):
+    return self._value_sum / self.n
+
+  @property
+  def standard_deviation(self):
+    return math.sqrt(self._variance_sum / self.n)
+
+class BenchmarkResult(object):
+  def __init__(self):
+    self._stat_list = []
+    self._suite_idx = 0
+    self._result_idx = 0
+    self._result_list = []
+    self._first_iteration = True
+    self._current_suite_name = None
+    self._result_idx_checkpoint_list = []
+
+  def enterSuite(self, name):
+    self._current_suite_name = name
+
+  def __call__(self, label, value):
+    self._result_list.append(value)
+    if self._first_iteration:
+      self._stat_list.append(BenchmarkResultStatistic(self._current_suite_name,
+                                                      label))
+
+    self._stat_list[self._result_idx].add(value)
+    self._result_idx += 1
+
+  def exitSuite(self):
+    if self._first_iteration:
+      self._result_idx_checkpoint_list.append(self._result_idx)
+    else:
+      # The suite may have failed before recording all of its results:
+      # pad with zeros (counted as errors) so that the columns remain
+      # aligned across iterations
+      expected_result_idx = self._result_idx_checkpoint_list[self._suite_idx]
+      while self._result_idx != expected_result_idx:
+        self._result_list.append(0)
+        self._stat_list[self._result_idx].add(0)
+        self._result_idx += 1
+
+    self._suite_idx += 1
+
+  def getLabelList(self):
+    self._first_iteration = False
+    return [ stat.full_label for stat in self._stat_list ]
+
+  def getResultList(self):
+    self._suite_idx = 0
+    self._result_idx = 0
+
+    result_list = self._result_list
+    self._result_list = []
+    return result_list
+
+  def getStatList(self):
+    return self._stat_list
+
+  def getCurrentSuiteStatList(self):
+    start_index = self._suite_idx and \
+        self._result_idx_checkpoint_list[self._suite_idx - 1] or 0
+
+    return self._stat_list[start_index:self._result_idx]
+
+import multiprocessing
+import csv
+import traceback
+import logging
+import signal
+
+from erp5.utils.test_browser.browser import Browser
+
+class BenchmarkProcess(multiprocessing.Process):
+  def __init__(self, exit_msg_queue, nb_users, user_index,
+               argument_namespace, *args, **kwargs):
+    self._exit_msg_queue = exit_msg_queue
+    self._nb_users = nb_users
+    self._user_index = user_index
+    self._argument_namespace = argument_namespace
+
+    filename_path_prefix = self.getFilenamePrefix()
+    self._result_filename = "%s.csv" % filename_path_prefix
+    self._log_filename = "%s.log" % filename_path_prefix
+
+    # Initialized when running the test
+    self._csv_writer = None
+    self._browser = None
+
+    self._current_repeat = 1
+    self._current_result = BenchmarkResult()
+
+    super(BenchmarkProcess, self).__init__(*args, **kwargs)
+
+  def getFilenamePrefix(self):
+    max_nb_users = isinstance(self._argument_namespace.users, int) and \
+        self._argument_namespace.users or self._argument_namespace.users[1]
+
+    fmt = "%%s-%%drepeat-%%0%ddusers-process%%0%dd" % \
+        (len(str(max_nb_users)), len(str(self._nb_users)))
+
+    return os.path.join(
+      self._argument_namespace.report_directory,
+      fmt % (self._argument_namespace.filename_prefix,
+             self._argument_namespace.repeat,
+             self._nb_users,
+             self._user_index))
+
+  def stopGracefully(self, *args, **kwargs):
+    raise StopIteration, "Interrupted by user"
+
+  def getBrowser(self):
+    info_list = tuple(self._argument_namespace.url) + \
+        tuple(self._argument_namespace.user_tuple[self._user_index])
+
+    return Browser(*info_list,
+                   is_debug=self._argument_namespace.is_debug,
+                   log_filename=self._log_filename,
+                   is_legacy_listbox=self._argument_namespace.is_legacy_listbox)
+
+  def runBenchmarkSuiteList(self):
+    for target_idx, target in enumerate(self._argument_namespace.benchmark_suite_list):
+      self._logger.debug("EXECUTE: %s" % target)
+      self._current_result.enterSuite(target.__name__)
+
+      try:
+        target(self._current_result, self._browser)
+      except:
+        msg = "%s: %s" % (target, traceback.format_exc())
+        # A failure during the first iteration is fatal as the list of
+        # result labels would be incomplete
+        if self._current_repeat == 1:
+          self._logger.error(msg)
+          raise
+
+        self._logger.warning(msg)
+
+      for stat in self._current_result.getCurrentSuiteStatList():
+        mean = stat.mean
+
+        self._logger.info("%s: min=%.3f, mean=%.3f (+/- %.3f), max=%.3f" %
+                          (stat.full_label,
+                           stat.minimum,
+                           mean,
+                           stat.standard_deviation,
+                           stat.maximum))
+
+        if self._argument_namespace.max_global_average and \
+           mean > self._argument_namespace.max_global_average:
+          self._logger.info("Stopping as mean is greater than maximum "
+                            "global average")
+
+          raise StopIteration, "See: %s" % self._log_filename
+
+      self._current_result.exitSuite()
+
+    if self._current_repeat == 1:
+      self._csv_writer.writerow(self._current_result.getLabelList())
+
+    result_list = self._current_result.getResultList()
+    self._logger.debug("RESULTS: %s" % result_list)
+    self._csv_writer.writerow(result_list)
+
+  def getLogger(self):
+    logging.basicConfig(filename=self._log_filename, filemode='w',
+                        level=self._argument_namespace.is_debug and
+                              logging.DEBUG or logging.INFO)
+
+    return logging.getLogger('erp5.utils.benchmark')
+
+  def run(self):
+    self._logger = self.getLogger()
+
+    # SIGTERM is sent by the parent process to stop gracefully when
+    # running a finite number of iterations
+    if self._argument_namespace.repeat != -1:
+      signal.signal(signal.SIGTERM, self.stopGracefully)
+
+    try:
+      self._browser = self.getBrowser()
+    except:
+      self._logger.error(traceback.format_exc())
+      raise
+
+    exit_status = 0
+    exit_msg = None
+
+    # Create the result CSV file
+    with open(self._result_filename, 'wb') as result_file:
+      self._csv_writer = csv.writer(result_file, delimiter=',',
+                                    quoting=csv.QUOTE_MINIMAL)
+
+      try:
+        # With --repeat N, run N iterations; with the default of -1,
+        # self._current_repeat never reaches 0, thus loop forever
+        while self._current_repeat != (self._argument_namespace.repeat + 1):
+          self._logger.info("Iteration: %d" % self._current_repeat)
+          self.runBenchmarkSuiteList()
+          self._current_repeat += 1
+
+      except StopIteration, e:
+        exit_msg = str(e)
+        exit_status = 1
+
+      except:
+        self._logger.error(traceback.format_exc())
+        exit_msg = "An error occurred, see: %s" % self._log_filename
+        exit_status = 2
+
+    self._exit_msg_queue.put(exit_msg)
+    sys.exit(exit_status)
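As an illustration of the result classes above, a minimal sketch (the label
and timing values are invented; in a real run they come from timed
erp5.utils.test_browser interactions):

    from erp5.utils.benchmark.benchmark import BenchmarkResultStatistic

    stat = BenchmarkResultStatistic('Person', 'Create person')
    for duration in (1.2, 1.4, 0, 1.3):
      stat.add(duration)          # 0 counts as an error, not a sample

    print stat.n                  # -> 3
    print stat.error_sum          # -> 1
    print "%.3f" % stat.mean      # -> 1.300
    print "%.3f" % stat.standard_deviation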
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+#
+# First version: ERP5Mechanize from Vincent Pelletier <vincent@nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+##############################################################################
+
+import argparse
+
+def parseArguments():
+  parser = argparse.ArgumentParser(
+    description='Generate reports for ERP5 benchmarking suites.')
+
+  parser.add_argument('--enable-debug',
+                      dest='is_debug',
+                      action='store_true',
+                      default=False,
+                      help='Enable debug messages')
+
+  parser.add_argument('--filename-prefix',
+                      default='result',
+                      metavar='PREFIX',
+                      help='Filename prefix for results CSV files '
+                           '(default: result)')
+
+  parser.add_argument('--output-filename',
+                      default='results.pdf',
+                      metavar='FILENAME',
+                      help='PDF output file (default: results.pdf)')
+
+  parser.add_argument('report_directory',
+                      help='Reports directory')
+
+  namespace = parser.parse_args()
+
+  return namespace
+
+import csv
+
+from benchmark import BenchmarkResultStatistic
+
+def computeStatisticFromFilenameList(argument_namespace, filename_list):
+  reader_list = []
+  stat_list = []
+  label_list = []
+
+  for filename in filename_list:
+    reader = csv.reader(open(filename, 'rb'), delimiter=',',
+                        quoting=csv.QUOTE_MINIMAL)
+
+    reader_list.append(reader)
+
+    # Get headers
+    row_list = reader.next()
+    if not label_list:
+      label_list = row_list
+      for label in label_list:
+        stat_list.append(BenchmarkResultStatistic(*label.split(': ', 1)))
+
+    if row_list != label_list:
+      raise AssertionError, "ERROR: Result labels: %s != %s" % \
+          (label_list, row_list)
+
+    for row_list in reader:
+      for idx, row in enumerate(row_list):
+        stat_list[idx].add(float(row))
+
+  return stat_list
+
+def formatFloatList(value_list):
+  return [ format(value, ".3f") for value in value_list ]
+
+import numpy
+import pylab
+
+from matplotlib import pyplot, ticker
+
+def drawBarDiagram(pdf, title, stat_list):
+  mean_list = []
+  yerr_list = []
+  minimum_list = []
+  maximum_list = []
+  label_list = []
+  error_list = []
+
+  for stat in stat_list:
+    mean_list.append(stat.mean)
+    yerr_list.append(stat.standard_deviation)
+    minimum_list.append(stat.minimum)
+    maximum_list.append(stat.maximum)
+    label_list.append(stat.label)
+    error_list.append(stat.error_sum)
+
+  min_array = numpy.array(minimum_list)
+  mean_array = numpy.array(mean_list)
+  max_array = numpy.array(maximum_list)
+
+  # Clip the error bars at the observed minimum and maximum
+  yerr_lower = numpy.minimum(mean_array - min_array, yerr_list)
+  yerr_upper = numpy.minimum(max_array - mean_array, yerr_list)
+
+  ## Draw diagrams
+  # Create the figure
+  figure = pyplot.figure(figsize=(11.69, 8.29))
+  figure.subplots_adjust(bottom=0.13, right=0.98, top=0.95)
+  pyplot.title(title)
+
+  # Create the axes along with their labels
+  axes = figure.add_subplot(111)
+  axes.set_ylabel('Seconds')
+  axes.set_xticks([])
+
+  axes.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
+  axes.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
+  axes.yaxis.grid(True, 'major', linewidth=1.5)
+  axes.yaxis.grid(True, 'minor')
+
+  # Create the bars
+  ind = numpy.arange(len(label_list))
+  width = 0.33
+
+  min_rects = axes.bar(ind, minimum_list, width, color='y', label='Minimum')
+
+  avg_rects = axes.bar(ind + width, mean_list, width, color='r', label='Mean')
+
+  axes.errorbar(numpy.arange(0.5, len(stat_list)), mean_list,
+                yerr=[yerr_lower, yerr_upper], fmt=None,
+                label='Standard deviation')
+
+  max_rects = axes.bar(ind + width * 2, maximum_list, width, label='Maximum',
+                       color='g')
+
+  # Add the legend of bars
+  axes.legend(loc=0)
+
+  axes.table(rowLabels=['Minimum', 'Average', 'Std. deviation', 'Maximum', 'Errors'],
+             colLabels=label_list,
+             cellText=[formatFloatList(minimum_list),
+                       formatFloatList(mean_list),
+                       formatFloatList(yerr_list),
+                       formatFloatList(maximum_list),
+                       error_list],
+             rowColours=('y', 'r', 'b', 'g', 'w'),
+             loc='bottom',
+             colLoc='center',
+             rowLoc='center',
+             cellLoc='center')
+
+  pdf.savefig()
+  pylab.close()
+
+def drawConcurrentUsersPlot(pdf, title, nb_users_list, stat_list):
+  figure = pyplot.figure(figsize=(11.69, 8.29), frameon=False)
+  figure.subplots_adjust(bottom=0.1, right=0.98, left=0.07, top=0.95)
+  pyplot.title(title)
+  pyplot.grid(True, linewidth=1.5)
+
+  axes = figure.add_subplot(111)
+
+  min_array = numpy.array([stat.minimum for stat in stat_list])
+  mean_array = numpy.array([stat.mean for stat in stat_list])
+  max_array = numpy.array([stat.maximum for stat in stat_list])
+
+  yerr_list = [stat.standard_deviation for stat in stat_list]
+  yerr_lower = numpy.minimum(mean_array - min_array, yerr_list)
+  yerr_upper = numpy.minimum(max_array - mean_array, yerr_list)
+
+  axes.plot(nb_users_list, min_array, 'yo-', label='Minimum')
+
+  axes.errorbar(nb_users_list,
+                mean_array,
+                yerr=[yerr_lower, yerr_upper],
+                color='r',
+                ecolor='b',
+                label='Mean',
+                elinewidth=2,
+                fmt='D-',
+                capsize=10.0)
+
+  axes.plot(nb_users_list, max_array, 'gs-', label='Maximum')
+
+  axes.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
+  axes.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
+  axes.yaxis.grid(True, 'minor')
+
+  axes.xaxis.set_major_locator(ticker.FixedLocator(nb_users_list))
+
+  axes.set_xticks(nb_users_list)
+  axes.legend(loc=0)
+  axes.set_xlabel('Concurrent users')
+  axes.set_ylabel('Seconds')
+
+  pyplot.xlim(xmin=nb_users_list[0])
+  pdf.savefig()
+  pylab.close()
+
+from matplotlib.backends.backend_pdf import PdfPages
+
+import glob
+import os
+import re
+
+user_re = re.compile(r'-(\d+)users-')
+
+def generateReport():
+  argument_namespace = parseArguments()
+
+  filename_iter = glob.iglob("%s-*repeat*-*users*-*process*.csv" % os.path.join(
+    argument_namespace.report_directory,
+    argument_namespace.filename_prefix))
+
+  per_nb_users_report_dict = {}
+  for filename in filename_iter:
+    report_dict = per_nb_users_report_dict.setdefault(
+      int(user_re.search(filename).group(1)), {'filename': []})
+
+    report_dict['filename'].append(filename)
+
+  pdf = PdfPages(argument_namespace.output_filename)
+
+  for nb_users, report_dict in per_nb_users_report_dict.items():
+    stat_list = computeStatisticFromFilenameList(
+      argument_namespace, report_dict['filename'])
+
+    title = "Ran suites with %d users" % nb_users
+    for slice_start_idx in range(0, len(stat_list), 12):
+      if slice_start_idx != 0:
+        title += ' (Ctd.)'
+
+      drawBarDiagram(pdf, title, stat_list[slice_start_idx:slice_start_idx + 12])
+
+    report_dict['stats'] = stat_list
+
+  if len(per_nb_users_report_dict) != 1:
+    # Sort by number of users so that the per-operation plots are drawn
+    # from the smallest to the largest run
+    nb_users_list = sorted(per_nb_users_report_dict)
+    for i in range(len(report_dict['stats'])):
+      stat_list = [ per_nb_users_report_dict[nb_users]['stats'][i]
+                    for nb_users in nb_users_list ]
+
+      drawConcurrentUsersPlot(
+        pdf,
+        "%s from %d to %d users (step: %d)" % (stat_list[0].full_label,
+                                               nb_users_list[0],
+                                               nb_users_list[-1],
+                                               nb_users_list[1] - nb_users_list[0]),
+        nb_users_list,
+        stat_list)
+
+  pdf.close()
+
+if __name__ == '__main__':
+  generateReport()
Propchange: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/generateReport.py
------------------------------------------------------------------------------
svn:executable = *
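For reference, the CSV files picked up by the glob above follow the naming
convention of BenchmarkProcess.getFilenamePrefix(); a small sketch with
invented numbers:

    import re

    user_re = re.compile(r'-(\d+)users-')

    # e.g. --repeat 100, a 1,10 users range, 5 concurrent users, process 3
    print int(user_re.search('result-100repeat-05users-process3.csv').group(1))
    # -> 5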
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py?rev=45502&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py [utf8] Fri Apr 15 14:41:33 2011
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+##############################################################################
+
+import argparse
+import os
+
+from benchmark import ArgumentType
+
+def parseArguments():
+  parser = argparse.ArgumentParser(description='Run ERP5 benchmarking suites.')
+
+  # Optional arguments
+  parser.add_argument('--filename-prefix',
+                      default='result',
+                      metavar='PREFIX',
+                      help='Filename prefix for results and logs files '
+                           '(default: result)')
+
+  parser.add_argument('--report-directory',
+                      type=ArgumentType.directoryType,
+                      default=os.getcwd(),
+                      metavar='DIRECTORY',
+                      help='Directory where the results and logs will be stored '
+                           '(default: current directory)')
+
+  parser.add_argument('--max-global-average',
+                      type=float,
+                      default=0,
+                      metavar='N',
+                      help='Stop when the mean of any suite operation exceeds '
+                           'this value (default: disabled)')
+
+  parser.add_argument('--users-file',
+                      dest='user_info_filename',
+                      default='userInfo',
+                      metavar='MODULE',
+                      help="Import users from 'user_tuple' in MODULE")
+
+  parser.add_argument('--users-range-increment',
+                      type=ArgumentType.strictlyPositiveIntType,
+                      default=1,
+                      metavar='N',
+                      help='Number of users being added after each repetition '
+                           '(default: 1)')
+
+  parser.add_argument('--enable-debug',
+                      dest='is_debug',
+                      action='store_true',
+                      default=False,
+                      help='Enable debug messages')
+
+  parser.add_argument('--enable-legacy-listbox',
+                      dest='is_legacy_listbox',
+                      action='store_true',
+                      default=False,
+                      help='Enable legacy listbox for Browser')
+
+  parser.add_argument('--repeat',
+                      type=ArgumentType.strictlyPositiveIntType,
+                      default=-1,
+                      metavar='N',
+                      help='Repeat the benchmark suites N times '
+                           '(default: infinite)')
+
+  # Mandatory arguments
+  parser.add_argument('url',
+                      type=ArgumentType.ERP5UrlType,
+                      metavar='URL',
+                      help='ERP5 base URL')
+
+  parser.add_argument('users',
+                      type=ArgumentType.strictlyPositiveIntOrRangeType,
+                      metavar='NB_USERS|MIN_NB_USERS,MAX_NB_USERS',
+                      help='Number of users (fixed or a range)')
+
+  parser.add_argument('benchmark_suite_list',
+                      nargs='+',
+                      metavar='BENCHMARK_SUITES',
+                      help='Benchmark suite modules')
+
+  namespace = parser.parse_args()
+
+  namespace.user_tuple = ArgumentType.objectFromModule(
+    namespace.user_info_filename, object_name='user_tuple')
+
+  object_benchmark_suite_list = []
+  for benchmark_suite in namespace.benchmark_suite_list:
+    object_benchmark_suite_list.append(
+      ArgumentType.objectFromModule(benchmark_suite, callable_object=True))
+
+  namespace.benchmark_suite_list = object_benchmark_suite_list
+
+  max_nb_users = isinstance(namespace.users, tuple) and namespace.users[1] or \
+      namespace.users
+
+  if max_nb_users > len(namespace.user_tuple):
+    raise argparse.ArgumentTypeError("Not enough users in the given file")
+
+  return namespace
+
+import sys
+import multiprocessing
+
+from benchmark import BenchmarkProcess
+
+def runConstantBenchmark(argument_namespace, nb_users):
+  process_list = []
+
+  exit_msg_queue = multiprocessing.Queue(nb_users)
+
+  for user_index in range(nb_users):
+    process = BenchmarkProcess(exit_msg_queue, nb_users, user_index,
+                               argument_namespace)
+    process_list.append(process)
+
+  for process in process_list:
+    process.start()
+
+  error_message_set = set()
+  i = 0
+  while i != len(process_list):
+    try:
+      msg = exit_msg_queue.get()
+    except KeyboardInterrupt:
+      if argument_namespace.repeat != -1:
+        print >>sys.stderr, "Stopping gracefully"
+        for process in process_list:
+          # SIGTERM is caught by BenchmarkProcess.stopGracefully()
+          process.terminate()
+
+      # Start waiting again for the exit message of each process
+      i = 0
+      continue
+
+    if msg is not None:
+      error_message_set.add(msg)
+      for process in process_list:
+        process.terminate()
+
+      break
+
+    i += 1
+
+  if error_message_set:
+    for error_message in error_message_set:
+      print >>sys.stderr, "ERROR: %s" % error_message
+
+    sys.exit(1)
+
+def runBenchmark():
+  argument_namespace = parseArguments()
+
+  if isinstance(argument_namespace.users, tuple):
+    # Range mode: ramp up the number of concurrent users from the
+    # minimum to the maximum, stepping by --users-range-increment
+    nb_users, max_users = argument_namespace.users
+    while True:
+      runConstantBenchmark(argument_namespace, nb_users)
+
+      if nb_users == max_users:
+        break
+
+      nb_users = min(nb_users + argument_namespace.users_range_increment,
+                     max_users)
+
+  else:
+    runConstantBenchmark(argument_namespace, argument_namespace.users)
+
+if __name__ == '__main__':
+  runBenchmark()
Propchange: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py
------------------------------------------------------------------------------
svn:executable = *
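Finally, the user-supplied modules are looked up from the current working
directory (see ArgumentType.objectFromModule). A minimal sketch, assuming the
default --users-file and a suite passed as 'createPerson'; the suite body is
invented, and the user entries are presumably the credentials expected by
erp5.utils.test_browser's Browser:

    # userInfo.py -- read through --users-file (default: userInfo)
    user_tuple = (('benchmark_user_1', 'password'),
                  ('benchmark_user_2', 'password'))

    # createPerson.py -- a benchmark suite: the module name doubles as
    # the name of the callable looked up inside it
    def createPerson(result, browser):
      # ... drive the browser here, time a step, then record it ...
      result('Create person', 1.234)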