[Erp5-report] r45910 arnaud.fontaine - in /erp5/trunk/utils/erp5.utils.benchmark: ./ src/er...
nobody at svn.erp5.org
nobody at svn.erp5.org
Sat Jun 18 10:26:21 CEST 2011
Author: arnaud.fontaine
Date: Sat Jun 18 10:26:21 2011
New Revision: 45910
URL: http://svn.erp5.org?rev=45910&view=rev
Log:
Only require argparse for python < 2.7.
Added:
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/performance_tester.py
- copied, changed from r45899, erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/scalability_tester.py
Removed:
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py
Modified:
erp5/trunk/utils/erp5.utils.benchmark/setup.py
erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py
Modified: erp5/trunk/utils/erp5.utils.benchmark/setup.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/setup.py?rev=45910&r1=45909&r2=45910&view=diff
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/setup.py [utf8] (original)
+++ erp5/trunk/utils/erp5.utils.benchmark/setup.py [utf8] Sat Jun 18 10:26:21 2011
@@ -6,6 +6,14 @@ from setuptools import setup, find_packa
name = 'erp5.utils.benchmark'
version = '0.1'
+install_require_list = ['erp5.utils.test_browser']
+
+# argparse is only available from python >= 2.7
+import sys
+python_major_version, python_minor_version = sys.version_info[:2]
+if python_major_version == 2 and python_minor_version < 7:
+ install_require_list.append('argparse')
+
setup(
name=name,
version=version,
@@ -13,9 +21,7 @@ setup(
author='Arnaud Fontaine',
author_email='arnaud.fontaine at nexedi.com',
license='GPL version 2',
- install_requires = ['erp5.utils.test_browser',
- # Only required for python < 2.7
- 'argparse'],
+ install_requires = install_require_list,
extras_require = {'generate_erp5_tester_report': ['matplotlib', 'numpy'],
'scalability_tester_erp5': ['slapos.tool.nosqltester']},
package_dir={'':'src'},
Modified: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py?rev=45910&r1=45909&r2=45910&view=diff
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py [utf8] (original)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/benchmark.py [utf8] Sat Jun 18 10:26:21 2011
@@ -272,7 +272,7 @@ class BenchmarkProcess(multiprocessing.P
tuple(self._argument_namespace.user_tuple[self._user_index])
return Browser(*info_list,
- is_debug=self._argument_namespace.is_debug,
+ is_debug=self._argument_namespace.enable_debug,
log_filename=self._log_filename,
is_legacy_listbox=self._argument_namespace.is_legacy_listbox)
@@ -319,7 +319,7 @@ class BenchmarkProcess(multiprocessing.P
def getLogger(self):
logging.basicConfig(filename=self._log_filename, filemode='w',
- level=self._argument_namespace.is_debug and \
+ level=self._argument_namespace.enable_debug and \
logging.DEBUG or logging.INFO)
return logging.getLogger('erp5.utils.benchmark')
Copied: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/performance_tester.py (from r45899, erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py)
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/performance_tester.py?p2=erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/performance_tester.py&p1=erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py&r1=45899&r2=45910&rev=45910&view=diff
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py [utf8] (original)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/performance_tester.py [utf8] Sat Jun 18 10:26:21 2011
@@ -30,185 +30,187 @@
import argparse
import os
-
-from benchmark import ArgumentType
-
-def parseArguments(argv):
- parser = argparse.ArgumentParser(description='Run ERP5 benchmarking suites.')
-
- # Optional arguments
- parser.add_argument('--filename-prefix',
- default='result',
- metavar='PREFIX',
- help='Filename prefix for results and logs files '
- '(default: result)')
-
- parser.add_argument('--report-directory',
- type=ArgumentType.directoryType,
- default=os.getcwd(),
- metavar='DIRECTORY',
- help='Directory where the results and logs will be stored '
- '(default: current directory)')
-
- parser.add_argument('--max-global-average',
- type=float,
- default=0,
- metavar='N',
- help='Stop when any suite operation is over this value '
- '(default: disable)')
-
- parser.add_argument('--users-file',
- dest='user_info_filename',
- default='userInfo',
- metavar='MODULE',
- help="Import users from ``user_tuple'' in MODULE")
-
- parser.add_argument('--users-range-increment',
- type=ArgumentType.strictlyPositiveIntType,
- default=1,
- metavar='N',
- help='Number of users being added after each repetition '
- '(default: 1)')
-
- parser.add_argument('--enable-debug',
- dest='is_debug',
- action='store_true',
- default=False,
- help='Enable debug messages')
-
- parser.add_argument('--enable-legacy-listbox',
- dest='is_legacy_listbox',
- action='store_true',
- default=False,
- help='Enable legacy listbox for Browser')
-
- parser.add_argument('--repeat',
- type=ArgumentType.strictlyPositiveIntType,
- default=-1,
- metavar='N',
- help='Repeat the benchmark suite N times '
- '(default: infinite)')
-
- parser.add_argument('--user-index',
- type=int,
- default=0,
- metavar='INDEX',
- help='Index of the first user within userInfo '
- '(default: 0)')
-
- # Mandatory arguments
- parser.add_argument('url',
- type=ArgumentType.ERP5UrlType,
- metavar='URL',
- help='ERP5 base URL')
-
- parser.add_argument('users',
- type=ArgumentType.strictlyPositiveIntOrRangeType,
- metavar='NB_USERS|MIN_NB_USERS,MAX_NB_USERS',
- help='Number of users (fixed or a range)')
-
- parser.add_argument('benchmark_suite_list',
- nargs='+',
- metavar='BENCHMARK_SUITES',
- help='Benchmark suite modules')
-
- namespace = parser.parse_args(argv)
-
- namespace.user_tuple = ArgumentType.objectFromModule(namespace.user_info_filename,
- object_name='user_tuple')
-
- object_benchmark_suite_list = []
- for benchmark_suite in namespace.benchmark_suite_list:
- object_benchmark_suite_list.append(ArgumentType.objectFromModule(benchmark_suite,
- callable_object=True))
-
- namespace.benchmark_suite_list = object_benchmark_suite_list
-
- max_nb_users = isinstance(namespace.users, tuple) and namespace.users[1] or \
- namespace.users
-
- namespace.user_tuple = namespace.user_tuple[namespace.user_index:]
- if max_nb_users > len(namespace.user_tuple):
- raise argparse.ArgumentTypeError("Not enough users in the given file")
-
- return namespace
-
import sys
import multiprocessing
-from benchmark import BenchmarkProcess
+from benchmark import ArgumentType, BenchmarkProcess
-def runConstantBenchmark(argument_namespace, nb_users, publish_method):
- process_list = []
+class PerformanceTester(object):
+ def __init__(self, publish_method=None, namespace=None):
+ if not namespace:
+ self._argument_namespace = self._parse_arguments(argparse.ArgumentParser(
+ description='Run ERP5 benchmarking suites.'))
+ else:
+ self._argument_namespace = namespace
+
+ self._publish_method = publish_method
+
+ @staticmethod
+ def _add_parser_arguments(parser):
+ # Optional arguments
+ parser.add_argument('--filename-prefix',
+ default='result',
+ metavar='PREFIX',
+ help='Filename prefix for results and logs files '
+ '(default: result)')
+
+ parser.add_argument('--report-directory', '-r',
+ type=ArgumentType.directoryType,
+ default=os.getcwd(),
+ metavar='DIRECTORY',
+ help='Directory where the results and logs will be stored '
+ '(default: current directory)')
+
+ parser.add_argument('--max-global-average',
+ type=float,
+ default=0,
+ metavar='N',
+ help='Stop when any suite operation is over this value '
+ '(default: disable)')
+
+ parser.add_argument('--users-file',
+ dest='user_info_filename',
+ default='userInfo',
+ metavar='MODULE',
+ help="Import users from ``user_tuple'' in MODULE")
+
+ parser.add_argument('--users-range-increment',
+ type=ArgumentType.strictlyPositiveIntType,
+ default=1,
+ metavar='N',
+ help='Number of users being added after each repetition '
+ '(default: 1)')
+
+ parser.add_argument('--enable-debug', '-d',
+ action='store_true',
+ default=False,
+ help='Enable debug messages')
+
+ parser.add_argument('--enable-legacy-listbox',
+ dest='is_legacy_listbox',
+ action='store_true',
+ default=False,
+ help='Enable legacy listbox for Browser')
+
+ parser.add_argument('--repeat',
+ type=ArgumentType.strictlyPositiveIntType,
+ default=-1,
+ metavar='N',
+ help='Repeat the benchmark suite N times '
+ '(default: infinite)')
+
+ parser.add_argument('--user-index',
+ type=int,
+ default=0,
+ metavar='INDEX',
+ help='Index of the first user within userInfo '
+ '(default: 0)')
+
+ # Mandatory arguments
+ parser.add_argument('url',
+ type=ArgumentType.ERP5UrlType,
+ metavar='URL',
+ help='ERP5 base URL')
+
+ parser.add_argument('users',
+ type=ArgumentType.strictlyPositiveIntOrRangeType,
+ metavar='NB_USERS|MIN_NB_USERS,MAX_NB_USERS',
+ help='Number of users (fixed or a range)')
+
+ parser.add_argument('benchmark_suite_list',
+ nargs='+',
+ metavar='BENCHMARK_SUITES',
+ help='Benchmark suite modules')
+
+ @staticmethod
+ def _check_parsed_arguments(namespace):
+ namespace.user_tuple = ArgumentType.objectFromModule(namespace.user_info_filename,
+ object_name='user_tuple')
+
+ object_benchmark_suite_list = []
+ for benchmark_suite in namespace.benchmark_suite_list:
+ object_benchmark_suite_list.append(ArgumentType.objectFromModule(benchmark_suite,
+ callable_object=True))
+
+ namespace.benchmark_suite_list = object_benchmark_suite_list
+
+ max_nb_users = isinstance(namespace.users, tuple) and namespace.users[1] or \
+ namespace.users
+
+ namespace.user_tuple = namespace.user_tuple[namespace.user_index:]
+ if max_nb_users > len(namespace.user_tuple):
+ raise argparse.ArgumentTypeError("Not enough users in the given file")
+
+ return namespace
+
+ @staticmethod
+ def _parse_arguments(parser):
+ PerformanceTester._add_parser_arguments(parser)
+ namespace = parser.parse_args()
+ PerformanceTester._check_parsed_arguments(namespace)
+ return namespace
+
+ def _run_constant(self, nb_users):
+ process_list = []
+ exit_msg_queue = multiprocessing.Queue(nb_users)
+
+ for user_index in range(nb_users):
+ process = BenchmarkProcess(exit_msg_queue, nb_users, user_index,
+ self._argument_namespace,
+ self._publish_method)
+ process_list.append(process)
+
+ for process in process_list:
+ process.start()
+
+ error_message_set = set()
+ i = 0
+ while i != len(process_list):
+ try:
+ msg = exit_msg_queue.get()
+ except KeyboardInterrupt:
+ if self._argument_namespace.repeat != -1:
+ print >>sys.stderr, "Stopping gracefully"
+ for process in process_list:
+ process.terminate()
- exit_msg_queue = multiprocessing.Queue(nb_users)
+ i = 0
+ continue
- for user_index in range(nb_users):
- process = BenchmarkProcess(exit_msg_queue, nb_users, user_index, argument_namespace,
- publish_method)
- process_list.append(process)
-
- for process in process_list:
- process.start()
-
- error_message_set = set()
- i = 0
- while i != len(process_list):
- try:
- msg = exit_msg_queue.get()
- except KeyboardInterrupt:
- if argument_namespace.repeat != -1:
- print >>sys.stderr, "Stopping gracefully"
+ if msg is not None:
+ error_message_set.add(msg)
for process in process_list:
process.terminate()
- i = 0
- continue
-
- if msg is not None:
- error_message_set.add(msg)
- for process in process_list:
- process.terminate()
-
- break
-
- i += 1
-
- if error_message_set:
- for error_message in error_message_set:
- print >>sys.stderr, "ERROR: %s" % error_message
-
- sys.exit(1)
-
-def runBenchmark(publish_method=None, argv=None):
- argument_namespace = parseArguments(argv)
+ break
- if isinstance(argument_namespace.users, tuple):
- nb_users, max_users = argument_namespace.users
- while True:
- runConstantBenchmark(argument_namespace, nb_users, publish_method)
+ i += 1
- if nb_users == max_users:
- break
+ if error_message_set:
+ for error_message in error_message_set:
+ print >>sys.stderr, "ERROR: %s" % error_message
- nb_users = min(nb_users + argument_namespace.users_range_increment,
- max_users)
+ return 1
- else:
- runConstantBenchmark(argument_namespace, argument_namespace.users,
- publish_method)
+ return 0
-from slapos.tool.nosqltester import NoSQLTester
+ def run(self):
+ if isinstance(self._argument_namespace.users, tuple):
+ nb_users, max_users = self._argument_namespace.users
+ while True:
+ self._run_constant(nb_users)
+ if nb_users == max_users:
+ break
-class BenchmarkTester(NoSQLTester):
- def run_tester(self):
- runBenchmark(self.send_result_availability_notification,
- self.params['argv'])
+ nb_users = min(nb_users + self._argument_namespace.users_range_increment,
+ max_users)
-from slapos.tool.nosqltester import main
+ return 0
+ else:
+ return self._run_constant(self._argument_namespace.users)
-def runTester():
- main(klass=BenchmarkTester)
+def main():
+ sys.exit(PerformanceTester().run())
if __name__ == '__main__':
- runBenchmark()
+ main()
Removed: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py?rev=45909&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py [utf8] (original)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/runBenchmark.py (removed)
@@ -1,214 +0,0 @@
-#!/usr/bin/env python
-
-##############################################################################
-#
-# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
-# Arnaud Fontaine <arnaud.fontaine at nexedi.com>
-#
-# WARNING: This program as such is intended to be used by professional
-# programmers who take the whole responsibility of assessing all potential
-# consequences resulting from its eventual inadequacies and bugs
-# End users who are looking for a ready-to-use solution with commercial
-# guarantees and support are strongly advised to contract a Free Software
-# Service Company
-#
-# This program is Free Software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-##############################################################################
-
-import argparse
-import os
-
-from benchmark import ArgumentType
-
-def parseArguments(argv):
- parser = argparse.ArgumentParser(description='Run ERP5 benchmarking suites.')
-
- # Optional arguments
- parser.add_argument('--filename-prefix',
- default='result',
- metavar='PREFIX',
- help='Filename prefix for results and logs files '
- '(default: result)')
-
- parser.add_argument('--report-directory',
- type=ArgumentType.directoryType,
- default=os.getcwd(),
- metavar='DIRECTORY',
- help='Directory where the results and logs will be stored '
- '(default: current directory)')
-
- parser.add_argument('--max-global-average',
- type=float,
- default=0,
- metavar='N',
- help='Stop when any suite operation is over this value '
- '(default: disable)')
-
- parser.add_argument('--users-file',
- dest='user_info_filename',
- default='userInfo',
- metavar='MODULE',
- help="Import users from ``user_tuple'' in MODULE")
-
- parser.add_argument('--users-range-increment',
- type=ArgumentType.strictlyPositiveIntType,
- default=1,
- metavar='N',
- help='Number of users being added after each repetition '
- '(default: 1)')
-
- parser.add_argument('--enable-debug',
- dest='is_debug',
- action='store_true',
- default=False,
- help='Enable debug messages')
-
- parser.add_argument('--enable-legacy-listbox',
- dest='is_legacy_listbox',
- action='store_true',
- default=False,
- help='Enable legacy listbox for Browser')
-
- parser.add_argument('--repeat',
- type=ArgumentType.strictlyPositiveIntType,
- default=-1,
- metavar='N',
- help='Repeat the benchmark suite N times '
- '(default: infinite)')
-
- parser.add_argument('--user-index',
- type=int,
- default=0,
- metavar='INDEX',
- help='Index of the first user within userInfo '
- '(default: 0)')
-
- # Mandatory arguments
- parser.add_argument('url',
- type=ArgumentType.ERP5UrlType,
- metavar='URL',
- help='ERP5 base URL')
-
- parser.add_argument('users',
- type=ArgumentType.strictlyPositiveIntOrRangeType,
- metavar='NB_USERS|MIN_NB_USERS,MAX_NB_USERS',
- help='Number of users (fixed or a range)')
-
- parser.add_argument('benchmark_suite_list',
- nargs='+',
- metavar='BENCHMARK_SUITES',
- help='Benchmark suite modules')
-
- namespace = parser.parse_args(argv)
-
- namespace.user_tuple = ArgumentType.objectFromModule(namespace.user_info_filename,
- object_name='user_tuple')
-
- object_benchmark_suite_list = []
- for benchmark_suite in namespace.benchmark_suite_list:
- object_benchmark_suite_list.append(ArgumentType.objectFromModule(benchmark_suite,
- callable_object=True))
-
- namespace.benchmark_suite_list = object_benchmark_suite_list
-
- max_nb_users = isinstance(namespace.users, tuple) and namespace.users[1] or \
- namespace.users
-
- namespace.user_tuple = namespace.user_tuple[namespace.user_index:]
- if max_nb_users > len(namespace.user_tuple):
- raise argparse.ArgumentTypeError("Not enough users in the given file")
-
- return namespace
-
-import sys
-import multiprocessing
-
-from benchmark import BenchmarkProcess
-
-def runConstantBenchmark(argument_namespace, nb_users, publish_method):
- process_list = []
-
- exit_msg_queue = multiprocessing.Queue(nb_users)
-
- for user_index in range(nb_users):
- process = BenchmarkProcess(exit_msg_queue, nb_users, user_index, argument_namespace,
- publish_method)
- process_list.append(process)
-
- for process in process_list:
- process.start()
-
- error_message_set = set()
- i = 0
- while i != len(process_list):
- try:
- msg = exit_msg_queue.get()
- except KeyboardInterrupt:
- if argument_namespace.repeat != -1:
- print >>sys.stderr, "Stopping gracefully"
- for process in process_list:
- process.terminate()
-
- i = 0
- continue
-
- if msg is not None:
- error_message_set.add(msg)
- for process in process_list:
- process.terminate()
-
- break
-
- i += 1
-
- if error_message_set:
- for error_message in error_message_set:
- print >>sys.stderr, "ERROR: %s" % error_message
-
- sys.exit(1)
-
-def runBenchmark(publish_method=None, argv=None):
- argument_namespace = parseArguments(argv)
-
- if isinstance(argument_namespace.users, tuple):
- nb_users, max_users = argument_namespace.users
- while True:
- runConstantBenchmark(argument_namespace, nb_users, publish_method)
-
- if nb_users == max_users:
- break
-
- nb_users = min(nb_users + argument_namespace.users_range_increment,
- max_users)
-
- else:
- runConstantBenchmark(argument_namespace, argument_namespace.users,
- publish_method)
-
-from slapos.tool.nosqltester import NoSQLTester
-
-class BenchmarkTester(NoSQLTester):
- def run_tester(self):
- runBenchmark(self.send_result_availability_notification,
- self.params['argv'])
-
-from slapos.tool.nosqltester import main
-
-def runTester():
- main(klass=BenchmarkTester)
-
-if __name__ == '__main__':
- runBenchmark()
Added: erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/scalability_tester.py
URL: http://svn.erp5.org/erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/scalability_tester.py?rev=45910&view=auto
==============================================================================
--- erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/scalability_tester.py (added)
+++ erp5/trunk/utils/erp5.utils.benchmark/src/erp5/utils/benchmark/scalability_tester.py [utf8] Sat Jun 18 10:26:21 2011
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+# Arnaud Fontaine <arnaud.fontaine at nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+##############################################################################
+
+from slapos.tool.nosqltester import NoSQLTester
+from erp5.utils.benchmark.performance_tester import PerformanceTester
+
+class ScalabilityTester(NoSQLTester):
+ def __init__(self):
+ super(ScalabilityTester, self).__init__()
+
+ def _add_parser_arguments(self, parser):
+ super(ScalabilityTester, self)._add_parser_arguments(parser)
+ PerformanceTester._add_parser_arguments(parser)
+
+ def _parse_arguments(self, parser):
+ namespace = super(ScalabilityTester, self)._parse_arguments(parser)
+ PerformanceTester._check_parsed_arguments(namespace)
+ return namespace
+
+ def run_tester(self):
+ performance_tester = PerformanceTester(
+ self.send_result_availability_notification,
+ self.argument_namespace)
+
+ performance_tester.run()
+
+def main():
+ ScalabilityTester().run()
+
+if __name__ == '__main__':
+ main()
More information about the Erp5-report
mailing list