[Erp5-report] r14156 - /erp5/trunk/utils/oood/pool.py

nobody at svn.erp5.org nobody at svn.erp5.org
Sun Apr 22 01:19:42 CEST 2007


Author: bartek
Date: Sun Apr 22 01:19:41 2007
New Revision: 14156

URL: http://svn.erp5.org?rev=14156&view=rev
Log:
new implementation to be used by Dispatcher (the old implementation is left intact for compatibility)
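
For readers of the list, a rough sketch of how a dispatcher might drive the new
interface (the function, job and process names are invented here; only
getWorkerObject and releaseWorkerObject come from this commit):

def dispatch(job, p):
  # p is assumed to be the pool object from pool.py below; 'job' and 'process'
  # are placeholders, only getWorkerObject/releaseWorkerObject exist in the diff
  worker = p.getWorkerObject()        # no argument: just ask for a free worker
  if not worker:
    return None                       # nothing free; a real dispatcher would retry later
  try:
    result = worker.process(job)      # placeholder for the real conversion call
  except Exception:
    # worker looks broken: ask the pool to rebuild it at the same index
    worker = p.getWorkerObject(worker)
    result = None
  p.releaseWorkerObject(worker)       # mark it free again (the pool kills it on failure)
  return result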

Modified:
    erp5/trunk/utils/oood/pool.py

Modified: erp5/trunk/utils/oood/pool.py
URL: http://svn.erp5.org/erp5/trunk/utils/oood/pool.py?rev=14156&r1=14155&r2=14156&view=diff
==============================================================================
--- erp5/trunk/utils/oood/pool.py (original)
+++ erp5/trunk/utils/oood/pool.py Sun Apr 22 01:19:41 2007
@@ -54,26 +54,17 @@
     """
       set up semaphore, make workers
     """
-    self.sem = threading.Semaphore(self.total) # this will go out some day
-    for i in range(self.total):
-      self._mkWorker(i)
-
-  def acquire(self):
-    self.sem.acquire()
-
-  def release(self, i):
-    """
-      release semaphore, try to delete timer thread
-    """
-    try:
-      self.timers[i].cancel()
-      del(self.timers[i])
-    except KeyError: # after restart this timer is not there
-      pass
-    self.sem.release()
+    if config.implementation == 1: # compatibility
+      self.sem = threading.Semaphore(self.total)
+      for i in range(self.total):
+        self._mkWorker(i)
+      return
+    # impl. 2
+    self.initializePool()
 
   def getWorkerObject(self, worker=None):
     """
+      Impl. 2
       New way of getting worker object
      if worker is None, return the first free worker found (or False if none is available)
      otherwise rebuild the given worker and return the new one; all the rest is done by dispatcher
@@ -85,33 +76,90 @@
      or kill it if a new one could not be made
       so that getSize always returns the right value
     """
-    return 1
-
-  def releaseWorker(self, worker):
-    """
+    if worker is None:
+      for w in self.inst.values():
+        if w and not w.busy:
+          w.busy = True
+          return w
+      return False
+    # else we produce and return a new worker (flagged as busy, to maintain consistency)
+    # we assume we are getting a worker to do something with it
+    i = worker.idx
+    self.makeWorkerObject(i)
+    self.inst[i].busy = True
+    return self.inst[i]
+
+  def releaseWorkerObject(self, worker):
+    """
+      Impl. 2
       Release worker so that it is available
-      Return True if successful
-      XXX what to do if not successful? I'd kill it to reduce pool size.
-    """
+      Return True if successful (successful means that the worker was busy and we set it free)
+      If something went wrong, we kill the worker
+    """
+    try:
+      if not worker.busy:
+        return False
+      worker.busy = False
+    except Exception, e:
+      Log.logException(e)
+      if hasattr(worker, 'idx'): # worker can be None, for example, and then has no idx
+        self.inst[worker.idx] = None
+      return False
     return True
 
   def getSize(self):
     """
+      Impl. 2
      return the number of workers that exist and are not busy
       so that we can check if the pool is usable at all
     """
-    pass
+    return len([w for w in self.inst.values() if w and not w.busy])
 
   def initializePool(self):
     """
+      Impl. 2
       kill all workers if any are present
       build new ones
-      (same as in __init__ - can we call __init__ from here?)
-    """
-    pass
+    """
+    for w in self.inst.values():
+      self.releaseWorkerObject(w)
+    self.inst = {}
+    for i in range(self.total):
+      self.makeWorkerObject(i)
+
+  def makeWorkerObject(self, i):
+    """
+      Impl. 2
+      make worker object and OOo instance for it (no need to run start.py separately)
+      kill OOo if existed
+    """
+    self.inst[i] = None
+    start.killInstance(i)
+    start.startInstance(i, keep_control=True)
+    time.sleep(config.instance_load_time)
+    self.inst[i] = (factory.builder.build(self, i))
+
+  def acquire(self):
+    """
+      Impl. 1
+    """
+    self.sem.acquire()
+
+  def release(self, i):
+    """
+      Impl. 1
+      release semaphore, try to delete timer thread
+    """
+    try:
+      self.timers[i].cancel()
+      del(self.timers[i])
+    except KeyError: # after restart this timer is not there
+      pass
+    self.sem.release()
 
   def getWorker(self):
     """
+      Impl. 1
       wait until there is an available worker
       returns first available instance, marks it as busy etc.
       we use index to name temporary files which are per
@@ -138,11 +186,13 @@
 
   def rebuild(self, i):
     """
+      Impl. 1
       triggered by a timer thread in case worker takes too long
       rebuilds the worker instance
     """
     def _rebuild():
       """
+        Impl. 1
        This is run when a timer thread expires, which means that the OOo instance is frozen
         kills OOod instance, kills worker, starts new instance and worker, replaces the old worker
         in the pool with the new one
@@ -160,10 +210,14 @@
     return _rebuild
 
   def _mkWorker(self, i):
+    """
+      Impl. 1
+    """
     self.inst[i] = (factory.builder.build(self, i))
 
   def _killWorker(self, i):
     """
+      Impl. 1
       this basically means marking worker as "dead"
       there is no way to really destroy the object; it is removed from the pool so it should be
       removed by garbage collector
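
As a side note, the core of the new implementation above is just a busy flag
scanned over a dict of workers; a stripped-down, self-contained sketch of that
pattern (everything except the inst/busy/idx names is invented for illustration):

class TinyPool:
  """Toy version of the busy-flag scheme used by Impl. 2 above."""

  def __init__(self, size):
    self.inst = {}
    for i in range(size):
      self.inst[i] = self._makeWorker(i)

  def _makeWorker(self, i):
    # stand-in for makeWorkerObject(); the real code also (re)starts an OOo instance
    class Worker(object):
      pass
    w = Worker()
    w.idx = i
    w.busy = False
    return w

  def get(self):
    # same scan as getWorkerObject(None): first free worker wins
    for w in self.inst.values():
      if w and not w.busy:
        w.busy = True
        return w
    return False

  def release(self, w):
    # same contract as releaseWorkerObject(): True only if the worker really was busy
    if not w.busy:
      return False
    w.busy = False
    return True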

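The Impl. 1 path kept for compatibility relies on a per-worker threading.Timer
as a watchdog for frozen OOo instances; a minimal standalone sketch of that idea
(the timeout value and function names are made up, only the Timer/cancel pattern
mirrors the code in the diff):

import threading

TIMEOUT = 30  # seconds; in oood the real value comes from the configuration

def guardedCall(do_work, rebuild):
  # arm a watchdog: if do_work() hangs past TIMEOUT, 'rebuild' fires and
  # replaces the frozen worker, like the rebuild() method above
  timer = threading.Timer(TIMEOUT, rebuild)
  timer.start()
  try:
    return do_work()
  finally:
    # normal completion: cancel the watchdog, as release() does with self.timers[i]
    timer.cancel()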


