[haizea-commit] r470 - in trunk/src/haizea: common resourcemanager resourcemanager/deployment resourcemanager/enact resourcemanager/enact/opennebula resourcemanager/enact/simulated resourcemanager/frontends

haizea-commit at mailman.cs.uchicago.edu
Thu Aug 7 11:21:09 CDT 2008


Author: borja
Date: 2008-08-07 11:21:09 -0500 (Thu, 07 Aug 2008)
New Revision: 470

Modified:
   trunk/src/haizea/common/constants.py
   trunk/src/haizea/common/utils.py
   trunk/src/haizea/resourcemanager/configfile.py
   trunk/src/haizea/resourcemanager/datastruct.py
   trunk/src/haizea/resourcemanager/deployment/base.py
   trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
   trunk/src/haizea/resourcemanager/enact/base.py
   trunk/src/haizea/resourcemanager/enact/opennebula/info.py
   trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
   trunk/src/haizea/resourcemanager/enact/simulated/info.py
   trunk/src/haizea/resourcemanager/enact/simulated/vm.py
   trunk/src/haizea/resourcemanager/frontends/opennebula.py
   trunk/src/haizea/resourcemanager/frontends/rpc.py
   trunk/src/haizea/resourcemanager/frontends/tracefile.py
   trunk/src/haizea/resourcemanager/log.py
   trunk/src/haizea/resourcemanager/resourcepool.py
   trunk/src/haizea/resourcemanager/rm.py
   trunk/src/haizea/resourcemanager/rpcserver.py
   trunk/src/haizea/resourcemanager/scheduler.py
   trunk/src/haizea/resourcemanager/slottable.py
Log:
Changed the logging code to use multiple loggers, accessible through logging.getLogger, instead of a single logger held by the ResourceManager class (which required traversing the object hierarchy to reach that logger). Also made ResourceManager a singleton.
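
For reference, the new pattern is for each component to fetch its own named
logger at construction time instead of reaching back through the object
hierarchy. A minimal sketch (the "SCHED" channel name is illustrative; each
module registers whatever name it logs under):

    import logging

    class Scheduler(object):
        def __init__(self, slottable):
            self.slottable = slottable
            # One logger per component, looked up in the logging module's
            # global registry; no more scheduler.rm.logger indirection.
            self.logger = logging.getLogger("SCHED")

        def schedule(self, nexttime):
            self.logger.debug("Scheduling leases at %s" % nexttime)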

Modified: trunk/src/haizea/common/constants.py
===================================================================
--- trunk/src/haizea/common/constants.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/common/constants.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -125,16 +125,6 @@
 CLOCK_SIMULATED = "simulated"
 CLOCK_REAL = "real"
 
-# Component names
-RM="RM"
-SCHED="SCHED"
-ST="SLOT"
-DS="STRUCT"
-ENACT="ENACT"
-CLOCK="CLOCK"
-ONE="ONE"
-TRACE="TRACE"
-
 # Transfer required in deployment
 REQTRANSFER_NO = 0
 REQTRANSFER_YES = 1
@@ -219,3 +209,6 @@
 AVERAGE_TIMEWEIGHTED=2
 
 EVENT_END_VM=0
+
+LOGLEVEL_VDEBUG = 5
+LOGLEVEL_STATUS = 25
\ No newline at end of file
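
The numeric values slot the two new levels into the standard logging
hierarchy (DEBUG=10, INFO=20, WARNING=30): VDEBUG sits below DEBUG, and
STATUS sits between INFO and WARNING. A short sketch of how log.py (below)
registers them, plus the stdlib detail the rest of the code relies on:

    import logging

    logging.addLevelName(5, "VDEBUG")    # LOGLEVEL_VDEBUG
    logging.addLevelName(25, "STATUS")   # LOGLEVEL_STATUS

    # addLevelName registers the mapping in both directions, so
    # getLevelName resolves names to numbers as well as numbers to names:
    assert logging.getLevelName("STATUS") == 25
    assert logging.getLevelName(5) == "VDEBUG"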

Modified: trunk/src/haizea/common/utils.py
===================================================================
--- trunk/src/haizea/common/utils.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/common/utils.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -95,4 +95,23 @@
         return value.seconds
     else:
         return value
+    
+class Singleton(object):
+    """
+    A singleton base class.
+    Based on: http://code.activestate.com/recipes/52558/
+    """
+    _singleton = None
+
+    def __new__(cls, *args, **kwargs):
+        if cls._singleton is None:
+            # object.__new__ ignores extra arguments; they are consumed
+            # by the subclass's __init__.
+            cls._singleton = object.__new__(cls)
+        return cls._singleton
+
+    @classmethod
+    def get_singleton(cls):
+        if '_singleton' not in vars(cls):
+            return None
+        else:
+            return cls._singleton
+
      
\ No newline at end of file
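
A minimal usage sketch for the new base class (the Config subclass and its
values are hypothetical):

    from haizea.common.utils import Singleton

    class Config(Singleton):
        def __init__(self, values):
            self.values = values

    c = Config({"loglevel": "INFO"})
    assert Config.get_singleton() is c

    # __new__ hands back the cached instance on later calls, but __init__
    # still runs each time; the convention is to construct the singleton
    # once (as rm.py does) and call get_singleton() everywhere else.
    assert Config({"loglevel": "DEBUG"}) is c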

Modified: trunk/src/haizea/resourcemanager/configfile.py
===================================================================
--- trunk/src/haizea/resourcemanager/configfile.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/configfile.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -42,7 +42,7 @@
                 type        = OPTTYPE_STRING,
                 required    = False,
                 default     = "INFO",
-                valid       = ["STATUS","INFO","DEBUG","EXTREMEDEBUG"],
+                valid       = ["STATUS","INFO","DEBUG","VDEBUG"],
                 doc         = """
                 Controls the level (and amount) of 
                 log messages. Valid values are:
@@ -50,7 +50,7 @@
                  - STATUS: Only print status messages
                 - INFO: Slightly more verbose than STATUS
                  - DEBUG: Prints information useful for debugging the scheduler.
-                 - EXTREMEDEBUG: Prints very verbose information
+                 - VDEBUG: Prints very verbose information
                    on the scheduler's internal data structures. Use only
                    for short runs.        
                 """),

Modified: trunk/src/haizea/resourcemanager/datastruct.py
===================================================================
--- trunk/src/haizea/resourcemanager/datastruct.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/datastruct.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -39,14 +39,16 @@
   * Duration: A wrapper around requested/accumulated/actual durations
 """
 
-from haizea.common.constants import state_str, rstate_str, DS, RES_STATE_SCHEDULED, RES_STATE_ACTIVE, RES_MEM, MIGRATE_NONE, MIGRATE_MEM, MIGRATE_MEMDISK
+from haizea.common.constants import state_str, rstate_str, RES_STATE_SCHEDULED, RES_STATE_ACTIVE, RES_MEM, MIGRATE_NONE, MIGRATE_MEM, MIGRATE_MEMDISK, LOGLEVEL_VDEBUG
 from haizea.common.utils import roundDateTimeDelta, get_lease_id, pretty_nodemap, estimate_transfer_time, xmlrpc_marshall_singlevalue
 
 from operator import attrgetter
 from mx.DateTime import TimeDelta
 from math import floor
 
+import logging
 
+
 #-------------------------------------------------------------------#
 #                                                                   #
 #                     LEASE DATA STRUCTURES                         #
@@ -83,31 +85,32 @@
         # Enactment information. Should only be manipulated by enactment module
         self.enactment_info = None
         self.vnode_enactment_info = None
+        
+        self.logger = logging.getLogger("LEASES")
 
     # TODO: Remove the link to the scheduler, and pass all necessary information
     # as parameters to methods.
     def set_scheduler(self, scheduler):
         self.scheduler = scheduler
-        self.logger = scheduler.rm.logger
         
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "Lease ID       : %i" % self.id, DS)
-        self.logger.log(loglevel, "Submission time: %s" % self.submit_time, DS)
-        self.logger.log(loglevel, "Duration       : %s" % self.duration, DS)
-        self.logger.log(loglevel, "State          : %s" % state_str(self.state), DS)
-        self.logger.log(loglevel, "VM image       : %s" % self.diskimage_id, DS)
-        self.logger.log(loglevel, "VM image size  : %s" % self.diskimage_size, DS)
-        self.logger.log(loglevel, "Num nodes      : %s" % self.numnodes, DS)
-        self.logger.log(loglevel, "Resource req   : %s" % self.requested_resources, DS)
-        self.logger.log(loglevel, "VM image map   : %s" % pretty_nodemap(self.vmimagemap), DS)
-        self.logger.log(loglevel, "Mem image map  : %s" % pretty_nodemap(self.memimagemap), DS)
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Lease ID       : %i" % self.id)
+        self.logger.log(loglevel, "Submission time: %s" % self.submit_time)
+        self.logger.log(loglevel, "Duration       : %s" % self.duration)
+        self.logger.log(loglevel, "State          : %s" % state_str(self.state))
+        self.logger.log(loglevel, "VM image       : %s" % self.diskimage_id)
+        self.logger.log(loglevel, "VM image size  : %s" % self.diskimage_size)
+        self.logger.log(loglevel, "Num nodes      : %s" % self.numnodes)
+        self.logger.log(loglevel, "Resource req   : %s" % self.requested_resources)
+        self.logger.log(loglevel, "VM image map   : %s" % pretty_nodemap(self.vmimagemap))
+        self.logger.log(loglevel, "Mem image map  : %s" % pretty_nodemap(self.memimagemap))
 
-    def print_rrs(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "RESOURCE RESERVATIONS", DS)
-        self.logger.log(loglevel, "~~~~~~~~~~~~~~~~~~~~~", DS)
+    def print_rrs(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "RESOURCE RESERVATIONS")
+        self.logger.log(loglevel, "~~~~~~~~~~~~~~~~~~~~~")
         for r in self.rr:
             r.print_contents(loglevel)
-            self.logger.log(loglevel, "##", DS)
+            self.logger.log(loglevel, "##")
             
         
     def has_starting_reservations(self, time):
@@ -253,13 +256,13 @@
         LeaseBase.__init__(self, submit_time, start, duration, diskimage_id,
                            diskimage_size, numnodes, resreq, preemptible)
         
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "__________________________________________________", DS)
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "__________________________________________________")
         LeaseBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : AR", DS)
-        self.logger.log(loglevel, "Start time     : %s" % self.start, DS)
+        self.logger.log(loglevel, "Type           : AR")
+        self.logger.log(loglevel, "Start time     : %s" % self.start)
         self.print_rrs(loglevel)
-        self.logger.log(loglevel, "--------------------------------------------------", DS)
+        self.logger.log(loglevel, "--------------------------------------------------")
     
     def xmlrpc_marshall(self):
         l = LeaseBase.xmlrpc_marshall(self)
@@ -280,13 +283,13 @@
         LeaseBase.__init__(self, submit_time, start, duration, diskimage_id,
                            diskimage_size, numnodes, resreq, preemptible)
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "__________________________________________________", DS)
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "__________________________________________________")
         LeaseBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : BEST-EFFORT", DS)
-        self.logger.log(loglevel, "Images Avail @ : %s" % self.imagesavail, DS)
+        self.logger.log(loglevel, "Type           : BEST-EFFORT")
+        self.logger.log(loglevel, "Images Avail @ : %s" % self.imagesavail)
         self.print_rrs(loglevel)
-        self.logger.log(loglevel, "--------------------------------------------------", DS)
+        self.logger.log(loglevel, "--------------------------------------------------")
         
     def get_waiting_time(self):
         return self.start.actual - self.submit_time
@@ -316,12 +319,12 @@
         LeaseBase.__init__(self, submit_time, start, duration, diskimage_id,
                            diskimage_size, numnodes, resreq, preemptible)
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "__________________________________________________", DS)
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "__________________________________________________")
         LeaseBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : IMMEDIATE", DS)
+        self.logger.log(loglevel, "Type           : IMMEDIATE")
         self.print_rrs(loglevel)
-        self.logger.log(loglevel, "--------------------------------------------------", DS)
+        self.logger.log(loglevel, "--------------------------------------------------")
 
     def xmlrpc_marshall(self):
         l = LeaseBase.xmlrpc_marshall(self)
@@ -344,13 +347,13 @@
         self.end = end
         self.state = None
         self.resources_in_pnode = res
-        self.logger = lease.scheduler.rm.logger
+        self.logger = logging.getLogger("LEASES")
                         
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
-        self.logger.log(loglevel, "Start          : %s" % self.start, DS)
-        self.logger.log(loglevel, "End            : %s" % self.end, DS)
-        self.logger.log(loglevel, "State          : %s" % rstate_str(self.state), DS)
-        self.logger.log(loglevel, "Resources      : \n%s" % "\n".join(["N%i: %s" %(i, x) for i, x in self.resources_in_pnode.items()]), DS) 
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Start          : %s" % self.start)
+        self.logger.log(loglevel, "End            : %s" % self.end)
+        self.logger.log(loglevel, "State          : %s" % rstate_str(self.state))
+        self.logger.log(loglevel, "Resources      : \n%s" % "\n".join(["N%i: %s" %(i, x) for i, x in self.resources_in_pnode.items()])) 
                 
     def xmlrpc_marshall(self):
         # Convert to something we can send through XMLRPC
@@ -378,13 +381,13 @@
         else:
             self.prematureend = None 
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
         ResourceReservationBase.print_contents(self, loglevel)
         if self.prematureend != None:
-            self.logger.log(loglevel, "Premature end  : %s" % self.prematureend, DS)
-        self.logger.log(loglevel, "Type           : VM", DS)
-        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes), DS)
-        self.logger.log(loglevel, "On Complete    : %s" % self.oncomplete, DS)
+            self.logger.log(loglevel, "Premature end  : %s" % self.prematureend)
+        self.logger.log(loglevel, "Type           : VM")
+        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes))
+        self.logger.log(loglevel, "On Complete    : %s" % self.oncomplete)
         
     def is_preemptible(self):
         return self.lease.preemptible
@@ -401,10 +404,10 @@
         ResourceReservationBase.__init__(self, lease, start, end, res)
         self.nodes = nodes
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
         ResourceReservationBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : SUSPEND", DS)
-        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes), DS)
+        self.logger.log(loglevel, "Type           : SUSPEND")
+        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes))
         
     # TODO: Suspension RRs should be preemptible, but preempting a suspension RR
     # has wider implications (with a non-trivial handling). For now, we leave them 
@@ -423,10 +426,10 @@
         ResourceReservationBase.__init__(self, lease, start, end, res)
         self.nodes = nodes
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
         ResourceReservationBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : RESUME", DS)
-        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes), DS)
+        self.logger.log(loglevel, "Type           : RESUME")
+        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes))
 
     # TODO: Suspension RRs should be preemptible, but preempting a suspension RR
     # has wider implications (with a non-trivial handling). For now, we leave them 

Modified: trunk/src/haizea/resourcemanager/deployment/base.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/base.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/deployment/base.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -16,14 +16,15 @@
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
 
+import logging
 
 class DeploymentBase(object):
     def __init__(self, scheduler):
         self.scheduler = scheduler
         self.slottable = scheduler.slottable
         self.resourcepool = scheduler.rm.resourcepool
-        self.logger = scheduler.rm.logger
+        self.logger = logging.getLogger("DEPLOY")
+
         
-        
 class DeploymentSchedException(Exception):
     pass
\ No newline at end of file

Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -62,17 +62,17 @@
         end = lease.start.requested + lease.duration.requested
         for (vnode, pnode) in nodeassignment.items():
             lease_id = lease.id
-            self.logger.debug("Scheduling image transfer of '%s' from vnode %i to physnode %i" % (lease.diskimage_id, vnode, pnode), constants.SCHED)
+            self.logger.debug("Scheduling image transfer of '%s' from vnode %i to physnode %i" % (lease.diskimage_id, vnode, pnode))
 
             if reusealg == constants.REUSE_IMAGECACHES:
                 if self.resourcepool.isInPool(pnode, lease.diskimage_id, start):
-                    self.logger.debug("No need to schedule an image transfer (reusing an image in pool)", constants.SCHED)
+                    self.logger.debug("No need to schedule an image transfer (reusing an image in pool)")
                     mustpool[vnode] = pnode                            
                 else:
-                    self.logger.debug("Need to schedule a transfer.", constants.SCHED)
+                    self.logger.debug("Need to schedule a transfer.")
                     musttransfer[vnode] = pnode
             else:
-                self.logger.debug("Need to schedule a transfer.", constants.SCHED)
+                self.logger.debug("Need to schedule a transfer.")
                 musttransfer[vnode] = pnode
 
         if len(musttransfer) == 0:
@@ -85,7 +85,7 @@
                 for vnode, pnode in musttransfer:
                     if transferRRs.has_key(pnode):
                         # We've already scheduled a transfer to this node. Reuse it.
-                        self.logger.debug("No need to schedule an image transfer (reusing an existing transfer)", constants.SCHED)
+                        self.logger.debug("No need to schedule an image transfer (reusing an existing transfer)")
                         transferRR = transferRRs[pnode]
                         transferRR.piggyback(lease_id, vnode, pnode, end)
                     else:
@@ -116,18 +116,18 @@
             reqtransfer = earliest[pnode][1]
             if reqtransfer == constants.REQTRANSFER_COWPOOL:
                 # Add to pool
-                self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode), constants.SCHED)
+                self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
                 self.resourcepool.addToPool(pnode, lease.diskimage_id, lease.id, vnode, vmrr.end)
             elif reqtransfer == constants.REQTRANSFER_PIGGYBACK:
                 # We can piggyback on an existing transfer
                 transferRR = earliest[pnode][2]
                 transferRR.piggyback(lease.id, vnode, pnode)
-                self.logger.debug("Piggybacking transfer for V%i->P%i on existing transfer in lease %i." % (vnode, pnode, transferRR.lease.id), constants.SCHED)
+                self.logger.debug("Piggybacking transfer for V%i->P%i on existing transfer in lease %i." % (vnode, pnode, transferRR.lease.id))
                 piggybacking.append(transferRR)
             else:
                 # Transfer
                 musttransfer[vnode] = pnode
-                self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode), constants.SCHED)
+                self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode))
         if len(musttransfer)>0:
             transferRRs = self.scheduleImageTransferFIFO(lease, musttransfer, nexttime)
             endtransfer = transferRRs[-1].end
@@ -366,7 +366,7 @@
 
     @staticmethod
     def handle_start_filetransfer(sched, lease, rr):
-        sched.rm.logger.debug("LEASE-%i Start of handleStartFileTransfer" % lease.id, constants.SCHED)
+        sched.rm.logger.debug("LEASE-%i Start of handleStartFileTransfer" % lease.id)
         lease.print_contents()
         if lease.state == constants.LEASE_STATE_SCHEDULED or lease.state == constants.LEASE_STATE_DEPLOYED:
             lease.state = constants.LEASE_STATE_DEPLOYING
@@ -376,12 +376,12 @@
             pass # This shouldn't happen
         lease.print_contents()
         sched.updateNodeTransferState(rr.transfers.keys(), constants.DOING_TRANSFER, lease.id)
-        sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id, constants.SCHED)
-        sched.logger.info("Starting image transfer for lease %i" % (lease.id), constants.SCHED)
+        sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id)
+        sched.logger.info("Starting image transfer for lease %i" % (lease.id))
 
     @staticmethod
     def handle_end_filetransfer(sched, lease, rr):
-        sched.rm.logger.debug("LEASE-%i Start of handleEndFileTransfer" % lease.id, constants.SCHED)
+        sched.rm.logger.debug("LEASE-%i Start of handleEndFileTransfer" % lease.id)
         lease.print_contents()
         if lease.state == constants.LEASE_STATE_DEPLOYING:
             lease.state = constants.LEASE_STATE_DEPLOYED
@@ -410,8 +410,8 @@
             # TODO: Migrating
         lease.print_contents()
         sched.updateNodeTransferState(rr.transfers.keys(), constants.DOING_IDLE, lease.id)
-        sched.rm.logger.debug("LEASE-%i End of handleEndFileTransfer" % lease.id, constants.SCHED)
-        sched.rm.logger.info("Completed image transfer for lease %i" % (lease.id), constants.SCHED)
+        sched.rm.logger.debug("LEASE-%i End of handleEndFileTransfer" % lease.id)
+        sched.rm.logger.info("Completed image transfer for lease %i" % (lease.id))
 
 class FileTransferResourceReservation(ResourceReservationBase):
     def __init__(self, lease, res, start=None, end=None):
@@ -421,12 +421,12 @@
         # Dictionary of  physnode -> [ (lease_id, vnode)* ]
         self.transfers = {}
 
-    def print_contents(self, loglevel="EXTREMEDEBUG"):
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
         ResourceReservationBase.print_contents(self, loglevel)
-        self.logger.log(loglevel, "Type           : FILE TRANSFER", constants.DS)
-        self.logger.log(loglevel, "Deadline       : %s" % self.deadline, constants.DS)
-        self.logger.log(loglevel, "File           : %s" % self.file, constants.DS)
-        self.logger.log(loglevel, "Transfers      : %s" % self.transfers, constants.DS)
+        self.logger.log(loglevel, "Type           : FILE TRANSFER")
+        self.logger.log(loglevel, "Deadline       : %s" % self.deadline)
+        self.logger.log(loglevel, "File           : %s" % self.file)
+        self.logger.log(loglevel, "Transfers      : %s" % self.transfers)
         
     def piggyback(self, lease_id, vnode, physnode):
         if self.transfers.has_key(physnode):

Modified: trunk/src/haizea/resourcemanager/enact/base.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/base.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/enact/base.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -22,7 +22,6 @@
 class ResourcePoolInfoBase(object):
     def __init__(self, resourcepool):
         self.resourcepool = resourcepool
-        self.logger = resourcepool.rm.logger
         
         resourcetypes = self.getResourceTypes() #IGNORE:E1111
         ds.ResourceTuple.set_resource_types(resourcetypes)
@@ -56,12 +55,10 @@
 class StorageEnactmentBase(object):
     def __init__(self, resourcepool):
         self.resourcepool = resourcepool
-        self.logger = resourcepool.rm.logger
     
 class VMEnactmentBase(object):
     def __init__(self, resourcepool):
         self.resourcepool = resourcepool
-        self.logger = resourcepool.rm.logger
         
     def start(self, vms): abstract()
     

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -21,6 +21,7 @@
 import haizea.common.constants as constants
 import haizea.resourcemanager.datastruct as ds
 from pysqlite2 import dbapi2 as sqlite
+import logging
 
 oneattr2haizea = { "TOTALCPU": constants.RES_CPU,
                    "TOTALMEMORY": constants.RES_MEM }
@@ -29,7 +30,7 @@
     def __init__(self, resourcepool):
         ResourcePoolInfoBase.__init__(self, resourcepool)
         config = self.resourcepool.rm.config
-        self.logger = self.resourcepool.rm.logger
+        self.logger = logging.getLogger("ENACT.ONE.INFO")
         self.suspendresumerate = config.get("one.suspendresume-rate-estimate")
 
         # Get information about nodes from DB
@@ -59,7 +60,7 @@
             node.enactment_info = int(enactID)
             self.nodes.append(node)
             
-        self.logger.info("Fetched %i nodes from ONE db" % len(self.nodes), constants.ONE)
+        self.logger.info("Fetched %i nodes from ONE db" % len(self.nodes))
         
         # Image repository nodes
         # TODO: No image transfers in OpenNebula yet

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -19,11 +19,14 @@
 from haizea.resourcemanager.enact.base import VMEnactmentBase
 import haizea.common.constants as constants
 import commands
+import logging
 from pysqlite2 import dbapi2 as sqlite
 
 class VMEnactment(VMEnactmentBase):
     def __init__(self, resourcepool):
         VMEnactmentBase.__init__(self, resourcepool)
+        self.logger = logging.getLogger("ENACT.ONE.VM")
+
         self.onevm = self.resourcepool.rm.config.get("onevm")
         
         self.conn = sqlite.connect(self.resourcepool.rm.config.get("one.db"))
@@ -31,9 +34,9 @@
 
         
     def run_command(self, cmd):
-        self.logger.debug("Running command: %s" % cmd, constants.ONE)
+        self.logger.debug("Running command: %s" % cmd)
         (status, output) = commands.getstatusoutput(cmd)
-        self.logger.debug("Returned status=%i, output='%s'" % (status, output), constants.ONE)
+        self.logger.debug("Returned status=%i, output='%s'" % (status, output))
         return status, output
 
     def start(self, action):
@@ -46,12 +49,12 @@
             memory = action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
             
             self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
-                         % (action.lease_haizea_id, vnode, hostID, image, cpu, memory), constants.ONE)
+                         % (action.lease_haizea_id, vnode, hostID, image, cpu, memory))
 
             cmd = "%s deploy %i %i" % (self.onevm, vmid, hostID)
             status, output = self.run_command(cmd)
             if status == 0:
-                self.logger.debug("Command returned succesfully.", constants.ONE)
+                self.logger.debug("Command returned succesfully.")
             else:
                 raise Exception, "Error when running onevm deploy (status=%i, output='%s')" % (status, output)
             
@@ -62,7 +65,7 @@
             cmd = "%s shutdown %i" % (self.onevm, vmid)
             status, output = self.run_command(cmd)
             if status == 0:
-                self.logger.debug("Command returned succesfully.", constants.ONE)
+                self.logger.debug("Command returned succesfully.")
             else:
                 raise Exception, "Error when running onevm shutdown (status=%i, output='%s')" % (status, output)
 
@@ -73,7 +76,7 @@
             cmd = "%s suspend %i" % (self.onevm, vmid)
             status, output = self.run_command(cmd)
             if status == 0:
-                self.logger.debug("Command returned succesfully.", constants.ONE)
+                self.logger.debug("Command returned succesfully.")
             else:
                 raise Exception, "Error when running onevm suspend (status=%i, output='%s')" % (status, output)
         
@@ -84,7 +87,7 @@
             cmd = "%s resume %i" % (self.onevm, vmid)
             status, output = self.run_command(cmd)
             if status == 0:
-                self.logger.debug("Command returned succesfully.", constants.ONE)
+                self.logger.debug("Command returned succesfully.")
             else:
                 raise Exception, "Error when running onevm resume (status=%i, output='%s')" % (status, output)
 
@@ -99,9 +102,9 @@
             onevm = cur.fetchone()        
             state = onevm["state"]
             if state == 5:
-                self.logger.debug("Suspend of L%iV%i correct." % (action.lease_haizea_id, vnode), constants.ONE)
+                self.logger.debug("Suspend of L%iV%i correct." % (action.lease_haizea_id, vnode))
             else:
-                self.logger.warning("ONE did not complete suspend  of L%iV%i on time. State is %i" % (action.lease_haizea_id, vnode, state), constants.ONE)
+                self.logger.warning("ONE did not complete suspend  of L%iV%i on time. State is %i" % (action.lease_haizea_id, vnode, state))
                 result = 1
         return result
         
@@ -116,9 +119,9 @@
             onevm = cur.fetchone()        
             state = onevm["state"]
             if state == 3:
-                self.logger.debug("Resume of L%iV%i correct." % (action.lease_haizea_id, vnode), constants.ONE)
+                self.logger.debug("Resume of L%iV%i correct." % (action.lease_haizea_id, vnode))
             else:
-                self.logger.warning("ONE did not complete resume of L%iV%i on time. State is %i" % (action.lease_haizea_id, vnode, state), constants.ONE)
+                self.logger.warning("ONE did not complete resume of L%iV%i on time. State is %i" % (action.lease_haizea_id, vnode, state))
                 result = 1
         return result
 

Modified: trunk/src/haizea/resourcemanager/enact/simulated/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -20,10 +20,12 @@
 from haizea.resourcemanager.enact.base import ResourcePoolInfoBase
 import haizea.common.constants as constants
 import haizea.resourcemanager.datastruct as ds
+import logging
 
 class ResourcePoolInfo(ResourcePoolInfoBase):
     def __init__(self, resourcepool):
         ResourcePoolInfoBase.__init__(self, resourcepool)
+        self.logger = logging.getLogger("ENACT.SIMUL.INFO")
         config = self.resourcepool.rm.config
         self.suspendresumerate = config.get("simul.suspendresume-rate")
                 

Modified: trunk/src/haizea/resourcemanager/enact/simulated/vm.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated/vm.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/enact/simulated/vm.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -18,10 +18,12 @@
 
 from haizea.resourcemanager.enact.base import VMEnactmentBase
 import haizea.common.constants as constants
+import logging
 
 class VMEnactment(VMEnactmentBase):
     def __init__(self, resourcepool):
         VMEnactmentBase.__init__(self, resourcepool)
+        self.logger = logging.getLogger("ENACT.SIMUL.VM")
         
     def start(self, action):
         for vnode in action.vnodes:
@@ -31,22 +33,22 @@
             cpu = action.vnodes[vnode].resources.get_by_type(constants.RES_CPU)
             memory = action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
             self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
-                         % (action.lease_haizea_id, vnode, pnode, image, cpu, memory), "SIMUL")
+                         % (action.lease_haizea_id, vnode, pnode, image, cpu, memory))
     
     def stop(self, action):
         for vnode in action.vnodes:
             self.logger.debug("Received request to stop VM for L%iV%i"
-                         % (action.lease_haizea_id, vnode), "SIMUL")
+                         % (action.lease_haizea_id, vnode))
 
     def suspend(self, action):
         for vnode in action.vnodes:
             self.logger.debug("Received request to suspend VM for L%iV%i"
-                         % (action.lease_haizea_id, vnode), "SIMUL")
+                         % (action.lease_haizea_id, vnode))
 
     def resume(self, action):
         for vnode in action.vnodes:
             self.logger.debug("Received request to resume VM for L%iV%i"
-                         % (action.lease_haizea_id, vnode), "SIMUL")
+                         % (action.lease_haizea_id, vnode))
 
     def verifySuspend(self, action):
         return 0

Modified: trunk/src/haizea/resourcemanager/frontends/opennebula.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -44,7 +44,7 @@
     def __init__(self, rm):
         self.rm = rm
         self.processed = []
-        self.logger = self.rm.logger
+        self.logger = logging.getLogger("ONEREQ")
         config = self.rm.config
 
         self.conn = sqlite.connect(config.get("one.db"))

Modified: trunk/src/haizea/resourcemanager/frontends/rpc.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/rpc.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/frontends/rpc.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -28,7 +28,7 @@
 class RPCFrontend(RequestFrontend):
     def __init__(self, rm):
         self.rm = rm
-        self.logger = self.rm.logger
+        self.logger = logging.getLogger("RPCREQ")
         self.accumulated = []
         config = self.rm.config
         self.rm.rpc_server.register_rpc(self.create_lease)

Modified: trunk/src/haizea/resourcemanager/frontends/tracefile.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/tracefile.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/frontends/tracefile.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -21,12 +21,12 @@
 import haizea.traces.readers as tracereaders
 from haizea.resourcemanager.datastruct import ARLease, BestEffortLease 
 import operator
+import logging
 
-
 class TracefileFrontend(RequestFrontend):
     def __init__(self, rm, starttime):
         RequestFrontend.__init__(self, rm)
-        
+        self.logger = logging.getLogger("TRACEFILE")
         config = rm.config
 
         tracefile = config.get("tracefile")
@@ -35,7 +35,7 @@
         
         # Read trace file
         # Requests is a list of lease requests
-        self.rm.logger.info("Loading tracefile %s" % tracefile, constants.TRACE)
+        #self.logger.info("Loading tracefile %s" % tracefile)
 
         self.requests = None
         if tracefile.endswith(".swf"):
@@ -44,13 +44,13 @@
             self.requests = tracereaders.LWF(tracefile, starttime)
     
         if injectfile != None:
-            self.rm.logger.info("Loading injection file %s" % injectfile, constants.TRACE)
+            self.logger.info("Loading injection file %s" % injectfile)
             injectedleases = tracereaders.LWF(injectfile, starttime)
             self.requests += injectedleases
             self.requests.sort(key=operator.attrgetter("submit_time"))
 
         if imagefile != None:
-            self.rm.logger.info("Loading image file %s" % imagefile, constants.TRACE)
+            self.logger.info("Loading image file %s" % imagefile)
             imagesizes, images = tracereaders.IMG(imagefile)
             for r, i in zip(self.requests, images):
                 r.vmimage = i
@@ -75,7 +75,7 @@
             
         num_besteffort = len([x for x in self.requests if isinstance(x,BestEffortLease)])
         num_ar = len([x for x in self.requests if isinstance(x,ARLease)])
-        self.rm.logger.info("Loaded workload with %i requests (%i best-effort + %i AR)" % (num_besteffort+num_ar, num_besteffort, num_ar), constants.TRACE)
+        #self.logger.info("Loaded workload with %i requests (%i best-effort + %i AR)" % (num_besteffort+num_ar, num_besteffort, num_ar))
         
         
     def getAccumulatedRequests(self):

Modified: trunk/src/haizea/resourcemanager/log.py
===================================================================
--- trunk/src/haizea/resourcemanager/log.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/log.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -17,57 +17,28 @@
 # -------------------------------------------------------------------------- #
 
 import logging
+from haizea.resourcemanager.rm import ResourceManager
+from haizea.common.constants import LOGLEVEL_VDEBUG, LOGLEVEL_STATUS
 
-class Logger(object):
-    def __init__(self, rm, file = None):
-        self.rm = rm
+logging.addLevelName(LOGLEVEL_VDEBUG, "VDEBUG")
+logging.addLevelName(LOGLEVEL_STATUS, "STATUS")
 
-        self.logger = logging.getLogger("haizea")
-        self.extremedebug = False
-        if file == None:
-            handler = logging.StreamHandler()
-        else:
-            handler = logging.FileHandler(file)
-        formatter = logging.Formatter('%(message)s')
-        handler.setFormatter(formatter)
-        self.logger.addHandler(handler)
-
-        self.loglevel = {"CRITICAL": 50,
-                    "ERROR": 40,
-                    "WARNING": 30,
-                    "STATUS": 25,
-                    "INFO": 20,
-                    "DEBUG": 10,
-                    "EXTREMEDEBUG": 5,
-                    "NOTSET": 0}
-        
-        level = self.rm.config.get("loglevel")
-        self.logger.setLevel(self.loglevel[level])
-        self.extremedebug = (level == "EXTREMEDEBUG")
-
-    def info(self, msg, comp):
-        self.logger.info("[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))
-        
-    def debug(self, msg, comp):
-        self.logger.debug("[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))
+# Custom logger that uses our log record
+class HaizeaLogger(logging.Logger):
     
-    def edebug(self, msg, comp):
-        # Since there is such a huge amount of edebug messages, we use the
-        # extremedebug variable to decide if we call the log function
+    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra):
+        # Modify "extra" parameter keyword
+        haizeatime = ResourceManager.get_singleton().clock.get_time()
+        extra = { "haizeatime" : haizeatime}
+        return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra)
+    
+    def status(self, msg):
+        self.log(logging.getLevelName("STATUS"), msg)
+
+    def vdebug(self, msg):
+        # Since there is such a huge number of vdebug messages, we check the
+        # log level manually to decide if we call the log function or not.
         # (this actually saves quite a bit of cycles spent in logging functions
         # that ultimately determine that the message doesn't have to be printed)
-        if self.extremedebug:
-            self.logger.log(self.loglevel["EXTREMEDEBUG"],"[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))
-    
-    def status(self, msg, comp):
-        self.logger.log(self.loglevel["STATUS"],"[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))
-        
-    def warning(self, msg, comp):
-        self.logger.warning("[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))    
-        
-    def error(self, msg, comp):
-        self.logger.error("[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))        
-        
-    def log(self, level, msg, comp):
-        if level != "EXTREMEDEBUG" or self.extremedebug:
-            self.logger.log(self.loglevel[level], "[%s] %s %s" % (self.rm.clock.get_time(), comp.ljust(7), msg))        
\ No newline at end of file
+        if self.getEffectiveLevel() <= LOGLEVEL_VDEBUG:
+            self.log(LOGLEVEL_VDEBUG, msg)
\ No newline at end of file
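
A sketch of the pieces working together (the handler/formatter setup
mirrors init_logging in rm.py below; note that HaizeaLogger.makeRecord
calls ResourceManager.get_singleton().clock.get_time(), so a resource
manager and its clock must already exist before the first message is
logged):

    import logging
    from haizea.common.constants import LOGLEVEL_VDEBUG
    from haizea.resourcemanager.log import HaizeaLogger

    # Assumes the ResourceManager singleton (and its clock) is in place.
    logging.setLoggerClass(HaizeaLogger)  # must precede getLogger() calls
    logger = logging.getLogger("SCHED")

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('[%(haizeatime)s] %(name)-15s  - %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(LOGLEVEL_VDEBUG)

    logger.status("emitted: STATUS (25) passes the VDEBUG (5) threshold")
    logger.vdebug("emitted only when the effective level is VDEBUG")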

Modified: trunk/src/haizea/resourcemanager/resourcepool.py
===================================================================
--- trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -19,10 +19,12 @@
 from haizea.common.utils import vnodemapstr
 import haizea.common.constants as constants
 import haizea.resourcemanager.enact.actions as actions
+import logging 
 
 class ResourcePool(object):
     def __init__(self, rm):
         self.rm = rm
+        self.logger = logging.getLogger("RESOURCEPOOL")
         
         self.info = None
         self.vm = None
@@ -57,7 +59,7 @@
             self.vm = enact.vm(self) #IGNORE:E0602
             self.storage = enact.storage(self) #IGNORE:E0602
         except Exception, msg:
-            self.rm.logger.error("Unable to load enactment modules for mode '%s'" % mode, constants.RM)
+            self.logger.error("Unable to load enactment modules for mode '%s'" % mode)
             raise                
         
         
@@ -121,7 +123,7 @@
         try:
             self.vm.start(startAction)
         except Exception, msg:
-            self.rm.logger.error("Enactment of start VM failed: %s" % msg, constants.RM)
+            self.logger.error("Enactment of start VM failed: %s" % msg)
             self.rm.cancel_lease(lease.id)
         
     def stopVMs(self, lease, rr):
@@ -130,7 +132,7 @@
         try:
             self.vm.stop(stopAction)
         except Exception, msg:
-            self.rm.logger.error("Enactment of end VM failed: %s" % msg, constants.RM)
+            self.logger.error("Enactment of end VM failed: %s" % msg)
             self.rm.cancel_lease(lease)
         
     def transferFiles(self):
@@ -147,7 +149,7 @@
     
     # TODO: This has to be divided into the above three functions.
     def addImageToNode(self, nod_id, imagefile, imagesize, vnodes, timeout=None):
-        self.rm.logger.debug("Adding image for leases=%s in nod_id=%i" % (vnodes, nod_id), constants.ENACT)
+        self.logger.debug("Adding image for leases=%s in nod_id=%i" % (vnodes, nod_id))
         self.getNode(nod_id).printFiles()
 
         if self.reusealg == constants.REUSE_NONE:
@@ -172,11 +174,11 @@
                     reqsize = poolsize + imagesize
                     if reqsize > self.maxcachesize:
                         desiredsize = self.maxcachesize - imagesize
-                        self.rm.logger.debug("Adding the image would make the size of pool in node %i = %iMB. Will try to bring it down to %i" % (nod_id, reqsize, desiredsize), constants.ENACT)
+                        self.logger.debug("Adding the image would make the size of pool in node %i = %iMB. Will try to bring it down to %i" % (nod_id, reqsize, desiredsize))
                         self.getNode(nod_id).printFiles()
                         success = self.getNode(nod_id).purgePoolDownTo(self.maxcachesize)
                         if not success:
-                            self.rm.logger.debug("Unable to add to pool. Creating tainted image instead.", constants.ENACT)
+                            self.logger.debug("Unable to add to pool. Creating tainted image instead.")
                             # If unsuccessful, this just means we couldn't add the image
                             # to the pool. We will have to create tainted images to be used
                             # only by these leases
@@ -204,7 +206,7 @@
         try:
             self.vm.suspend(suspendAction)
         except Exception, msg:
-            self.rm.logger.error("Enactment of suspend VM failed: %s" % msg, constants.RM)
+            self.logger.error("Enactment of suspend VM failed: %s" % msg)
             self.rm.cancel_lease(lease)
     
     def verifySuspend(self, lease, rr):
@@ -224,7 +226,7 @@
         try:
             self.vm.resume(resumeAction)
         except Exception, msg:
-            self.rm.logger.error("Enactment of resume VM failed: %s" % msg, constants.RM)
+            self.logger.error("Enactment of resume VM failed: %s" % msg)
             self.rm.cancel_lease(lease)
     
     def verifyResume(self, lease, rr):
@@ -269,7 +271,7 @@
         return self.EDFnode
         
     def addTaintedImageToNode(self, pnode, diskImageID, imagesize, lease_id, vnode):
-        self.rm.logger.debug("Adding tainted image for L%iV%i in pnode=%i" % (lease_id, vnode, pnode), constants.ENACT)
+        self.logger.debug("Adding tainted image for L%iV%i in pnode=%i" % (lease_id, vnode, pnode))
         self.getNode(pnode).printFiles()
         imagefile = self.storage.resolveToFile(lease_id, vnode, diskImageID)
         img = VMImageFile(imagefile, imagesize, diskImageID=diskImageID, masterimg=False)
@@ -282,10 +284,10 @@
     def checkImage(self, pnode, lease_id, vnode, imagefile):
         node = self.getNode(pnode)
         if self.rm.config.get("lease-preparation") == constants.DEPLOYMENT_UNMANAGED:
-            self.rm.logger.debug("Adding tainted image for L%iV%i in node %i" % (lease_id, vnode, pnode), constants.ENACT)
+            self.logger.debug("Adding tainted image for L%iV%i in node %i" % (lease_id, vnode, pnode))
         elif self.reusealg == constants.REUSE_NONE:
             if not node.hasTaintedImage(lease_id, vnode, imagefile):
-                self.rm.logger.debug("ERROR: Image for L%iV%i is not deployed on node %i" % (lease_id, vnode, pnode), constants.ENACT)
+                self.logger.debug("ERROR: Image for L%iV%i is not deployed on node %i" % (lease_id, vnode, pnode))
         elif self.reusealg == constants.REUSE_IMAGECACHES:
             poolentry = node.getPoolEntry(imagefile, lease_id=lease_id, vnode=vnode)
             if poolentry == None:
@@ -293,10 +295,10 @@
                 # we had to fall back on creating a tainted image right
                 # when the image was transferred. We have to check this.
                 if not node.hasTaintedImage(lease_id, vnode, imagefile):
-                    self.rm.logger.error("ERROR: Image for L%iV%i is not in pool on node %i, and there is no tainted image" % (lease_id, vnode, pnode), constants.ENACT)
+                    self.logger.error("ERROR: Image for L%iV%i is not in pool on node %i, and there is no tainted image" % (lease_id, vnode, pnode))
             else:
                 # Create tainted image
-                self.rm.logger.debug("Adding tainted image for L%iV%i in node %i" % (lease_id, vnode, pnode), constants.ENACT)
+                self.logger.debug("Adding tainted image for L%iV%i in node %i" % (lease_id, vnode, pnode))
                 node.printFiles()
                 img = VMImageFile(imagefile, poolentry.filesize, masterimg=False)
                 img.addMapping(lease_id, vnode)
@@ -317,7 +319,7 @@
         node = self.getNode(pnode)
         node.printFiles()
         if self.reusealg == constants.REUSE_IMAGECACHES:
-            self.rm.logger.debug("Removing pooled images for L%iV%i in node %i" % (lease, vnode, pnode), constants.ENACT)
+            self.logger.debug("Removing pooled images for L%iV%i in node %i" % (lease, vnode, pnode))
             toremove = []
             for img in node.getPoolImages():
                 if (lease, vnode) in img.mappings:
@@ -331,7 +333,7 @@
                 node.files.remove(img)
             node.printFiles()
 
-        self.rm.logger.debug("Removing tainted images for L%iV%i in node %i" % (lease, vnode, pnode), constants.ENACT)
+        self.logger.debug("Removing tainted images for L%iV%i in node %i" % (lease, vnode, pnode))
         node.removeTainted(lease, vnode)
 
         node.printFiles()
@@ -340,7 +342,7 @@
         
     def addRAMFileToNode(self, pnode, lease_id, vnode, size):
         node = self.getNode(pnode)
-        self.rm.logger.debug("Adding RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode), constants.ENACT)
+        self.logger.debug("Adding RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
         node.printFiles()
         f = RAMImageFile("RAM_L%iV%i" % (lease_id, vnode), size, lease_id, vnode)
         node.addFile(f)        
@@ -349,7 +351,7 @@
 
     def removeRAMFileFromNode(self, pnode, lease_id, vnode):
         node = self.getNode(pnode)
-        self.rm.logger.debug("Removing RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode), constants.ENACT)
+        self.logger.debug("Removing RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
         node.printFiles()
         node.removeRAMFile(lease_id, vnode)
         node.printFiles()
@@ -361,7 +363,7 @@
 class Node(object):
     def __init__(self, resourcepool, nod_id, hostname, capacity):
         self.resourcepool = resourcepool
-        self.logger = resourcepool.rm.logger
+        self.logger = logging.getLogger("RESOURCEPOOL")
         self.nod_id = nod_id
         self.hostname = hostname
         self.files = []
@@ -405,7 +407,7 @@
         elif len(image) == 1:
             return image[0]
         elif len(image) > 1:
-            self.logger.warning("More than one tainted image for L%iV%i on node %i" % (lease_id, vnode, self.nod_id), constants.ENACT)
+            self.logger.warning("More than one tainted image for L%iV%i on node %i" % (lease_id, vnode, self.nod_id))
             return image[0]
         
     def addToPool(self, imagefile, lease_id, vnode, timeout):
@@ -478,7 +480,7 @@
         images = ""
         if len(self.files) > 0:
             images = ", ".join([str(img) for img in self.files])
-        self.logger.edebug("Node %i has %iMB %s" % (self.nod_id, self.getTotalFileSize(), images), constants.ENACT)
+        self.logger.vdebug("Node %i has %iMB %s" % (self.nod_id, self.getTotalFileSize(), images))
 
     def getState(self):
         if self.vm_doing == constants.DOING_IDLE and self.transfer_doing == constants.DOING_TRANSFER:

Modified: trunk/src/haizea/resourcemanager/rm.py
===================================================================
--- trunk/src/haizea/resourcemanager/rm.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/rm.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -40,10 +40,10 @@
 from haizea.resourcemanager.resourcepool import ResourcePool
 from haizea.resourcemanager.scheduler import Scheduler
 from haizea.resourcemanager.rpcserver import RPCServer
-from haizea.resourcemanager.log import Logger
-from haizea.common.utils import abstract, roundDateTime
+from haizea.common.utils import abstract, roundDateTime, Singleton
 
 import operator
+import logging
 import signal
 import sys, os
 from time import sleep
@@ -54,7 +54,7 @@
 DAEMON_STDERR = "/var/tmp/haizea.err"
 DEFAULT_LOGFILE = "/var/tmp/haizea.log"
 
-class ResourceManager(object):
+class ResourceManager(Singleton):
     """The resource manager
     
     This class is the root of Haizea. Pretty much everything else (scheduler,
@@ -78,12 +78,13 @@
         
         mode = config.get("mode")
         clock = config.get("clock")
-
+        
         if mode == "simulated" and clock == constants.CLOCK_SIMULATED:
             # Simulations always run in the foreground
             self.daemon = False
-            # Logger
-            self.logger = Logger(self)
+            
+            self.init_logging()
+
             # The clock
             starttime = config.get("starttime")
             self.clock = SimulatedClock(self, starttime)
@@ -91,12 +92,9 @@
         elif mode == "opennebula" or (mode == "simulated" and clock == constants.CLOCK_REAL):
             self.daemon = daemon
             self.pidfile = pidfile
-            # Logger
-            if daemon:
-                self.logger = Logger(self, DEFAULT_LOGFILE)
-            else:
-                self.logger = Logger(self)
 
+            self.init_logging()
+
             # The clock
             wakeup_interval = config.get("wakeup-interval")
             non_sched = config.get("non-schedulable-interval")
@@ -123,7 +121,23 @@
             
         # Statistics collection 
         self.accounting = accounting.AccountingDataCollection(self, self.config.get("datafile"))
+        
+        self.logger = logging.getLogger("RM")
 
+    def init_logging(self):       
+        from haizea.resourcemanager.log import HaizeaLogger
+        logger = logging.getLogger("")
+        if self.daemon:
+            handler = logging.FileHandler(DEFAULT_LOGFILE)
+        else:
+            handler = logging.StreamHandler()
+        formatter = logging.Formatter('[%(haizeatime)s] %(name)-15s  - %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        level = logging.getLevelName(self.config.get("loglevel"))
+        logger.setLevel(level)
+        logging.setLoggerClass(HaizeaLogger)
+        
     def daemonize(self):
         """Daemonizes the Haizea process.
         
@@ -174,7 +188,7 @@
 
     def start(self):
         """Starts the resource manager"""
-        self.logger.status("Starting resource manager", constants.RM)
+        self.logger.info("Starting resource manager")
 
         # Create counters to keep track of interesting data.
         self.accounting.create_counter(constants.COUNTER_ARACCEPTED, constants.AVERAGE_NONE)
@@ -196,7 +210,7 @@
     def stop(self):
         """Stops the resource manager"""
         
-        self.logger.status("Stopping resource manager", constants.RM)
+        self.logger.status("Stopping resource manager")
         
         # Stop collecting data (this finalizes counters)
         self.accounting.stop()
@@ -204,9 +218,9 @@
         # TODO: When gracefully stopping mid-scheduling, we need to figure out what to
         #       do with leases that are still running.
 
-        self.logger.status("  Completed best-effort leases: %i" % self.accounting.data.counters[constants.COUNTER_BESTEFFORTCOMPLETED], constants.RM)
-        self.logger.status("  Accepted AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARACCEPTED], constants.RM)
-        self.logger.status("  Rejected AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARREJECTED], constants.RM)
+        self.logger.status("  Completed best-effort leases: %i" % self.accounting.data.counters[constants.COUNTER_BESTEFFORTCOMPLETED])
+        self.logger.status("  Accepted AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARACCEPTED])
+        self.logger.status("  Rejected AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARREJECTED])
         
         # In debug mode, dump the lease descriptors.
         for lease in self.scheduler.completedleases.entries.values():
@@ -255,7 +269,7 @@
             self.scheduler.schedule(nexttime)
         except Exception, msg:
             # Exit if something goes horribly wrong
-            self.logger.error("Exception in scheduling function. Dumping state..." , constants.RM)
+            self.logger.error("Exception in scheduling function. Dumping state..." )
             self.print_stats("ERROR", verbose=True)
             raise      
 
@@ -267,7 +281,7 @@
             self.scheduler.process_reservations(time)
         except Exception, msg:
             # Exit if something goes horribly wrong
-            self.logger.error("Exception when processing reservations. Dumping state..." , constants.RM)
+            self.logger.error("Exception when processing reservations. Dumping state..." )
             self.print_stats("ERROR", verbose=True)
             raise      
 
@@ -283,25 +297,25 @@
         
         # Print clock stats and the next changepoint in slot table
         self.clock.print_stats(loglevel)
-        self.logger.log(loglevel, "Next change point (in slot table): %s" % self.get_next_changepoint(), constants.RM)
+        self.logger.log(loglevel, "Next change point (in slot table): %s" % self.get_next_changepoint())
 
         # Print descriptors of scheduled leases
         scheduled = self.scheduler.scheduledleases.entries.keys()
-        self.logger.log(loglevel, "Scheduled requests: %i" % len(scheduled), constants.RM)
+        self.logger.log(loglevel, "Scheduled requests: %i" % len(scheduled))
         if verbose and len(scheduled)>0:
-            self.logger.log(loglevel, "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv", constants.RM)
+            self.logger.log(loglevel, "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
             for k in scheduled:
                 lease = self.scheduler.scheduledleases.get_lease(k)
                 lease.print_contents(loglevel=loglevel)
-            self.logger.log(loglevel, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", constants.RM)
+            self.logger.log(loglevel, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
 
         # Print queue size and descriptors of queued leases
-        self.logger.log(loglevel, "Queue size: %i" % self.scheduler.queue.length(), constants.RM)
+        self.logger.log(loglevel, "Queue size: %i" % self.scheduler.queue.length())
         if verbose and self.scheduler.queue.length()>0:
-            self.logger.log(loglevel, "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv", constants.RM)
+            self.logger.log(loglevel, "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
             for lease in self.scheduler.queue:
                 lease.print_contents(loglevel=loglevel)
-            self.logger.log(loglevel, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", constants.RM)
+            self.logger.log(loglevel, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
 
     def get_next_changepoint(self):
         """Return next changepoint in the slot table"""
@@ -319,7 +333,7 @@
             self.scheduler.notify_event(lease_id, event)
         except Exception, msg:
             # Exit if something goes horribly wrong
-            self.logger.error("Exception when notifying an event for lease %i. Dumping state..." % lease_id , constants.RM)
+            self.logger.error("Exception when notifying an event for lease %i. Dumping state..." % lease_id )
             self.print_stats("ERROR", verbose=True)
             raise            
         
@@ -333,7 +347,7 @@
             self.scheduler.cancel_lease(lease_id)
         except Exception, msg:
             # Exit if something goes horribly wrong
-            self.logger.error("Exception when canceling lease %i. Dumping state..." % lease_id, constants.RM)
+            self.logger.error("Exception when canceling lease %i. Dumping state..." % lease_id)
             self.print_stats("ERROR", verbose=True)
             raise          
 
@@ -397,7 +411,7 @@
         Clock.__init__(self, rm)
         self.starttime = starttime
         self.time = starttime
-        self.logger = self.rm.logger
+        self.logger = logging.getLogger("CLOCK")
         self.statusinterval = self.rm.config.get("status-message-interval")
        
     def get_time(self):
@@ -426,7 +440,7 @@
         The simulated clock can only work in conjunction with the
         tracefile request frontend.
         """
-        self.rm.logger.status("Starting simulated clock", constants.CLOCK)
+        self.logger.status("Starting simulated clock")
         self.rm.accounting.start(self.get_start_time())
         prevstatustime = self.time
         done = False
@@ -458,7 +472,7 @@
             self.time, done = self.__get_next_time()
                     
         # Stop the resource manager
-        self.rm.logger.status("Stopping simulated clock", constants.CLOCK)
+        self.logger.status("Stopping simulated clock")
         self.rm.stop()
         
     def print_stats(self, loglevel):
@@ -467,13 +481,13 @@
         
     def __print_status(self):
         """Prints status summary."""
-        self.logger.status("STATUS ---Begin---", constants.RM)
-        self.logger.status("STATUS Completed best-effort leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_BESTEFFORTCOMPLETED], constants.RM)
-        self.logger.status("STATUS Queue size: %i" % self.rm.accounting.data.counters[constants.COUNTER_QUEUESIZE], constants.RM)
-        self.logger.status("STATUS Best-effort reservations: %i" % self.rm.scheduler.numbesteffortres, constants.RM)
-        self.logger.status("STATUS Accepted AR leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_ARACCEPTED], constants.RM)
-        self.logger.status("STATUS Rejected AR leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_ARREJECTED], constants.RM)
-        self.logger.status("STATUS ----End----", constants.RM)        
+        self.logger.status("STATUS ---Begin---")
+        self.logger.status("STATUS Completed best-effort leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_BESTEFFORTCOMPLETED])
+        self.logger.status("STATUS Queue size: %i" % self.rm.accounting.data.counters[constants.COUNTER_QUEUESIZE])
+        self.logger.status("STATUS Best-effort reservations: %i" % self.rm.scheduler.numbesteffortres)
+        self.logger.status("STATUS Accepted AR leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_ARACCEPTED])
+        self.logger.status("STATUS Rejected AR leases: %i" % self.rm.accounting.data.counters[constants.COUNTER_ARREJECTED])
+        self.logger.status("STATUS ----End----")        
     
     def __get_next_time(self):
         """Determines what is the next point in time to skip to.
@@ -490,9 +504,9 @@
         nextchangepoint = self.rm.get_next_changepoint()
         nextprematureend = self.rm.scheduler.slottable.getNextPrematureEnd(self.time)
         nextreqtime = tracefrontend.getNextReqTime()
-        self.rm.logger.debug("Next change point (in slot table): %s" % nextchangepoint, constants.CLOCK)
-        self.rm.logger.debug("Next request time: %s" % nextreqtime, constants.CLOCK)
-        self.rm.logger.debug("Next premature end: %s" % nextprematureend, constants.CLOCK)
+        self.logger.debug("Next change point (in slot table): %s" % nextchangepoint)
+        self.logger.debug("Next request time: %s" % nextreqtime)
+        self.logger.debug("Next premature end: %s" % nextprematureend)
         
         # The previous time is now
         prevtime = self.time
@@ -541,7 +555,7 @@
         # If we didn't arrive at a new time, and we're not done, we've fallen into
         # an infinite loop. This is A Bad Thing(tm).
         if newtime == prevtime and done != True:
-            self.rm.logger.error("Simulated clock has fallen into an infinite loop. Dumping state..." , constants.CLOCK)
+            self.logger.error("Simulated clock has fallen into an infinite loop. Dumping state..." )
             self.rm.print_stats("ERROR", verbose=True)
             raise Exception, "Simulated clock has fallen into an infinite loop."
         
@@ -579,6 +593,7 @@
             self.lastwakeup = None
         else:
             self.lastwakeup = roundDateTime(now())
+        self.logger = logging.getLogger("CLOCK")
         self.starttime = self.get_time()
         self.nextschedulable = None
         self.nextperiodicwakeup = None
@@ -616,7 +631,7 @@
         The clock keeps on tickin' until a SIGINT signal (Ctrl-C if running in the
         foreground) or a SIGTERM signal is received.
         """
-        self.rm.logger.status("Starting clock", constants.CLOCK)
+        self.logger.status("Starting clock")
         self.rm.accounting.start(self.get_start_time())
         
         signal.signal(signal.SIGINT, self.signalhandler_gracefulstop)
@@ -625,14 +640,14 @@
         done = False
         # Main loop
         while not done:
-            self.rm.logger.status("Waking up to manage resources", constants.CLOCK)
+            self.logger.status("Waking up to manage resources")
             
             # Save the waking time. We want to use a consistent time in the 
             # resource manager operations (if we use now(), we'll get a different
             # time every time)
             if not self.fastforward:
                 self.lastwakeup = roundDateTime(self.get_time())
-            self.rm.logger.status("Wake-up time recorded as %s" % self.lastwakeup, constants.CLOCK)
+            self.logger.status("Wake-up time recorded as %s" % self.lastwakeup)
                 
             # Next schedulable time
             self.nextschedulable = roundDateTime(self.lastwakeup + self.non_sched)
@@ -657,11 +672,11 @@
                 # We need to wake up earlier to handle a slot table event
                 nextwakeup = nextchangepoint
                 self.rm.scheduler.slottable.getNextChangePoint(self.lastwakeup)
-                self.rm.logger.status("Going back to sleep. Waking up at %s to handle slot table event." % nextwakeup, constants.CLOCK)
+                self.logger.status("Going back to sleep. Waking up at %s to handle slot table event." % nextwakeup)
             else:
                 # Nothing to do before waking up
                 nextwakeup = self.nextperiodicwakeup
-                self.rm.logger.status("Going back to sleep. Waking up at %s to see if something interesting has happened by then." % nextwakeup, constants.CLOCK)
+                self.logger.status("Going back to sleep. Waking up at %s to see if something interesting has happened by then." % nextwakeup)
             
             # Sleep
             if not self.fastforward:
@@ -680,8 +695,8 @@
             sigstr = " (SIGTERM)"
         elif signum == signal.SIGINT:
             sigstr = " (SIGINT)"
-        self.rm.logger.status("Received signal %i%s" %(signum, sigstr), constants.CLOCK)
-        self.rm.logger.status("Stopping gracefully...", constants.CLOCK)
+        self.logger.status("Received signal %i%s" %(signum, sigstr))
+        self.logger.status("Stopping gracefully...")
         self.rm.stop()
         sys.exit()
 
@@ -694,6 +709,7 @@
     except ConfigException, msg:
         print >> sys.stderr, "Error in configuration file:"
         print >> sys.stderr, msg
-        exit(1)        
+        exit(1)
+    from haizea.resourcemanager.rm import ResourceManager
     RM = ResourceManager(CONFIG)
     RM.start()
\ No newline at end of file
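
The logging setup in rm.py assumes a logger class that adds status() and
vdebug() methods for the two custom levels. A minimal sketch of what such a
class might look like (the real HaizeaLogger referenced above is defined
elsewhere in the tree and may differ; the %(haizeatime)s field in the format
string suggests it also stamps each record with the current Haizea time,
which is omitted here):

    import logging

    LOGLEVEL_VDEBUG = 5    # below DEBUG (10)
    LOGLEVEL_STATUS = 25   # between INFO (20) and WARNING (30)

    # Registering the names lets logging.getLevelName("VDEBUG") and
    # getLevelName("STATUS") resolve back to these numeric levels, which
    # is what the loglevel lookup in the setup code relies on.
    logging.addLevelName(LOGLEVEL_VDEBUG, "VDEBUG")
    logging.addLevelName(LOGLEVEL_STATUS, "STATUS")

    class HaizeaLogger(logging.Logger):
        # Convenience wrappers for the two custom levels.
        def status(self, msg):
            self.log(LOGLEVEL_STATUS, msg)
        def vdebug(self, msg):
            self.log(LOGLEVEL_VDEBUG, msg)

    # Installed before the component loggers are first requested, so
    # logging.getLogger("SCHEDULER"), logging.getLogger("CLOCK"), etc.
    # come back as HaizeaLogger instances.
    logging.setLoggerClass(HaizeaLogger)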

Modified: trunk/src/haizea/resourcemanager/rpcserver.py
===================================================================
--- trunk/src/haizea/resourcemanager/rpcserver.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/rpcserver.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -17,6 +17,7 @@
 # -------------------------------------------------------------------------- #
 
 import threading
+import logging
 from SimpleXMLRPCServer import SimpleXMLRPCServer
 
 DEFAULT_HAIZEA_PORT = 42493
@@ -24,7 +25,7 @@
 class RPCServer(object):
     def __init__(self, rm):
         self.rm = rm
-        self.logger = self.rm.logger
+        self.logger = logging.getLogger("RPCSERVER")
         self.port = DEFAULT_HAIZEA_PORT
         self.server = SimpleXMLRPCServer(("localhost", self.port), allow_none=True)
         self.register_rpc(self.test_func)
@@ -43,11 +44,11 @@
         self.server.register_function(func)
         
     def serve(self):
-        self.logger.info("RPC server started on port %i" % self.port, "RPC")
+        self.logger.info("RPC server started on port %i" % self.port)
         self.server.serve_forever()        
         
     def test_func(self):
-        self.logger.info("Test RPC function called", "RPC")
+        self.logger.info("Test RPC function called")
         return 0
     
     def cancel_lease(self, lease_id):
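
The RPC server binds SimpleXMLRPCServer to localhost:42493 with
allow_none=True. A client-side smoke test, sketched with the Python 2
standard library (assumes a Haizea instance is running; test_func is
registered in RPCServer.__init__ and returns 0, and other methods registered
the same way are invoked identically):

    import xmlrpclib

    rpc = xmlrpclib.ServerProxy("http://localhost:42493", allow_none=True)
    print rpc.test_func()   # expect: 0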

Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/scheduler.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -38,6 +38,7 @@
 from haizea.resourcemanager.deployment.imagetransfer import ImageTransferDeployment
 from haizea.resourcemanager.datastruct import ARLease, ImmediateLease, VMResourceReservation 
 
+import logging
 
 class SchedException(Exception):
     """A simple exception class used for scheduling exceptions"""
@@ -75,7 +76,7 @@
     """
     def __init__(self, rm):
         self.rm = rm
-        self.logger = self.rm.logger
+        self.logger = logging.getLogger("SCHEDULER")
         self.slottable = SlotTable(self)
         self.queue = ds.Queue(self)
         self.scheduledleases = ds.LeaseTable(self)
@@ -161,7 +162,7 @@
         """Queues a best-effort lease request"""
         self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, lease_req.id)
         self.queue.enqueue(lease_req)
-        self.rm.logger.info("Received (and queueing) best-effort lease request #%i, %i nodes for %s." % (lease_req.id, lease_req.numnodes, lease_req.duration.requested), constants.SCHED)
+        self.logger.info("Received (and queueing) best-effort lease request #%i, %i nodes for %s." % (lease_req.id, lease_req.numnodes, lease_req.duration.requested))
 
     def add_pending_lease(self, lease_req):
         """
@@ -190,20 +191,20 @@
         """
         time = self.rm.clock.get_time()
         
-        self.rm.logger.info("Cancelling lease %i..." % lease_id, constants.SCHED)
+        self.logger.info("Cancelling lease %i..." % lease_id)
         if self.scheduledleases.has_lease(lease_id):
             # The lease is either running, or scheduled to run
             lease = self.scheduledleases.get_lease(lease_id)
             
             if lease.state == constants.LEASE_STATE_ACTIVE:
-                self.rm.logger.info("Lease %i is active. Stopping active reservation..." % lease_id, constants.SCHED)
+                self.logger.info("Lease %i is active. Stopping active reservation..." % lease_id)
                 rr = lease.get_active_reservations(time)[0]
                 if isinstance(rr, VMResourceReservation):
                     self._handle_unscheduled_end_vm(lease, rr, enact=True)
                 # TODO: Handle cancelations in middle of suspensions and
                 # resumptions                
             elif lease.state in [constants.LEASE_STATE_SCHEDULED, constants.LEASE_STATE_DEPLOYED]:
-                self.rm.logger.info("Lease %i is scheduled. Cancelling reservations." % lease_id, constants.SCHED)
+                self.logger.info("Lease %i is scheduled. Cancelling reservations." % lease_id)
                 rrs = lease.get_scheduled_reservations()
                 for r in rrs:
                     lease.remove_rr(r)
@@ -214,7 +215,7 @@
         elif self.queue.has_lease(lease_id):
             # The lease is in the queue, waiting to be scheduled.
             # Cancelling is as simple as removing it from the queue
-            self.rm.logger.info("Lease %i is in the queue. Removing..." % lease_id, constants.SCHED)
+            self.logger.info("Lease %i is in the queue. Removing..." % lease_id)
             l = self.queue.get_lease(lease_id)
             self.queue.remove_lease(l)
     
@@ -228,10 +229,10 @@
 
     
     def __process_ar_request(self, lease_req, nexttime):
-        self.rm.logger.info("Received AR lease request #%i, %i nodes from %s to %s." % (lease_req.id, lease_req.numnodes, lease_req.start.requested, lease_req.start.requested + lease_req.duration.requested), constants.SCHED)
-        self.rm.logger.debug("  Start   : %s" % lease_req.start, constants.SCHED)
-        self.rm.logger.debug("  Duration: %s" % lease_req.duration, constants.SCHED)
-        self.rm.logger.debug("  ResReq  : %s" % lease_req.requested_resources, constants.SCHED)
+        self.logger.info("Received AR lease request #%i, %i nodes from %s to %s." % (lease_req.id, lease_req.numnodes, lease_req.start.requested, lease_req.start.requested + lease_req.duration.requested))
+        self.logger.debug("  Start   : %s" % lease_req.start)
+        self.logger.debug("  Duration: %s" % lease_req.duration)
+        self.logger.debug("  ResReq  : %s" % lease_req.requested_resources)
         
         accepted = False
         try:
@@ -244,20 +245,20 @@
             # without avoiding preemption.
             # TODO: Roll this into the exact slot fitting algorithm
             try:
-                self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
-                self.rm.logger.debug("LEASE-%i Trying again without avoiding preemption" % lease_req.id, constants.SCHED)
+                self.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg))
+                self.logger.debug("LEASE-%i Trying again without avoiding preemption" % lease_req.id)
                 self.__schedule_ar_lease(lease_req, nexttime, avoidpreempt=False)
                 self.scheduledleases.add(lease_req)
                 self.rm.accounting.incr_counter(constants.COUNTER_ARACCEPTED, lease_req.id)
                 accepted = True
             except SchedException, msg:
                 self.rm.accounting.incr_counter(constants.COUNTER_ARREJECTED, lease_req.id)
-                self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
+                self.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg))
 
         if accepted:
-            self.rm.logger.info("AR lease request #%i has been accepted." % lease_req.id, constants.SCHED)
+            self.logger.info("AR lease request #%i has been accepted." % lease_req.id)
         else:
-            self.rm.logger.info("AR lease request #%i has been rejected." % lease_req.id, constants.SCHED)
+            self.logger.info("AR lease request #%i has been rejected." % lease_req.id)
         
     
     def __schedule_ar_lease(self, lease_req, nexttime, avoidpreempt=True):
@@ -268,7 +269,7 @@
             
             if len(preemptions) > 0:
                 leases = self.slottable.findLeasesToPreempt(preemptions, start, end)
-                self.rm.logger.info("Must preempt leases %s to make room for AR lease #%i" % ([l.id for l in leases], lease_req.id), constants.SCHED)
+                self.logger.info("Must preempt leases %s to make room for AR lease #%i" % ([l.id for l in leases], lease_req.id))
                 for lease in leases:
                     self.preempt(lease, time=start)
 
@@ -292,22 +293,22 @@
         newqueue = ds.Queue(self)
         while not done and not self.is_queue_empty():
             if self.numbesteffortres == self.maxres and self.slottable.isFull(nexttime):
-                self.rm.logger.debug("Used up all reservations and slot table is full. Skipping rest of queue.", constants.SCHED)
+                self.logger.debug("Used up all reservations and slot table is full. Skipping rest of queue.")
                 done = True
             else:
                 lease_req = self.queue.dequeue()
                 try:
-                    self.rm.logger.info("Next request in the queue is lease %i. Attempting to schedule..." % lease_req.id, constants.SCHED)
-                    self.rm.logger.debug("  Duration: %s" % lease_req.duration, constants.SCHED)
-                    self.rm.logger.debug("  ResReq  : %s" % lease_req.requested_resources, constants.SCHED)
+                    self.logger.info("Next request in the queue is lease %i. Attempting to schedule..." % lease_req.id)
+                    self.logger.debug("  Duration: %s" % lease_req.duration)
+                    self.logger.debug("  ResReq  : %s" % lease_req.requested_resources)
                     self.__schedule_besteffort_lease(lease_req, nexttime)
                     self.scheduledleases.add(lease_req)
                     self.rm.accounting.decr_counter(constants.COUNTER_QUEUESIZE, lease_req.id)
                 except SchedException, msg:
                     # Put back on queue
                     newqueue.enqueue(lease_req)
-                    self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
-                    self.rm.logger.info("Lease %i could not be scheduled at this time." % lease_req.id, constants.SCHED)
+                    self.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg))
+                    self.logger.info("Lease %i could not be scheduled at this time." % lease_req.id)
                     if not self.is_backfilling():
                         done = True
         
@@ -382,18 +383,18 @@
 
         
     def __process_im_request(self, lease_req, nexttime):
-        self.rm.logger.info("Received immediate lease request #%i (%i nodes)" % (lease_req.id, lease_req.numnodes), constants.SCHED)
-        self.rm.logger.debug("  Duration: %s" % lease_req.duration, constants.SCHED)
-        self.rm.logger.debug("  ResReq  : %s" % lease_req.requested_resources, constants.SCHED)
+        self.logger.info("Received immediate lease request #%i (%i nodes)" % (lease_req.id, lease_req.numnodes))
+        self.logger.debug("  Duration: %s" % lease_req.duration)
+        self.logger.debug("  ResReq  : %s" % lease_req.requested_resources)
         
         try:
             self.__schedule_immediate_lease(lease_req, nexttime=nexttime)
             self.scheduledleases.add(lease_req)
             self.rm.accounting.incr_counter(constants.COUNTER_IMACCEPTED, lease_req.id)
-            self.rm.logger.info("Immediate lease request #%i has been accepted." % lease_req.id, constants.SCHED)
+            self.logger.info("Immediate lease request #%i has been accepted." % lease_req.id)
         except SchedException, msg:
             self.rm.accounting.incr_counter(constants.COUNTER_IMREJECTED, lease_req.id)
-            self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
+            self.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg))
         
         
     def __schedule_immediate_lease(self, req, nexttime):
@@ -412,15 +413,15 @@
             raise SchedException, "The requested immediate lease is infeasible. Reason: %s" % msg
         
     def preempt(self, req, time):
-        self.rm.logger.info("Preempting lease #%i..." % (req.id), constants.SCHED)
-        self.rm.logger.edebug("Lease before preemption:", constants.SCHED)
+        self.logger.info("Preempting lease #%i..." % (req.id))
+        self.logger.vdebug("Lease before preemption:")
         req.print_contents()
         vmrr, susprr  = req.get_last_vmrr()
         suspendresumerate = self.rm.resourcepool.info.getSuspendResumeRate()
         
         if vmrr.state == constants.RES_STATE_SCHEDULED and vmrr.start >= time:
-            self.rm.logger.info("... lease #%i has been cancelled and requeued." % req.id, constants.SCHED)
-            self.rm.logger.debug("Lease was set to start in the middle of the preempting lease.", constants.SCHED)
+            self.logger.info("... lease #%i has been cancelled and requeued." % req.id)
+            self.logger.debug("Lease was set to start in the middle of the preempting lease.")
             req.state = constants.LEASE_STATE_PENDING
             if vmrr.backfill_reservation == True:
                 self.numbesteffortres -= 1
@@ -446,10 +447,10 @@
             # We can't suspend if we're under the suspend threshold
             suspendable = timebeforesuspend >= suspendthreshold
             if suspendable and (susptype == constants.SUSPENSION_ALL or (req.numnodes == 1 and susptype == constants.SUSPENSION_SERIAL)):
-                self.rm.logger.info("... lease #%i will be suspended at %s." % (req.id, time), constants.SCHED)
+                self.logger.info("... lease #%i will be suspended at %s." % (req.id, time))
                 self.slottable.suspend(req, time)
             else:
-                self.rm.logger.info("... lease #%i has been cancelled and requeued (cannot be suspended)" % req.id, constants.SCHED)
+                self.logger.info("... lease #%i has been cancelled and requeued (cannot be suspended)" % req.id)
                 req.state = constants.LEASE_STATE_PENDING
                 if vmrr.backfill_reservation == True:
                     self.numbesteffortres -= 1
@@ -469,15 +470,15 @@
                 self.scheduledleases.remove(req)
                 self.queue.enqueue_in_order(req)
                 self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, req.id)
-        self.rm.logger.edebug("Lease after preemption:", constants.SCHED)
+        self.logger.vdebug("Lease after preemption:")
         req.print_contents()
         
     def reevaluate_schedule(self, endinglease, nodes, nexttime, checkedleases):
-        self.rm.logger.debug("Reevaluating schedule. Checking for leases scheduled in nodes %s after %s" %(nodes, nexttime), constants.SCHED) 
+        self.logger.debug("Reevaluating schedule. Checking for leases scheduled in nodes %s after %s" %(nodes, nexttime)) 
         leases = self.scheduledleases.getNextLeasesScheduledInNodes(nexttime, nodes)
         leases = [l for l in leases if isinstance(l, ds.BestEffortLease) and not l in checkedleases]
         for l in leases:
-            self.rm.logger.debug("Found lease %i" % l.id, constants.SCHED)
+            self.logger.debug("Found lease %i" % l.id)
             l.print_contents()
             # Earliest time can't be earlier than time when images will be
             # available in node
@@ -497,7 +498,7 @@
     #-------------------------------------------------------------------#
 
     def _handle_start_vm(self, l, rr):
-        self.rm.logger.debug("LEASE-%i Start of handleStartVM" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleStartVM" % l.id)
         l.print_contents()
         if l.state == constants.LEASE_STATE_DEPLOYED:
             l.state = constants.LEASE_STATE_ACTIVE
@@ -512,9 +513,8 @@
                 for (vnode, pnode) in rr.nodes.items():
                     l.vmimagemap[vnode] = pnode
             except Exception, e:
-                self.rm.logger.error("ERROR when starting VMs.", constants.SCHED)
+                self.logger.error("ERROR when starting VMs.")
                 raise
-
         elif l.state == constants.LEASE_STATE_SUSPENDED:
             l.state = constants.LEASE_STATE_ACTIVE
             rr.state = constants.RES_STATE_ACTIVE
@@ -522,15 +522,15 @@
             # handled during the suspend/resume RRs
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_VM_RUN, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleStartVM" % l.id, constants.SCHED)
-        self.rm.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleStartVM" % l.id)
+        self.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
 
     # TODO: Replace enact with a saner way of handling leases that have failed or
     #       ended prematurely.
     #       Possibly factor out the "clean up" code to a separate function
     def _handle_end_vm(self, l, rr, enact=True):
-        self.rm.logger.debug("LEASE-%i Start of handleEndVM" % l.id, constants.SCHED)
-        self.rm.logger.edebug("LEASE-%i Before:" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleEndVM" % l.id)
+        self.logger.vdebug("LEASE-%i Before:" % l.id)
         l.print_contents()
         now_time = self.rm.clock.get_time()
         diff = now_time - rr.start
@@ -551,14 +551,14 @@
         if isinstance(l, ds.BestEffortLease):
             if rr.backfill_reservation == True:
                 self.numbesteffortres -= 1
-        self.rm.logger.edebug("LEASE-%i After:" % l.id, constants.SCHED)
+        self.logger.vdebug("LEASE-%i After:" % l.id)
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_IDLE, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleEndVM" % l.id, constants.SCHED)
-        self.rm.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleEndVM" % l.id)
+        self.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
 
     def _handle_unscheduled_end_vm(self, l, rr, enact=False):
-        self.rm.logger.info("LEASE-%i The VM has ended prematurely." % l.id, constants.SCHED)
+        self.logger.info("LEASE-%i The VM has ended prematurely." % l.id)
         self._handle_end_rr(l, rr)
         if rr.oncomplete == constants.ONCOMPLETE_SUSPEND:
             rrs = l.next_rrs(rr)
@@ -575,7 +575,7 @@
             self.reevaluate_schedule(l, rr.nodes.values(), nexttime, [])
 
     def _handle_start_suspend(self, l, rr):
-        self.rm.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id)
         l.print_contents()
         rr.state = constants.RES_STATE_ACTIVE
         self.rm.resourcepool.suspendVMs(l, rr)
@@ -584,11 +584,11 @@
             l.memimagemap[vnode] = pnode
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_VM_SUSPEND, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleStartSuspend" % l.id, constants.SCHED)
-        self.rm.logger.info("Suspending lease %i..." % (l.id), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleStartSuspend" % l.id)
+        self.logger.info("Suspending lease %i..." % (l.id))
 
     def _handle_end_suspend(self, l, rr):
-        self.rm.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id)
         l.print_contents()
         # TODO: React to incomplete suspend
         self.rm.resourcepool.verifySuspend(l, rr)
@@ -599,21 +599,21 @@
         self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, l.id)
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_IDLE, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleEndSuspend" % l.id, constants.SCHED)
-        self.rm.logger.info("Lease %i suspended." % (l.id), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleEndSuspend" % l.id)
+        self.logger.info("Lease %i suspended." % (l.id))
 
     def _handle_start_resume(self, l, rr):
-        self.rm.logger.debug("LEASE-%i Start of handleStartResume" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleStartResume" % l.id)
         l.print_contents()
         self.rm.resourcepool.resumeVMs(l, rr)
         rr.state = constants.RES_STATE_ACTIVE
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_VM_RESUME, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleStartResume" % l.id, constants.SCHED)
-        self.rm.logger.info("Resuming lease %i..." % (l.id), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleStartResume" % l.id)
+        self.logger.info("Resuming lease %i..." % (l.id))
 
     def _handle_end_resume(self, l, rr):
-        self.rm.logger.debug("LEASE-%i Start of handleEndResume" % l.id, constants.SCHED)
+        self.logger.debug("LEASE-%i Start of handleEndResume" % l.id)
         l.print_contents()
         # TODO: React to incomplete resume
         self.rm.resourcepool.verifyResume(l, rr)
@@ -622,8 +622,8 @@
             self.rm.resourcepool.removeRAMFileFromNode(pnode, l.id, vnode)
         l.print_contents()
         self.updateNodeVMState(rr.nodes.values(), constants.DOING_IDLE, l.id)
-        self.rm.logger.debug("LEASE-%i End of handleEndResume" % l.id, constants.SCHED)
-        self.rm.logger.info("Resumed lease %i" % (l.id), constants.SCHED)
+        self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
+        self.logger.info("Resumed lease %i" % (l.id))
 
     def _handle_end_rr(self, l, rr):
         self.slottable.removeReservation(rr)
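
The pattern throughout scheduler.py is uniform: every call of the form
self.rm.logger.X(msg, constants.SCHED) becomes self.logger.X(msg) on a
logger obtained by name, with the component tag now carried by the
%(name)-15s field of the root handler's format string. This works because
getLogger is a registry lookup:

    import logging

    # Repeated calls with the same name return the very same object, so
    # Scheduler, SlotTable, the clocks, etc. can each ask for their
    # logger directly instead of reaching through self.rm.
    a = logging.getLogger("SCHEDULER")
    b = logging.getLogger("SCHEDULER")
    assert a is b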

Modified: trunk/src/haizea/resourcemanager/slottable.py
===================================================================
--- trunk/src/haizea/resourcemanager/slottable.py	2008-08-07 11:09:59 UTC (rev 469)
+++ trunk/src/haizea/resourcemanager/slottable.py	2008-08-07 16:21:09 UTC (rev 470)
@@ -23,6 +23,7 @@
 from haizea.common.utils import roundDateTimeDelta
 import bisect
 import copy
+import logging
 
 class SlotFittingException(Exception):
     pass
@@ -81,6 +82,7 @@
     def __init__(self, scheduler):
         self.scheduler = scheduler
         self.rm = scheduler.rm
+        self.logger = logging.getLogger("SLOTTABLE")
         self.resourcepool = scheduler.rm.resourcepool
         self.nodes = NodeList()
         self.reservations = []
@@ -325,9 +327,9 @@
         # At this point we know if the lease is feasible, and if it
         # will require preemption.
         if not mustpreempt:
-           self.rm.logger.debug("The VM reservations for this lease are feasible without preemption.", constants.ST)
+           self.logger.debug("The VM reservations for this lease are feasible without preemption.")
         else:
-           self.rm.logger.debug("The VM reservations for this lease are feasible but will require preemption.", constants.ST)
+           self.logger.debug("The VM reservations for this lease are feasible but will require preemption.")
 
         # merge canfitnopreempt and canfitpreempt
         canfit = {}
@@ -343,7 +345,7 @@
 
         orderednodes = self.prioritizenodes(canfit, diskImageID, start, canpreempt, avoidpreempt)
             
-        self.rm.logger.debug("Node ordering: %s" % orderednodes, constants.ST)
+        self.logger.debug("Node ordering: %s" % orderednodes)
         
         # vnode -> pnode
         nodeassignment = {}
@@ -480,8 +482,8 @@
                     if preemptedEnough(amountToPreempt):
                         break
             
-        self.rm.logger.debug("Preempting leases (at start of reservation): %s" % [r.lease.id for r in atstart], constants.ST)
-        self.rm.logger.debug("Preempting leases (in middle of reservation): %s" % [r.lease.id for r in atmiddle], constants.ST)
+        self.logger.debug("Preempting leases (at start of reservation): %s" % [r.lease.id for r in atstart])
+        self.logger.debug("Preempting leases (in middle of reservation): %s" % [r.lease.id for r in atmiddle])
         
         leases = [r.lease for r in atstart|atmiddle]
         
@@ -681,7 +683,7 @@
             res_str = " (resuming)"
         if mustsuspend:
             susp_str = " (suspending)"
-        self.rm.logger.info("Lease #%i has been scheduled on nodes %s from %s%s to %s%s" % (lease.id, mappings.values(), start, res_str, end, susp_str), constants.ST)
+        self.logger.info("Lease #%i has been scheduled on nodes %s from %s%s to %s%s" % (lease.id, mappings.values(), start, res_str, end, susp_str))
 
         return resmrr, vmrr, susprr, reservation
 
@@ -700,11 +702,11 @@
                 maxend = start + remdur
                 end, canfit = self.availabilitywindow.findPhysNodesForVMs(numnodes, maxend)
         
-                self.rm.logger.debug("This lease can be scheduled from %s to %s" % (start, end), constants.ST)
+                self.logger.debug("This lease can be scheduled from %s to %s" % (start, end))
                 
                 if end < maxend:
                     mustsuspend=True
-                    self.rm.logger.debug("This lease will require suspension (maxend = %s)" % (maxend), constants.ST)
+                    self.logger.debug("This lease will require suspension (maxend = %s)" % (maxend))
                     
                     if suspendable:
                         # If the lease is suspendable...
@@ -712,7 +714,7 @@
                             if end-start > suspendthreshold:
                                 break
                             else:
-                                self.rm.logger.debug("This starting time does not meet the suspend threshold (%s < %s)" % (end-start, suspendthreshold), constants.ST)
+                                self.logger.debug("This starting time does not meet the suspend threshold (%s < %s)" % (end-start, suspendthreshold))
                                 start = None
                         else:
                             pass
@@ -776,7 +778,7 @@
             if self.availabilitywindow.fitAtStart(nodes=nodes) >= lease.numnodes:
                 (end, canfit) = self.availabilitywindow.findPhysNodesForVMs(lease.numnodes, originalstart)
                 if end == originalstart and set(nodes) <= set(canfit.keys()):
-                    self.rm.logger.debug("Can slide back to %s" % p, constants.ST)
+                    self.logger.debug("Can slide back to %s" % p)
                     newstart = p
                     break
         if newstart == None:
@@ -806,7 +808,7 @@
                 vmrrnew.prematureend -= diff
             self.updateReservationWithKeyChange(vmrr, vmrrnew)
             self.dirty()
-            self.rm.logger.edebug("New lease descriptor (after slideback):", constants.ST)
+            self.logger.vdebug("New lease descriptor (after slideback):")
             lease.print_contents()
 
 
@@ -938,7 +940,7 @@
 class AvailabilityWindow(object):
     def __init__(self, slottable):
         self.slottable = slottable
-        self.logger = slottable.rm.logger
+        self.logger = logging.getLogger("SLOTTABLE.WIN")
         self.time = None
         self.resreq = None
         self.onlynodes = None
@@ -1042,7 +1044,7 @@
             
                     
     def printContents(self, nodes = None, withpreemption = False):
-        if self.logger.extremedebug:
+        if self.logger.getEffectiveLevel() == constants.LOGLEVEL_VDEBUG:
             if nodes == None:
                 physnodes = self.avail.keys()
             else:
@@ -1052,7 +1054,7 @@
                 p = "(with preemption)"
             else:
                 p = "(without preemption)"
-            self.logger.edebug("AVAILABILITY WINDOW (time=%s, nodes=%s) %s"%(self.time, nodes, p), constants.ST)
+            self.logger.vdebug("AVAILABILITY WINDOW (time=%s, nodes=%s) %s"%(self.time, nodes, p))
             for n in physnodes:
                 contents = "Node %i --- " % n
                 for x in self.avail[n]:
@@ -1069,7 +1071,7 @@
                             canfit = x.canfit
                         contents += "%s" % res
                     contents += "} (Fits: %i) ]  " % canfit
-                self.logger.edebug(contents, constants.ST)
+                self.logger.vdebug(contents)
                 
 
                 
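One side note on the guard introduced in printContents: the test
getEffectiveLevel() == constants.LOGLEVEL_VDEBUG only fires when the
configured level is exactly VDEBUG. The stock idiom for skipping expensive
log formatting is isEnabledFor, which also covers any level at or below the
threshold; a sketch (the dump builder is a hypothetical stand-in for the
per-node availability walk):

    import logging

    LOGLEVEL_VDEBUG = 5   # mirrors constants.LOGLEVEL_VDEBUG

    def build_dump():
        # stand-in for formatting the availability window contents
        return "Node 1 --- [ ... ]"

    logger = logging.getLogger("SLOTTABLE.WIN")
    # True whenever the effective level is <= VDEBUG, not only when it
    # is exactly VDEBUG.
    if logger.isEnabledFor(LOGLEVEL_VDEBUG):
        logger.log(LOGLEVEL_VDEBUG, build_dump())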


