[haizea-commit] r492 - in trunk/src/haizea: cli common resourcemanager resourcemanager/deployment resourcemanager/frontends

haizea-commit at mailman.cs.uchicago.edu
Thu Sep 11 11:48:08 CDT 2008


Author: borja
Date: 2008-09-11 11:48:08 -0500 (Thu, 11 Sep 2008)
New Revision: 492

Modified:
   trunk/src/haizea/cli/rpc_commands.py
   trunk/src/haizea/common/utils.py
   trunk/src/haizea/resourcemanager/configfile.py
   trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
   trunk/src/haizea/resourcemanager/deployment/unmanaged.py
   trunk/src/haizea/resourcemanager/frontends/opennebula.py
   trunk/src/haizea/resourcemanager/frontends/rpc.py
   trunk/src/haizea/resourcemanager/rm.py
Log:
Miscellaneous code cleaning, most of it required by the previous two revisions: lease and reservation state constants move from haizea.common.constants onto the Lease and ResourceReservation classes, camelCase date helpers become snake_case (roundDateTime -> round_datetime), image transfer time estimation moves from the lease objects into the imagetransfer deployment module, and the resource manager now submits every lease through a single scheduler.request_lease() call.

Modified: trunk/src/haizea/cli/rpc_commands.py
===================================================================
--- trunk/src/haizea/cli/rpc_commands.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/cli/rpc_commands.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -15,13 +15,13 @@
 # See the License for the specific language governing permissions and        #
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
+import haizea.common.defaults as defaults
+from haizea.resourcemanager.datastruct import Lease
 from haizea.cli.optionparser import OptionParser, Option
-from haizea.common.constants import state_str
-from mx.DateTime import TimeDelta
-import haizea.common.defaults as defaults
 from haizea.cli import Command
 import xmlrpclib
 import sys
+from mx.DateTime import TimeDelta
 from mx.DateTime import ISO
 
 class RPCCommand(Command):
@@ -226,7 +226,7 @@
     
 def pretty_print_rpcvalue(name, value):
     if name == "state":
-        value = state_str(value)
+        value = Lease.state_str[value]
     elif name == "duration_req":
         value = TimeDelta(seconds=value)
     elif name == "start_req":

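The state_str change above swaps a free function from haizea.common.constants for a class-attribute lookup on Lease. A minimal sketch of the pattern, assuming datastruct.py now scopes the state constants and their printable names to the class (the state names below are the ones that appear in this diff; the integer values and printable strings are illustrative):

    class Lease(object):
        # State constants, moved here from haizea.common.constants
        # (values illustrative).
        STATE_NEW       = 0
        STATE_SCHEDULED = 3
        STATE_READY     = 4
        STATE_PREPARING = 5

        # Maps a state constant to a printable name, used by the CLI.
        state_str = {STATE_NEW:       "New",
                     STATE_SCHEDULED: "Scheduled",
                     STATE_READY:     "Ready",
                     STATE_PREPARING: "Preparing"}
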
Modified: trunk/src/haizea/common/utils.py
===================================================================
--- trunk/src/haizea/common/utils.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/common/utils.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -33,10 +33,10 @@
     name = profile + "_" + name
     return name
     
-def roundDateTimeDelta(d):
+def round_datetime_delta(d):
     return DateTime.DateTimeDelta(d.day, d.hour, d.minute, int(ceil(d.second)))
 
-def roundDateTime(d):
+def round_datetime(d):
     d += DateTime.TimeDelta(seconds=0.5)
     return DateTime.DateTime(d.year, d.month, d.day, d.hour, d.minute, int(floor(d.second)))
 
@@ -86,7 +86,7 @@
 def estimate_transfer_time(size, bandwidth):
     bandwidthMBs = float(bandwidth) / 8
     seconds = size / bandwidthMBs
-    return roundDateTimeDelta(DateTime.TimeDelta(seconds = seconds)) 
+    return round_datetime_delta(DateTime.TimeDelta(seconds = seconds)) 
  
 def xmlrpc_marshall_singlevalue(value):
     if isinstance(value, DateTime.DateTimeType):

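round_datetime and round_datetime_delta are pure renames (camelCase to PEP 8 snake_case); the bodies are untouched. For reference, round_datetime rounds to the nearest whole second by adding half a second and flooring:

    from mx.DateTime import DateTime
    from haizea.common.utils import round_datetime

    # 10:00:00.60 + 0.5s = 10:00:01.10; flooring the seconds gives 10:00:01
    d = DateTime(2008, 9, 11, 10, 0, 0.60)
    print round_datetime(d)
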
Modified: trunk/src/haizea/resourcemanager/configfile.py
===================================================================
--- trunk/src/haizea/resourcemanager/configfile.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/configfile.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -199,17 +199,28 @@
              - all: any lease can be suspended                
             """),
 
-     Option(name        = "suspend-threshold-factor",
-            getter      = "suspend-threshold-factor",
+     Option(name        = "suspendresume-exclusion",
+            getter      = "suspendresume-exclusion",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.SUSPRES_EXCLUSION_LOCAL,
+            valid       = [constants.SUSPRES_EXCLUSION_LOCAL,
+                           constants.SUSPRES_EXCLUSION_GLOBAL],
+            doc         = """
+            Documentation            
+            """),
+
+     Option(name        = "scheduling-threshold-factor",
+            getter      = "scheduling-threshold-factor",
             type        = OPTTYPE_INT,
             required    = False,
-            default     = 0,
+            default     = 1,
             doc         = """
             Documentation                
             """),
 
-     Option(name        = "force-suspend-threshold",
-            getter      = "force-suspend-threshold",
+     Option(name        = "force-scheduling-threshold",
+            getter      = "force-scheduling-threshold",
             type        = OPTTYPE_TIMEDELTA,
             required    = False,
             doc         = """

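The option renames track a terminology shift: the thresholds now govern scheduling in general rather than suspension specifically, and the new suspendresume-exclusion option presumably controls whether suspend/resume operations exclude each other per-node ("local") or across the pool ("global"). How client code would read them, going by the getter names and types declared above (the string values of the two SUSPRES_EXCLUSION constants are not shown in this diff):

    factor = config.get("scheduling-threshold-factor")   # int, defaults to 1
    force  = config.get("force-scheduling-threshold")    # TimeDelta, optional
    excl   = config.get("suspendresume-exclusion")       # local or global
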
Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -19,7 +19,8 @@
 import haizea.common.constants as constants
 import haizea.resourcemanager.datastruct as ds
 from haizea.resourcemanager.deployment.base import DeploymentBase, DeploymentSchedException
-from haizea.resourcemanager.datastruct import ResourceReservationBase, ARLease, BestEffortLease
+from haizea.resourcemanager.datastruct import ResourceReservation, Lease, ARLease, BestEffortLease
+from haizea.common.utils import estimate_transfer_time
 
 import copy
 
@@ -66,7 +67,7 @@
         reusealg = config.get("diskimage-reuse")
         avoidredundant = config.get("avoid-redundant-transfers")
         
-        lease.state = constants.LEASE_STATE_SCHEDULED
+        lease.state = Lease.STATE_SCHEDULED
         
         if avoidredundant:
             pass # TODO
@@ -92,7 +93,7 @@
                 musttransfer[vnode] = pnode
 
         if len(musttransfer) == 0:
-            lease.state = constants.LEASE_STATE_DEPLOYED
+            lease.state = Lease.STATE_READY
         else:
             if mechanism == constants.TRANSFER_UNICAST:
                 # Dictionary of transfer RRs. Key is the physical node where
@@ -124,7 +125,7 @@
         reusealg = config.get("diskimage-reuse")
         avoidredundant = config.get("avoid-redundant-transfers")
         earliest = self.find_earliest_starting_times(lease, nexttime)
-        lease.state = constants.LEASE_STATE_SCHEDULED
+        lease.state = Lease.STATE_SCHEDULED
         transferRRs = []
         musttransfer = {}
         piggybacking = []
@@ -151,14 +152,14 @@
         else:
             # TODO: Not strictly correct. Should mark the lease
             # as deployed when piggybacked transfers have concluded
-            lease.state = constants.LEASE_STATE_DEPLOYED
+            lease.state = Lease.STATE_READY
         if len(piggybacking) > 0: 
             endtimes = [t.end for t in piggybacking]
             if len(musttransfer) > 0:
                 endtimes.append(endtransfer)
             lease.imagesavail = max(endtimes)
         if len(musttransfer)==0 and len(piggybacking)==0:
-            lease.state = constants.LEASE_STATE_DEPLOYED
+            lease.state = Lease.STATE_READY
             lease.imagesavail = nexttime
         for rr in transferRRs:
             lease.append_rr(rr)
@@ -174,7 +175,7 @@
         # Figure out starting time assuming we have to transfer the image
         nextfifo = self.get_next_fifo_transfer_time(nexttime)
         
-        imgTransferTime=lease_req.estimate_image_transfer_time(self.imagenode_bandwidth)
+        imgTransferTime=self.estimate_image_transfer_time(lease_req, self.imagenode_bandwidth)
         
         # Find worst-case earliest start time
         if lease_req.numnodes == 1:
@@ -205,7 +206,7 @@
                 # TODO
             if mechanism == constants.TRANSFER_MULTICAST:                
                 # We can only piggyback on transfers that haven't started yet
-                transfers = [t for t in self.transfers_fifo if t.state == constants.RES_STATE_SCHEDULED]
+                transfers = [t for t in self.transfers_fifo if t.state == ResourceReservation.STATE_SCHEDULED]
                 for t in transfers:
                     if t.file == lease_req.diskImageID:
                         startTime = t.end
@@ -219,16 +220,16 @@
     def schedule_imagetransfer_edf(self, req, vnodes, nexttime):
         # Estimate image transfer time 
         bandwidth = self.resourcepool.deployment.get_bandwidth()
-        imgTransferTime=req.estimate_image_transfer_time(bandwidth)
+        imgTransferTime=self.estimate_image_transfer_time(req, bandwidth)
 
         # Determine start time
-        activetransfers = [t for t in self.transfers_edf if t.state == constants.RES_STATE_ACTIVE]
+        activetransfers = [t for t in self.transfers_edf if t.state == ResourceReservation.STATE_ACTIVE]
         if len(activetransfers) > 0:
             startTime = activetransfers[-1].end
         else:
             startTime = nexttime
         
-        transfermap = dict([(copy.copy(t), t) for t in self.transfers_edf if t.state == constants.RES_STATE_SCHEDULED])
+        transfermap = dict([(copy.copy(t), t) for t in self.transfers_edf if t.state == ResourceReservation.STATE_SCHEDULED])
         newtransfers = transfermap.keys()
         
         res = {}
@@ -242,7 +243,7 @@
         
         newtransfer = FileTransferResourceReservation(req, res)
         newtransfer.deadline = req.start.requested
-        newtransfer.state = constants.RES_STATE_SCHEDULED
+        newtransfer.state = ResourceReservation.STATE_SCHEDULED
         newtransfer.file = req.diskimage_id
         for vnode, pnode in vnodes.items():
             newtransfer.piggyback(req.id, vnode, pnode)
@@ -316,7 +317,7 @@
     def schedule_imagetransfer_fifo(self, req, reqtransfers, nexttime):
         # Estimate image transfer time 
         bandwidth = self.imagenode_bandwidth
-        imgTransferTime=req.estimate_image_transfer_time(bandwidth)
+        imgTransferTime=self.estimate_image_transfer_time(req, bandwidth)
         config = self.scheduler.rm.config
         mechanism = config.get("transfer-mechanism")
         startTime = self.get_next_fifo_transfer_time(nexttime)
@@ -342,7 +343,7 @@
             newtransfer.start = startTime
             newtransfer.end = startTime+imgTransferTime
             newtransfer.deadline = None
-            newtransfer.state = constants.RES_STATE_SCHEDULED
+            newtransfer.state = ResourceReservation.STATE_SCHEDULED
             newtransfer.file = req.diskimage_id
             for vnode in reqtransfers:
                 physnode = reqtransfers[vnode]
@@ -354,8 +355,17 @@
         
         return newtransfers
     
+    def estimate_image_transfer_time(self, lease, bandwidth):
+        from haizea.resourcemanager.rm import ResourceManager
+        config = ResourceManager.get_singleton().config
+        forceTransferTime = config.get("force-imagetransfer-time")
+        if forceTransferTime != None:
+            return forceTransferTime
+        else:      
+            return estimate_transfer_time(lease.diskimage_size, bandwidth)    
+    
     def get_next_fifo_transfer_time(self, nexttime):
-        transfers = [t for t in self.transfers_fifo if t.state != constants.RES_STATE_DONE]
+        transfers = [t for t in self.transfers_fifo if t.state != ResourceReservation.STATE_DONE]
         if len(transfers) > 0:
             startTime = transfers[-1].end
         else:
@@ -363,7 +373,7 @@
         return startTime
 
     def __remove_from_fifo_transfers(self, lease_id):
-        transfers = [t for t in self.transfers_fifo if t.state != constants.RES_STATE_DONE]
+        transfers = [t for t in self.transfers_fifo if t.state != ResourceReservation.STATE_DONE]
         toremove = []
         for t in transfers:
             for pnode in t.transfers:
@@ -384,12 +394,10 @@
     def handle_start_filetransfer(sched, lease, rr):
         sched.rm.logger.debug("LEASE-%i Start of handleStartFileTransfer" % lease.id)
         lease.print_contents()
-        if lease.state == constants.LEASE_STATE_SCHEDULED or lease.state == constants.LEASE_STATE_DEPLOYED:
-            lease.state = constants.LEASE_STATE_DEPLOYING
-            rr.state = constants.RES_STATE_ACTIVE
+        if lease.state == Lease.STATE_SCHEDULED or lease.state == Lease.STATE_READY:
+            lease.state = Lease.STATE_PREPARING
+            rr.state = ResourceReservation.STATE_ACTIVE
             # TODO: Enactment
-        elif lease.state == constants.LEASE_STATE_SUSPENDED:
-            pass # This shouldn't happen
         lease.print_contents()
         sched.updateNodeTransferState(rr.transfers.keys(), constants.DOING_TRANSFER, lease.id)
         sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id)
@@ -399,9 +407,9 @@
     def handle_end_filetransfer(sched, lease, rr):
         sched.rm.logger.debug("LEASE-%i Start of handleEndFileTransfer" % lease.id)
         lease.print_contents()
-        if lease.state == constants.LEASE_STATE_DEPLOYING:
-            lease.state = constants.LEASE_STATE_DEPLOYED
-            rr.state = constants.RES_STATE_DONE
+        if lease.state == Lease.STATE_PREPARING:
+            lease.state = Lease.STATE_READY
+            rr.state = ResourceReservation.STATE_DONE
             for physnode in rr.transfers:
                 vnodes = rr.transfers[physnode]
                 
@@ -512,16 +520,16 @@
         for vnode, pnode in lease.vmimagemap.items():
             self.resourcepool.remove_diskimage(pnode, lease.id, vnode)
 
-class FileTransferResourceReservation(ResourceReservationBase):
+class FileTransferResourceReservation(ResourceReservation):
     def __init__(self, lease, res, start=None, end=None):
-        ResourceReservationBase.__init__(self, lease, start, end, res)
+        ResourceReservation.__init__(self, lease, start, end, res)
         self.deadline = None
         self.file = None
         # Dictionary of  physnode -> [ (lease_id, vnode)* ]
         self.transfers = {}
 
     def print_contents(self, loglevel="VDEBUG"):
-        ResourceReservationBase.print_contents(self, loglevel)
+        ResourceReservation.print_contents(self, loglevel)
         self.logger.log(loglevel, "Type           : FILE TRANSFER")
         self.logger.log(loglevel, "Deadline       : %s" % self.deadline)
         self.logger.log(loglevel, "File           : %s" % self.file)

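Most of this file is the mechanical migration from constants.LEASE_STATE_* / constants.RES_STATE_* to Lease.STATE_* / ResourceReservation.STATE_*, plus the relocation of estimate_image_transfer_time: it used to be a method on the lease objects, and now lives on the deployment class, where it checks the force-imagetransfer-time option (not shown in this diff) before falling back to the bandwidth-based estimate. A worked example of that fallback, using estimate_transfer_time from the utils.py hunk above:

    from haizea.common.utils import estimate_transfer_time

    # 1024 MB image over a 100 Mbps link:
    #   100 Mbps / 8 = 12.5 MB/s, and 1024 MB / 12.5 MB/s = 81.92 s.
    # round_datetime_delta() then rounds the seconds up with ceil(),
    # so the result is a DateTimeDelta of 0:01:22 (82 seconds).
    print estimate_transfer_time(1024, 100)
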
Modified: trunk/src/haizea/resourcemanager/deployment/unmanaged.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/unmanaged.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/deployment/unmanaged.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -16,6 +16,7 @@
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
 
+from haizea.resourcemanager.datastruct import Lease
 from haizea.resourcemanager.deployment.base import DeploymentBase
 import haizea.common.constants as constants
 
@@ -25,7 +26,7 @@
     
     # Add dummy disk images
     def schedule(self, lease, vmrr, nexttime):
-        lease.state = constants.LEASE_STATE_DEPLOYED
+        lease.state = Lease.STATE_READY
         for (vnode, pnode) in vmrr.nodes.items():
             self.resourcepool.add_diskimage(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
             
@@ -55,5 +56,5 @@
         return True
 
     def cleanup(self, lease, vmrr):
-        for vnode, pnode in lease.vmimagemap.items():
+        for vnode, pnode in lease.diskimagemap.items():
                 self.resourcepool.remove_diskimage(pnode, lease.id, vnode)
\ No newline at end of file

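The cleanup() change here looks like a bug fix rather than a rename for its own sake: the lease attribute is presumably called diskimagemap under the diskimage_* naming this revision standardizes on, so iterating lease.vmimagemap would raise an AttributeError once the old name is gone. (Note that the cleanup() in imagetransfer.py above still reads lease.vmimagemap.)
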
Modified: trunk/src/haizea/resourcemanager/frontends/opennebula.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -22,7 +22,7 @@
 from haizea.common.utils import UNIX2DateTime
 from pysqlite2 import dbapi2 as sqlite
 from mx.DateTime import DateTimeDelta, TimeDelta, ISO
-from haizea.common.utils import roundDateTime
+from haizea.common.utils import round_datetime
 import operator
 import logging
 
@@ -116,7 +116,7 @@
         tSubmit, vmimage, vmimagesize, numnodes, resreq, duration, preemptible = self.get_common_attrs(req, attrs, haizea_param)
  
         leasereq = BestEffortLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
-        leasereq.state = constants.LEASE_STATE_PENDING
+
         # Enactment info should be changed to the "array id" when groups
         # are implemented in OpenNebula
         leasereq.enactment_info = int(req["oid"])
@@ -133,11 +133,11 @@
             # Relative time
             # For testing, should be:
             # tStart = tSubmit + ISO.ParseTime(tStart[1:])
-            start = roundDateTime(self.rm.clock.get_time() + ISO.ParseTime(start[1:]))
+            start = round_datetime(self.rm.clock.get_time() + ISO.ParseTime(start[1:]))
         else:
             start = ISO.ParseDateTime(start)
         leasereq = ARLease(tSubmit, start, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
-        leasereq.state = constants.LEASE_STATE_PENDING
+
         # Enactment info should be changed to the "array id" when groups
         # are implemented in OpenNebula
         leasereq.enactmentInfo = int(req["oid"])
@@ -150,7 +150,7 @@
         tSubmit, vmimage, vmimagesize, numnodes, resreq, duration, preemptible = self.get_common_attrs(req, attrs, haizea_param)
  
         leasereq = ImmediateLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
-        leasereq.state = constants.LEASE_STATE_PENDING
+
         # Enactment info should be changed to the "array id" when groups
         # are implemented in OpenNebula
         leasereq.enactment_info = int(req["oid"])

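All three lease constructions in this frontend (best-effort, AR, immediate) drop the explicit constants.LEASE_STATE_PENDING assignment, which implies the Lease constructor (changed in one of the two previous revisions this commit cleans up after) now initializes the state itself. A sketch of the assumed behavior:

    class Lease(object):
        STATE_NEW = 0  # illustrative value

        def __init__(self, submit_time):
            self.submit_time = submit_time
            # Assumption: the constructor now sets the initial state,
            # so frontends no longer assign it after construction.
            self.state = Lease.STATE_NEW
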
Modified: trunk/src/haizea/resourcemanager/frontends/rpc.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/rpc.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/frontends/rpc.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -18,7 +18,7 @@
 import haizea.common.constants as constants
 from haizea.resourcemanager.datastruct import ARLease, BestEffortLease, ImmediateLease, ResourceTuple
 from haizea.resourcemanager.frontends.base import RequestFrontend
-from haizea.common.utils import roundDateTime
+from haizea.common.utils import round_datetime
 from mx.DateTime import DateTimeDelta, TimeDelta, ISO
 import logging
 
@@ -43,7 +43,7 @@
         return True
             
     def create_lease(self, start, duration, preemptible, numnodes, cpu, mem, vmimage, vmimagesize):
-        tSubmit = roundDateTime(self.rm.clock.get_time())
+        tSubmit = round_datetime(self.rm.clock.get_time())
         resreq = ResourceTuple.create_empty()
         resreq.set_by_type(constants.RES_CPU, float(cpu))
         resreq.set_by_type(constants.RES_MEM, int(mem))        
@@ -61,12 +61,10 @@
         else:
             if start[0] == "+":
                 # Relative time
-                start = roundDateTime(tSubmit + ISO.ParseTime(start[1:]))
+                start = round_datetime(tSubmit + ISO.ParseTime(start[1:]))
             else:
                 start = ISO.ParseDateTime(start)
             leasereq = ARLease(tSubmit, start, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
-
-        leasereq.state = constants.LEASE_STATE_PENDING
         
         self.accumulated.append(leasereq)
         

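rpc.py gets the same two treatments: the round_datetime rename and dropping the explicit LEASE_STATE_PENDING assignment. For context, create_lease() is what the RPC server exposes to clients such as the CLI in rpc_commands.py; a call exercising the relative-start branch might look like this (URL and argument formats are illustrative, not taken from this diff):

    import xmlrpclib

    server = xmlrpclib.ServerProxy("http://localhost:42493")
    # AR lease: 2 nodes, 1 CPU / 1024 MB each, starting 5 minutes after
    # submission (the "+" prefix selects the relative-time branch above),
    # non-preemptible. Argument order per the signature in the diff.
    server.create_lease("+00:05:00", "01:00:00", False, 2,
                        1.0, 1024, "foo.img", 1024)
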
Modified: trunk/src/haizea/resourcemanager/rm.py
===================================================================
--- trunk/src/haizea/resourcemanager/rm.py	2008-09-11 16:47:36 UTC (rev 491)
+++ trunk/src/haizea/resourcemanager/rm.py	2008-09-11 16:48:08 UTC (rev 492)
@@ -36,10 +36,10 @@
 from haizea.resourcemanager.frontends.tracefile import TracefileFrontend
 from haizea.resourcemanager.frontends.opennebula import OpenNebulaFrontend
 from haizea.resourcemanager.frontends.rpc import RPCFrontend
-from haizea.resourcemanager.datastruct import ARLease, BestEffortLease, ImmediateLease 
+from haizea.resourcemanager.datastruct import Lease, ARLease, BestEffortLease, ImmediateLease 
 from haizea.resourcemanager.scheduler import Scheduler
 from haizea.resourcemanager.rpcserver import RPCServer
-from haizea.common.utils import abstract, roundDateTime, Singleton
+from haizea.common.utils import abstract, round_datetime, Singleton
 
 import operator
 import logging
@@ -250,18 +250,9 @@
         for frontend in self.frontends:
             requests += frontend.getAccumulatedRequests()
         requests.sort(key=operator.attrgetter("submit_time"))
-                
-        ar_leases = [req for req in requests if isinstance(req, ARLease)]
-        be_leases = [req for req in requests if isinstance(req, BestEffortLease)]
-        im_leases = [req for req in requests if isinstance(req, ImmediateLease)]
         
-        # Queue best-effort
-        for req in be_leases:
-            self.scheduler.enqueue(req)
-            
-        # Add AR leases and immediate leases
-        for req in ar_leases + im_leases:
-            self.scheduler.add_pending_lease(req)
+        for req in requests:
+            self.scheduler.request_lease(req)
         
         # Run the scheduling function.
         try:
@@ -299,12 +290,12 @@
         self.logger.log(loglevel, "Next change point (in slot table): %s" % self.get_next_changepoint())
 
         # Print descriptors of scheduled leases
-        scheduled = self.scheduler.scheduledleases.entries.keys()
+        scheduled = self.scheduler.leases.entries.keys()
         self.logger.log(loglevel, "Scheduled requests: %i" % len(scheduled))
         if verbose and len(scheduled)>0:
             self.logger.log(loglevel, "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
             for k in scheduled:
-                lease = self.scheduler.scheduledleases.get_lease(k)
+                lease = self.scheduler.leases.get_lease(k)
                 lease.print_contents(loglevel=loglevel)
             self.logger.log(loglevel, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
 
@@ -541,10 +532,10 @@
         # We can also be done if we've specified that we want to stop when
         # the best-effort requests are all done or when they've all been submitted.
         stopwhen = self.rm.config.get("stop-when")
-        scheduledbesteffort = self.rm.scheduler.scheduledleases.get_leases(type = BestEffortLease)
+        besteffort = self.rm.scheduler.leases.get_leases(type = BestEffortLease)
         pendingbesteffort = [r for r in tracefrontend.requests if isinstance(r, BestEffortLease)]
         if stopwhen == constants.STOPWHEN_BEDONE:
-            if self.rm.scheduler.isQueueEmpty() and len(scheduledbesteffort) + len(pendingbesteffort) == 0:
+            if self.rm.scheduler.isQueueEmpty() and len(besteffort) + len(pendingbesteffort) == 0:
                 done = True
         elif stopwhen == constants.STOPWHEN_BESUBMITTED:
             if len(pendingbesteffort) == 0:
@@ -554,7 +545,7 @@
         # an infinite loop. This is A Bad Thing(tm).
         if newtime == prevtime and done != True:
             self.logger.error("Simulated clock has fallen into an infinite loop. Dumping state..." )
-            self.rm.print_stats("ERROR", verbose=True)
+            self.rm.print_stats(logging.getLevelName("ERROR"), verbose=True)
             raise Exception, "Simulated clock has fallen into an infinite loop."
         
         return newtime, done
@@ -590,7 +581,7 @@
         if not self.fastforward:
             self.lastwakeup = None
         else:
-            self.lastwakeup = roundDateTime(now())
+            self.lastwakeup = round_datetime(now())
         self.logger = logging.getLogger("CLOCK")
         self.starttime = self.get_time()
         self.nextschedulable = None
@@ -644,11 +635,11 @@
             # resource manager operations (if we use now(), we'll get a different
             # time every time)
             if not self.fastforward:
-                self.lastwakeup = roundDateTime(self.get_time())
+                self.lastwakeup = round_datetime(self.get_time())
             self.logger.status("Wake-up time recorded as %s" % self.lastwakeup)
                 
             # Next schedulable time
-            self.nextschedulable = roundDateTime(self.lastwakeup + self.non_sched)
+            self.nextschedulable = round_datetime(self.lastwakeup + self.non_sched)
             
             # Wake up the resource manager
             self.rm.process_reservations(self.lastwakeup)
@@ -660,9 +651,9 @@
             if self.lastwakeup + self.quantum <= time_now:
                 quantums = (time_now - self.lastwakeup) / self.quantum
                 quantums = int(ceil(quantums)) * self.quantum
-                self.nextperiodicwakeup = roundDateTime(self.lastwakeup + quantums)
+                self.nextperiodicwakeup = round_datetime(self.lastwakeup + quantums)
             else:
-                self.nextperiodicwakeup = roundDateTime(self.lastwakeup + self.quantum)
+                self.nextperiodicwakeup = round_datetime(self.lastwakeup + self.quantum)
             
             # Determine if there's anything to do before the next wakeup time
             nextchangepoint = self.rm.get_next_changepoint()
@@ -701,7 +692,7 @@
 if __name__ == "__main__":
     from haizea.resourcemanager.configfile import HaizeaConfig
     from haizea.common.config import ConfigException
-    CONFIGFILE = "../../../etc/sample_trace.conf"
+    CONFIGFILE = "../../../etc/suspendresume.conf"
     try:
         CONFIG = HaizeaConfig.from_file(CONFIGFILE)
     except ConfigException, msg:

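The rm.py changes mirror the scheduler refactoring: the resource manager no longer sorts requests by lease type and routes them to enqueue() or add_pending_lease(); everything goes through scheduler.request_lease(), and the scheduledleases structure is now just scheduler.leases. A plausible sketch of the consolidated entry point on the scheduler side (an assumption; the scheduler change itself landed in a previous revision):

    # In haizea.resourcemanager.scheduler.Scheduler (sketch, not from this
    # diff; assumes the datastruct imports already present in scheduler.py).
    def request_lease(self, lease):
        # One entry point for every lease type; the scheduler, rather than
        # the resource manager, decides how each request is handled.
        if isinstance(lease, BestEffortLease):
            self.enqueue(lease)             # previously called by rm.py
        else:
            self.add_pending_lease(lease)   # likewise
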

