[haizea-commit] r612 - branches/TP2.0/src/haizea/core/scheduler

haizea-commit at mailman.cs.uchicago.edu
Wed Jul 22 05:30:26 CDT 2009


Author: borja
Date: 2009-07-22 05:30:14 -0500 (Wed, 22 Jul 2009)
New Revision: 612

Modified:
   branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
   branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
Log:
Cleaned up and documented most of the VM scheduler (except for the *ResourceReservation classes)

Modified: branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py	2009-07-21 13:55:33 UTC (rev 611)
+++ branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py	2009-07-22 10:30:14 UTC (rev 612)
@@ -61,7 +61,6 @@
         vm_scheduler -- VM scheduler
         preparation_scheduler -- Preparation scheduler
         slottable -- Slottable
-        policy -- Policy manager
         """
         
         # Logger
@@ -473,7 +472,7 @@
                     # Put back on queue
                     newqueue.enqueue(lease)
                     self.logger.info("Lease %i could not be scheduled at this time." % lease.id)
-                    if not self.vm_scheduler.is_backfilling():
+                    if get_config().get("backfilling") == constants.BACKFILLING_OFF:
                         done = True
         
         for lease in self.queue:

Modified: branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py	2009-07-21 13:55:33 UTC (rev 611)
+++ branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py	2009-07-22 10:30:14 UTC (rev 612)
@@ -16,6 +16,14 @@
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
 
+"""This module provides the main classes for Haizea's VM Scheduler. All the
+scheduling code that decides when and where a lease is scheduled is contained
+in the VMScheduler class (except for the code that specifically decides
+what physical machines each virtual machine is mapped to, which is factored out
+into the "mapper" module). This module also provides the classes for the
+reservations that will be placed in the slot table and correspond to VMs. 
+"""
+
 import haizea.common.constants as constants
 from haizea.common.utils import round_datetime_delta, round_datetime, estimate_transfer_time, pretty_nodemap, get_config, get_clock, get_policy
 from haizea.core.leases import Lease, Capacity
@@ -31,16 +39,30 @@
 class VMScheduler(object):
     """The Haizea VM Scheduler
     
-    TODO: This class needs to be documented. It will also change quite a bit in TP2.0, when all
-    policy decisions are factored out into a separate module.
+    This class is responsible for taking a lease and scheduling VMs to satisfy
+    the requirements of that lease.
     """
     
     def __init__(self, slottable, resourcepool, mapper):
+        """Constructor
+        
+        The constructor does little more than create the VM scheduler's
+        attributes. However, it does expect (in the arguments) a fully-constructed 
+        SlotTable, ResourcePool, and Mapper (these are constructed in the 
+        Manager's constructor). 
+        
+        Arguments:
+        slottable -- Slot table
+        resourcepool -- Resource pool where enactment commands will be sent to
+        mapper -- Mapper
+        """        
         self.slottable = slottable
         self.resourcepool = resourcepool
         self.mapper = mapper
         self.logger = logging.getLogger("VMSCHED")
         
+        # Register the handlers for the types of reservations used by
+        # the VM scheduler
         self.handlers = {}
         self.handlers[VMResourceReservation] = ReservationEventHandler(
                                 sched    = self,
@@ -67,37 +89,287 @@
                                 on_start = VMScheduler._handle_start_migrate,
                                 on_end   = VMScheduler._handle_end_migrate)
         
+        # When using backfilling, set the number of leases that can be
+        # scheduled in the future.
         backfilling = get_config().get("backfilling")
         if backfilling == constants.BACKFILLING_OFF:
-            self.maxres = 0
+            self.max_in_future = 0
         elif backfilling == constants.BACKFILLING_AGGRESSIVE:
-            self.maxres = 1
+            self.max_in_future = 1
         elif backfilling == constants.BACKFILLING_CONSERVATIVE:
-            self.maxres = 1000000 # Arbitrarily large
+            self.max_in_future = -1 # Unlimited
         elif backfilling == constants.BACKFILLING_INTERMEDIATE:
-            self.maxres = get_config().get("backfilling-reservations")
+            self.max_in_future = get_config().get("backfilling-reservations")
+        self.future_leases = set()
 
-        self.future_reservations = set()
 
     def schedule(self, lease, nexttime, earliest):
+        """ The scheduling function
+        
+        This particular function doesn't do much except call __schedule_asap
+        and __schedule_exact (which do all the work).
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        """        
         if lease.get_type() == Lease.BEST_EFFORT:
-            return self.__schedule_asap(lease, nexttime, earliest, allow_reservation_in_future = True)
+            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = True)
         elif lease.get_type() == Lease.ADVANCE_RESERVATION:
             return self.__schedule_exact(lease, nexttime, earliest)
         elif lease.get_type() == Lease.IMMEDIATE:
-            return self.__schedule_asap(lease, nexttime, earliest, allow_reservation_in_future = False)
+            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = False)
 
+
+    def estimate_migration_time(self, lease):
+        """ Estimates the time required to migrate a lease's VMs
+
+        This function conservatively estimates that all the VMs are going to
+        be migrated to other nodes. Since transfers out of different
+        nodes can proceed concurrently, the bottleneck is the transfer
+        from whatever node has the most memory to transfer.
+        
+        Note that this method only estimates the time to migrate the memory
+        state files for the VMs. Migrating the software environment (which may
+        or may not be a disk image) is the responsibility of the preparation
+        scheduler, which has its own set of migration scheduling methods.
+
+        Arguments:
+        lease -- Lease that might be migrated
+        """                
+        migration = get_config().get("migration")
+        if migration == constants.MIGRATE_YES:
+            vmrr = lease.get_last_vmrr()
+            mem_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
+            for (vnode,pnode) in vmrr.nodes.items():
+                mem = vmrr.resources_in_pnode[pnode].get_by_type(constants.RES_MEM)
+                mem_in_pnode[pnode] += mem
+            max_mem_to_transfer = max(mem_in_pnode.values())
+            bandwidth = self.resourcepool.info.get_migration_bandwidth()
+            return estimate_transfer_time(max_mem_to_transfer, bandwidth)
+        elif migration == constants.MIGRATE_YES_NOTRANSFER:
+            return TimeDelta(seconds=0)        
+
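For reference, the estimate above boils down to the largest per-node memory
footprint divided by the migration bandwidth. The following is a minimal
standalone sketch of that calculation; the function name, units and figures
are illustrative assumptions, not Haizea's API:

    def estimate_memory_migration_time(mem_per_pnode, bandwidth):
        """mem_per_pnode: {pnode: total MB of VM memory on that node};
        bandwidth: migration bandwidth in MB/s."""
        # The node with the most memory to move dominates the estimate.
        max_mem = max(mem_per_pnode.values())
        return float(max_mem) / bandwidth  # seconds

    # Example: 2048 MB to move off node 1 and 1024 MB off node 2, over a
    # 100 MB/s link -> roughly 20.5 seconds.
    print(estimate_memory_migration_time({1: 2048, 2: 1024}, 100))
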
+    def schedule_migration(self, lease, vmrr, nexttime):
+        """ Schedules migrations for a lease
+
+        Arguments:
+        lease -- Lease being migrated
+        vmrr -- The VM reservation before which the migration will take place
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """
+        
+        # Determine what migrations have to be done. We do this by looking at
+        # the mapping in the previous VM RR and in the new VM RR
+        last_vmrr = lease.get_last_vmrr()
+        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes])
+        
+        # Determine if we actually have to migrate
+        mustmigrate = False
+        for vnode in vnode_migrations:
+            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
+                mustmigrate = True
+                break
+            
+        if not mustmigrate:
+            return []
+
+        # If Haizea is configured to migrate without doing any transfers,
+        # then we just return a nil-duration migration RR
+        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
+            start = nexttime
+            end = nexttime
+            res = {}
+            migr_rr = MemImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            return [migr_rr]
+
+        # Figure out what migrations can be done simultaneously
+        migrations = []
+        while len(vnode_migrations) > 0:
+            pnodes = set()
+            migration = {}
+            for vnode in vnode_migrations:
+                origin = vnode_migrations[vnode][0]
+                dest = vnode_migrations[vnode][1]
+                if not origin in pnodes and not dest in pnodes:
+                    migration[vnode] = vnode_migrations[vnode]
+                    pnodes.add(origin)
+                    pnodes.add(dest)
+            for vnode in migration:
+                del vnode_migrations[vnode]
+            migrations.append(migration)
+        
+        # Create migration RRs
+        start = max(last_vmrr.post_rrs[-1].end, nexttime)
+        bandwidth = self.resourcepool.info.get_migration_bandwidth()
+        migr_rrs = []
+        for m in migrations:
+            vnodes_to_migrate = m.keys()
+            max_mem_to_migrate = max([lease.requested_resources[vnode].get_quantity(constants.RES_MEM) for vnode in vnodes_to_migrate])
+            migr_time = estimate_transfer_time(max_mem_to_migrate, bandwidth)
+            end = start + migr_time
+            res = {}
+            for (origin,dest) in m.values():
+                resorigin = Capacity([constants.RES_NETOUT])
+                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
+                resdest = Capacity([constants.RES_NETIN])
+                resdest.set_quantity(constants.RES_NETIN, bandwidth)
+                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
+                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
+            migr_rr = MemImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            migr_rrs.append(migr_rr)
+            start = end
+            
+        return migr_rrs
+
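The "simultaneous migrations" loop above is a greedy grouping: each round
takes as many pending migrations as possible such that no physical node
appears twice (as origin or destination), and whatever is left spills over
into the next round. A self-contained sketch of just that grouping step,
with an illustrative function name and example values:

    def group_migrations(vnode_migrations):
        """vnode_migrations: {vnode: (origin_pnode, dest_pnode)}"""
        pending = dict(vnode_migrations)
        rounds = []
        while pending:
            busy_pnodes = set()
            this_round = {}
            for vnode, (origin, dest) in list(pending.items()):
                if origin not in busy_pnodes and dest not in busy_pnodes:
                    this_round[vnode] = (origin, dest)
                    busy_pnodes.add(origin)
                    busy_pnodes.add(dest)
            for vnode in this_round:
                del pending[vnode]
            rounds.append(this_round)
        return rounds

    # vnodes 1 and 2 both leave pnode "A", so they cannot migrate at the
    # same time and end up in two separate rounds.
    print(group_migrations({1: ("A", "B"), 2: ("A", "C")}))
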
+    def cancel_vm(self, vmrr):
+        """ Cancels a VM resource reservation
+
+        Arguments:
+        vmrr -- VM RR to be cancelled
+        """         
+        
+        # If this VM RR is part of a lease that was scheduled in the future,
+        # remove that lease from the set of future leases.
+        if vmrr.lease in self.future_leases:
+            self.future_leases.remove(vmrr.lease)
+
+        # If there are any pre-RRs that are scheduled, remove them
+        for rr in vmrr.pre_rrs:
+            if rr.state == ResourceReservation.STATE_SCHEDULED:
+                self.slottable.remove_reservation(rr)
+
+        # If there are any post RRs, remove them
+        for rr in vmrr.post_rrs:
+            self.slottable.remove_reservation(rr)
+        
+        # Remove the reservation itself
+        self.slottable.remove_reservation(vmrr)
+
+
+    def can_suspend_at(self, lease, t):
+        """ Determines if it is possible to suspend a lease before a given time
+
+        Arguments:
+        lease -- Lease whose suspension is being considered
+        t -- Time by which the lease's VMs would have to be suspended
+        """                     
+        # TODO: Make more general, should determine vmrr based on current time
+        # This won't currently break, though, since the calling function 
+        # operates on the last VM RR.
+        vmrr = lease.get_last_vmrr()
+        time_until_suspend = t - vmrr.start
+        min_duration = self.__compute_scheduling_threshold(lease)
+        can_suspend = time_until_suspend >= min_duration        
+        return can_suspend
+    
+    
+    def preempt_vm(self, vmrr, t):
+        """ Preempts a VM reservation at a given time
+
+        This method assumes that the lease is, in fact, preemptable,
+        that the VMs are running at the given time, and that there is 
+        enough time to suspend the VMs before the given time (all these
+        checks are done in the lease scheduler).
+        
+        Arguments:
+        vmrr -- VM RR to be preempted
+        t -- Time by which the VM must be preempted
+        """             
+        
+        # Save original start and end time of the vmrr
+        old_start = vmrr.start
+        old_end = vmrr.end
+        
+        # Schedule the VM suspension
+        self.__schedule_suspension(vmrr, t)
+        
+        # Update the VMRR in the slot table
+        self.slottable.update_reservation_with_key_change(vmrr, old_start, old_end)
+        
+        # Add the suspension RRs to the VM's post-RRs
+        for susprr in vmrr.post_rrs:
+            self.slottable.add_reservation(susprr)
+            
+            
+    def get_future_reschedulable_leases(self):
+        """ Returns a list of future leases that are reschedulable.
+
+        Currently, this list is just the best-effort leases scheduled
+        in the future as determined by the backfilling algorithm.
+        Advance reservation leases, by their nature, cannot be 
+        rescheduled to find a "better" starting time.
+        """             
+        return list(self.future_leases)
+    
+
+    def can_schedule_in_future(self):
+        """ Returns True if the backfilling algorithm would allow a lease
+        to be scheduled in the future.
+
+        """             
+        if self.max_in_future == -1: # Unlimited
+            return True
+        else:
+            return len(self.future_leases) < self.max_in_future
+
+        
+    def get_utilization(self, time):
+        """ Computes resource utilization (currently just CPU-based)
+
+        Arguments:
+        time -- Time at which to determine utilization
+        """         
+        total = self.slottable.get_total_capacity(restype = constants.RES_CPU)
+        util = {}
+        reservations = self.slottable.get_reservations_at(time)
+        for r in reservations:
+            for node in r.resources_in_pnode:
+                if isinstance(r, VMResourceReservation):
+                    use = r.resources_in_pnode[node].get_by_type(constants.RES_CPU)
+                    util[type(r)] = use + util.setdefault(type(r),0.0)
+                elif isinstance(r, SuspensionResourceReservation) or isinstance(r, ResumptionResourceReservation) or isinstance(r, ShutdownResourceReservation):
+                    use = r.vmrr.resources_in_pnode[node].get_by_type(constants.RES_CPU)
+                    util[type(r)] = use + util.setdefault(type(r),0.0)
+        util[None] = total - sum(util.values())
+        for k in util:
+            util[k] /= total
+            
+        return util              
+        
+
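The dictionary returned by get_utilization maps reservation classes to the
fraction of the site's total CPU capacity they occupy, with None standing
for idle capacity; the fractions add up to 1.0. A hypothetical example of
its shape, using stand-in classes and invented numbers:

    # Stand-in classes; in Haizea the keys are the real reservation classes.
    class VMResourceReservation(object): pass
    class SuspensionResourceReservation(object): pass

    # E.g., 60% of CPU capacity running VMs, 5% suspending VMs, 35% idle.
    util = {VMResourceReservation: 0.60,
            SuspensionResourceReservation: 0.05,
            None: 0.35}
    assert abs(sum(util.values()) - 1.0) < 1e-9
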
     def __schedule_exact(self, lease, nexttime, earliest):
+        """ Schedules VMs that must start at an exact time
+        
+        This type of lease is "easy" to schedule because we know the exact
+        start time, which means that's the only starting time we have to
+        check. So, this method does little more than call the mapper.
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        """             
+        
+        # Determine the start and end time
         start = lease.start.requested
         end = start + lease.duration.requested
         
+        # Convert Capacity objects in lease object into ResourceTuples that
+        # we can hand over to the mapper.
         requested_resources = dict([(k,self.slottable.create_resource_tuple_from_capacity(v)) for k,v in lease.requested_resources.items()])
 
+        # Let the mapper do its magic
         mapping, actualend, preemptions = self.mapper.map(lease, 
                                                           requested_resources,
                                                           start, 
                                                           end, 
                                                           strictend = True)
+        
+        # If no mapping was found, tell the lease scheduler about it
         if mapping == None:
             raise NotSchedulableException, "Not enough resources in specified interval"
         
@@ -111,28 +383,69 @@
             else:
                 res[pnode] = ResourceTuple.copy(vnode_res)
         
-        vmrr = VMResourceReservation(lease, start, end, mapping, res, False)
+        vmrr = VMResourceReservation(lease, start, end, mapping, res)
         vmrr.state = ResourceReservation.STATE_SCHEDULED
 
+        # Schedule shutdown for the VM
         self.__schedule_shutdown(vmrr)
         
-       
-
         return vmrr, preemptions
 
-    def __schedule_asap(self, lease, nexttime, earliest, allow_reservation_in_future = None):
-        lease_id = lease.id
-        remaining_duration = lease.duration.get_remaining_duration()
-        mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
-        shutdown_time = self.__estimate_shutdown_time(lease)
 
-        if allow_reservation_in_future == None:
-            allow_reservation_in_future = self.can_reserve_in_future()
+    def __schedule_asap(self, lease, nexttime, earliest, allow_in_future = None):
+        """ Schedules VMs as soon as possible
+        
+        This method is a bit more complex than __schedule_exact because
+        we need to figure out what "as soon as possible" actually is.
+        This involves attempting several mappings, at different points
+        in time, before we can schedule the lease.
+        
+        This method will always check, at least, if the lease can be scheduled
+        at the earliest possible moment at which the lease could be prepared
+        (e.g., if the lease can't start until 1 hour in the future because that's
+        the earliest possible time at which the disk images it requires can
+        be transferred, then that's when the scheduler will check). Note, however,
+        that this "earliest possible moment" is determined by the preparation
+        scheduler.
+        
+        Additionally, if the lease can't be scheduled at the earliest
+        possible moment, it can also check if the lease can be scheduled
+        in the future. This partially implements a backfilling algorithm
+        (the maximum number of future leases is stored in the max_in_future
+        attribute of VMScheduler), the other part being implemented in the
+        __process_queue method of LeaseScheduler.
+        
+        Note that, if the method is allowed to schedule in the future,
+        and assuming that the lease doesn't request more resources than
+        the site itself provides, this method will always schedule the
+        VMs successfully (since there's always an empty spot somewhere
+        in the future).
+        
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        allow_in_future -- Boolean indicating whether the scheduler is
+        allowed to schedule the VMs in the future.
+        """                
+        
 
+
         #
-        # STEP 1: FIGURE OUT THE MINIMUM DURATION
+        # STEP 1: PROLEGOMENA
         #
         
+        lease_id = lease.id
+        remaining_duration = lease.duration.get_remaining_duration()
+        shutdown_time = self.__estimate_shutdown_time(lease)
+        
+        # We might be scheduling a suspended lease. If so, we will
+        # also have to schedule its resumption. Right now, just 
+        # figure out if this is such a lease.
+        mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
+
+        # This is the minimum duration that we must be able to schedule.
+        # See __compute_scheduling_threshold for more details.
         min_duration = self.__compute_scheduling_threshold(lease)
 
 
@@ -140,16 +453,20 @@
         # STEP 2: FIND THE CHANGEPOINTS
         #
 
-        # Find the changepoints, and the nodes we can use at each changepoint
-        # Nodes may not be available at a changepoint because images
-        # cannot be transferred at that time.
-        if mustresume and get_config().get("migration") == constants.MIGRATE_NO:
-            vmrr = lease.get_last_vmrr()
-            onlynodes = set(vmrr.nodes.values())
-        else:
-            onlynodes = None        
-            
+        # Find the changepoints, and the available nodes at each changepoint
+        # We need to do this because the preparation scheduler may have
+        # determined that some nodes might require more time to prepare
+        # than others (e.g., if using disk image caching, some nodes
+        # might have the required disk image predeployed, while others
+        # may require transferring the image to that node).
+        # 
+        # The end result of this step is a list (cps) where each entry
+        # is a (t,nodes) pair, where "t" is the time of the changepoint
+        # and "nodes" is the set of nodes that are available at that time.
+        
         if not mustresume:
+            # If this is not a suspended lease, then the changepoints
+            # are determined based on the "earliest" parameter.
             cps = [(node, e.time) for node, e in earliest.items()]
             cps.sort(key=itemgetter(1))
             curcp = None
@@ -163,16 +480,24 @@
                 else:
                     changepoints[-1][1] = set(nodes)
         else:
+            # If the lease is suspended, we take into account that, if
+            # migration is disabled, we can only schedule the lease
+            # on the nodes it is currently scheduled on.
+            if get_config().get("migration") == constants.MIGRATE_NO:
+                vmrr = lease.get_last_vmrr()
+                onlynodes = set(vmrr.nodes.values())
+            else:
+                onlynodes = None               
             changepoints = list(set([x.time for x in earliest.values()]))
             changepoints.sort()
             changepoints = [(x, onlynodes) for x in changepoints]
 
-        # If we can make reservations in the future,
+        # If we can schedule VMs in the future,
         # we also consider future changepoints
-        # (otherwise, we only allow the VMs to start "now", accounting
-        #  for the fact that vm images will have to be deployed)
-        if allow_reservation_in_future:
+        if allow_in_future:
             res = self.slottable.get_reservations_ending_after(changepoints[-1][0])
+            # We really only care about changepoints where VMs end (which is
+            # when resources become available)
             futurecp = [r.get_final_end() for r in res if isinstance(r, VMResourceReservation)]
             # Corner case: Sometimes we're right in the middle of a ShutdownReservation, so it won't be
             # included in futurecp.
@@ -183,15 +508,18 @@
                 futurecp = [(p,onlynodes) for p in futurecp]                
         else:
             futurecp = []
+            
 
-
-
-
         #
-        # STEP 3: SLOT FITTING
+        # STEP 3: FIND A MAPPING
         #
         
-        # If resuming, we also have to allocate enough for the resumption
+        # In this step we find a starting time and a mapping for the VMs,
+        # which involves going through the changepoints in order and seeing
+        # if we can find a mapping.
+        # Most of the work is done in the __find_fit_at_points method.
+        
+        # If resuming, we also have to allocate enough time for the resumption
         if mustresume:
             duration = remaining_duration + self.__estimate_resume_time(lease)
         else:
@@ -199,25 +527,27 @@
 
         duration += shutdown_time
 
-        reservation = False
+        in_future = False
 
+        # Convert Capacity objects in lease object into ResourceTuples that
+        # we can hand over to the mapper.
         requested_resources = dict([(k,self.slottable.create_resource_tuple_from_capacity(v)) for k,v in lease.requested_resources.items()])
 
-        # First, assuming we can't make reservations in the future
+        # First, try to find a mapping assuming we can't schedule in the future
         start, end, mapping, preemptions = self.__find_fit_at_points(lease,
                                                                      requested_resources,
                                                                      changepoints, 
                                                                      duration, 
                                                                      min_duration)
         
-        if start == None and not allow_reservation_in_future:
+        if start == None and not allow_in_future:
                 # We did not find a suitable starting time. This can happen
-                # if we're unable to make future reservations
+                # if we're unable to schedule in the future
                 raise NotSchedulableException, "Could not find enough resources for this request"
 
         # If we haven't been able to fit the lease, check if we can
         # reserve it in the future
-        if start == None and allow_reservation_in_future:
+        if start == None and allow_in_future:
             start, end, mapping, preemptions = self.__find_fit_at_points(lease,
                                                                          requested_resources,
                                                                          futurecp, 
@@ -230,11 +560,18 @@
             if start == None:
                 raise InconsistentScheduleError, "Could not find a mapping in the future (this should not happen)"
 
-            reservation = True
+            in_future = True
 
         #
         # STEP 4: CREATE RESERVATIONS
         #
+        
+        # At this point, the lease is feasible. We just need to create
+        # the reservations for the VMs and, possibly, for the VM resumption,
+        # suspension, and shutdown.
+        
+        
+        # VM resource reservation
         res = {}
         
         for (vnode,pnode) in mapping.items():
@@ -244,12 +581,15 @@
             else:
                 res[pnode] = ResourceTuple.copy(vnode_res)
 
-        vmrr = VMResourceReservation(lease, start, end, mapping, res, reservation)
+        vmrr = VMResourceReservation(lease, start, end, mapping, res)
         vmrr.state = ResourceReservation.STATE_SCHEDULED
 
+        # VM resumption resource reservation
         if mustresume:
             self.__schedule_resumption(vmrr, start)
 
+        # If the mapper couldn't find a mapping for the full duration
+        # of the lease, then we need to schedule a suspension.
         mustsuspend = (vmrr.end - vmrr.start) < remaining_duration
         if mustsuspend:
             self.__schedule_suspension(vmrr, end)
@@ -259,8 +599,8 @@
                 vmrr.end = vmrr.start + remaining_duration + shutdown_time
             self.__schedule_shutdown(vmrr)
         
-        if reservation:
-            self.future_reservations.add(lease)
+        if in_future:
+            self.future_leases.add(lease)
 
         susp_str = res_str = ""
         if mustresume:
@@ -271,160 +611,56 @@
 
         return vmrr, preemptions
 
-    def estimate_migration_time(self, lease):
-        migration = get_config().get("migration")
-        if migration == constants.MIGRATE_YES:
-            vmrr = lease.get_last_vmrr()
-            mem_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
-            for (vnode,pnode) in vmrr.nodes.items():
-                mem = vmrr.resources_in_pnode[pnode].get_by_type(constants.RES_MEM)
-                mem_in_pnode[pnode] += mem
-            max_mem_to_transfer = max(mem_in_pnode.values())
-            bandwidth = self.resourcepool.info.get_migration_bandwidth()
-            return estimate_transfer_time(max_mem_to_transfer, bandwidth)
-        elif migration == constants.MIGRATE_YES_NOTRANSFER:
-            return TimeDelta(seconds=0)        
 
-    def schedule_migration(self, lease, vmrr, nexttime):
-        last_vmrr = lease.get_last_vmrr()
-        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes])
+    def __find_fit_at_points(self, lease, requested_resources, changepoints, duration, min_duration):
+        """ Tries to map a lease in a given list of points in time
         
-        mustmigrate = False
-        for vnode in vnode_migrations:
-            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
-                mustmigrate = True
-                break
-            
-        if not mustmigrate:
-            return []
-
-        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
-            start = nexttime
-            end = nexttime
-            res = {}
-            migr_rr = MemImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
-            migr_rr.state = ResourceReservation.STATE_SCHEDULED
-            return [migr_rr]
-
-        # Figure out what migrations can be done simultaneously
-        migrations = []
-        while len(vnode_migrations) > 0:
-            pnodes = set()
-            migration = {}
-            for vnode in vnode_migrations:
-                origin = vnode_migrations[vnode][0]
-                dest = vnode_migrations[vnode][1]
-                if not origin in pnodes and not dest in pnodes:
-                    migration[vnode] = vnode_migrations[vnode]
-                    pnodes.add(origin)
-                    pnodes.add(dest)
-            for vnode in migration:
-                del vnode_migrations[vnode]
-            migrations.append(migration)
+        This method goes through a given list of points in time and tries
+        to find the earliest time at which that lease can be allocated
+        resources.
         
-        # Create migration RRs
-        start = max(last_vmrr.post_rrs[-1].end, nexttime)
-        bandwidth = self.resourcepool.info.get_migration_bandwidth()
-        migr_rrs = []
-        for m in migrations:
-            vnodes_to_migrate = m.keys()
-            max_mem_to_migrate = max([lease.requested_resources[vnode].get_quantity(constants.RES_MEM) for vnode in vnodes_to_migrate])
-            migr_time = estimate_transfer_time(max_mem_to_migrate, bandwidth)
-            end = start + migr_time
-            res = {}
-            for (origin,dest) in m.values():
-                resorigin = Capacity([constants.RES_NETOUT])
-                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
-                resdest = Capacity([constants.RES_NETIN])
-                resdest.set_quantity(constants.RES_NETIN, bandwidth)
-                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
-                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
-            migr_rr = MemImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
-            migr_rr.state = ResourceReservation.STATE_SCHEDULED
-            migr_rrs.append(migr_rr)
-            start = end
-            
-        return migr_rrs
-
-    def cancel_vm(self, vmrr):
-
-        if vmrr.backfill_reservation == True:
-            self.future_reservations.remove(vmrr.lease)
-
-        # If there are any pre-RRs that are scheduled, remove them
-        for rr in vmrr.pre_rrs:
-            if rr.state == ResourceReservation.STATE_SCHEDULED:
-                self.slottable.remove_reservation(rr)
-
-        # If there are any post RRs, remove them
-        for rr in vmrr.post_rrs:
-            self.slottable.remove_reservation(rr)
+        Arguments:
+        lease -- Lease to schedule
+        requested_resources -- A dictionary of lease node -> ResourceTuple.
+        changepoints -- The list of changepoints
+        duration -- The amount of time requested
+        min_duration -- The minimum amount of time that should be allocated
         
-        self.slottable.remove_reservation(vmrr)
-
-    
-    def preempt_vm(self, vmrr, t):
-        # Save original start and end time of the vmrr
-        old_start = vmrr.start
-        old_end = vmrr.end
-        self.__schedule_suspension(vmrr, t)
-        self.slottable.update_reservation_with_key_change(vmrr, old_start, old_end)
-        for susprr in vmrr.post_rrs:
-            self.slottable.add_reservation(susprr)
-            
-    def get_future_reschedulable_leases(self):
-        return list(self.future_reservations)
-        
-    def get_utilization(self, time):
-#        total = self.slottable.get_total_capacity()
-        util = {}
-#        reservations = self.slottable.get_reservations_at(time)
-#        for r in reservations:
-#            for node in r.resources_in_pnode:
-#                if isinstance(r, VMResourceReservation):
-#                    use = r.resources_in_pnode[node].get_by_type(constants.RES_CPU)
-#                    util[type(r)] = use + util.setdefault(type(r),0.0)
-#                elif isinstance(r, SuspensionResourceReservation) or isinstance(r, ResumptionResourceReservation) or isinstance(r, ShutdownResourceReservation):
-#                    use = r.vmrr.resources_in_pnode[node].get_by_type(constants.RES_CPU)
-#                    util[type(r)] = use + util.setdefault(type(r),0.0)
-#        util[None] = total - sum(util.values())
-#        for k in util:
-#            util[k] /= total
-            
-        return util              
-        
-    def can_suspend_at(self, lease, t):
-        # TODO: Make more general, should determine vmrr based on current time
-        vmrr = lease.get_last_vmrr()
-        time_until_suspend = t - vmrr.start
-        min_duration = self.__compute_scheduling_threshold(lease)
-        can_suspend = time_until_suspend >= min_duration        
-        return can_suspend
-
-
-    def can_reserve_in_future(self):
-        return len(self.future_reservations) < self.maxres
-                
-    def is_backfilling(self):
-        return self.maxres > 0
-
-
-    def __find_fit_at_points(self, lease, requested_resources, changepoints, duration, min_duration):
+        Returns:
+        start -- The time at which resources have been found for the lease
+        actualend -- The time at which the resources won't be available. Note
+        that this is not necessarily (start + duration) since the mapper
+        might be unable to find enough resources for the full requested duration.
+        mapping -- A mapping of lease nodes to physical nodes
+        preemptions -- A list of leases that would have to be preempted
+        for the lease to be scheduled at that time
+        (if no mapping is found, all these values are set to None)
+        """                 
         found = False
         
         for time, onlynodes in changepoints:
             start = time
             end = start + duration
             self.logger.debug("Attempting to map from %s to %s" % (start, end))
+            
+            # If suspension is disabled (or only serial suspension is
+            # allowed and the lease has more than one node), we will only
+            # accept mappings that go from "start" strictly until "end".
+            susptype = get_config().get("suspension")
+            if susptype == constants.SUSPENSION_NONE or (lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL):
+                strictend = True
+            else:
+                strictend = False
+
+            # Let the mapper work its magic
             mapping, actualend, preemptions = self.mapper.map(lease, 
                                                               requested_resources,
                                                               start, 
                                                               end, 
-                                                              strictend = False,
+                                                              strictend = strictend,
                                                               onlynodes = onlynodes)
             
+            # If the mapper found a mapping, we still have to check
+            # whether it satisfies the minimum duration.
             if mapping != None:
-                # TODO: Take into account case where suspension is disabled.
                 if actualend < end:
                     actualduration = actualend - start
                     if actualduration >= min_duration:
@@ -443,7 +679,35 @@
         else:
             return None, None, None, None
     
+    
     def __compute_susprem_times(self, vmrr, time, direction, exclusion, rate, override = None):
+        """ Computes the times at which suspend/resume operations would have to start
+        
+        When suspending or resuming a VM, the VM's memory is dumped to a
+        file on disk. To correctly estimate the time required to suspend
+        a lease with multiple VMs, Haizea makes sure that no two 
+        suspensions/resumptions happen at the same time (e.g., if eight
+        memory files were being saved at the same time to disk, the disk's
+        performance would be reduced in a way that is not as easy to estimate
+        as if only one file were being saved at a time). Based on a number
+        of parameters, this method estimates the times at which the 
+        suspend/resume commands would have to be sent to guarantee this
+        exclusion.
+                    
+        Arguments:
+        vmrr -- The VM reservation that will be suspended/resumed
+        time -- The time at which the suspend should end or the resume should start.
+        direction -- DIRECTION_BACKWARD: start at "time" and compute the
+        times going backward (for suspensions). DIRECTION_FORWARD: start
+        at "time" and compute the times going forward (for resumptions).
+        exclusion -- SUSPRES_EXCLUSION_GLOBAL (memory is saved to global filesystem)
+        or SUSPRES_EXCLUSION_LOCAL (saved to local filesystem)
+        rate -- The rate at which an individual VM is suspended/resumed
+        override -- If specified, then instead of computing the time to 
+        suspend/resume each VM based on its memory and the "rate" parameter,
+        use this override value.
+        
+        """         
         times = [] # (start, end, {pnode -> vnodes})
         enactment_overhead = get_config().get("enactment-overhead") 
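The exclusion argument above determines how many memory dumps may overlap.
A standalone sketch of how that choice affects the estimated suspension
time of a whole lease, assuming (as the docstring suggests) that a global
filesystem serializes all of a lease's memory dumps while a local
filesystem only serializes dumps on the same physical node; names and
figures are illustrative:

    def estimate_lease_suspend_time(vms_per_pnode, mem_mb, rate, exclusion):
        """vms_per_pnode: {pnode: number of the lease's VMs on that node}"""
        per_vm = float(mem_mb) / rate  # seconds to dump one VM's memory
        if exclusion == "global":
            # One dump at a time anywhere in the site.
            return sum(vms_per_pnode.values()) * per_vm
        else:
            # "local": only dumps on the same node are serialized.
            return max(vms_per_pnode.values()) * per_vm

    # Four 1024 MB VMs at 32 MB/s: 128 s fully serialized, 64 s when the
    # two nodes can each dump their two VMs in parallel.
    print(estimate_lease_suspend_time({1: 2, 2: 2}, 1024, 32, "global"))
    print(estimate_lease_suspend_time({1: 2, 2: 2}, 1024, 32, "local"))
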
 
@@ -547,7 +811,14 @@
         
         return times
     
+    
     def __schedule_shutdown(self, vmrr):
+        """ Schedules the shutdown of a VM reservation
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be shut down
+        
+        """                 
         config = get_config()
         shutdown_time = self.__estimate_shutdown_time(vmrr.lease)
 
@@ -561,12 +832,23 @@
         
         # If there are any post RRs, remove them
         for rr in vmrr.post_rrs:
-            self.slottable.removeReservation(rr)
+            self.slottable.remove_reservation(rr)
         vmrr.post_rrs = []
 
         vmrr.post_rrs.append(shutdown_rr)
 
+
     def __schedule_suspension(self, vmrr, suspend_by):
+        """ Schedules the suspension of a VM reservation
+                         
+        Most of the work is done in __compute_susprem_times. See that
+        method's documentation for more details.
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be suspended
+        suspend_by -- The time by which the VMs should be suspended.
+        
+        """            
         config = get_config()
         susp_exclusion = config.get("suspendresume-exclusion")
         override = get_config().get("override-suspend-time")
@@ -575,7 +857,10 @@
         if suspend_by < vmrr.start or suspend_by > vmrr.end:
             raise InconsistentScheduleError, "Tried to schedule a suspension by %s, which is outside the VMRR's duration (%s-%s)" % (suspend_by, vmrr.start, vmrr.end)
 
+        # Find the suspension times
         times = self.__compute_susprem_times(vmrr, suspend_by, constants.DIRECTION_BACKWARD, susp_exclusion, rate, override)
+        
+        # Create the suspension resource reservations
         suspend_rrs = []
         for (start, end, node_mappings) in times:
             suspres = {}
@@ -608,10 +893,22 @@
             self.slottable.remove_reservation(rr)
         vmrr.post_rrs = []
 
+        # Add the suspension RRs to the VM RR
         for susprr in suspend_rrs:
             vmrr.post_rrs.append(susprr)       
             
+            
     def __schedule_resumption(self, vmrr, resume_at):
+        """ Schedules the resumption of a VM reservation
+                         
+        Most of the work is done in __compute_susprem_times. See that
+        method's documentation for more details.
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be resumed
+        resume_at -- The time at which the resumption should start
+        
+        """                 
         config = get_config()
         resm_exclusion = config.get("suspendresume-exclusion")        
         override = get_config().get("override-resume-time")
@@ -620,7 +917,10 @@
         if resume_at < vmrr.start or resume_at > vmrr.end:
             raise InconsistentScheduleError, "Tried to schedule a resumption at %s, which is outside the VMRR's duration (%s-%s)" % (resume_at, vmrr.start, vmrr.end)
 
+        # Find the resumption times
         times = self.__compute_susprem_times(vmrr, resume_at, constants.DIRECTION_FORWARD, resm_exclusion, rate, override)
+        
+        # Create the resumption resource reservations
         resume_rrs = []
         for (start, end, node_mappings) in times:
             resmres = {}
@@ -646,15 +946,73 @@
             raise InconsistentScheduleError, "Determined resumption would end at %s, after the VMRR's end (%s) -- Resume time not being properly estimated?" % (resm_end, vmrr.end)
         
         vmrr.update_start(resm_end)
+        
+        # Add the resumption RRs to the VM RR
         for resmrr in resume_rrs:
             vmrr.pre_rrs.append(resmrr)        
            
+           
     def __compute_suspend_resume_time(self, mem, rate):
+        """ Compute the time to suspend/resume a single VM
+                            
+        Arguments:
+        mem -- Amount of memory used by the VM
+        rate -- The rate at which an individual VM is suspended/resumed
+        
+        """            
         time = float(mem) / rate
         time = round_datetime_delta(TimeDelta(seconds = time))
         return time
     
+    
+    def __estimate_suspend_time(self, lease):
+        """ Estimate the time to suspend an entire lease
+                            
+        Most of the work is done in __estimate_suspend_resume_time. See
+        that method's documentation for more details.
+        
+        Arguments:
+        lease -- Lease that is going to be suspended
+        
+        """               
+        rate = get_config().get("suspend-rate")
+        override = get_config().get("override-suspend-time")
+        if override != None:
+            return override
+        else:
+            return self.__estimate_suspend_resume_time(lease, rate)
+
+
+    def __estimate_resume_time(self, lease):
+        """ Estimate the time to resume an entire lease
+                            
+        Most of the work is done in __estimate_suspend_resume_time. See
+        that method's documentation for more details.
+        
+        Arguments:
+        lease -- Lease that is going to be resumed
+        
+        """           
+        rate = get_config().get("resume-rate") 
+        override = get_config().get("override-resume-time")
+        if override != None:
+            return override
+        else:
+            return self.__estimate_suspend_resume_time(lease, rate)    
+    
+    
     def __estimate_suspend_resume_time(self, lease, rate):
+        """ Estimate the time to suspend/resume an entire lease
+                            
+        Note that, unlike __compute_suspend_resume_time, this estimates
+        the time to suspend/resume an entire lease (which may involve
+        suspending several VMs)
+        
+        Arguments:
+        lease -- Lease that is going to be suspended/resumed
+        rate -- The rate at which an individual VM is suspended/resumed
+        
+        """              
         susp_exclusion = get_config().get("suspendresume-exclusion")        
         enactment_overhead = get_config().get("enactment-overhead") 
         mem = 0
@@ -666,29 +1024,44 @@
             # Overestimating
             return lease.numnodes * (self.__compute_suspend_resume_time(mem, rate) + enactment_overhead)
 
+
     def __estimate_shutdown_time(self, lease):
+        """ Estimate the time to shut down an entire lease
+                            
+        Arguments:
+        lease -- Lease that is going to be shutdown
+        
+        """            
         enactment_overhead = get_config().get("enactment-overhead").seconds
         return get_config().get("shutdown-time") + (enactment_overhead * lease.numnodes)
 
-    def __estimate_suspend_time(self, lease):
-        rate = get_config().get("suspend-rate")
-        override = get_config().get("override-suspend-time")
-        if override != None:
-            return override
-        else:
-            return self.__estimate_suspend_resume_time(lease, rate)
 
-    def __estimate_resume_time(self, lease):
-        rate = get_config().get("resume-rate") 
-        override = get_config().get("override-resume-time")
-        if override != None:
-            return override
-        else:
-            return self.__estimate_suspend_resume_time(lease, rate)
-
-
-    # TODO: Take into account other things like boot overhead, migration overhead, etc.
     def __compute_scheduling_threshold(self, lease):
+        """ Compute the scheduling threshold (the 'minimum duration') of a lease
+        
+        To avoid thrashing, Haizea will not schedule a lease unless all overheads
+        can be correctly scheduled (which includes image transfers, suspensions, etc.).
+        However, this can still result in situations where a lease is prepared,
+        and then immediately suspended because of a blocking lease in the future.
+        The scheduling threshold is used to specify that a lease must
+        not be scheduled unless it is guaranteed to run for a minimum amount of
+        time (the rationale behind this is that you ideally don't want leases
+        to be scheduled if they're not going to be active for at least as much time
+        as was spent in overheads).
+        
+        An important part of computing this value is the "scheduling threshold factor".
+        The default value is 1, meaning that the lease will be active for at least
+        as much time T as was spent on overheads (e.g., if preparing the lease requires
+        60 seconds, and we know that it will have to be suspended, requiring 30 seconds,
+        Haizea won't schedule the lease unless it can run for at least 90 seconds).
+        In other words, a scheduling threshold factor of F requires a minimum duration of 
+        F*T. A value of 0 could lead to thrashing, since Haizea could end up with
+        situations where a lease starts and immediately gets suspended.         
+        
+        Arguments:
+        lease -- Lease for which we want to find the scheduling threshold
+        """
+        # TODO: Take into account other things like boot overhead, migration overhead, etc.
         config = get_config()
         threshold = config.get("force-scheduling-threshold")
         if threshold != None:
@@ -696,6 +1069,10 @@
             return threshold
         else:
             factor = config.get("scheduling-threshold-factor")
+            
+            # First, figure out the "safe duration" (the minimum duration
+            # so that we at least allocate enough time for all the
+            # overheads).
             susp_overhead = self.__estimate_suspend_time(lease)
             safe_duration = susp_overhead
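As a concrete instance of the factor-times-overhead rule described in the
docstring above, reusing its example figures (the numbers and the helper
below are purely illustrative):

    def scheduling_threshold(overheads, factor=1):
        # Minimum runtime = factor * total time spent on overheads (F*T).
        return factor * sum(overheads)

    print(scheduling_threshold([60, 30]))            # 90 seconds
    print(scheduling_threshold([60, 30], factor=2))  # 180 seconds
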
             
@@ -726,6 +1103,12 @@
     #-------------------------------------------------------------------#
 
     def _handle_start_vm(self, l, rr):
+        """ Handles the start of a VMResourceReservation       
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        rr -- The VMResourceReservation
+        """        
         self.logger.debug("LEASE-%i Start of handleStartVM" % l.id)
         l.print_contents()
         lease_state = l.get_state()
@@ -755,8 +1138,8 @@
         
         # If this was a future reservation (as determined by backfilling),
         # remove that status, since the future is now.
-        if rr.backfill_reservation == True:
-            self.future_reservations.remove(l)
+        if rr.lease in self.future_leases:
+            self.future_leases.remove(l)
         
         l.print_contents()
         self.logger.debug("LEASE-%i End of handleStartVM" % l.id)
@@ -764,6 +1147,12 @@
 
 
     def _handle_end_vm(self, l, rr):
+        """ Handles the end of a VMResourceReservation       
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        rr -- The VMResourceReservation
+        """        
         self.logger.debug("LEASE-%i Start of handleEndVM" % l.id)
         self.logger.vdebug("LEASE-%i Before:" % l.id)
         l.print_contents()
@@ -777,44 +1166,31 @@
         self.logger.debug("LEASE-%i End of handleEndVM" % l.id)
         self.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
 
+
     def _handle_unscheduled_end_vm(self, l, vmrr):
+        """ Handles the unexpected end of a VMResourceReservation
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        vmrr -- The VMResourceReservation
+        """        
+        
         self.logger.info("LEASE-%i The VM has ended prematurely." % l.id)
         for rr in vmrr.post_rrs:
             self.slottable.remove_reservation(rr)
         vmrr.post_rrs = []
-        # TODO: slideback shutdown RRs
         vmrr.end = get_clock().get_time()
         self._handle_end_vm(l, vmrr)
 
-    def _handle_start_shutdown(self, l, rr):
-        self.logger.debug("LEASE-%i Start of handleStartShutdown" % l.id)
-        l.print_contents()
-        rr.state = ResourceReservation.STATE_ACTIVE
-        try:
-            self.resourcepool.stop_vms(l, rr)
-        except EnactmentError, exc:
-            self.logger.error("Enactment error when shutting down VMs.")
-            # Right now, this is a non-recoverable error, so we just
-            # propagate it upwards to the lease scheduler
-            # In the future, it may be possible to react to these
-            # kind of errors.
-            raise
-        
-        l.print_contents()
-        self.logger.debug("LEASE-%i End of handleStartShutdown" % l.id)
 
-    def _handle_end_shutdown(self, l, rr):
-        self.logger.debug("LEASE-%i Start of handleEndShutdown" % l.id)
-        l.print_contents()
-        rr.state = ResourceReservation.STATE_DONE
-        l.print_contents()
-        self.logger.debug("LEASE-%i End of handleEndShutdown" % l.id)
-        self.logger.info("Lease %i's VMs have shutdown." % (l.id))
-        raise NormalEndLeaseException
-
-
-
     def _handle_start_suspend(self, l, rr):
+        """ Handles the start of a SuspensionResourceReservation       
+        
+        Arguments:
+        l -- Lease the SuspensionResourceReservation belongs to
+        rr -- The SuspensionResourceReservation
+        
+        """
         self.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id)
         l.print_contents()
         rr.state = ResourceReservation.STATE_ACTIVE
@@ -835,7 +1211,14 @@
             self.logger.info("Suspending lease %i..." % (l.id))
         self.logger.debug("LEASE-%i End of handleStartSuspend" % l.id)
 
+
     def _handle_end_suspend(self, l, rr):
+        """ Handles the end of a SuspensionResourceReservation       
+        
+        Arguments:
+        l -- Lease the SuspensionResourceReservation belongs to
+        rr -- The SuspensionResourceReservation
+        """               
         self.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id)
         l.print_contents()
         # TODO: React to incomplete suspend
@@ -850,7 +1233,15 @@
         if l.get_state() == Lease.STATE_SUSPENDED_PENDING:
             raise RescheduleLeaseException
 
+
     def _handle_start_resume(self, l, rr):
+        """ Handles the start of a ResumptionResourceReservation       
+        
+        Arguments:
+        l -- Lease the ResumptionResourceReservation belongs to
+        rr -- The ResumptionResourceReservation
+        
+        """             
         self.logger.debug("LEASE-%i Start of handleStartResume" % l.id)
         l.print_contents()
         
@@ -871,7 +1262,15 @@
             self.logger.info("Resuming lease %i..." % (l.id))
         self.logger.debug("LEASE-%i End of handleStartResume" % l.id)
 
+
     def _handle_end_resume(self, l, rr):
+        """ Handles the end of a ResumptionResourceReservation       
+        
+        Arguments:
+        l -- Lease the ResumptionResourceReservation belongs to
+        rr -- The ResumptionResourceReservation
+        
+        """        
         self.logger.debug("LEASE-%i Start of handleEndResume" % l.id)
         l.print_contents()
         # TODO: React to incomplete resume
@@ -885,7 +1284,57 @@
         l.print_contents()
         self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
 
+
+    def _handle_start_shutdown(self, l, rr):
+        """ Handles the start of a ShutdownResourceReservation       
+        
+        Arguments:
+        l -- Lease the ShutdownResourceReservation belongs to
+        rr -- The ShutdownResourceReservation
+        """        
+        
+        self.logger.debug("LEASE-%i Start of handleStartShutdown" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_ACTIVE
+        try:
+            self.resourcepool.stop_vms(l, rr)
+        except EnactmentError, exc:
+            self.logger.error("Enactment error when shutting down VMs.")
+            # Right now, this is a non-recoverable error, so we just
+            # propagate it upwards to the lease scheduler
+            # In the future, it may be possible to react to these
+            # kind of errors.
+            raise
+        
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleStartShutdown" % l.id)
+
+
+    def _handle_end_shutdown(self, l, rr):
+        """ Handles the end of a ShutdownResourceReservation       
+        
+        Arguments:
+        l -- Lease the ShutdownResourceReservation belongs to
+        rr -- The ShutdownResourceReservation
+        
+        """
+        self.logger.debug("LEASE-%i Start of handleEndShutdown" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_DONE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndShutdown" % l.id)
+        self.logger.info("Lease %i's VMs have shutdown." % (l.id))
+        raise NormalEndLeaseException
+    
+
     def _handle_start_migrate(self, l, rr):
+        """ Handles the start of a MemImageMigrationResourceReservation       
+        
+        Arguments:
+        l -- Lease the MemImageMigrationResourceReservation belongs to
+        rr -- The MemImageMigrationResourceReservation
+        
+        """             
         self.logger.debug("LEASE-%i Start of handleStartMigrate" % l.id)
         l.print_contents()
         rr.state = ResourceReservation.STATE_ACTIVE
@@ -893,7 +1342,15 @@
         self.logger.debug("LEASE-%i End of handleStartMigrate" % l.id)
         self.logger.info("Migrating lease %i..." % (l.id))
 
+
     def _handle_end_migrate(self, l, rr):
+        """ Handles the end of a MemImageMigrationResourceReservation       
+        
+        Arguments:
+        l -- Lease the MemImageMigrationResourceReservation belongs to
+        rr -- The MemImageMigrationResourceReservation
+        
+        """                
         self.logger.debug("LEASE-%i Start of handleEndMigrate" % l.id)
         l.print_contents()
 
@@ -913,10 +1370,9 @@
 
 
 class VMResourceReservation(ResourceReservation):
-    def __init__(self, lease, start, end, nodes, res, backfill_reservation):
+    def __init__(self, lease, start, end, nodes, res):
         ResourceReservation.__init__(self, lease, start, end, res)
         self.nodes = nodes # { vnode -> pnode }
-        self.backfill_reservation = backfill_reservation
         self.pre_rrs = []
         self.post_rrs = []
 
@@ -976,12 +1432,6 @@
             self.logger.log(loglevel, "--")
             susprr.print_contents(loglevel)
 
-    def xmlrpc_marshall(self):
-        rr = ResourceReservation.xmlrpc_marshall(self)
-        rr["type"] = "VM"
-        rr["nodes"] = self.nodes.items()
-        return rr
-
         
 class SuspensionResourceReservation(ResourceReservation):
     def __init__(self, lease, start, end, res, vnodes, vmrr):
@@ -1000,10 +1450,6 @@
     def is_last(self):
         return (self == self.vmrr.post_rrs[-1])   
         
-    def xmlrpc_marshall(self):
-        rr = ResourceReservation.xmlrpc_marshall(self)
-        rr["type"] = "SUSP"
-        return rr
         
 class ResumptionResourceReservation(ResourceReservation):
     def __init__(self, lease, start, end, res, vnodes, vmrr):
@@ -1024,10 +1470,6 @@
         resm_rrs = [r for r in self.vmrr.pre_rrs if isinstance(r, ResumptionResourceReservation)]
         return (self == resm_rrs[-1])
     
-    def xmlrpc_marshall(self):
-        rr = ResourceReservation.xmlrpc_marshall(self)
-        rr["type"] = "RESM"
-        return rr
     
 class ShutdownResourceReservation(ResourceReservation):
     def __init__(self, lease, start, end, res, vnodes, vmrr):
@@ -1039,10 +1481,6 @@
         self.logger.log(loglevel, "Type           : SHUTDOWN")
         ResourceReservation.print_contents(self, loglevel)
 
-    def xmlrpc_marshall(self):
-        rr = ResourceReservation.xmlrpc_marshall(self)
-        rr["type"] = "SHTD"
-        return rr
 
 class MemImageMigrationResourceReservation(MigrationResourceReservation):
     def __init__(self, lease, start, end, res, vmrr, transfers):


