[haizea-commit] r784 - in branches/1.1/src/haizea: core core/scheduler pluggable/policies

haizea-commit at mailman.cs.uchicago.edu
Wed Jan 20 16:23:24 CST 2010


Author: borja
Date: 2010-01-20 16:23:24 -0600 (Wed, 20 Jan 2010)
New Revision: 784

Modified:
   branches/1.1/src/haizea/core/manager.py
   branches/1.1/src/haizea/core/scheduler/lease_scheduler.py
   branches/1.1/src/haizea/core/scheduler/mapper.py
   branches/1.1/src/haizea/core/scheduler/vm_scheduler.py
   branches/1.1/src/haizea/pluggable/policies/preemption.py
Log:
When scheduling deadline leases, perform the preemptions at mapping time. This fixes a problem where the preemption policy could select leases that "looked" preemptable because of their slack, but turned out not to be preemptable because they could not actually be rescheduled.

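For context, the core of this change is that the new DeadlineMapper tentatively preempts and reschedules each policy-ranked candidate inside a slot-table snapshot (push_state / pop_state), so a lease only ends up in the preemption list if it can really be moved out of the way. Below is a minimal, self-contained sketch of that pattern; Unschedulable, ToySlotTable and select_preemptees are illustrative stand-ins rather than Haizea classes, and the real mapper interleaves this with the node-mapping loop added to mapper.py further down.

class Unschedulable(Exception):
    pass

class ToySlotTable(object):
    def __init__(self):
        self.state = {}
        self._snapshots = []
    def push_state(self, leases):
        # Snapshot the current state before making tentative changes.
        self._snapshots.append(dict(self.state))
    def pop_state(self, discard=False):
        # discard=True commits the tentative changes; otherwise roll back.
        saved = self._snapshots.pop()
        if not discard:
            self.state = saved

def select_preemptees(slottable, candidates, reschedule):
    # candidates: leases ranked by the preemption policy; reschedule: a
    # callable that moves a lease out of the way or raises Unschedulable.
    slottable.push_state(candidates)
    chosen = []
    for lease in candidates:
        try:
            reschedule(lease)        # slack alone is not enough: it must fit elsewhere
            chosen.append(lease)
        except Unschedulable:
            pass                     # looked preemptable, but cannot be rescheduled
    slottable.pop_state(discard=bool(chosen))
    return chosen

table = ToySlotTable()
always_fits = lambda lease: None     # pretend every candidate can be rescheduled
print select_preemptees(table, ["lease-42", "lease-17"], always_fits)
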
Modified: branches/1.1/src/haizea/core/manager.py
===================================================================
--- branches/1.1/src/haizea/core/manager.py	2010-01-20 22:21:42 UTC (rev 783)
+++ branches/1.1/src/haizea/core/manager.py	2010-01-20 22:23:24 UTC (rev 784)
@@ -223,10 +223,10 @@
             preparation_scheduler = ImageTransferPreparationScheduler(slottable, resourcepool, deploy_enact)    
     
         # VM mapper and scheduler
-        mapper = self.config.get("mapper")
-        mapper = mapper_mappings.get(mapper, mapper)
-        mapper = import_class(mapper)
-        mapper = mapper(slottable, self.policy)
+        mapper_name = self.config.get("mapper")
+        mapper_cls = mapper_mappings.get(mapper_name, mapper_name)
+        mapper_cls = import_class(mapper_cls)
+        mapper = mapper_cls(slottable, self.policy)
         
         # When using backfilling, set the number of leases that can be
         # scheduled in the future.
@@ -241,7 +241,10 @@
             max_in_future = self.config.get("backfilling-reservations")
         
         vm_scheduler = VMScheduler(slottable, resourcepool, mapper, max_in_future)
-    
+        
+        if mapper_name == "deadline":  # Kludge
+            mapper.set_vm_scheduler(vm_scheduler)
+
         # Statistics collection 
         attrs = dict([(attr, self.config.get_attr(attr)) for attr in self.config.get_attrs()])    
         

Modified: branches/1.1/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/lease_scheduler.py	2010-01-20 22:21:42 UTC (rev 783)
+++ branches/1.1/src/haizea/core/scheduler/lease_scheduler.py	2010-01-20 22:23:24 UTC (rev 784)
@@ -502,7 +502,7 @@
         leases.sort(key= lambda l: (l.deadline - nexttime) / l.get_remaining_duration_at(nexttime))
         if len(leases) > 0:
             self.logger.debug("Rescheduling future deadline leases")
-            #self.slottable.save(leases)
+            #self.slottable.push_state(leases)
             feasible = True
             node_ids = self.slottable.nodes.keys()
             earliest = {}
@@ -530,6 +530,7 @@
                 feasible = False
             
             if feasible:
+                self.slottable.pop_state(discard=True)
                 for l in [l2 for l2 in orig_vmrrs if l2 in scheduled]:
                     for vmrr in orig_vmrrs[l]:
                         vmrr.lease.remove_vmrr(vmrr)
@@ -537,8 +538,7 @@
                 for lease2, vmrr in add_vmrrs.items():
                     lease2.append_vmrr(vmrr)            
             else:
-                self.slottable.restore()
-            print "FOO2"
+                self.slottable.pop_state()
 
 
 
@@ -670,11 +670,8 @@
         # go ahead and preempt them.
         if len(preemptions) > 0:
             self.logger.info("Must preempt leases %s to make room for lease #%i" % ([l.id for l in preemptions], lease.id))
-            if lease.get_type() == Lease.DEADLINE:
-                self.__preempt_leases_deadline(l, vmrr, preemptions, preemption_time=vmrr.start, nexttime=nexttime)
-            else:
-                for l in preemptions:
-                    self.__preempt_lease(l, preemption_time=vmrr.start)
+            for l in preemptions:
+                self.__preempt_lease(l, preemption_time=vmrr.start)
                 
         # Schedule lease preparation
         is_ready = False
@@ -797,162 +794,6 @@
 
         self.logger.vdebug("Lease after preemption:")
         lease.print_contents()
-                
-    def __preempt_leases_deadline(self, lease, vmrr, preempted_leases, preemption_time, nexttime):
-        self.slottable.save(preempted_leases)  
-        
-        # Pre-VM RRs (if any)
-        for rr in vmrr.pre_rrs:
-            self.slottable.add_reservation(rr)
-            
-        # VM
-        self.slottable.add_reservation(vmrr)
-        
-        # Post-VM RRs (if any)
-        for rr in vmrr.post_rrs:
-            self.slottable.add_reservation(rr)        
-             
-        feasible = True
-        cancelled = []
-        new_state = {}
-        durs = {}
-        for lease_to_preempt in preempted_leases:
-            preempt_vmrr = lease_to_preempt.get_vmrr_at(preemption_time)
-            
-            susptype = get_config().get("suspension")
-            
-            cancel = False
-            
-            if susptype == constants.SUSPENSION_NONE:
-                self.logger.debug("Lease %i will be cancelled because suspension is not supported." % lease_to_preempt.id)
-                cancel = True
-            else:
-                if preempt_vmrr == None:
-                    self.logger.debug("Lease %i was set to start in the middle of the preempting lease." % lease_to_preempt.id)
-                    cancel = True
-                else:
-                    can_suspend = self.vm_scheduler.can_suspend_at(lease_to_preempt, preemption_time, nexttime)
-                    
-                    if not can_suspend:
-                        self.logger.debug("Suspending lease %i does not meet scheduling threshold." % lease_to_preempt.id)
-                        cancel = True
-                    else:
-                        self.logger.debug("Lease %i will be suspended." % lease_to_preempt.id)
-                        
-            after_vmrrs = lease_to_preempt.get_vmrr_after(preemption_time)
-
-            if not cancel:
-                # Preempting
-                durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_time)             
-                self.vm_scheduler.preempt_vm(preempt_vmrr, min(preemption_time,preempt_vmrr.end))
-                susp_time = preempt_vmrr.post_rrs[-1].end - preempt_vmrr.post_rrs[0].start
-                durs[lease_to_preempt] += susp_time
-                                        
-            else:                                
-                cancelled.append(lease_to_preempt.id)
-
-                if preempt_vmrr != None:
-                    durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preempt_vmrr.start)             
-                    
-                    lease_to_preempt.remove_vmrr(preempt_vmrr)
-                    self.vm_scheduler.cancel_vm(preempt_vmrr)
-
-                    # Cancel future VMs
-                    for after_vmrr in after_vmrrs:
-                        lease_to_preempt.remove_vmrr(after_vmrr)
-                        self.vm_scheduler.cancel_vm(after_vmrr)                   
-                    after_vmrrs=[]
-                    if preempt_vmrr.state == ResourceReservation.STATE_ACTIVE:
-                        last_vmrr = lease_to_preempt.get_last_vmrr()
-                        if last_vmrr != None and last_vmrr.is_suspending():
-                            new_state[lease_to_preempt] = Lease.STATE_SUSPENDED_SCHEDULED
-                        else:
-                            # The VMRR we're preempting is the active one
-                            new_state[lease_to_preempt] = Lease.STATE_READY
-                else:
-                    durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_time)             
-                    lease_state = lease_to_preempt.get_state()
-                    if lease_state == Lease.STATE_ACTIVE:
-                        # Don't do anything. The lease is active, but not in the VMs
-                        # we're preempting.
-                        new_state[lease_to_preempt] = None
-                    elif lease_state in (Lease.STATE_SUSPENDING, Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_SCHEDULED):
-                        # Don't do anything. The lease is suspending or suspended. 
-                        # Must stay that way.
-                        new_state[lease_to_preempt] = None
-                    elif lease_state != Lease.STATE_READY:
-                        new_state[lease_to_preempt] = Lease.STATE_READY   
-                        
-            # Cancel future VMs
-            for after_vmrr in after_vmrrs:
-                lease_to_preempt.remove_vmrr(after_vmrr)
-                self.vm_scheduler.cancel_vm(after_vmrr)                   
-
-        for lease_to_preempt in preempted_leases:
-            dur = durs[lease_to_preempt]
-            node_ids = self.slottable.nodes.keys()
-            earliest = {}
-   
-            try:
-                if lease_to_preempt.id in cancelled:
-                    last_vmrr = lease_to_preempt.get_last_vmrr()
-                    if last_vmrr != None and last_vmrr.is_suspending():
-                        override_state = Lease.STATE_SUSPENDED_PENDING
-                        earliest_time = max(last_vmrr.post_rrs[-1].end, nexttime)
-                    else:
-                        override_state = None
-                        earliest_time = nexttime
-                    for node in node_ids:
-                        earliest[node] = EarliestStartingTime(earliest_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
-                    (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state)
-                else:
-                    for node in node_ids:
-                        earliest[node] = EarliestStartingTime(preemption_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
-                    (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state = Lease.STATE_SUSPENDED_PENDING)
-
-                # Add VMRR to lease
-                lease_to_preempt.append_vmrr(new_vmrr)
-                
-        
-                # Add resource reservations to slottable
-                
-                # Pre-VM RRs (if any)
-                for rr in new_vmrr.pre_rrs:
-                    self.slottable.add_reservation(rr)
-                    
-                # VM
-                self.slottable.add_reservation(new_vmrr)
-                
-                # Post-VM RRs (if any)
-                for rr in new_vmrr.post_rrs:
-                    self.slottable.add_reservation(rr)                    
-            except NotSchedulableException:
-                feasible = False
-                break
-
-        if not feasible:
-            self.slottable.restore()
-            raise NotSchedulableException, "Unable to preempt leases to make room for lease."
-        else:
-            # Pre-VM RRs (if any)
-            for rr in vmrr.pre_rrs:
-                self.slottable.remove_reservation(rr)
-                
-            # VM
-            self.slottable.remove_reservation(vmrr)
-            
-            # Post-VM RRs (if any)
-            for rr in vmrr.post_rrs:
-                self.slottable.remove_reservation(rr)     
-
-            for l in new_state:
-                if new_state[l] != None:
-                    l.state_machine.state = new_state[l]
-
-            for l in preempted_leases:
-                self.logger.vdebug("Lease %i after preemption:" % l.id)
-                l.print_contents()                
-            
   
     def __enqueue(self, lease):
         """Queues a best-effort lease request

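The first lease_scheduler.py hunk above sorts the future deadline leases it is about to reschedule by (l.deadline - nexttime) / l.get_remaining_duration_at(nexttime), i.e. deadline slack expressed as a ratio, so the tightest leases are rescheduled first. A small illustrative example of that ordering, using plain datetime.timedelta values (Haizea's own time objects divide directly; timedelta needs total_seconds() here):

from datetime import timedelta

nexttime = timedelta(0)
# (name, deadline relative to now, remaining duration)
leases = [("A", timedelta(hours=2), timedelta(hours=1)),      # ratio 2.0
          ("B", timedelta(hours=3), timedelta(minutes=150))]  # ratio 1.2
leases.sort(key=lambda l: (l[1] - nexttime).total_seconds() / l[2].total_seconds())
print [name for name, _, _ in leases]    # ['B', 'A']: least slack first
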
Modified: branches/1.1/src/haizea/core/scheduler/mapper.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/mapper.py	2010-01-20 22:21:42 UTC (rev 783)
+++ branches/1.1/src/haizea/core/scheduler/mapper.py	2010-01-20 22:23:24 UTC (rev 784)
@@ -22,13 +22,20 @@
 VMs) and maps them to physical nodes (if such a mapping exists).
 """
 
-from haizea.common.utils import abstract
+from haizea.common.utils import abstract, get_config, get_clock
+from haizea.core.scheduler import NotSchedulableException, EarliestStartingTime
+from haizea.core.scheduler.slottable import ResourceReservation
+from haizea.core.leases import Lease
+import haizea.common.constants as constants
+
 import operator
+import logging
 
 # This dictionary provides a shorthand notation for any mappers
 # included in this module (this shorthand notation can be used in
 # the configuration file)
-class_mappings = {"greedy": "haizea.core.scheduler.mapper.GreedyMapper"}
+class_mappings = {"greedy": "haizea.core.scheduler.mapper.GreedyMapper",
+                  "deadline": "haizea.core.scheduler.mapper.DeadlineMapper"}
 
 class Mapper(object):
     """Base class for mappers
@@ -44,6 +51,7 @@
         """
         self.slottable = slottable
         self.policy = policy
+        self.logger = logging.getLogger("MAP")
     
     
     def map(self, requested_resources, start, end, strictend, onlynodes = None):
@@ -266,3 +274,314 @@
         vnodes = [k for k,v in vnodes]
         return vnodes      
                     
+
+class DeadlineMapper(Mapper):
+    """Haizea's greedy mapper w/ deadline-sensitive preemptions
+    
+    """
+    
+    def __init__(self, slottable, policy):
+        """Constructor
+        
+        Arguments
+        slottable -- A fully constructed SlotTable
+        policy -- A fully constructed PolicyManager
+        """        
+        Mapper.__init__(self, slottable, policy)
+        
+    def set_vm_scheduler(self, vm_scheduler):
+        self.vm_scheduler = vm_scheduler
+        
+    def map(self, lease, requested_resources, start, end, strictend, allow_preemption=False, onlynodes=None):
+        """The mapping function
+        
+        See documentation in Mapper for more details
+        """        
+        # Generate an availability window at time "start"
+        aw = self.slottable.get_availability_window(start)
+
+        nodes = aw.get_nodes_at(start)     
+        if onlynodes != None:
+            nodes = list(set(nodes) & onlynodes)
+
+        # Get an ordered list of physical nodes
+        pnodes = self.policy.sort_hosts(nodes, start, lease)
+        
+        # Get an ordered list of lease nodes
+        vnodes = self.__sort_vnodes(requested_resources)
+        
+        if allow_preemption:
+            # Get the leases that intersect with the requested interval.
+            leases = aw.get_leases_between(start, end)
+            # Ask the policy engine to sort the leases based on their
+            # preemptability
+            leases = self.policy.sort_leases(lease, leases, start)
+            
+            preemptable_leases = leases
+        else:
+            preemptable_leases = []
+
+        if allow_preemption:
+            self.slottable.push_state(preemptable_leases) 
+
+        preempting = []
+        nexttime = get_clock().get_next_schedulable_time()
+        
+        # Try to find a mapping. Each iteration of this loop goes through
+        # all the lease nodes and tries to find a mapping. The first
+        # iteration assumes no leases can be preempted, and each successive
+        # iteration assumes one more lease can be preempted.
+        mapping = {}
+        done = False
+        while not done:
+            # Start at the first lease node
+            vnodes_pos = 0
+            cur_vnode = vnodes[vnodes_pos]
+            cur_vnode_capacity = requested_resources[cur_vnode]
+            maxend = end 
+            
+            # Go through all the physical nodes.
+            # In each iteration, we try to map as many lease nodes
+            # as possible into the physical nodes.
+            # "cur_vnode_capacity" holds the capacity of the vnode we are currently
+            # trying to map. "need_to_map" is the amount of resources we are 
+            # trying to map into the current physical node (which might be
+            # more than one lease node).
+            for pnode in pnodes:
+                # need_to_map is initialized to the capacity of whatever
+                # lease node we are trying to map now.
+                need_to_map = self.slottable.create_empty_resource_tuple()
+                need_to_map.incr(cur_vnode_capacity)
+                avail=aw.get_ongoing_availability(start, pnode, preempted_leases = preempting)
+                
+                # Try to fit as many lease nodes as we can into this physical node
+                pnode_done = False
+                while not pnode_done:
+                    if avail.fits(need_to_map, until = maxend):
+                        # In this case, we can fit "need_to_map" into the
+                        # physical node.
+                        mapping[cur_vnode] = pnode
+                        vnodes_pos += 1
+                        if vnodes_pos >= len(vnodes):
+                            # No more lease nodes to map, we're done.
+                            done = True
+                            break
+                        else:
+                            # Advance to the next lease node, and add its
+                            # capacity to need_to_map
+                            cur_vnode = vnodes[vnodes_pos]
+                            cur_vnode_capacity = requested_resources[cur_vnode]
+                            need_to_map.incr(cur_vnode_capacity)
+                    else:
+                        # We couldn't fit the lease node. If we need to
+                        # find a mapping that spans the entire requested
+                        # interval, then we're done checking this physical node.
+                        if strictend:
+                            pnode_done = True
+                        else:
+                            # Otherwise, check what the longest interval
+                            # we could fit in this physical node
+                            latest = avail.latest_fit(need_to_map)
+                            if latest == None:
+                                pnode_done = True
+                            else:
+                                maxend = latest
+                    
+                if done:
+                    break
+
+            # If there's no more leases that we could preempt,
+            # we're done.
+            if len(preemptable_leases) == 0:
+                done = True
+            elif not done:
+                # Otherwise, add another lease to the list of
+                # leases we are preempting
+                added = False
+                while not added:
+                    preemptee = preemptable_leases.pop()
+                    try:
+                        self.__preempt_lease_deadline(preemptee, start, end, nexttime)
+                        preempting.append(preemptee)
+                        added = True
+                    except NotSchedulableException:
+                        if len(preemptable_leases) == 0:
+                            done = True
+                            break
+                    
+
+        if len(mapping) != len(requested_resources):
+            # No mapping found
+            if allow_preemption:
+                self.slottable.pop_state()
+            return None, None, None
+        else:
+            if allow_preemption:
+                self.slottable.pop_state(discard = True)
+            return mapping, maxend, preempting
+
+    def __sort_vnodes(self, requested_resources):
+        """Sorts the lease nodes
+        
+        Greedily sorts the lease nodes so the mapping algorithm
+        will first try to map those that require the highest
+        capacity.
+        """            
+        
+        # Find the maximum requested resources for each resource type
+        max_res = self.slottable.create_empty_resource_tuple()
+        for res in requested_resources.values():
+            for i in range(len(res._single_instance)):
+                if res._single_instance[i] > max_res._single_instance[i]:
+                    max_res._single_instance[i] = res._single_instance[i]
+                    
+        # Normalize the capacities of the lease nodes (divide each
+        # requested amount of a resource type by the maximum amount)
+        norm_res = {}
+        for k,v in requested_resources.items():
+            norm_capacity = 0
+            for i in range(len(max_res._single_instance)):
+                if max_res._single_instance[i] > 0:
+                    norm_capacity += v._single_instance[i] / float(max_res._single_instance[i])
+            norm_res[k] = norm_capacity
+             
+        vnodes = norm_res.items()
+        vnodes.sort(key=operator.itemgetter(1), reverse = True)
+        vnodes = [k for k,v in vnodes]
+        return vnodes      
+                    
+    def __preempt_lease_deadline(self, lease_to_preempt, preemption_start_time, preemption_end_time, nexttime):
+        self.logger.debug("Attempting to preempt lease %i" % lease_to_preempt.id)
+        self.slottable.push_state([lease_to_preempt])  
+        print "PREEMPTING..."
+         
+        feasible = True
+        cancelled = []
+        new_state = {}
+        durs = {}
+
+        preempt_vmrr = lease_to_preempt.get_vmrr_at(preemption_start_time)
+        
+        susptype = get_config().get("suspension")
+        
+        cancel = False
+        
+        if susptype == constants.SUSPENSION_NONE:
+            self.logger.debug("Lease %i will be cancelled because suspension is not supported." % lease_to_preempt.id)
+            cancel = True
+        else:
+            if preempt_vmrr == None:
+                self.logger.debug("Lease %i was set to start in the middle of the preempting lease." % lease_to_preempt.id)
+                cancel = True
+            else:
+                can_suspend = self.vm_scheduler.can_suspend_at(lease_to_preempt, preemption_start_time, nexttime)
+                
+                if not can_suspend:
+                    self.logger.debug("Suspending lease %i does not meet scheduling threshold." % lease_to_preempt.id)
+                    cancel = True
+                else:
+                    self.logger.debug("Lease %i will be suspended." % lease_to_preempt.id)
+                    
+        after_vmrrs = lease_to_preempt.get_vmrr_after(preemption_start_time)
+
+        if not cancel:
+            # Preempting
+            durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_start_time)             
+            self.vm_scheduler.preempt_vm(preempt_vmrr, min(preemption_start_time,preempt_vmrr.end))
+            susp_time = preempt_vmrr.post_rrs[-1].end - preempt_vmrr.post_rrs[0].start
+            durs[lease_to_preempt] += susp_time
+                                    
+        else:                                
+            cancelled.append(lease_to_preempt.id)
+
+            if preempt_vmrr != None:
+                durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preempt_vmrr.start)             
+                
+                lease_to_preempt.remove_vmrr(preempt_vmrr)
+                self.vm_scheduler.cancel_vm(preempt_vmrr)
+
+                # Cancel future VMs
+                for after_vmrr in after_vmrrs:
+                    lease_to_preempt.remove_vmrr(after_vmrr)
+                    self.vm_scheduler.cancel_vm(after_vmrr)                   
+                after_vmrrs=[]
+                if preempt_vmrr.state == ResourceReservation.STATE_ACTIVE:
+                    last_vmrr = lease_to_preempt.get_last_vmrr()
+                    if last_vmrr != None and last_vmrr.is_suspending():
+                        new_state[lease_to_preempt] = Lease.STATE_SUSPENDED_SCHEDULED
+                    else:
+                        # The VMRR we're preempting is the active one
+                        new_state[lease_to_preempt] = Lease.STATE_READY
+            else:
+                durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_start_time)             
+                lease_state = lease_to_preempt.get_state()
+                if lease_state == Lease.STATE_ACTIVE:
+                    # Don't do anything. The lease is active, but not in the VMs
+                    # we're preempting.
+                    new_state[lease_to_preempt] = None
+                elif lease_state in (Lease.STATE_SUSPENDING, Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_SCHEDULED):
+                    # Don't do anything. The lease is suspending or suspended. 
+                    # Must stay that way.
+                    new_state[lease_to_preempt] = None
+                elif lease_state != Lease.STATE_READY:
+                    new_state[lease_to_preempt] = Lease.STATE_READY   
+                    
+        # Cancel future VMs
+        for after_vmrr in after_vmrrs:
+            lease_to_preempt.remove_vmrr(after_vmrr)
+            self.vm_scheduler.cancel_vm(after_vmrr)                   
+
+        dur = durs[lease_to_preempt]
+        node_ids = self.slottable.nodes.keys()
+        earliest = {}
+   
+        try:
+            if lease_to_preempt.id in cancelled:
+                last_vmrr = lease_to_preempt.get_last_vmrr()
+                if last_vmrr != None and last_vmrr.is_suspending():
+                    override_state = Lease.STATE_SUSPENDED_PENDING
+                else:
+                    override_state = None
+                for node in node_ids:
+                    earliest[node] = EarliestStartingTime(preemption_end_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
+                (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state)
+            else:
+                for node in node_ids:
+                    earliest[node] = EarliestStartingTime(preemption_end_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
+                (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state = Lease.STATE_SUSPENDED_PENDING)
+
+            # Add VMRR to lease
+            lease_to_preempt.append_vmrr(new_vmrr)
+            
+    
+            # Add resource reservations to slottable
+            
+            # Pre-VM RRs (if any)
+            for rr in new_vmrr.pre_rrs:
+                self.slottable.add_reservation(rr)
+                
+            # VM
+            self.slottable.add_reservation(new_vmrr)
+            
+            # Post-VM RRs (if any)
+            for rr in new_vmrr.post_rrs:
+                self.slottable.add_reservation(rr)                    
+        except NotSchedulableException:
+            feasible = False
+
+        if not feasible:
+            print "UNABLE TO PREEMPT"
+            self.logger.debug("Unable to preempt lease %i" % lease_to_preempt.id)
+            self.slottable.pop_state()
+            raise NotSchedulableException, "Unable to preempt leases to make room for lease."
+        else:
+            self.logger.debug("Was able to preempt lease %i" % lease_to_preempt.id)
+            print "PREEMPTED"
+            self.slottable.pop_state(discard = True)
+
+            for l in new_state:
+                if new_state[l] != None:
+                    l.state_machine.state = new_state[l]
+
+            self.logger.vdebug("Lease %i after preemption:" % lease_to_preempt.id)
+            lease_to_preempt.print_contents()                         

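The __sort_vnodes helper added above (mirroring the one already used by GreedyMapper) normalizes each lease node's request against the per-resource-type maximum and sums the ratios, so the nodes with the largest relative demands are mapped first. A small worked example of that scoring, with made-up (CPU, MB) requests:

requested = {1: (2, 1024), 2: (1, 1024), 3: (1, 512)}   # vnode -> (CPUs, MB)
max_res = [max(r[i] for r in requested.values()) for i in range(2)]   # [2, 1024]
score = dict((v, sum(float(r[i]) / max_res[i] for i in range(2)))
             for v, r in requested.items())
order = sorted(requested, key=lambda v: score[v], reverse=True)
print order    # [1, 2, 3]: scores 2.0, 1.5 and 1.0 respectively
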
Modified: branches/1.1/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/vm_scheduler.py	2010-01-20 22:21:42 UTC (rev 783)
+++ branches/1.1/src/haizea/core/scheduler/vm_scheduler.py	2010-01-20 22:23:24 UTC (rev 784)
@@ -639,7 +639,8 @@
             try:
                 self.logger.debug("Trying to schedule lease #%i as an advance reservation..." % lease.id)
                 vmrr, preemptions = self.__schedule_exact(lease, duration, nexttime, earliest)
-                return vmrr, preemptions
+                # Don't return preemptions. They have already been preempted by the deadline mapper
+                return vmrr, []
             except NotSchedulableException:
                 self.logger.debug("Lease #%i cannot be scheduled as an advance reservation, trying as best-effort..." % lease.id)
                 try:
@@ -653,6 +654,7 @@
                 else:
                     return vmrr, preemptions
         else:
+            self.logger.debug("Trying to schedule lease #%i as best-effort..." % lease.id)
             try:
                 vmrr, preemptions = self.__schedule_asap(lease, duration, nexttime, earliest, allow_in_future = True, override_state=override_state)
             except NotSchedulableException:
@@ -660,8 +662,7 @@
                 preemptions = []
 
         if vmrr == None or vmrr.end - vmrr.start != duration or vmrr.end > lease.deadline or len(preemptions)>0:
-            self.logger.debug("Lease #%i cannot be scheduled before deadline using best-effort." % lease.id)
-            print "BAR1"            
+            self.logger.debug("Trying to schedule lease #%i by rescheduling other leases..." % lease.id)
             dirtynodes = set()
             dirtytime = earliest_time
 
@@ -674,7 +675,7 @@
 
             leases = list(set([future_vmrr.lease for future_vmrr in future_vmrrs]))
 
-            self.slottable.save(leases)
+            self.slottable.push_state(leases)
             
             for future_vmrr in future_vmrrs:
                 #print "REMOVE", future_vmrr.lease.id, future_vmrr.start, future_vmrr.end
@@ -715,7 +716,7 @@
                     preemptions = []
                 if vmrr == None or vmrr.end - vmrr.start != dur or vmrr.end > lease2.deadline or len(preemptions) != 0:
                     self.logger.debug("Lease %s could not be rescheduled, undoing changes." % lease2.id)
-                    self.slottable.restore()
+                    self.slottable.pop_state()
 
                     raise NotSchedulableException, "Could not schedule before deadline without making other leases miss deadline"
                     
@@ -768,9 +769,11 @@
                 new_vmrrs.update(add_vmrrs)
             except NotSchedulableException:
                 self.logger.debug("Lease %s could not be rescheduled, undoing changes." % l.id)
-                self.slottable.restore()
+                self.slottable.pop_state()
                 raise
                                 
+            self.slottable.pop_state(discard=True)
+
             for l in leases:
                 if l not in scheduled:
                     for l_vmrr in orig_vmrrs[l]:
@@ -799,7 +802,7 @@
                 if l in scheduled:
                     self.logger.vdebug("Lease %i after rescheduling:" % l.id)
                     l.print_contents()                   
-            print "BAR2"
+
             return return_vmrr, []
         else:
             return vmrr, preemptions

Modified: branches/1.1/src/haizea/pluggable/policies/preemption.py
===================================================================
--- branches/1.1/src/haizea/pluggable/policies/preemption.py	2010-01-20 22:21:42 UTC (rev 783)
+++ branches/1.1/src/haizea/pluggable/policies/preemption.py	2010-01-20 22:23:24 UTC (rev 784)
@@ -23,8 +23,9 @@
 
 from haizea.core.leases import Lease
 from haizea.core.scheduler.policy import PreemptabilityPolicy
+from haizea.common.utils import get_config
+import haizea.common.constants as constants
 
-
 class NoPreemptionPolicy(PreemptabilityPolicy):
     """Simple preemption policy: preemption is never allowed.
     """
@@ -105,6 +106,8 @@
         preemptee -- Preemptee lease
         time -- Time at which preemption would take place
         """
+        susptype = get_config().get("suspension")
+        
         if preemptee.get_type() == Lease.DEADLINE:
             # We can only preempt leases in these states
             if not preemptee.get_state() in (Lease.STATE_SCHEDULED, Lease.STATE_READY,
@@ -113,8 +116,12 @@
                 return -1
 
             deadline = preemptee.deadline
-            remaining_duration = preemptee.get_remaining_duration_at(time)
-            delay = preemptee.estimate_suspend_time() + preemptor.duration.requested + preemptee.estimate_resume_time()
+            if susptype == constants.SUSPENSION_NONE:
+                remaining_duration = preemptee.duration.requested
+                delay =  preemptor.duration.requested
+            else:
+                remaining_duration = preemptee.get_remaining_duration_at(time)
+                delay = preemptee.estimate_suspend_time() + preemptor.duration.requested + preemptee.estimate_resume_time()
             if time + delay + remaining_duration < deadline:
                 slack = (deadline - (time+delay)) / remaining_duration
                 return slack

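For reference, a worked example of the slack metric computed in the preemption.py hunk above, with times as plain minutes rather than Haizea's datetime arithmetic; the -1 return is used here simply as a stand-in for "not preemptable":

def slack(deadline, time, delay, remaining):
    # Mirrors the check above: only preemptable if it can still finish in time.
    if time + delay + remaining < deadline:
        return float(deadline - (time + delay)) / remaining
    return -1

# Preemptee has 60 min of work left and a deadline 300 min away (time = 0).
# The preemptor needs 90 min; suspending and resuming costs another 10 min.
print slack(300, 0, 90 + 10, 60)    # 3.33...  (plenty of slack)
# With suspension disabled, the preemptee must redo its full 120 min request:
print slack(300, 0, 90, 120)        # 1.75     (tighter, but still preemptable)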

