[haizea-commit] r615 - in branches/TP2.0: src/haizea/core src/haizea/core/scheduler src/haizea/core/scheduler/preparation_schedulers tests

haizea-commit at mailman.cs.uchicago.edu
Fri Jul 24 05:35:04 CDT 2009


Author: borja
Date: 2009-07-24 05:34:56 -0500 (Fri, 24 Jul 2009)
New Revision: 615

Modified:
   branches/TP2.0/src/haizea/core/leases.py
   branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
   branches/TP2.0/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
   branches/TP2.0/src/haizea/core/scheduler/resourcepool.py
   branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
   branches/TP2.0/tests/base_config_simulator.conf
   branches/TP2.0/tests/sample_slottables.py
   branches/TP2.0/tests/test_mapper.py
   branches/TP2.0/tests/test_slottable.py
Log:
Slottable, mapper, and all trace-based tests pass! Several small bugs were discovered and fixed along the way.

Modified: branches/TP2.0/src/haizea/core/leases.py
===================================================================
--- branches/TP2.0/src/haizea/core/leases.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/src/haizea/core/leases.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -494,6 +494,7 @@
                                                      
                    Lease.STATE_SCHEDULED:           [(Lease.STATE_PREPARING,  ""),
                                                      (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_PENDING,     ""),
                                                      (Lease.STATE_READY,      ""),
                                                      (Lease.STATE_CANCELLED,  "")],
                                                      
@@ -501,11 +502,13 @@
                                                      (Lease.STATE_CANCELLED,  "")],
                                                      
                    Lease.STATE_PREPARING:           [(Lease.STATE_READY,      ""),
+                                                     (Lease.STATE_PENDING,     ""),
                                                      (Lease.STATE_CANCELLED,  ""),
                                                      (Lease.STATE_FAIL,       "")],
                                                      
                    Lease.STATE_READY:               [(Lease.STATE_ACTIVE,     ""),
                                                      (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_PENDING,     ""),
                                                      (Lease.STATE_CANCELLED,  ""),
                                                      (Lease.STATE_FAIL,       "")],
                                                      
@@ -529,6 +532,7 @@
                                                      (Lease.STATE_FAIL,       "")],
                                                      
                    Lease.STATE_SUSPENDED_SCHEDULED: [(Lease.STATE_SUSPENDED_QUEUED,     ""),
+                                                     (Lease.STATE_SUSPENDED_PENDING,  ""),
                                                      (Lease.STATE_MIGRATING,  ""),
                                                      (Lease.STATE_RESUMING,   ""),
                                                      (Lease.STATE_CANCELLED,  ""),
@@ -1131,7 +1135,7 @@
         """              
         for node_set in self.node_sets:
             r = node_set[1]
-            r.set_ninstances(type, len(amounts))
+            r.set_ninstances(name, len(amounts))
             for ninstance, amount in enumerate(amounts):
-                r.set_quantity_instance(type, ninstance+1, amount)
+                r.set_quantity_instance(name, ninstance+1, amount)
 

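For context on the new STATE_PENDING entries: a transition table like the one above is only useful if every state change is validated against it. A minimal sketch of that pattern, with illustrative names rather than Haizea's actual API:

    class LeaseStateMachineSketch(object):
        # Validates state changes against a transition table (sketch only)

        def __init__(self, initial_state, transitions):
            # transitions: dict mapping state -> list of (next_state, reason)
            self.state = initial_state
            self.transitions = transitions

        def change_state(self, new_state):
            legal = [s for (s, _) in self.transitions.get(self.state, [])]
            if new_state not in legal:
                raise Exception("Illegal transition: %s -> %s"
                                % (self.state, new_state))
            self.state = new_state

Without the entries added in this revision, the rescheduling code in lease_scheduler.py below, which resets scheduled leases back to STATE_PENDING, would be attempting an illegal transition.
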
Modified: branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -410,7 +410,7 @@
         future = self.vm_scheduler.get_future_reschedulable_leases()
         for l in future:
             # We can only reschedule leases in the following four states
-            if l.state in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
+            if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
                 # For each reschedulable lease already scheduled in the
                 # future, we cancel the lease's preparation and
                 # the last scheduled VM.
@@ -418,11 +418,11 @@
                 self.preparation_scheduler.cancel_preparation(l)
                 self.vm_scheduler.cancel_vm(vmrr)
                 l.remove_vmrr(vmrr)
-                if l.state in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):
-                    l.state = Lease.STATE_PENDING
-                elif l.state == Lease.STATE_SUSPENDED_SCHEDULED:
-                    l.state = Lease.STATE_SUSPENDED_PENDING
-                    
+                if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):
+                    l.set_state(Lease.STATE_PENDING)
+                elif l.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
+                    l.set_state(Lease.STATE_SUSPENDED_PENDING)
+
                 # At this point, the lease just looks like a regular
                 # pending lease that can be handed off directly to the
                 # __schedule_lease method.
@@ -458,7 +458,7 @@
         done = False
         newqueue = Queue()
         while not done and not self.is_queue_empty():
-            if not self.vm_scheduler.can_reserve_in_future() and self.slottable.is_full(nexttime, restype = constants.RES_CPU):
+            if not self.vm_scheduler.can_schedule_in_future() and self.slottable.is_full(nexttime, restype = constants.RES_CPU):
                 self.logger.debug("Used up all future reservations and slot table is full. Skipping rest of queue.")
                 done = True
             else:
@@ -545,7 +545,7 @@
         # Schedule lease preparation
         is_ready = False
         preparation_rrs = []
-        if lease_state == Lease.STATE_SUSPENDED_QUEUED and migration != constants.MIGRATE_NO:
+        if lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration != constants.MIGRATE_NO:
             # The lease might require migration
             migr_rrs = self.preparation_scheduler.schedule_migration(lease, vmrr, nexttime)
             if len(migr_rrs) > 0:
@@ -558,10 +558,10 @@
                 vmrr.pre_rrs.insert(0, migr_rr)
             if len(migr_rrs) == 0:
                 is_ready = True
-        elif lease_state == Lease.STATE_SUSPENDED_QUEUED and migration == constants.MIGRATE_NO:
+        elif lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration == constants.MIGRATE_NO:
             # No migration means the lease is ready
             is_ready = True
-        elif lease_state != Lease.STATE_SUSPENDED_QUEUED:
+        elif lease_state in (Lease.STATE_PENDING, Lease.STATE_QUEUED):
             # The lease might require initial preparation
             preparation_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, earliest)
 
@@ -598,10 +598,8 @@
             lease.set_state(Lease.STATE_SCHEDULED)
             if is_ready:
                 lease.set_state(Lease.STATE_READY)
-        elif lease_state == Lease.STATE_SUSPENDED_QUEUED:
+        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
             lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)
-            if is_ready:
-                lease.set_state(Lease.STATE_SUSPENDED_READY)
 
         lease.print_contents()
 

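A note on the l.state versus get_state()/set_state() hunks above: the accessor pattern funnels every state change through one method, which is where a transition check like the one in leases.py can be enforced. A minimal sketch (names illustrative, not the real class):

    class LeaseSketch(object):
        def __init__(self, state):
            self._state = state  # callers must go through the accessors

        def get_state(self):
            return self._state

        def set_state(self, new_state):
            # The real set_state can validate new_state against the
            # transition table before committing it; a bare
            # 'l.state = ...' assignment bypasses any such check.
            self._state = new_state
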
Modified: branches/TP2.0/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -224,7 +224,7 @@
             self.logger.debug("Scheduling image transfer of '%s' for vnode %i to physnode %i" % (lease.software.image_id, vnode, pnode))
 
             if reusealg == constants.REUSE_IMAGECACHES:
-                if self.resourcepool.exists_reusable_image(pnode, lease.diskimage_id, start):
+                if self.resourcepool.exists_reusable_image(pnode, lease.software.image_id, start):
                     self.logger.debug("No need to schedule an image transfer (reusing an image in pool)")
                     mustpool[vnode] = pnode                            
                 else:
@@ -267,8 +267,8 @@
             if earliest_type == ImageTransferEarliestStartingTime.EARLIEST_REUSE:
                 # Add to pool
                 self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
-                self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.diskimage_id, lease.id, vnode, vmrr.end)
-                self.resourcepool.add_diskimage(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
+                self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease.id, vnode, vmrr.end)
+                self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease.id, vnode)
             elif earliest_type == ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK:
                 # We can piggyback on an existing transfer
                 transfer_rr = earliest[pnode].piggybacking_on
@@ -403,12 +403,10 @@
             return self.transfers[0].start - required_duration
 
     def __remove_transfers(self, lease):
-        print lease
         toremove = []
         for t in self.transfers:
             for pnode in t.transfers:
                 leases = [l for l, v in t.transfers[pnode]]
-                print leases
                 if lease in leases:
                     newtransfers = [(l, v) for l, v in t.transfers[pnode] if l!=lease]
                     t.transfers[pnode] = newtransfers
@@ -542,8 +540,8 @@
                     
             # Besides adding the image to the cache, we need to create a separate image for
             # this specific lease
-            for (lease_id, vnode) in vnodes:
-                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+            for (lease, vnode) in vnodes:
+                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease.id, vnode)
                     
         pnode.print_files()
 

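Two of the fixes above share a cause: image metadata now lives on the software object (lease.software.image_id, lease.software.image_size), and the reuse cache stores (lease, vnode) pairs rather than (lease_id, vnode) pairs, which the last hunk's unpacking fix reflects. A hypothetical sketch of such a cache entry, to make the tuple change concrete (all field and method names here are assumptions, not Haizea's):

    class ReusableImageSketch(object):
        def __init__(self, image_id, size, timeout):
            self.image_id = image_id
            self.size = size
            self.timeout = timeout   # earliest time the image may be evicted
            self.mappings = []       # (lease, vnode) pairs, per this commit

        def add_mapping(self, lease, vnode):
            # Store the lease object itself; code that unpacks these
            # tuples must use lease.id explicitly, as fixed above.
            self.mappings.append((lease, vnode))

        def is_reusable_after(self, after):
            # Usable if pinned indefinitely or pinned past 'after'
            return self.timeout is None or self.timeout >= after
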
Modified: branches/TP2.0/src/haizea/core/scheduler/resourcepool.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/resourcepool.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/src/haizea/core/scheduler/resourcepool.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -265,7 +265,7 @@
     def __init__(self, info_enact, vm_enact, deploy_enact):
         ResourcePool.__init__(self, info_enact, vm_enact, deploy_enact)
         
-        self.nodes = [NodeWithReusableImages.from_node(n) for n in self.nodes]
+        self.nodes = dict([(id,ResourcePoolNodeWithReusableImages.from_node(node)) for id, node in self.nodes.items()])
     
     def add_reusable_image(self, pnode, diskimage_id, imagesize, mappings, timeout):
         self.logger.debug("Adding reusable image for %s in pnode=%i" % (mappings, pnode))
@@ -302,7 +302,7 @@
             # for other images
         
     def get_nodes_with_reusable_image(self, diskimage_id, after = None):
-        return [n.id for n in self.nodes if n.exists_reusable_image(diskimage_id, after=after)]
+        return [n.id for n in self.get_nodes() if n.exists_reusable_image(diskimage_id, after=after)]
 
     def exists_reusable_image(self, pnode_id, diskimage_id, after):
         return self.get_node(pnode_id).exists_reusable_image(diskimage_id, after = after)
@@ -310,7 +310,7 @@
     
 class ResourcePoolNodeWithReusableImages(ResourcePoolNode):
     def __init__(self, node_id, hostname, capacity):
-        Node.__init__(self, node_id, hostname, capacity)
+        ResourcePoolNode.__init__(self, node_id, hostname, capacity)
         self.reusable_images = []
 
     @classmethod
@@ -381,7 +381,7 @@
         return success
 
     def print_files(self):
-        Node.print_files(self)
+        ResourcePoolNode.print_files(self)
         images = ""
         if len(self.reusable_images) > 0:
             images = ", ".join([str(img) for img in self.reusable_images])

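The first hunk above reflects self.nodes having become a dict keyed by node id, so each plain node is rebuilt as its image-caching subclass; the other two hunks repair superclass calls that still named the old class, Node. A minimal sketch of the from_node upgrade idiom, with illustrative fields:

    class ResourcePoolNodeSketch(object):
        def __init__(self, node_id, hostname, capacity):
            self.id = node_id
            self.hostname = hostname
            self.capacity = capacity

    class NodeWithReusableImagesSketch(ResourcePoolNodeSketch):
        def __init__(self, node_id, hostname, capacity):
            # Name the parent class explicitly; after a rename, stale
            # calls like Node.__init__ break, as the hunks above fix.
            ResourcePoolNodeSketch.__init__(self, node_id, hostname, capacity)
            self.reusable_images = []

        @classmethod
        def from_node(cls, node):
            # Rebuild an existing plain node as the subclass
            return cls(node.id, node.hostname, node.capacity)

    # Upgrading a node dict in place, mirroring the first hunk:
    nodes = {1: ResourcePoolNodeSketch(1, "host1", 100)}
    nodes = dict([(nid, NodeWithReusableImagesSketch.from_node(n))
                  for nid, n in nodes.items()])
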
Modified: branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -441,13 +441,13 @@
         # We might be scheduling a suspended lease. If so, we will
         # also have to schedule its resumption. Right now, just 
         # figure out if this is such a lease.
-        mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
+        mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
 
         # This is the minimum duration that we must be able to schedule.
         # See __compute_scheduling_threshold for more details.
         min_duration = self.__compute_scheduling_threshold(lease)
+        
 
-
         #
         # STEP 2: FIND THE CHANGEPOINTS
         #
@@ -491,6 +491,7 @@
             changepoints.sort()
             changepoints = [(x, onlynodes) for x in changepoints]
 
+
         # If we can schedule VMs in the future,
         # we also consider future changepoints
         if allow_in_future:
@@ -567,9 +568,8 @@
         
         # At this point, the lease is feasible. We just need to create
         # the reservations for the VMs and, possibly, for the VM resumption,
-        # suspension, and shutdown.
+        # suspension, and shutdown.    
         
-        
         # VM resource reservation
         res = {}
         

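The "FIND THE CHANGEPOINTS" step referenced above rests on a standard scheduling observation: resource availability only changes at the boundaries of existing reservations, so candidate start times need only be evaluated at those instants. A sketch of building such a list (illustrative only; the real method also restricts by node set and, as shown above, appends future changepoints when scheduling in the future is allowed):

    def find_changepoints(reservations, after):
        # Collect the sorted, unique reservation boundaries later than 'after'
        points = set()
        for rr in reservations:
            for t in (rr.start, rr.end):
                if t > after:
                    points.add(t)
        return sorted(points)
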
Modified: branches/TP2.0/tests/base_config_simulator.conf
===================================================================
--- branches/TP2.0/tests/base_config_simulator.conf	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/tests/base_config_simulator.conf	2009-07-24 10:34:56 UTC (rev 615)
@@ -3,7 +3,7 @@
 mode: simulated
 lease-preparation: unmanaged
 datafile: /var/tmp/haizea/results.dat
-lease-failure-handling: exit
+lease-failure-handling: exit-raise
 
 [simulation]
 clock: simulated
@@ -27,8 +27,7 @@
 suspension: all
 suspend-rate: 32
 resume-rate: 32
-migration: True
-what-to-migrate: nothing
+migration: yes
 
 
 [deploy-imagetransfer]

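The options above are read with Python's standard ConfigParser, for which hyphenated option names are ordinary strings. A minimal sketch of reading the two changed options (the section names here are assumptions based on the visible fragments, not verified against Haizea's config schema):

    from ConfigParser import ConfigParser  # Python 2 module, as in this codebase

    cfg = ConfigParser()
    cfg.read("base_config_simulator.conf")

    # assumed to live in [general]
    failure_handling = cfg.get("general", "lease-failure-handling")

    # 'migration: yes' reads back as the string "yes"; this commit drops
    # the boolean-style 'True' in favor of a keyword value
    migration = cfg.get("scheduling", "migration")
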
Modified: branches/TP2.0/tests/sample_slottables.py
===================================================================
--- branches/TP2.0/tests/sample_slottables.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/tests/sample_slottables.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -51,7 +51,7 @@
     slottable.add_node(3, FULL_NODE)  
     slottable.add_node(4, FULL_NODE)  
 
-    lease1 = Lease(None,[],None,None,None,1,None)
+    lease1 = Lease(None,{},None,None,None,1,None)
     lease1.id = 1
     res1 = {2: HALF_NODE}
     rr1_1 = ResourceReservation(lease1, T1315, T1325, res1)
@@ -59,13 +59,13 @@
     slottable.add_reservation(rr1_1)
     slottable.add_reservation(rr1_2)
 
-    lease2 = Lease(None,[],None,None,None,2,None)
+    lease2 = Lease(None,{},None,None,None,2,None)
     lease2.id = 2
     res2 = {2: FULL_NODE, 3: FULL_NODE}
     rr2 = ResourceReservation(lease2, T1330, T1345, res2)
     slottable.add_reservation(rr2)
 
-    lease3 = Lease(None,[],None,None,None,1,None)
+    lease3 = Lease(None,{},None,None,None,1,None)
     lease3.id = 3
     res3 = {4: FULL_NODE}
     rr3_1 = ResourceReservation(lease3, T1330, T1355, res3)
@@ -73,19 +73,19 @@
     slottable.add_reservation(rr3_1)
     slottable.add_reservation(rr3_2)
 
-    lease4 = Lease(None,[],None,None,None,1,None)
+    lease4 = Lease(None,{},None,None,None,1,None)
     lease4.id = 4
     res4 = {2: QRTR_NODE, 3: HALF_NODE}
     rr4 = ResourceReservation(lease4, T1350, T1415, res4)
     slottable.add_reservation(rr4)
 
-    lease5 = Lease(None,[],None,None,None,1,None)
+    lease5 = Lease(None,{},None,None,None,1,None)
     lease5.id = 5
     res5 = {2: QRTR_NODE}
     rr5 = ResourceReservation(lease5, T1350, T1415, res5)
     slottable.add_reservation(rr5)
     
-    lease6 = Lease(None,[],None,None,None,1,None)
+    lease6 = Lease(None,{},None,None,None,1,None)
     lease6.id = 6
     res6 = {1: FULL_NODE}
     rr6 = ResourceReservation(lease6, T1255, T1305, res6)

Modified: branches/TP2.0/tests/test_mapper.py
===================================================================
--- branches/TP2.0/tests/test_mapper.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/tests/test_mapper.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -11,7 +11,7 @@
 
 class SimplePolicy(PolicyManager):
     def __init__(self, slottable, preemption):
-        PolicyManager.__init__(self, slottable, None, None, None)
+        PolicyManager.__init__(self, None, None, None)
         self.preemption = preemption
         self.host_selection = GreedyPolicy(slottable)
     

Modified: branches/TP2.0/tests/test_slottable.py
===================================================================
--- branches/TP2.0/tests/test_slottable.py	2009-07-22 15:03:19 UTC (rev 614)
+++ branches/TP2.0/tests/test_slottable.py	2009-07-24 10:34:56 UTC (rev 615)
@@ -666,10 +666,6 @@
                               leases = {lease1:HALF_NODE,lease2:QRTR_NODE}, next_cp = T1415)
         
         avail = aw.get_availability_at_node(T1300, 1)
-        print avail.avail_list
-        print avail.avail_list[0].available, avail.avail_list[0].until
-        print avail.avail_list[1].available, avail.avail_list[1].until
-        print avail.avail_list[2].available, avail.avail_list[2].until
         assert(len(avail.avail_list)==3)
         assert(avail.avail_list[0].available == FULL_NODE)
         assert(avail.avail_list[0].until     == T1315)

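The assertions above walk an availability window: a list of (available, until) segments describing how much capacity a node retains from the query time onward. A sketch of consuming such a structure (field names are taken from the test; treating an until of None as "to the end of the slot table" and comparing capacities directly are both assumptions):

    def capacity_holds_until(avail_list, needed, end):
        # True if at least 'needed' capacity persists through 'end'
        for seg in avail_list:
            if seg.available < needed:
                return False
            if seg.until is None or seg.until >= end:
                return True   # this segment already covers 'end'
        return False
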

