[haizea-commit] r508 - in trunk/src/haizea/resourcemanager: . deployment enact

haizea-commit at mailman.cs.uchicago.edu
Wed Sep 24 13:28:32 CDT 2008


Author: borja
Date: 2008-09-24 13:28:31 -0500 (Wed, 24 Sep 2008)
New Revision: 508

Modified:
   trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
   trunk/src/haizea/resourcemanager/enact/simulated.py
   trunk/src/haizea/resourcemanager/rm.py
   trunk/src/haizea/resourcemanager/rpcserver.py
   trunk/src/haizea/resourcemanager/scheduler.py
Log:
Small fixes, detected while testing for TP1.2

Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-09-24 10:19:28 UTC (rev 507)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-09-24 18:28:31 UTC (rev 508)
@@ -112,8 +112,11 @@
                         transferRRs[pnode] = filetransfer
                         lease.appendRR(filetransfer)
             elif mechanism == constants.TRANSFER_MULTICAST:
-                filetransfer = self.schedule_imagetransfer_edf(lease, musttransfer, nexttime)
-                lease.append_deployrr(filetransfer)
+                try:
+                    filetransfer = self.schedule_imagetransfer_edf(lease, musttransfer, nexttime)
+                    lease.append_deployrr(filetransfer)
+                except DeploymentSchedException, msg:
+                    raise
  
         # No chance of scheduling exception at this point. It's safe
         # to add entries to the pools
@@ -287,7 +290,7 @@
             startTime = t.end
              
         if not fits:
-             raise DeploymentSchedException, "Adding this VW results in an unfeasible image transfer schedule."
+             raise DeploymentSchedException, "Adding this lease results in an unfeasible image transfer schedule."
 
         # Push image transfers as close as possible to their deadlines. 
         feasibleEndTime=newtransfers[-1].deadline
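
Not part of the commit, but the first hunk leans on the ordering spelled out in the "No chance of scheduling exception at this point" comment: everything that can raise a DeploymentSchedException runs before any pool state is touched. A minimal standalone sketch of that two-phase pattern, with a hypothetical schedule_transfer() standing in for schedule_imagetransfer_edf() and made-up arguments:

    class DeploymentSchedException(Exception):
        pass

    def schedule_transfer(pnode, image, bandwidth_free):
        # Hypothetical stand-in for schedule_imagetransfer_edf(): fails if
        # the destination node has no spare transfer bandwidth.
        if not bandwidth_free.get(pnode, False):
            raise DeploymentSchedException("unfeasible image transfer schedule")
        return (pnode, image)

    def deploy(transfers, bandwidth_free, pools):
        # Phase 1: everything that can raise happens here, before any state change.
        scheduled = [schedule_transfer(p, img, bandwidth_free) for p, img in transfers]
        # Phase 2: no scheduling exception is possible any more, so it is
        # safe to add entries to the pools.
        for pnode, rr in scheduled:
            pools.setdefault(pnode, []).append(rr)
        return pools

    deploy([(3, "base.img")], {3: True}, {})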

Modified: trunk/src/haizea/resourcemanager/enact/simulated.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated.py	2008-09-24 10:19:28 UTC (rev 507)
+++ trunk/src/haizea/resourcemanager/enact/simulated.py	2008-09-24 18:28:31 UTC (rev 508)
@@ -76,7 +76,6 @@
             image = action.vnodes[vnode].diskimage
             cpu = action.vnodes[vnode].resources.get_by_type(constants.RES_CPU)
             memory = action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
-            print (action.lease_haizea_id, vnode, pnode, image, cpu, memory)
             self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
                          % (action.lease_haizea_id, vnode, pnode, image, cpu, memory))
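
As an aside, the simulated.py hunk simply drops a stray print in favor of the logger.debug call that was already there. Outside Haizea, the same print-versus-logger pattern looks roughly like the sketch below; the logger name, format string, and setup are made up for the example:

    import logging

    # One-time setup, normally done once at startup.
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s %(name)s %(levelname)s %(message)s")
    logger = logging.getLogger("enact.simulated")

    def on_start_vm(lease_id, vnode, pnode, image, cpu, memory):
        # Unlike a bare print, this line can be filtered by level or logger
        # name, and disappears entirely once the level is raised above DEBUG.
        logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
                     % (lease_id, vnode, pnode, image, cpu, memory))

    on_start_vm(1, 1, 0, "base.img", 100, 1024)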
     

Modified: trunk/src/haizea/resourcemanager/rm.py
===================================================================
--- trunk/src/haizea/resourcemanager/rm.py	2008-09-24 10:19:28 UTC (rev 507)
+++ trunk/src/haizea/resourcemanager/rm.py	2008-09-24 18:28:31 UTC (rev 508)
@@ -102,13 +102,13 @@
         
         """
 
-        # Simulations always run in the foreground
-        self.daemon = False
+        # Simulated-time simulations always run in the foreground
+        clock = self.config.get("clock")
+        if clock == constants.CLOCK_SIMULATED:
+            self.daemon = False
         
         self.init_logging()
-        clock = self.config.get("clock")
-        
-        # The clock
+                
         if clock == constants.CLOCK_SIMULATED:
             starttime = self.config.get("starttime")
             self.clock = SimulatedClock(self, starttime)
@@ -764,9 +764,15 @@
             
             # The only exit condition from the real clock is if the stop_when_no_more_leases
             # is set to True, and there's no more work left to do.
-            stop_when_no_more_leases = self.rm.config.get("stop-when-no-more-leases")
-            if stop_when_no_more_leases and not self.rm.exists_leases_in_rm():
-                done = True
+            # TODO: This first if is a kludge. Other options should only interact with
+            # options through the configfile's get method. The "stop-when-no-more-leases"
+            # option is currently OpenNebula-specific (while the real clock isn't; it can
+            # be used by both the simulator and the OpenNebula mode). This has to be
+            # fixed.            
+            if self.rm.config._options.has_key("stop-when-no-more-leases"):
+                stop_when_no_more_leases = self.rm.config.get("stop-when-no-more-leases")
+                if stop_when_no_more_leases and not self.rm.exists_leases_in_rm():
+                    done = True
             
             # Sleep
             if not done:
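
The TODO in the second rm.py hunk asks for mode-specific options such as "stop-when-no-more-leases" to be read only through the config object's get method instead of probing _options directly. One way that clean-up could look, sketched here with a hypothetical RMConfig class rather than Haizea's actual config API, is a get() that accepts a default:

    class RMConfig(object):
        """Hypothetical config wrapper; Haizea's real class is different."""
        def __init__(self, options):
            self._options = dict(options)

        def get(self, name, default=None):
            # Callers interact with options only through get(); a missing,
            # mode-specific option falls back to the supplied default.
            return self._options.get(name, default)

    config = RMConfig({"clock": "real"})

    # The real-clock loop could then read the OpenNebula-specific option
    # without first testing whether it exists:
    stop_when_no_more_leases = config.get("stop-when-no-more-leases", False)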

Modified: trunk/src/haizea/resourcemanager/rpcserver.py
===================================================================
--- trunk/src/haizea/resourcemanager/rpcserver.py	2008-09-24 10:19:28 UTC (rev 507)
+++ trunk/src/haizea/resourcemanager/rpcserver.py	2008-09-24 18:28:31 UTC (rev 508)
@@ -27,7 +27,7 @@
 
     def serve_forever(self):
         self.run = True
-        while not self.run:
+        while self.run:
             self.handle_request()
 
     def stop(self):
@@ -71,7 +71,7 @@
         return 0
 
     def get_leases(self):
-        return [l.xmlrpc_marshall() for l in self.rm.scheduler.scheduledleases.get_leases()]
+        return [l.xmlrpc_marshall() for l in self.rm.scheduler.leases.get_leases()]
 
     def get_lease(self, lease_id):
         return 0
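
The first rpcserver.py hunk fixes an inverted loop condition: serve_forever() set self.run to True and then looped "while not self.run", so it returned without ever serving a request. For illustration only, a stoppable XML-RPC loop of that shape on top of the standard library looks roughly as follows; the class, method, and port used here are arbitrary, not Haizea's, and the import is the Python 3 spelling, whereas Haizea's 2008 code uses the Python 2 SimpleXMLRPCServer module:

    import threading
    from xmlrpc.server import SimpleXMLRPCServer

    class StoppableRPCServer(SimpleXMLRPCServer):
        def serve_until_stopped(self):
            self.running = True
            # handle_request() serves a single request per call, so the flag
            # is re-checked between requests and stop() can end the loop.
            while self.running:
                self.handle_request()

        def stop(self):
            # handle_request() blocks until a request arrives, so the loop
            # actually exits after the next request following stop().
            self.running = False

    server = StoppableRPCServer(("localhost", 8000), logRequests=False, allow_none=True)
    server.register_function(lambda: "pong", "ping")
    threading.Thread(target=server.serve_until_stopped, daemon=True).start()
    # ... later, server.stop() clears the flag and the loop winds down.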

Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py	2008-09-24 10:19:28 UTC (rev 507)
+++ trunk/src/haizea/resourcemanager/scheduler.py	2008-09-24 18:28:31 UTC (rev 508)
@@ -166,7 +166,9 @@
     
     def process_reservations(self, nowtime):
         starting = self.slottable.get_reservations_starting_at(nowtime)
+        starting = [res for res in starting if res.state == ResourceReservation.STATE_SCHEDULED]
         ending = self.slottable.get_reservations_ending_at(nowtime)
+        ending = [res for res in ending if res.state == ResourceReservation.STATE_ACTIVE]
         for rr in ending:
             self._handle_end_rr(rr.lease, rr)
             self.handlers[type(rr)].on_end(self, rr.lease, rr)
@@ -368,7 +370,7 @@
             # scheduling could still throw an exception)
             lease_req.append_vmrr(vmrr)
             self.slottable.addReservation(vmrr)
-        except SlotFittingException, msg:
+        except Exception, msg:
             raise SchedException, "The requested AR lease is infeasible. Reason: %s" % msg
 
 
@@ -1171,19 +1173,22 @@
             must_cancel_and_requeue = True
         else:
             susptype = get_config().get("suspension")
-            time_until_suspend = preemption_time - vmrr.start
-            min_duration = self.__compute_scheduling_threshold(lease)
-            can_suspend = time_until_suspend >= min_duration        
-            if not can_suspend:
-                self.logger.debug("Suspending the lease does not meet scheduling threshold.")
+            if susptype == constants.SUSPENSION_NONE:
                 must_cancel_and_requeue = True
             else:
-                if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:
-                    self.logger.debug("Can't suspend lease because only suspension of single-node leases is allowed.")
+                time_until_suspend = preemption_time - vmrr.start
+                min_duration = self.__compute_scheduling_threshold(lease)
+                can_suspend = time_until_suspend >= min_duration        
+                if not can_suspend:
+                    self.logger.debug("Suspending the lease does not meet scheduling threshold.")
                     must_cancel_and_requeue = True
                 else:
-                    self.logger.debug("Lease can be suspended")
-                    must_cancel_and_requeue = False
+                    if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:
+                        self.logger.debug("Can't suspend lease because only suspension of single-node leases is allowed.")
+                        must_cancel_and_requeue = True
+                    else:
+                        self.logger.debug("Lease can be suspended")
+                        must_cancel_and_requeue = False
                     
         if must_cancel_and_requeue:
             self.logger.info("... lease #%i has been cancelled and requeued." % lease.id)
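
Read as a decision table, the reworked preemption logic in the last scheduler.py hunk reduces to three requeue conditions checked in order. The sketch below only illustrates that order; the constant values and the plain numbers used for durations are placeholders, not Haizea's actual types:

    SUSPENSION_NONE = "none"             # placeholder values; Haizea defines
    SUSPENSION_SERIAL = "serial-only"    # these in its constants module
    SUSPENSION_ALL = "all"

    def must_cancel_and_requeue(susptype, numnodes, time_until_suspend, min_duration):
        # Same branch order as the hunk: suspension disabled, then the
        # scheduling-threshold test, then the single-node-only rule.
        if susptype == SUSPENSION_NONE:
            return True                  # suspension disabled: always requeue
        if time_until_suspend < min_duration:
            return True                  # would not meet the scheduling threshold
        if numnodes > 1 and susptype == SUSPENSION_SERIAL:
            return True                  # only single-node leases may be suspended
        return False                     # the lease can be suspended in place

    # Example: a 4-node lease under serial-only suspension gets requeued.
    assert must_cancel_and_requeue(SUSPENSION_SERIAL, 4, 600, 300) is True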


