[haizea-commit] r464 - in trunk: bin src/haizea/cli src/haizea/common src/haizea/resourcemanager src/haizea/resourcemanager/enact/simulated

haizea-commit at mailman.cs.uchicago.edu
Tue Aug 5 10:34:20 CDT 2008


Author: borja
Date: 2008-08-05 10:34:20 -0500 (Tue, 05 Aug 2008)
New Revision: 464

Added:
   trunk/bin/haizea-cancel-lease
   trunk/bin/haizea-list-leases
Modified:
   trunk/src/haizea/cli/rpc_commands.py
   trunk/src/haizea/common/constants.py
   trunk/src/haizea/common/utils.py
   trunk/src/haizea/resourcemanager/datastruct.py
   trunk/src/haizea/resourcemanager/enact/simulated/info.py
   trunk/src/haizea/resourcemanager/enact/simulated/vm.py
   trunk/src/haizea/resourcemanager/resourcepool.py
   trunk/src/haizea/resourcemanager/rm.py
   trunk/src/haizea/resourcemanager/rpcserver.py
   trunk/src/haizea/resourcemanager/scheduler.py
Log:
Added RPC/CLI for listing leases and cancelling leases

Added: trunk/bin/haizea-cancel-lease
===================================================================
--- trunk/bin/haizea-cancel-lease	                        (rev 0)
+++ trunk/bin/haizea-cancel-lease	2008-08-05 15:34:20 UTC (rev 464)
@@ -0,0 +1,6 @@
+#!/usr/bin/python
+
+from haizea.cli import rpc_commands
+import sys
+
+rpc_commands.haizea_cancel_lease(sys.argv)
\ No newline at end of file


Property changes on: trunk/bin/haizea-cancel-lease
___________________________________________________________________
Name: svn:executable
   + *

Added: trunk/bin/haizea-list-leases
===================================================================
--- trunk/bin/haizea-list-leases	                        (rev 0)
+++ trunk/bin/haizea-list-leases	2008-08-05 15:34:20 UTC (rev 464)
@@ -0,0 +1,6 @@
+#!/usr/bin/python
+
+from haizea.cli import rpc_commands
+import sys
+
+rpc_commands.haizea_list_leases(sys.argv)
\ No newline at end of file


Property changes on: trunk/bin/haizea-list-leases
___________________________________________________________________
Name: svn:executable
   + *
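
Both new scripts are thin wrappers around the functions added to
haizea.cli.rpc_commands below. Once they are on the PATH, typical invocations
look like this (the -s/--server and -l/--lease options and the default server
URL are the ones defined in rpc_commands.py; 42 is just an example lease ID):

    haizea-list-leases -s http://localhost:42493
    haizea-cancel-lease -l 42 -s http://localhost:42493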

Modified: trunk/src/haizea/cli/rpc_commands.py
===================================================================
--- trunk/src/haizea/cli/rpc_commands.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/cli/rpc_commands.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -23,7 +23,7 @@
     op.add_option(Option("-s", "--server", action="store", type="string", dest="server", default="http://localhost:42493"))
 
 def create_rpc_proxy(server):
-    return xmlrpclib.ServerProxy(server)
+    return xmlrpclib.ServerProxy(server, allow_none=True)
 
 def haizea_request_lease(argv):
     p = OptionParser()
@@ -55,3 +55,54 @@
         print "Lease ID: %i" % lease_id
     except Exception, msg:
         print >> sys.stderr, "Error: %s" % msg
+        
+
+def haizea_cancel_lease(argv):
+    p = OptionParser()
+    add_rpc_options(p)
+    
+    p.add_option(Option("-l", "--lease", action="store", type="int", dest="lease"))
+
+    opt, args = p.parse_args(argv)
+
+    server = create_rpc_proxy(opt.server)
+    
+    try:
+        code = server.cancel_lease(opt.lease)
+    except Exception, msg:
+        print >> sys.stderr, "Error: %s" % msg
+        
+
+def haizea_list_leases(argv):
+    p = OptionParser()
+    add_rpc_options(p)
+    
+    opt, args = p.parse_args(argv)
+
+    server = create_rpc_proxy(opt.server)
+    
+    # TODO: Make this configurable. Ideally, use some sort of
+    # "console table printer"
+    fields = [("id",3),
+              ("type",4),
+              ("state",7),
+              ("start_req",19),
+              ("duration_req",9),
+              ("numnodes",3)]
+    
+    try:
+        leases = server.get_leases()
+        print "\33[1m\33[4m",
+        for (name,width) in fields:
+            width = max(len(name),width) + 1
+            centered = name.ljust(width)
+            print centered,
+        print "\33[0m"
+        for l in leases:
+            for (name,width) in fields:
+                value = l[name]
+                width = max(len(name),width)
+                print " %s" % str(value).ljust(width),
+            print
+    except Exception, msg:
+        print >> sys.stderr, "Error: %s" % msg
\ No newline at end of file

Modified: trunk/src/haizea/common/constants.py
===================================================================
--- trunk/src/haizea/common/constants.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/common/constants.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -312,3 +312,5 @@
 AVERAGE_NONE=0
 AVERAGE_NORMAL=1
 AVERAGE_TIMEWEIGHTED=2
+
+EVENT_END_VM=0

Modified: trunk/src/haizea/common/utils.py
===================================================================
--- trunk/src/haizea/common/utils.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/common/utils.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -19,6 +19,7 @@
 from mx import DateTime
 from math import ceil, floor
 from cPickle import dump, load, HIGHEST_PROTOCOL
+from datetime import datetime
 
 def gen_traceinj_name(tracefile, injectedfile):
     tracename=tracefile.split("/")[-1].split(".")[0]
@@ -89,4 +90,12 @@
     bandwidthMBs = float(bandwidth) / 8
     seconds = size / bandwidthMBs
     return roundDateTimeDelta(DateTime.TimeDelta(seconds = seconds)) 
- 
\ No newline at end of file
+ 
+def xmlrpc_marshall_singlevalue(value):
+    if isinstance(value, DateTime.DateTimeType):
+        return datetime.fromtimestamp(value)
+    elif isinstance(value, DateTime.DateTimeDeltaType):
+        return value.seconds
+    else:
+        return value
+     
\ No newline at end of file
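
For reference (not part of the commit), a rough sketch of what the new
xmlrpc_marshall_singlevalue helper does: xmlrpclib cannot marshall the
mx.DateTime types Haizea uses internally, so they are flattened to plain
values before being sent to RPC clients:

    from mx import DateTime
    from haizea.common.utils import xmlrpc_marshall_singlevalue

    # mx DateTimeDelta values are reduced to their length in seconds
    print xmlrpc_marshall_singlevalue(DateTime.TimeDelta(hours=1))   # 3600.0

    # anything that is not an mx date/delta is passed through unchanged
    print xmlrpc_marshall_singlevalue(42)                            # 42

    # mx DateTime points in time go through datetime.fromtimestamp(), so the
    # client receives a standard datetime object instead of an mx type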

Modified: trunk/src/haizea/resourcemanager/datastruct.py
===================================================================
--- trunk/src/haizea/resourcemanager/datastruct.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/datastruct.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -40,7 +40,7 @@
 """
 
 from haizea.common.constants import state_str, rstate_str, DS, RES_STATE_SCHEDULED, RES_STATE_ACTIVE, RES_MEM, MIGRATE_NONE, MIGRATE_MEM, MIGRATE_MEMVM, TRANSFER_NONE
-from haizea.common.utils import roundDateTimeDelta, get_lease_id, pretty_nodemap, estimate_transfer_time
+from haizea.common.utils import roundDateTimeDelta, get_lease_id, pretty_nodemap, estimate_transfer_time, xmlrpc_marshall_singlevalue
 
 from operator import attrgetter
 from mx.DateTime import TimeDelta
@@ -122,6 +122,12 @@
     def get_ending_reservations(self, time):
         return [r for r in self.rr if r.end <= time and r.state == RES_STATE_ACTIVE]
 
+    def get_active_reservations(self, time):
+        return [r for r in self.rr if r.start <= time and time <= r.end and r.state == RES_STATE_ACTIVE]
+
+    def get_scheduled_reservations(self):
+        return [r for r in self.rr if r.state == RES_STATE_SCHEDULED]
+
     def get_endtime(self):
         vmrr, resrr = self.get_last_vmrr()
         return vmrr.end
@@ -213,9 +219,29 @@
                 threshold = self.scheduler.rm.config.getBootOverhead() + self.estimate_suspend_resume_time(suspendrate)
             factor = self.scheduler.rm.config.getSuspendThresholdFactor() + 1
             return roundDateTimeDelta(threshold * factor)
-            
         
+    def xmlrpc_marshall(self):
+        # Convert to something we can send through XMLRPC
+        l = {}
+        l["id"] = self.id
+        l["submit_time"] = xmlrpc_marshall_singlevalue(self.submit_time)
+        l["start_req"] = xmlrpc_marshall_singlevalue(self.start.requested)
+        l["start_sched"] = xmlrpc_marshall_singlevalue(self.start.scheduled)
+        l["start_actual"] = xmlrpc_marshall_singlevalue(self.start.actual)
+        l["duration_req"] = xmlrpc_marshall_singlevalue(self.duration.requested)
+        l["duration_acc"] = xmlrpc_marshall_singlevalue(self.duration.accumulated)
+        l["duration_actual"] = xmlrpc_marshall_singlevalue(self.duration.actual)
+        l["end"] = xmlrpc_marshall_singlevalue(self.end)
+        l["diskimage_id"] = self.diskimage_id
+        l["diskimage_size"] = self.diskimage_size
+        l["numnodes"] = self.numnodes
+        l["resources"] = `self.requested_resources`
+        l["preemptible"] = self.preemptible
+        l["state"] = self.state
+        l["rr"] = [rr.xmlrpc_marshall() for rr in self.rr]
+        return l
         
+        
 class ARLease(LeaseBase):
     def __init__(self, submit_time, start, duration, diskimage_id, 
                  diskimage_size, numnodes, resreq, preemptible,
@@ -235,6 +261,10 @@
         self.print_rrs(loglevel)
         self.logger.log(loglevel, "--------------------------------------------------", DS)
     
+    def xmlrpc_marshall(self):
+        l = LeaseBase.xmlrpc_marshall(self)
+        l["type"] = "AR"
+        return l
 
         
 class BestEffortLease(LeaseBase):
@@ -269,7 +299,12 @@
             time_on_dedicated = bound
         return time_on_loaded / time_on_dedicated
 
+    def xmlrpc_marshall(self):
+        l = LeaseBase.xmlrpc_marshall(self)
+        l["type"] = "BE"
+        return l
 
+
 class ImmediateLease(LeaseBase):
     def __init__(self, submit_time, duration, diskimage_id, 
                  diskimage_size, numnodes, resreq, preemptible,
@@ -288,7 +323,12 @@
         self.print_rrs(loglevel)
         self.logger.log(loglevel, "--------------------------------------------------", DS)
 
+    def xmlrpc_marshall(self):
+        l = LeaseBase.xmlrpc_marshall(self)
+        l["type"] = "IM"
+        return l
 
+
 #-------------------------------------------------------------------#
 #                                                                   #
 #                        RESOURCE RESERVATION                       #
@@ -312,6 +352,14 @@
         self.logger.log(loglevel, "State          : %s" % rstate_str(self.state), DS)
         self.logger.log(loglevel, "Resources      : \n%s" % "\n".join(["N%i: %s" %(i, x) for i, x in self.resources_in_pnode.items()]), DS) 
                 
+    def xmlrpc_marshall(self):
+        # Convert to something we can send through XMLRPC
+        rr = {}                
+        rr["start"] = xmlrpc_marshall_singlevalue(self.start)
+        rr["end"] = xmlrpc_marshall_singlevalue(self.end)
+        rr["state"] = self.state
+        return rr
+                
 class VMResourceReservation(ResourceReservationBase):
     def __init__(self, lease, start, end, nodes, res, oncomplete, backfill_reservation):
         ResourceReservationBase.__init__(self, lease, start, end, res)
@@ -341,6 +389,12 @@
     def is_preemptible(self):
         return self.lease.preemptible
 
+    def xmlrpc_marshall(self):
+        rr = ResourceReservationBase.xmlrpc_marshall(self)
+        rr["type"] = "VM"
+        rr["nodes"] = self.nodes.items()
+        return rr
+
         
 class SuspensionResourceReservation(ResourceReservationBase):
     def __init__(self, lease, start, end, res, nodes):
@@ -358,6 +412,12 @@
     def is_preemptible(self):
         return False        
         
+    def xmlrpc_marshall(self):
+        rr = ResourceReservationBase.xmlrpc_marshall(self)
+        rr["type"] = "SUSP"
+        rr["nodes"] = self.nodes.items()
+        return rr
+        
 class ResumptionResourceReservation(ResourceReservationBase):
     def __init__(self, lease, start, end, res, nodes):
         ResourceReservationBase.__init__(self, lease, start, end, res)
@@ -374,6 +434,11 @@
     def is_preemptible(self):
         return False        
         
+    def xmlrpc_marshall(self):
+        rr = ResourceReservationBase.xmlrpc_marshall(self)
+        rr["type"] = "RESM"
+        rr["nodes"] = self.nodes.items()
+        return rr
 
 #-------------------------------------------------------------------#
 #                                                                   #
@@ -402,6 +467,15 @@
     def length(self):
         return len(self.__q)
     
+    def has_lease(self, lease_id):
+        return (1 == len([l for l in self.__q if l.id == lease_id]))
+    
+    def get_lease(self, lease_id):
+        return [l for l in self.__q if l.id == lease_id][0]
+    
+    def remove_lease(self, lease):
+        self.__q.remove(lease)
+    
     def __iter__(self):
         return iter(self.__q)
         
@@ -410,6 +484,9 @@
         self.scheduler = scheduler
         self.entries = {}
         
+    def has_lease(self, lease_id):
+        return self.entries.has_key(lease_id)
+        
     def get_lease(self, lease_id):
         return self.entries[lease_id]
     
@@ -521,7 +598,6 @@
             r += "%s:%.2f " % (self.descriptions[i], x)
         return r
 
-
 class Timestamp(object):
     def __init__(self, requested):
         self.requested = requested

Modified: trunk/src/haizea/resourcemanager/enact/simulated/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -33,7 +33,9 @@
         capacity = self.parseResourcesString(config.getResourcesPerPhysNode())
         
         self.nodes = [Node(self.resourcepool, i+1, "simul-%i" % (i+1), capacity) for i in range(numnodes)]
-        
+        for n in self.nodes:
+            n.enactment_info = n.nod_id
+            
         # Image repository nodes
         imgcapacity = ds.ResourceTuple.create_empty()
         imgcapacity.set_by_type(constants.RES_NETOUT, self.bandwidth)

Modified: trunk/src/haizea/resourcemanager/enact/simulated/vm.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated/vm.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/enact/simulated/vm.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -17,22 +17,36 @@
 # -------------------------------------------------------------------------- #
 
 from haizea.resourcemanager.enact.base import VMEnactmentBase
+import haizea.common.constants as constants
 
 class VMEnactment(VMEnactmentBase):
     def __init__(self, resourcepool):
         VMEnactmentBase.__init__(self, resourcepool)
         
     def start(self, action):
-        pass
+        for vnode in action.vnodes:
+            # Unpack action
+            pnode = action.vnodes[vnode].pnode
+            image = action.vnodes[vnode].diskimage
+            cpu = action.vnodes[vnode].resources.get_by_type(constants.RES_CPU)
+            memory = action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
+            self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
+                         % (action.lease_haizea_id, vnode, pnode, image, cpu, memory), "SIMUL")
     
     def stop(self, action):
-        pass
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to stop VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode), "SIMUL")
 
     def suspend(self, action):
-        pass
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to suspend VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode), "SIMUL")
 
     def resume(self, action):
-        pass
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to resume VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode), "SIMUL")
 
     def verifySuspend(self, action):
         return 0

Modified: trunk/src/haizea/resourcemanager/resourcepool.py
===================================================================
--- trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -118,7 +118,7 @@
             self.vm.start(startAction)
         except Exception, msg:
             self.rm.logger.error("Enactment of start VM failed: %s" % msg, constants.RM)
-            self.rm.cancel_lease(lease)
+            self.rm.cancel_lease(lease.id)
         
     def stopVMs(self, lease, rr):
         stopAction = actions.VMEnactmentStopAction()

Modified: trunk/src/haizea/resourcemanager/rm.py
===================================================================
--- trunk/src/haizea/resourcemanager/rm.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/rm.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -311,21 +311,19 @@
         """Return True if there are any leases still "in the system" """
         return self.scheduler.exists_scheduled_leases() or not self.scheduler.is_queue_empty()
     
-    def notify_event(self, lease, event):
-        pass
+    # TODO: Add more events. This is on hold until OpenNebula 1.2 actually
+    # provides more interesting events. For now, the only event is a
+    # prematurely ending VM.
+    def notify_event(self, lease_id, event):
+        self.scheduler.notify_event(lease_id, event)
 
-    def cancel_lease(self, lease):
+    def cancel_lease(self, lease_id):
         """Cancels a lease.
         
-        TODO: Right now, a lease only gets cancelled if an enactment action
-        fails. If this happens, Haizea will continue running, but won't clean
-        up the lease (so, in effect, it will think resources are still being
-        used, and any future enactment actions for that lease will also fail)
-        
         Arguments:
         lease -- Lease to cancel
         """
-        pass
+        self.scheduler.cancel_lease(lease_id)
 
             
 class Clock(object):
@@ -427,7 +425,7 @@
             
             # Notify the resource manager about the premature ends
             for rr in prematureends:
-                self.rm.notify_end_vm(rr.lease, rr)
+                self.rm.notify_event(rr.lease.id, constants.EVENT_END_VM)
                 
             # Process reservations starting/stopping at the current time and
             # check if there are any new requests.
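
With the event plumbing above, the simulation clock (and, eventually, other
enactment backends) reports a prematurely ending VM through a single call. A
minimal sketch of the call site, assuming rm is the ResourceManager instance
and lease is the affected lease:

    import haizea.common.constants as constants

    # Tell the scheduler that this lease's VM ended ahead of schedule; the
    # scheduler looks up the active reservation and cleans the lease up.
    rm.notify_event(lease.id, constants.EVENT_END_VM)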

Modified: trunk/src/haizea/resourcemanager/rpcserver.py
===================================================================
--- trunk/src/haizea/resourcemanager/rpcserver.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/rpcserver.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -26,11 +26,11 @@
         self.rm = rm
         self.logger = self.rm.logger
         self.port = DEFAULT_HAIZEA_PORT
-        self.server = SimpleXMLRPCServer(("localhost", self.port))
+        self.server = SimpleXMLRPCServer(("localhost", self.port), allow_none=True)
         self.register_rpc(self.test_func)
         self.register_rpc(self.cancel_lease)
         self.register_rpc(self.get_leases)
-        self.register_rpc(self.get_lease_info)
+        self.register_rpc(self.get_lease)
         self.register_rpc(self.get_queue)
         self.register_rpc(self.notify_event)
 
@@ -51,16 +51,17 @@
         return 0
     
     def cancel_lease(self, lease_id):
-        pass
+        self.rm.cancel_lease(lease_id)
+        return 0
 
     def get_leases(self):
-        pass
+        return [l.xmlrpc_marshall() for l in self.rm.scheduler.scheduledleases.get_leases()]
 
-    def get_lease_info(self):
-        pass
+    def get_lease(self, lease_id):
+        return 0
 
     def get_queue(self):
-        pass
+        return [l.xmlrpc_marshall() for l in self.rm.queue]
 
     def notify_event(self, lease_id, enactment_id, event):
         pass
\ No newline at end of file
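
For completeness, a minimal client sketch (not part of the commit) showing how
the updated RPC interface can be driven directly with xmlrpclib; the URL is
the default used by the CLI, the method names are the ones registered above,
and allow_none=True mirrors the change to create_rpc_proxy, since marshalled
leases may contain None fields:

    import xmlrpclib

    server = xmlrpclib.ServerProxy("http://localhost:42493", allow_none=True)

    # Scheduled/active leases, as dictionaries built by the new
    # xmlrpc_marshall() methods in datastruct.py
    for lease in server.get_leases():
        print lease["id"], lease["type"], lease["state"], lease["numnodes"]

    # Queued (best-effort) leases
    for lease in server.get_queue():
        print lease["id"], lease["duration_req"]

    # Cancel a lease by its numeric ID (42 is just an example)
    server.cancel_lease(42)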

Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py	2008-08-04 15:27:22 UTC (rev 463)
+++ trunk/src/haizea/resourcemanager/scheduler.py	2008-08-05 15:34:20 UTC (rev 464)
@@ -36,7 +36,7 @@
 from haizea.resourcemanager.deployment.unmanaged import UnmanagedDeployment
 from haizea.resourcemanager.deployment.predeployed import PredeployedImagesDeployment
 from haizea.resourcemanager.deployment.imagetransfer import ImageTransferDeployment
-from haizea.resourcemanager.datastruct import ARLease, ImmediateLease 
+from haizea.resourcemanager.datastruct import ARLease, ImmediateLease, VMResourceReservation 
 
 
 class SchedException(Exception):
@@ -80,7 +80,6 @@
         self.queue = ds.Queue(self)
         self.scheduledleases = ds.LeaseTable(self)
         self.completedleases = ds.LeaseTable(self)
-        self.rejectedleases = ds.LeaseTable(self)
         self.pending_leases = []
             
         self.handlers = {}
@@ -174,24 +173,51 @@
         """Return True if there are any leases scheduled in the future"""
         return not self.scheduledleases.is_empty()    
 
-    # TODO: Replace this with a more general event handling system
-    def notify_premature_end_vm(self, l, rr):
-        self.rm.logger.info("LEASE-%i The VM has ended prematurely." % l.id, constants.SCHED)
-        self._handle_end_rr(l, rr)
-        if rr.oncomplete == constants.ONCOMPLETE_SUSPEND:
-            rrs = l.next_rrs(rr)
-            for r in rrs:
-                l.remove_rr(r)
-                self.slottable.removeReservation(r)
-        rr.oncomplete = constants.ONCOMPLETE_ENDLEASE
-        rr.end = self.rm.clock.get_time()
-        self._handle_end_vm(l, rr, enact=False)
-        nexttime = self.rm.clock.get_next_schedulable_time()
-        if self.rm.config.isBackfilling():
-            # We need to reevaluate the schedule to see if there are any future
-            # reservations that we can slide back.
-            self.reevaluate_schedule(l, rr.nodes.values(), nexttime, [])
+    def cancel_lease(self, lease_id):
+        """Cancels a lease.
+        
+        Arguments:
+        lease_id -- ID of lease to cancel
+        """
+        time = self.rm.clock.get_time()
+        
+        self.rm.logger.info("Cancelling lease %i..." % lease_id, constants.SCHED)
+        if self.scheduledleases.has_lease(lease_id):
+            # The lease is either running, or scheduled to run
+            lease = self.scheduledleases.get_lease(lease_id)
+            
+            if lease.state == constants.LEASE_STATE_ACTIVE:
+                self.rm.logger.info("Lease %i is active. Stopping active reservation..." % lease_id, constants.SCHED)
+                rr = lease.get_active_reservations(time)[0]
+                if isinstance(rr, VMResourceReservation):
+                    self._handle_unscheduled_end_vm(lease, rr, enact=True)
+                # TODO: Handle cancellations in the middle of suspensions
+                # and resumptions
+            elif lease.state in [constants.LEASE_STATE_SCHEDULED, constants.LEASE_STATE_DEPLOYED]:
+                self.rm.logger.info("Lease %i is scheduled. Cancelling reservations." % lease_id, constants.SCHED)
+                rrs = lease.get_scheduled_reservations()
+                for r in rrs:
+                    lease.remove_rr(r)
+                    self.slottable.removeReservation(r)
+                lease.state = constants.LEASE_STATE_DONE
+                self.completedleases.add(lease)
+                self.scheduledleases.remove(lease)
+        elif self.queue.has_lease(lease_id):
+            # The lease is in the queue, waiting to be scheduled.
+            # Cancelling is as simple as removing it from the queue
+            self.rm.logger.info("Lease %i is in the queue. Removing..." % lease_id, constants.SCHED)
+            l = self.queue.get_lease(lease_id)
+            self.queue.remove_lease(l)
     
+    
+    def notify_event(self, lease_id, event):
+        time = self.rm.clock.get_time()
+        if event == constants.EVENT_END_VM:
+            lease = self.scheduledleases.get_lease(lease_id)
+            rr = lease.get_active_reservations(time)[0]
+            self._handle_unscheduled_end_vm(lease, rr, enact=False)
+
+    
     def __process_ar_request(self, lease_req, nexttime):
         self.rm.logger.info("Received AR lease request #%i, %i nodes from %s to %s." % (lease_req.id, lease_req.numnodes, lease_req.start.requested, lease_req.start.requested + lease_req.duration.requested), constants.SCHED)
         self.rm.logger.debug("  Start   : %s" % lease_req.start, constants.SCHED)
@@ -500,6 +526,7 @@
 
     # TODO: Replace enact with a saner way of handling leases that have failed or
     #       ended prematurely.
+    #       Possibly factor out the "clean up" code to a separate function
     def _handle_end_vm(self, l, rr, enact=True):
         self.rm.logger.debug("LEASE-%i Start of handleEndVM" % l.id, constants.SCHED)
         self.rm.logger.edebug("LEASE-%i Before:" % l.id, constants.SCHED)
@@ -529,6 +556,23 @@
         self.rm.logger.debug("LEASE-%i End of handleEndVM" % l.id, constants.SCHED)
         self.rm.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()), constants.SCHED)
 
+    def _handle_unscheduled_end_vm(self, l, rr, enact=False):
+        self.rm.logger.info("LEASE-%i The VM has ended prematurely." % l.id, constants.SCHED)
+        self._handle_end_rr(l, rr)
+        if rr.oncomplete == constants.ONCOMPLETE_SUSPEND:
+            rrs = l.next_rrs(rr)
+            for r in rrs:
+                l.remove_rr(r)
+                self.slottable.removeReservation(r)
+        rr.oncomplete = constants.ONCOMPLETE_ENDLEASE
+        rr.end = self.rm.clock.get_time()
+        self._handle_end_vm(l, rr, enact=enact)
+        nexttime = self.rm.clock.get_next_schedulable_time()
+        if self.rm.config.isBackfilling():
+            # We need to reevaluate the schedule to see if there are any future
+            # reservations that we can slide back.
+            self.reevaluate_schedule(l, rr.nodes.values(), nexttime, [])
+
     def _handle_start_suspend(self, l, rr):
         self.rm.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id, constants.SCHED)
         l.print_contents()


