[haizea-commit] r493 - in trunk/src/haizea/resourcemanager: . deployment enact
haizea-commit at mailman.cs.uchicago.edu
Fri Sep 12 12:23:15 CDT 2008
Author: borja
Date: 2008-09-12 12:23:15 -0500 (Fri, 12 Sep 2008)
New Revision: 493
Modified:
trunk/src/haizea/resourcemanager/accounting.py
trunk/src/haizea/resourcemanager/datastruct.py
trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
trunk/src/haizea/resourcemanager/enact/actions.py
trunk/src/haizea/resourcemanager/resourcepool.py
trunk/src/haizea/resourcemanager/scheduler.py
Log:
- Added support for globally and locally exclusive suspend/resume operations.
- Removed "what is the node doing?" accounting, since it assumed that only one VM is running per node (an assumption we're slowly getting rid of). There is currently no way of producing utilization statistics, and a new one (which accounts for multiple VMs, potentially heterogeneous ones, per physical node) will have to be added.
Modified: trunk/src/haizea/resourcemanager/accounting.py
===================================================================
--- trunk/src/haizea/resourcemanager/accounting.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/accounting.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -29,9 +29,6 @@
self.counters = {}
self.counter_lists = {}
self.counter_avg_type = {}
-
- # What are the nodes doing?
- self.nodes = {}
# Lease data
self.leases = {}
@@ -100,27 +97,7 @@
for counter_id in self.data.counters:
initial = self.data.counters[counter_id]
self.append_stat(counter_id, initial, time = time)
-
- # Start the doing
- numnodes = self.rm.scheduler.resourcepool.get_num_nodes()
- for n in range(numnodes):
- self.data.nodes[n+1] = [(time, constants.DOING_IDLE)]
- def tick(self):
- time = self.rm.clock.get_time()
- # Update the doing
- for node in self.rm.scheduler.resourcepool.nodes:
- nodenum = node.nod_id
- doing = node.get_state()
- (lasttime, lastdoing) = self.data.nodes[nodenum][-1]
- if doing == lastdoing:
- # No need to update
- pass
- else:
- if lasttime == time:
- self.data.nodes[nodenum][-1] = (time, doing)
- else:
- self.data.nodes[nodenum].append((time, doing))
def stop(self):
time = self.rm.clock.get_time()
@@ -139,16 +116,6 @@
self.data.counter_lists[counter_id] = self.add_average(l)
elif avgtype == constants.AVERAGE_TIMEWEIGHTED:
self.data.counter_lists[counter_id] = self.add_timeweighted_average(l)
-
- # Stop the doing
- for node in self.rm.scheduler.resourcepool.nodes:
- nodenum = node.nod_id
- doing = node.vm_doing
- (lasttime, lastdoing) = self.data.nodes[nodenum][-1]
- if time != lasttime:
- self.data.nodes[nodenum].append((time, doing))
-
- self.normalize_doing()
def normalize_times(self, data):
return [((v[0] - self.starttime).seconds, v[1], v[2]) for v in data]
@@ -191,20 +158,6 @@
return stats
- def normalize_doing(self):
- nodes = dict([(i+1, []) for i in range(self.rm.scheduler.resourcepool.get_num_nodes())])
- for n in self.data.nodes:
- nodes[n] = []
- prevtime = None
- prevdoing = None
- for (time, doing) in self.data.nodes[n]:
- if prevtime != None:
- difftime = (time-prevtime).seconds
- nodes[n].append((difftime, prevdoing))
- prevtime = time
- prevdoing = doing
- self.data.nodes = nodes
-
def save_to_disk(self):
try:
dirname = os.path.dirname(self.datafile)
Modified: trunk/src/haizea/resourcemanager/datastruct.py
===================================================================
--- trunk/src/haizea/resourcemanager/datastruct.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/datastruct.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -88,6 +88,7 @@
STATE_SUSPENDED : "Suspended",
STATE_MIGRATING : "Migrating",
STATE_RESUMING : "Resuming",
+ STATE_RESUMED_READY: "Resumed-Ready",
STATE_DONE : "Done",
STATE_FAIL : "Fail"}
@@ -119,7 +120,7 @@
# Enactment information. Should only be manipulated by enactment module
self.enactment_info = None
- self.vnode_enactment_info = None
+ self.vnode_enactment_info = dict([(n+1, None) for n in range(numnodes)])
self.logger = logging.getLogger("LEASES")
@@ -382,14 +383,21 @@
class SuspensionResourceReservation(ResourceReservation):
- def __init__(self, lease, start, end, res, vmrr):
+ def __init__(self, lease, start, end, res, vnodes, vmrr):
ResourceReservation.__init__(self, lease, start, end, res)
self.vmrr = vmrr
+ self.vnodes = vnodes
def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
self.logger.log(loglevel, "Type : SUSPEND")
ResourceReservation.print_contents(self, loglevel)
+ def is_first(self):
+ return (self == self.vmrr.susp_rrs[0])
+
+ def is_last(self):
+ return (self == self.vmrr.susp_rrs[-1])
+
# TODO: Suspension RRs should be preemptible, but preempting a suspension RR
# has wider implications (with a non-trivial handling). For now, we leave them
# as non-preemptible, since the probability of preempting a suspension RR is slim.
@@ -402,17 +410,24 @@
return rr
class ResumptionResourceReservation(ResourceReservation):
- def __init__(self, lease, start, end, res, vmrr):
+ def __init__(self, lease, start, end, res, vnodes, vmrr):
ResourceReservation.__init__(self, lease, start, end, res)
self.vmrr = vmrr
+ self.vnodes = vnodes
def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
ResourceReservation.print_contents(self, loglevel)
self.logger.log(loglevel, "Type : RESUME")
- # TODO: Suspension RRs should be preemptible, but preempting a suspension RR
+ def is_first(self):
+ return (self == self.vmrr.resm_rrs[0])
+
+ def is_last(self):
+ return (self == self.vmrr.resm_rrs[-1])
+
+ # TODO: Resumption RRs should be preemptible, but preempting a resumption RR
# has wider implications (with a non-trivial handling). For now, we leave them
- # as non-preemptible, since the probability of preempting a suspension RR is slim.
+ # as non-preemptible, since the probability of preempting a resumption RR is slim.
def is_preemptible(self):
return False
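
The new is_first()/is_last() helpers exist so that lease-level state changes happen only at the boundaries of a chain of per-vnode suspension/resumption RRs (see the handler changes in scheduler.py below). A minimal sketch of the intended usage pattern, using stand-in classes rather than the real datastruct/scheduler types:

# Stand-in classes, for illustration only: with a chain of per-vnode suspension
# RRs, lease-level transitions should happen only at the chain's boundaries,
# which is exactly what is_first()/is_last() test.
class FakeVMRR(object):
    def __init__(self):
        self.susp_rrs = []

class FakeSuspRR(object):
    def __init__(self, vmrr):
        self.vmrr = vmrr
    def is_first(self):
        return self == self.vmrr.susp_rrs[0]
    def is_last(self):
        return self == self.vmrr.susp_rrs[-1]

vmrr = FakeVMRR()
vmrr.susp_rrs = [FakeSuspRR(vmrr) for _ in range(3)]

for rr in vmrr.susp_rrs:
    if rr.is_first():
        print("first suspend RR starting -> mark lease as SUSPENDING")
    if rr.is_last():
        print("last suspend RR done -> mark lease as SUSPENDED and re-enqueue")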
Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -399,7 +399,6 @@
rr.state = ResourceReservation.STATE_ACTIVE
# TODO: Enactment
lease.print_contents()
- sched.updateNodeTransferState(rr.transfers.keys(), constants.DOING_TRANSFER, lease.id)
sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id)
sched.logger.info("Starting image transfer for lease %i" % (lease.id))
@@ -431,7 +430,6 @@
sched.deployment.add_diskimages(physnode, rr.file, lease.diskimage_size, vnodes, timeout=maxend)
lease.print_contents()
- sched.updateNodeTransferState(rr.transfers.keys(), constants.DOING_IDLE, lease.id)
sched.rm.logger.debug("LEASE-%i End of handleEndFileTransfer" % lease.id)
sched.rm.logger.info("Completed image transfer for lease %i" % (lease.id))
Modified: trunk/src/haizea/resourcemanager/enact/actions.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/actions.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/enact/actions.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -39,11 +39,7 @@
def from_rr(self, rr):
EnactmentAction.from_rr(self, rr)
- # TODO: This is very kludgy
- if rr.lease.vnode_enactment_info == None:
- self.vnodes = dict([(vnode+1, VNode(None)) for vnode in range(rr.lease.numnodes)])
- else:
- self.vnodes = dict([(vnode, VNode(info)) for (vnode, info) in rr.lease.vnode_enactment_info.items()])
+ self.vnodes = dict([(vnode, VNode(info)) for (vnode, info) in rr.lease.vnode_enactment_info.items()])
class VMEnactmentStartAction(VMEnactmentAction):
def __init__(self):
@@ -57,10 +53,18 @@
def __init__(self):
VMEnactmentAction.__init__(self)
+ def from_rr(self, rr):
+ VMEnactmentAction.from_rr(self, rr)
+ self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])
+
class VMEnactmentResumeAction(VMEnactmentAction):
def __init__(self):
VMEnactmentAction.__init__(self)
+ def from_rr(self, rr):
+ VMEnactmentAction.from_rr(self, rr)
+ self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])
+
class VMEnactmentConfirmSuspendAction(VMEnactmentAction):
def __init__(self):
VMEnactmentAction.__init__(self)
Modified: trunk/src/haizea/resourcemanager/resourcepool.py
===================================================================
--- trunk/src/haizea/resourcemanager/resourcepool.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/resourcepool.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -75,6 +75,12 @@
self.rm.fail_lease(lease)
def suspend_vms(self, lease, rr):
+ # Add memory image files
+ for vnode in rr.vnodes:
+ pnode = rr.vmrr.nodes[vnode]
+ self.add_ramfile(pnode, lease.id, vnode, lease.requested_resources.get_by_type(constants.RES_MEM))
+
+ # Enact suspend
suspend_action = actions.VMEnactmentSuspendAction()
suspend_action.from_rr(rr)
try:
@@ -95,6 +101,12 @@
# pass
def resume_vms(self, lease, rr):
+ # Remove memory image files
+ for vnode in rr.vnodes:
+ pnode = rr.vmrr.nodes[vnode]
+ self.remove_ramfile(pnode, lease.id, vnode)
+
+ # Enact resume
resume_action = actions.VMEnactmentResumeAction()
resume_action.from_rr(rr)
try:
@@ -196,12 +208,6 @@
# enactment-specific information
self.enactment_info = None
- # Kludgy way of keeping track of utilization
- # TODO: Compute this information based on the lease reservations,
- # either on the fly or when Haizea stops running.
- self.transfer_doing = constants.DOING_IDLE
- self.vm_doing = constants.DOING_IDLE
-
def get_capacity(self):
return self.capacity
@@ -249,12 +255,6 @@
images = ", ".join([str(img) for img in self.files])
self.logger.vdebug("Node %i files: %iMB %s" % (self.nod_id, self.get_disk_usage(), images))
- def get_state(self):
- if self.vm_doing == constants.DOING_IDLE and self.transfer_doing == constants.DOING_TRANSFER:
- return constants.DOING_TRANSFER_NOVM
- else:
- return self.vm_doing
-
def xmlrpc_marshall(self):
# Convert to something we can send through XMLRPC
h = {}
Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py 2008-09-11 16:48:08 UTC (rev 492)
+++ trunk/src/haizea/resourcemanager/scheduler.py 2008-09-12 17:23:15 UTC (rev 493)
@@ -160,11 +160,8 @@
self.handlers[type(rr)].on_start(self, rr.lease, rr)
util = self.slottable.getUtilization(nowtime)
- self.rm.accounting.append_stat(constants.COUNTER_CPUUTILIZATION, util)
- # TODO: Should be moved to rm.py
- self.rm.accounting.tick()
+ self.rm.accounting.append_stat(constants.COUNTER_CPUUTILIZATION, util)
-
def register_handler(self, type, on_start, on_end):
handler = ReservationEventHandler(on_start=on_start, on_end=on_end)
self.handlers[type] = handler
@@ -734,6 +731,10 @@
if mustsuspend:
self.__schedule_suspension(vmrr, end)
+ # Compensate for any overestimation
+ if (vmrr.end - vmrr.start) > remaining_duration:
+ vmrr.end = vmrr.start + remaining_duration
+
susp_str = res_str = ""
if mustresume:
res_str = " (resuming)"
@@ -783,46 +784,159 @@
return start, end, canfit
+ # TODO: There is a LOT of common code between __schedule_resumption and
+ # __schedule_suspension. This has to be factored out.
+ # Also, we need to "consolidate" RRs when doing local exclusion.
+
def __schedule_resumption(self, vmrr, resume_at):
- resumetime = self.__estimate_resume_time(vmrr.lease)
- vmrr.update_start(resume_at + resumetime)
+ from haizea.resourcemanager.rm import ResourceManager
+ config = ResourceManager.get_singleton().config
+ resm_exclusion = config.get("suspendresume-exclusion")
+ rate = self.resourcepool.info.get_suspendresume_rate()
+
+ if resume_at < vmrr.start or resume_at > vmrr.end:
+ raise SchedException, "Tried to schedule a resumption at %s, which is outside the VMRR's duration (%s-%s)" % (resume_at, vmrr.start, vmrr.end)
+
+ resume_rrs = []
+ if resm_exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
+ # Global exclusion (which represents, e.g., reading the memory image files
+ # from a global file system) means no two resumptions can happen at the same
+ # time in the entire resource pool.
+ start = resume_at
+ for (vnode,pnode) in vmrr.nodes.items():
+ mem = vmrr.lease.requested_resources.get_by_type(constants.RES_MEM)
+ single_resm_time = self.__compute_suspend_resume_time(mem, rate)
+ end = start + single_resm_time
+
+ r = ds.ResourceTuple.create_empty()
+ r.set_by_type(constants.RES_MEM, mem)
+ r.set_by_type(constants.RES_DISK, mem)
+ resmres = {pnode: r}
+ vnodes = [vnode]
+ resmrr = ds.ResumptionResourceReservation(vmrr.lease, start, end, resmres, vnodes, vmrr)
+ resmrr.state = ResourceReservation.STATE_SCHEDULED
+ resume_rrs.append(resmrr)
+
+ start = end
+ elif resm_exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
+ # Local exclusion (which represents, e.g., reading the memory image files
+ # from a local file system) means no two resumptions can happen at the same
+ # time in the same physical node.
+ vnodes_in_pnode = {}
+ for (vnode,pnode) in vmrr.nodes.items():
+ vnodes_in_pnode.setdefault(pnode, []).append(vnode)
+ for pnode in vnodes_in_pnode:
+ start = resume_at
+ for vnode in vnodes_in_pnode[pnode]:
+ mem = vmrr.lease.requested_resources.get_by_type(constants.RES_MEM)
+ single_resm_time = self.__compute_suspend_resume_time(mem, rate)
+ end = start + single_resm_time
+
+ r = ds.ResourceTuple.create_empty()
+ r.set_by_type(constants.RES_MEM, mem)
+ r.set_by_type(constants.RES_DISK, mem)
+ resmres = {pnode: r}
+ vnodes = [vnode]
+ resmrr = ds.ResumptionResourceReservation(vmrr.lease, start, end, resmres, vnodes, vmrr)
+ resmrr.state = ResourceReservation.STATE_SCHEDULED
+ resume_rrs.append(resmrr)
+
+ start = end
+
+ resume_rrs.sort(key=attrgetter("start"))
+
+ resm_end = resume_rrs[-1].end
+ if resm_end > vmrr.end:
+ raise SchedException, "Determined resumption would end at %s, after the VMRR's end (%s) -- Resume time not being properly estimated?" % (resm_end, vmrr.end)
- mappings = vmrr.nodes
- resmres = {}
- for n in mappings.values():
- r = ds.ResourceTuple.create_empty()
- r.set_by_type(constants.RES_MEM, vmrr.resources_in_pnode[n].get_by_type(constants.RES_MEM))
- r.set_by_type(constants.RES_DISK, vmrr.resources_in_pnode[n].get_by_type(constants.RES_DISK))
- resmres[n] = r
- resmrr = ds.ResumptionResourceReservation(vmrr.lease, resume_at, resume_at + resumetime, resmres, vmrr)
- resmrr.state = ResourceReservation.STATE_SCHEDULED
+ vmrr.update_start(resm_end)
+ for resmrr in resume_rrs:
+ vmrr.resm_rrs.append(resmrr)
+
+
+ def __schedule_suspension(self, vmrr, suspend_by):
+ from haizea.resourcemanager.rm import ResourceManager
+ config = ResourceManager.get_singleton().config
+ susp_exclusion = config.get("suspendresume-exclusion")
+ rate = self.resourcepool.info.get_suspendresume_rate()
- vmrr.resm_rrs.append(resmrr)
+ if suspend_by < vmrr.start or suspend_by > vmrr.end:
+ raise SchedException, "Tried to schedule a suspension by %s, which is outside the VMRR's duration (%s-%s)" % (suspend_by, vmrr.start, vmrr.end)
+
+ suspend_rrs = []
+ if susp_exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
+ # Global exclusion (which represents, e.g., saving the memory image files
+ # to a global file system) means no two suspensions can happen at the same
+ # time in the entire resource pool.
+ end = suspend_by
+ for (vnode,pnode) in vmrr.nodes.items():
+ mem = vmrr.lease.requested_resources.get_by_type(constants.RES_MEM)
+ single_susp_time = self.__compute_suspend_resume_time(mem, rate)
+ start = end - single_susp_time
+
+ r = ds.ResourceTuple.create_empty()
+ r.set_by_type(constants.RES_MEM, mem)
+ r.set_by_type(constants.RES_DISK, mem)
+ suspres = {pnode: r}
+ vnodes = [vnode]
+ susprr = ds.SuspensionResourceReservation(vmrr.lease, start, end, suspres, vnodes, vmrr)
+ susprr.state = ResourceReservation.STATE_SCHEDULED
+ suspend_rrs.append(susprr)
+
+ end = start
+
+ suspend_rrs.reverse()
+ elif susp_exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
+ # Local exclusion (which represents, e.g., saving the memory image files
+ # to a local file system) means no two suspensions can happen at the same
+ # time in the same physical node.
+ vnodes_in_pnode = {}
+ for (vnode,pnode) in vmrr.nodes.items():
+ vnodes_in_pnode.setdefault(pnode, []).append(vnode)
+ for pnode in vnodes_in_pnode:
+ end = suspend_by
+ for vnode in vnodes_in_pnode[pnode]:
+ mem = vmrr.lease.requested_resources.get_by_type(constants.RES_MEM)
+ single_susp_time = self.__compute_suspend_resume_time(mem, rate)
+ start = end - single_susp_time
- def __schedule_suspension(self, vmrr, suspend_by):
- suspendtime = self.__estimate_suspend_time(vmrr.lease)
- vmrr.update_end(suspend_by - suspendtime)
+ r = ds.ResourceTuple.create_empty()
+ r.set_by_type(constants.RES_MEM, mem)
+ r.set_by_type(constants.RES_DISK, mem)
+ suspres = {pnode: r}
+ vnodes = [vnode]
+ susprr = ds.SuspensionResourceReservation(vmrr.lease, start, end, suspres, vnodes, vmrr)
+ susprr.state = ResourceReservation.STATE_SCHEDULED
+ suspend_rrs.append(susprr)
+
+ end = start
+
+ suspend_rrs.sort(key=attrgetter("start"))
+
+ susp_start = suspend_rrs[0].start
+ if susp_start < vmrr.start:
+ raise SchedException, "Determined suspension should start at %s, before the VMRR's start (%s) -- Suspend time not being properly estimated?" % (susp_start, vmrr.start)
- mappings = vmrr.nodes
- suspres = {}
- for n in mappings.values():
- r = ds.ResourceTuple.create_empty()
- r.set_by_type(constants.RES_MEM, vmrr.resources_in_pnode[n].get_by_type(constants.RES_MEM))
- r.set_by_type(constants.RES_DISK, vmrr.resources_in_pnode[n].get_by_type(constants.RES_DISK))
- suspres[n] = r
-
- susprr = ds.SuspensionResourceReservation(vmrr.lease, suspend_by - suspendtime, suspend_by, suspres, vmrr)
- susprr.state = ResourceReservation.STATE_SCHEDULED
-
- vmrr.susp_rrs.append(susprr)
+ vmrr.update_end(susp_start)
+ for susprr in suspend_rrs:
+ vmrr.susp_rrs.append(susprr)
+ def __compute_suspend_resume_time(self, mem, rate):
+ time = float(mem) / rate
+ time = round_datetime_delta(TimeDelta(seconds = time))
+ return time
+
def __estimate_suspend_resume_time(self, lease):
from haizea.resourcemanager.rm import ResourceManager
config = ResourceManager.get_singleton().config
+ susp_exclusion = config.get("suspendresume-exclusion")
rate = self.resourcepool.info.get_suspendresume_rate()
- time = float(lease.requested_resources.get_by_type(constants.RES_MEM)) / rate
- time = round_datetime_delta(TimeDelta(seconds = time))
- return time
+ mem = lease.requested_resources.get_by_type(constants.RES_MEM)
+ if susp_exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
+ return lease.numnodes * self.__compute_suspend_resume_time(mem, rate)
+ elif susp_exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
+ # Overestimating
+ return lease.numnodes * self.__compute_suspend_resume_time(mem, rate)
def __estimate_suspend_time(self, lease):
return self.__estimate_suspend_resume_time(lease)
@@ -1221,7 +1335,6 @@
# No enactment to do here, since all the suspend/resume actions are
# handled during the suspend/resume RRs
l.print_contents()
- self.updateNodeVMState(rr.nodes.values(), constants.DOING_VM_RUN, l.id)
self.logger.debug("LEASE-%i End of handleStartVM" % l.id)
self.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
@@ -1252,7 +1365,6 @@
self.numbesteffortres -= 1
self.logger.vdebug("LEASE-%i After:" % l.id)
l.print_contents()
- self.updateNodeVMState(rr.nodes.values(), constants.DOING_IDLE, l.id)
self.logger.debug("LEASE-%i End of handleEndVM" % l.id)
self.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
@@ -1277,13 +1389,14 @@
l.print_contents()
rr.state = ResourceReservation.STATE_ACTIVE
self.resourcepool.suspend_vms(l, rr)
- for vnode, pnode in rr.vmrr.nodes.items():
- self.resourcepool.add_ramfile(pnode, l.id, vnode, l.requested_resources.get_by_type(constants.RES_MEM))
+ for vnode in rr.vnodes:
+ pnode = rr.vmrr.nodes[vnode]
l.memimagemap[vnode] = pnode
- l.print_contents()
- self.updateNodeVMState(rr.vmrr.nodes.values(), constants.DOING_VM_SUSPEND, l.id)
+ if rr.is_first():
+ l.state = Lease.STATE_SUSPENDING
+ l.print_contents()
+ self.logger.info("Suspending lease %i..." % (l.id))
self.logger.debug("LEASE-%i End of handleStartSuspend" % l.id)
- self.logger.info("Suspending lease %i..." % (l.id))
def _handle_end_suspend(self, l, rr):
self.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id)
@@ -1291,11 +1404,10 @@
# TODO: React to incomplete suspend
self.resourcepool.verify_suspend(l, rr)
rr.state = ResourceReservation.STATE_DONE
- l.state = Lease.STATE_SUSPENDED
- self.queue.enqueue_in_order(l)
- self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, l.id)
+ if rr.is_last():
+ l.state = Lease.STATE_SUSPENDED
+ self.__enqueue_in_order(l)
l.print_contents()
- self.updateNodeVMState(rr.vmrr.nodes.values(), constants.DOING_IDLE, l.id)
self.logger.debug("LEASE-%i End of handleEndSuspend" % l.id)
self.logger.info("Lease %i suspended." % (l.id))
@@ -1304,10 +1416,11 @@
l.print_contents()
self.resourcepool.resume_vms(l, rr)
rr.state = ResourceReservation.STATE_ACTIVE
- l.print_contents()
- self.updateNodeVMState(rr.vmrr.nodes.values(), constants.DOING_VM_RESUME, l.id)
+ if rr.is_first():
+ l.state = Lease.STATE_RESUMING
+ l.print_contents()
+ self.logger.info("Resuming lease %i..." % (l.id))
self.logger.debug("LEASE-%i End of handleStartResume" % l.id)
- self.logger.info("Resuming lease %i..." % (l.id))
def _handle_end_resume(self, l, rr):
self.logger.debug("LEASE-%i Start of handleEndResume" % l.id)
@@ -1315,12 +1428,13 @@
# TODO: React to incomplete resume
self.resourcepool.verify_resume(l, rr)
rr.state = ResourceReservation.STATE_DONE
+ if rr.is_last():
+ l.state = Lease.STATE_RESUMED_READY
+ self.logger.info("Resumed lease %i" % (l.id))
for vnode, pnode in rr.vmrr.nodes.items():
self.resourcepool.remove_ramfile(pnode, l.id, vnode)
l.print_contents()
- self.updateNodeVMState(rr.vmrr.nodes.values(), constants.DOING_IDLE, l.id)
self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
- self.logger.info("Resumed lease %i" % (l.id))
def _handle_start_migrate(self, l, rr):
self.logger.debug("LEASE-%i Start of handleStartMigrate" % l.id)
@@ -1357,26 +1471,12 @@
def _handle_end_rr(self, l, rr):
self.slottable.removeReservation(rr)
-
-
- def __enqueue_in_order(self, req):
- self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, req.id)
- self.queue.enqueue_in_order(req)
-
-
-
+ def __enqueue_in_order(self, lease):
+ self.rm.accounting.incr_counter(constants.COUNTER_QUEUESIZE, lease.id)
+ self.queue.enqueue_in_order(lease)
def __can_reserve_besteffort_in_future(self):
return self.numbesteffortres < self.maxres
-
-
- def updateNodeVMState(self, nodes, state, lease_id):
- for n in nodes:
- self.resourcepool.get_node(n).vm_doing = state
-
- def updateNodeTransferState(self, nodes, state, lease_id):
- for n in nodes:
- self.resourcepool.get_node(n).transfer_doing = state
-
+
def is_backfilling(self):
return self.maxres > 0
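
For reference, a worked example of the revised __estimate_suspend_resume_time, with illustrative numbers (not taken from the commit):

# A 4-vnode lease, 1024 MB of memory per vnode, suspend/resume rate of 32 MB/s.
mem_mb = 1024
rate_mb_per_s = 32.0
numnodes = 4

per_vnode = mem_mb / rate_mb_per_s       # __compute_suspend_resume_time: 32 s per vnode
global_estimate = numnodes * per_vnode   # 128 s: suspensions serialize across the pool

# The local-exclusion branch deliberately reuses the same formula for now
# ("# Overestimating" in the diff): vnodes on different pnodes could overlap,
# so numnodes * per_vnode is a safe upper bound until RRs are consolidated.
local_estimate = numnodes * per_vnode    # also 128 s

print("per-vnode: %.0fs, global: %.0fs, local: %.0fs"
      % (per_vnode, global_estimate, local_estimate))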