[haizea-commit] r831 - in branches/1.1/src/haizea/core: . enact scheduler scheduler/preparation_schedulers
haizea-commit at mailman.cs.uchicago.edu
Fri Jul 16 11:44:33 CDT 2010
Author: borja
Date: 2010-07-16 11:44:33 -0500 (Fri, 16 Jul 2010)
New Revision: 831
Modified:
branches/1.1/src/haizea/core/enact/simulated.py
branches/1.1/src/haizea/core/leases.py
branches/1.1/src/haizea/core/scheduler/lease_scheduler.py
branches/1.1/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
branches/1.1/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py
branches/1.1/src/haizea/core/scheduler/resourcepool.py
branches/1.1/src/haizea/core/scheduler/vm_scheduler.py
Log:
- Allow a lease in preparing state to be preempted
- Multiple minor fixes to the preparation scheduler (mostly avoiding references to leases by their ID, and instead storing the lease object directly)
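
For reference, the recurring pattern in this revision replaces lease-ID plumbing with direct Lease object references, so helpers no longer need a lookup to get from an ID back to the lease. A minimal runnable sketch of the idea, using the resolve_to_file change below (classes simplified, not Haizea's real API):

    class Lease(object):
        def __init__(self, id):
            self.id = id
        def __repr__(self):
            # Mirrors the __repr__ added to leases.py in this revision
            return "L%i" % self.id

    # Before: callers passed lease.id around as a bare integer.
    def resolve_to_file_old(lease_id, vnode, diskimage_id):
        return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease_id, vnode)

    # After: callers pass the lease itself; the ID is read where needed and
    # the object remains available for richer queries (state, endtime, ...).
    def resolve_to_file_new(lease, vnode, diskimage_id):
        return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease.id, vnode)

    lease = Lease(42)
    assert resolve_to_file_old(lease.id, 1, "img") == resolve_to_file_new(lease, 1, "img")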
Modified: branches/1.1/src/haizea/core/enact/simulated.py
===================================================================
--- branches/1.1/src/haizea/core/enact/simulated.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/enact/simulated.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -122,5 +122,5 @@
def get_bandwidth(self):
return self.bandwidth
- def resolve_to_file(self, lease_id, vnode, diskimage_id):
- return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease_id, vnode)
\ No newline at end of file
+ def resolve_to_file(self, lease, vnode, diskimage_id):
+ return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease.id, vnode)
\ No newline at end of file
Modified: branches/1.1/src/haizea/core/leases.py
===================================================================
--- branches/1.1/src/haizea/core/leases.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/leases.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -510,7 +510,11 @@
"""
vmrr = self.get_last_vmrr()
- return vmrr.end
+ if vmrr == None:
+ # Nothing scheduled, no endtime
+ return None
+ else:
+ return vmrr.get_final_end()
def get_accumulated_duration_at(self, time):
"""Returns the amount of time required to fulfil the entire
@@ -771,6 +775,10 @@
# Overestimating when susp_exclusion == SUSPRES_EXCLUSION_LOCAL
time += compute_suspend_resume_time(mem, rate) + enactment_overhead
return time
+
+ def __repr__(self):
+ """Returns a string representation of the Lease"""
+ return "L%i" % self.id
# ONLY for simulation
def _update_prematureend(self):
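
Two things change in leases.py: get_endtime now returns None when nothing is scheduled (and otherwise defers to vmrr.get_final_end(), which, unlike vmrr.end, presumably covers any trailing suspension reservations), and Lease gains a __repr__. A hedged sketch of the new contract, with stub classes standing in for the real ones:

    class StubVMRR(object):
        def __init__(self, final_end):
            self._final_end = final_end
        def get_final_end(self):
            return self._final_end

    class StubLease(object):
        def __init__(self):
            self.vmrrs = []
        def get_last_vmrr(self):
            if len(self.vmrrs) == 0:
                return None
            return self.vmrrs[-1]
        def get_endtime(self):
            vmrr = self.get_last_vmrr()
            if vmrr is None:
                return None              # nothing scheduled, no end time yet
            return vmrr.get_final_end()

    l = StubLease()
    assert l.get_endtime() is None       # callers must now tolerate None
    l.vmrrs.append(StubVMRR(100))
    assert l.get_endtime() == 100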
Modified: branches/1.1/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/lease_scheduler.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/scheduler/lease_scheduler.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -469,21 +469,23 @@
for l in future_best_effort:
# We can only reschedule leases in the following four states
# TODO: Leases in PREPARING state should be rescheduleable.
+ #if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
self.logger.debug("Rescheduling lease %i" % l.id)
- #if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
# For each reschedulable lease already scheduled in the
# future, we cancel the lease's preparation and
# the last scheduled VM.
- vmrr = l.get_last_vmrr()
- self.preparation_scheduler.cancel_preparation(l)
- self.vm_scheduler.cancel_vm(vmrr)
- l.remove_vmrr(vmrr)
if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):
+ self.preparation_scheduler.cancel_preparation(l)
l.set_state(Lease.STATE_PENDING)
elif l.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
+ self.preparation_scheduler.cancel_preparation(l, remove_files = False)
l.set_state(Lease.STATE_SUSPENDED_PENDING)
+ vmrr = l.get_last_vmrr()
+ self.vm_scheduler.cancel_vm(vmrr)
+ l.remove_vmrr(vmrr)
+
# At this point, the lease just looks like a regular
# pending lease that can be handed off directly to the
# __schedule_lease method.
@@ -856,7 +858,6 @@
l.set_state(Lease.STATE_DONE)
l.duration.actual = l.duration.accumulated
l.end = round_datetime(get_clock().get_time())
-
if get_config().get("sanity-check"):
if l.duration.known != None and l.duration.known < l.duration.requested:
duration = l.duration.known
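
The reshuffled block above cancels a lease's preparation before its VM reservation, and, for a suspended lease, passes remove_files=False so the disk and RAM images it still needs on resume survive the cancellation. A condensed runnable sketch of the flow (all stubs, not the real scheduler API):

    class StubLease(object):
        def __init__(self, state, vmrr):
            self._state, self._vmrrs = state, [vmrr]
        def get_state(self):
            return self._state
        def set_state(self, s):
            self._state = s
        def get_last_vmrr(self):
            return self._vmrrs[-1]
        def remove_vmrr(self, vmrr):
            self._vmrrs.remove(vmrr)

    class StubPrepSched(object):
        def cancel_preparation(self, lease, remove_files=True):
            print("cancel preparation (remove_files=%s)" % remove_files)

    class StubVMSched(object):
        def cancel_vm(self, vmrr):
            print("cancel %s" % vmrr)

    READY, SCHEDULED, PREPARING, SUSP_SCHEDULED, PENDING, SUSP_PENDING = range(6)

    def reschedule(lease, prep_sched, vm_sched):
        state = lease.get_state()
        if state in (READY, SCHEDULED, PREPARING):
            prep_sched.cancel_preparation(lease)       # files can be removed
            lease.set_state(PENDING)
        elif state == SUSP_SCHEDULED:
            # Keep the files: a suspended lease needs them to resume
            prep_sched.cancel_preparation(lease, remove_files=False)
            lease.set_state(SUSP_PENDING)
        # The VM reservation is cancelled only after preparation is handled
        vmrr = lease.get_last_vmrr()
        vm_sched.cancel_vm(vmrr)
        lease.remove_vmrr(vmrr)

    reschedule(StubLease(SUSP_SCHEDULED, "vmrr-1"), StubPrepSched(), StubVMSched())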
Modified: branches/1.1/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -192,12 +192,13 @@
return earliest
- def cancel_preparation(self, lease):
+ def cancel_preparation(self, lease, remove_files = True):
toremove = self.__remove_transfers(lease)
for t in toremove:
t.lease.remove_preparationrr(t)
self.slottable.remove_reservation(t)
- self.__remove_files(lease)
+ if remove_files:
+ self.__remove_files(lease)
def cleanup(self, lease):
self.__remove_files(lease)
@@ -215,7 +216,6 @@
start = lease.start.requested
end = lease.start.requested + lease.duration.requested
for (vnode, pnode) in nodeassignment.items():
- lease_id = lease.id
self.logger.debug("Scheduling image transfer of '%s' for vnode %i to physnode %i" % (lease.software.image_id, vnode, pnode))
if reusealg == constants.REUSE_IMAGECACHES:
@@ -242,8 +242,8 @@
# to add entries to the pools
if reusealg == constants.REUSE_IMAGECACHES:
for (vnode, pnode) in mustpool.items():
- self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease.id, vnode, start)
- self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease.id, vnode)
+ self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease, vnode, start)
+ self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease, vnode)
return transfer_rrs, is_ready
@@ -263,12 +263,12 @@
if earliest_type == ImageTransferEarliestStartingTime.EARLIEST_REUSE:
# Add to pool
self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
- self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease.id, vnode, vmrr.end)
- self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease.id, vnode)
+ self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease, vnode, vmrr.end)
+ self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease, vnode)
elif earliest_type == ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK:
# We can piggyback on an existing transfer
transfer_rr = earliest[pnode].piggybacking_on
- transfer_rr.piggyback(lease.id, vnode, pnode)
+ transfer_rr.piggyback(lease, vnode, pnode)
self.logger.debug("Piggybacking transfer for V%i->P%i on existing transfer in lease %i." % (vnode, pnode, transfer_rr.lease.id))
piggybacking.append(transfer_rr)
else:
@@ -315,7 +315,7 @@
newtransfer.start = start
newtransfer.end = start + transfer_duration
for vnode, pnode in musttransfer.items():
- newtransfer.piggyback(lease.id, vnode, pnode)
+ newtransfer.piggyback(lease, vnode, pnode)
bisect.insort(self.transfers, newtransfer)
@@ -353,7 +353,7 @@
newtransfer.state = ResourceReservation.STATE_SCHEDULED
newtransfer.file = lease.software.image_id
for vnode, pnode in musttransfer.items():
- newtransfer.piggyback(lease.id, vnode, pnode)
+ newtransfer.piggyback(lease, vnode, pnode)
bisect.insort(self.transfers, newtransfer)
@@ -421,8 +421,8 @@
for t in self.transfers:
for pnode in t.transfers:
leases = [l for l, v in t.transfers[pnode]]
- if lease.id in leases:
- newtransfers = [(l, v) for l, v in t.transfers[pnode] if l!=lease.id]
+ if lease in leases:
+ newtransfers = [(l, v) for l, v in t.transfers[pnode] if l!=lease]
t.transfers[pnode] = newtransfers
# Check if the transfer has to be cancelled
a = sum([len(l) for l in t.transfers.values()])
@@ -435,7 +435,7 @@
def __remove_files(self, lease):
for vnode, pnode in lease.get_last_vmrr().nodes.items():
- self.resourcepool.remove_diskimage(pnode, lease.id, vnode)
+ self.resourcepool.remove_diskimage(pnode, lease, vnode)
@staticmethod
def _handle_start_filetransfer(sched, lease, rr):
@@ -459,27 +459,42 @@
def _handle_end_filetransfer(sched, lease, rr):
sched.logger.debug("LEASE-%i Start of handleEndFileTransfer" % lease.id)
lease.print_contents()
- lease_state = lease.get_state()
- if lease_state == Lease.STATE_PREPARING:
- lease.set_state(Lease.STATE_READY)
- rr.state = ResourceReservation.STATE_DONE
- for physnode in rr.transfers:
- vnodes = rr.transfers[physnode]
-
-# # Find out timeout of image. It will be the latest end time of all the
-# # leases being used by that image.
-# leases = [l for (l, v) in vnodes]
-# maxend=None
-# for lease_id in leases:
-# l = sched.leases.get_lease(lease_id)
-# end = lease.get_endtime()
-# if maxend==None or end>maxend:
-# maxend=end
- maxend = None
- # TODO: ENACTMENT: Verify the image was transferred correctly
- sched._add_diskimages(physnode, rr.file, lease.software.image_size, vnodes, timeout=maxend)
+
+ rr.state = ResourceReservation.STATE_DONE
+
+ lease_in_transfer = False
+ for physnode in rr.transfers:
+ vnodes = rr.transfers[physnode]
+
+ leases = [l for (l, v) in vnodes]
+
+ if lease in leases:
+ lease_in_transfer = True
+
+ # Find out timeout of image. It will be the latest end time of all the
+ # leases being used by that image.
+ maxend=None
+ for l in leases:
+ end = l.get_endtime()
+ if maxend==None or end>maxend:
+ maxend=end
+
+ # TODO: ENACTMENT: Verify the image was transferred correctly
+ sched._add_diskimages(physnode, rr.file, lease.software.image_size, vnodes, timeout=maxend)
+
+ if not lease_in_transfer:
+ # Even if the transfer started out associated to this lease, the transfers
+ # for that lease might have ended up being canceled. So, the transfer's purpose
+ # is just to deploy images for leases that are piggybacking on that transfer.
+ # No need to change anything in the lease.
+ pass
else:
- raise InconsistentLeaseStateError(lease, doing = "ending a file transfer")
+ lease_state = lease.get_state()
+
+ if lease_state == Lease.STATE_PREPARING:
+ lease.set_state(Lease.STATE_READY)
+ else:
+ raise InconsistentLeaseStateError(lease, doing = "ending a file transfer")
sched.transfers.remove(rr)
lease.print_contents()
@@ -502,8 +517,8 @@
origin = rr.transfers[vnode][0]
dest = rr.transfers[vnode][1]
- self.resourcepool.remove_diskimage(origin, l.id, vnode)
- self.resourcepool.add_diskimage(dest, l.software.image_id, l.software.image_size, l.id, vnode)
+ self.resourcepool.remove_diskimage(origin, l, vnode)
+ self.resourcepool.add_diskimage(dest, l.software.image_id, l.software.image_size, l, vnode)
rr.state = ResourceReservation.STATE_DONE
l.print_contents()
@@ -523,15 +538,15 @@
pnode = self.resourcepool.get_node(pnode_id)
if reusealg == constants.REUSE_NONE:
- for (lease_id, vnode) in vnodes:
- self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+ for (lease, vnode) in vnodes:
+ self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease, vnode)
elif reusealg == constants.REUSE_IMAGECACHES:
# Sometimes we might find that the image is already deployed
# (although unused). In that case, don't add another copy to
# the pool. Just "reactivate" it.
if pnode.exists_reusable_image(diskimage_id):
- for (lease_id, vnode) in vnodes:
- pnode.add_mapping_to_existing_reusable_image(diskimage_id, lease_id, vnode, timeout)
+ for (lease, vnode) in vnodes:
+ pnode.add_mapping_to_existing_reusable_image(diskimage_id, lease, vnode, timeout)
else:
if maxcachesize == constants.CACHESIZE_UNLIMITED:
can_add_to_cache = True
@@ -561,8 +576,8 @@
# Besides adding the image to the cache, we need to create a separate image for
# this specific lease
- for (lease_id, vnode) in vnodes:
- self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+ for (lease, vnode) in vnodes:
+ self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease, vnode)
pnode.print_files()
@@ -571,7 +586,7 @@
ResourceReservation.__init__(self, lease, start, end, res)
self.deadline = None
self.file = None
- # Dictionary of physnode -> [ (lease_id, vnode)* ]
+ # Dictionary of physnode -> [ (Lease, vnode)* ]
self.transfers = {}
def print_contents(self, loglevel="VDEBUG"):
@@ -582,11 +597,11 @@
logger.log(loglevel, "File : %s" % self.file)
logger.log(loglevel, "Transfers : %s" % self.transfers)
- def piggyback(self, lease_id, vnode, physnode):
+ def piggyback(self, lease, vnode, physnode):
if self.transfers.has_key(physnode):
- self.transfers[physnode].append((lease_id, vnode))
+ self.transfers[physnode].append((lease, vnode))
else:
- self.transfers[physnode] = [(lease_id, vnode)]
+ self.transfers[physnode] = [(lease, vnode)]
def is_preemptible(self):
return False
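
The main behavioural change above is in _handle_end_filetransfer: the handler now deploys images for every lease still piggybacked on the transfer, computing each image's cache timeout as the latest end time among those leases, and it only flips this lease to READY if the lease actually remains in the transfer (its own transfers may have been cancelled in the meantime). A sketch of just the timeout computation, which also skips None end times explicitly where the Python 2 original leans on None comparing below any number:

    class StubLease(object):
        def __init__(self, endtime):
            self._endtime = endtime
        def get_endtime(self):
            return self._endtime         # may be None: nothing scheduled

    def image_timeout(vnodes):
        """Latest end time across the (lease, vnode) pairs sharing an image."""
        maxend = None
        for lease, _vnode in vnodes:
            end = lease.get_endtime()
            if end is not None and (maxend is None or end > maxend):
                maxend = end
        return maxend

    vnodes = [(StubLease(50), 1), (StubLease(80), 2), (StubLease(None), 3)]
    assert image_timeout(vnodes) == 80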
Modified: branches/1.1/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -43,7 +43,7 @@
def schedule_migration(self, lease, vmrr, nexttime):
return []
- def cancel_preparation(self, lease):
+ def cancel_preparation(self, lease, remove_files = True):
self.cleanup(lease)
def cleanup(self, lease):
Modified: branches/1.1/src/haizea/core/scheduler/resourcepool.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/resourcepool.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/scheduler/resourcepool.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -68,7 +68,7 @@
return True
elif isinstance(lease.software, DiskImageSoftwareEnvironment):
for (vnode, pnode) in rr.nodes.items():
- img = self.get_node(pnode).get_diskimage(lease.id, vnode, lease.software.image_id)
+ img = self.get_node(pnode).get_diskimage(lease, vnode, lease.software.image_id)
if img == None:
return False
return True
@@ -77,7 +77,7 @@
# Add memory image files
for vnode in rr.vnodes:
pnode = rr.vmrr.nodes[vnode]
- self.add_ramfile(pnode, lease.id, vnode, lease.requested_resources[vnode].get_quantity(constants.RES_MEM))
+ self.add_ramfile(pnode, lease, vnode, lease.requested_resources[vnode].get_quantity(constants.RES_MEM))
# Enact suspend
suspend_action = actions.VMEnactmentSuspendAction()
@@ -97,7 +97,7 @@
# Remove memory image files
for vnode in rr.vnodes:
pnode = rr.vmrr.nodes[vnode]
- self.remove_ramfile(pnode, lease.id, vnode)
+ self.remove_ramfile(pnode, lease, vnode)
# Enact resume
resume_action = actions.VMEnactmentResumeAction()
@@ -136,14 +136,14 @@
def get_node(self, node_id):
return self.nodes[node_id]
- def add_diskimage(self, pnode, diskimage_id, imagesize, lease_id, vnode):
- self.logger.debug("Adding disk image for L%iV%i in pnode=%i" % (lease_id, vnode, pnode))
+ def add_diskimage(self, pnode, diskimage_id, imagesize, lease, vnode):
+ self.logger.debug("Adding disk image for L%iV%i in pnode=%i" % (lease.id, vnode, pnode))
self.logger.vdebug("Files BEFORE:")
self.get_node(pnode).print_files()
- imagefile = self.deployment.resolve_to_file(lease_id, vnode, diskimage_id)
- img = DiskImageFile(imagefile, imagesize, lease_id, vnode, diskimage_id)
+ imagefile = self.deployment.resolve_to_file(lease, vnode, diskimage_id)
+ img = DiskImageFile(imagefile, imagesize, lease, vnode, diskimage_id)
self.get_node(pnode).add_file(img)
self.logger.vdebug("Files AFTER:")
@@ -154,24 +154,24 @@
def remove_diskimage(self, pnode, lease, vnode):
node = self.get_node(pnode)
node.print_files()
- self.logger.debug("Removing disk image for L%iV%i in node %i" % (lease, vnode, pnode))
+ self.logger.debug("Removing disk image for L%iV%i in node %i" % (lease.id, vnode, pnode))
node.remove_diskimage(lease, vnode)
node.print_files()
- def add_ramfile(self, pnode, lease_id, vnode, size):
+ def add_ramfile(self, pnode, lease, vnode, size):
node = self.get_node(pnode)
- self.logger.debug("Adding RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
+ self.logger.debug("Adding RAM file for L%iV%i in node %i" % (lease.id, vnode, pnode))
node.print_files()
- f = RAMImageFile("RAM_L%iV%i" % (lease_id, vnode), size, lease_id, vnode)
+ f = RAMImageFile("RAM_L%iV%i" % (lease.id, vnode), size, lease, vnode)
node.add_file(f)
node.print_files()
- def remove_ramfile(self, pnode, lease_id, vnode):
+ def remove_ramfile(self, pnode, lease, vnode):
node = self.get_node(pnode)
- self.logger.debug("Removing RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
+ self.logger.debug("Removing RAM file for L%iV%i in node %i" % (lease.id, vnode, pnode))
node.print_files()
- node.remove_ramfile(lease_id, vnode)
+ node.remove_ramfile(lease, vnode)
node.print_files()
def get_max_disk_usage(self):
@@ -194,29 +194,29 @@
def add_file(self, f):
self.files.append(f)
- def get_diskimage(self, lease_id, vnode, diskimage_id):
+ def get_diskimage(self, lease, vnode, diskimage_id):
image = [f for f in self.files if isinstance(f, DiskImageFile) and
f.diskimage_id == diskimage_id and
- f.lease_id == lease_id and
+ f.lease == lease and
f.vnode == vnode]
if len(image) == 0:
return None
elif len(image) == 1:
return image[0]
elif len(image) > 1:
- self.logger.warning("More than one tainted image for L%iV%i on node %i" % (lease_id, vnode, self.id))
+ self.logger.warning("More than one tainted image for L%iV%i on node %i" % (lease.id, vnode, self.id))
return image[0]
- def remove_diskimage(self, lease_id, vnode):
+ def remove_diskimage(self, lease, vnode):
image = [f for f in self.files if isinstance(f, DiskImageFile) and
- f.lease_id == lease_id and
+ f.lease == lease and
f.vnode == vnode]
if len(image) > 0:
image = image[0]
self.files.remove(image)
- def remove_ramfile(self, lease_id, vnode):
- ramfile = [f for f in self.files if isinstance(f, RAMImageFile) and f.lease_id==lease_id and f.vnode==vnode]
+ def remove_ramfile(self, lease, vnode):
+ ramfile = [f for f in self.files if isinstance(f, RAMImageFile) and f.lease==lease and f.vnode==vnode]
if len(ramfile) > 0:
ramfile = ramfile[0]
self.files.remove(ramfile)
@@ -253,24 +253,24 @@
self.filesize = filesize
class DiskImageFile(File):
- def __init__(self, filename, filesize, lease_id, vnode, diskimage_id):
+ def __init__(self, filename, filesize, lease, vnode, diskimage_id):
File.__init__(self, filename, filesize)
- self.lease_id = lease_id
+ self.lease = lease
self.vnode = vnode
self.diskimage_id = diskimage_id
def __str__(self):
- return "(DISK L%iv%i %s %s)" % (self.lease_id, self.vnode, self.diskimage_id, self.filename)
+ return "(DISK L%iv%i %s %s)" % (self.lease.id, self.vnode, self.diskimage_id, self.filename)
class RAMImageFile(File):
- def __init__(self, filename, filesize, lease_id, vnode):
+ def __init__(self, filename, filesize, lease, vnode):
File.__init__(self, filename, filesize)
- self.lease_id = lease_id
+ self.lease = lease
self.vnode = vnode
def __str__(self):
- return "(RAM L%iv%i %s)" % (self.lease_id, self.vnode, self.filename)
+ return "(RAM L%iv%i %s)" % (self.lease.id, self.vnode, self.filename)
class ResourcePoolWithReusableImages(ResourcePool):
def __init__(self, info_enact, vm_enact, deploy_enact):
@@ -286,8 +286,8 @@
imagefile = "reusable-%s" % diskimage_id
img = ReusableDiskImageFile(imagefile, imagesize, diskimage_id, timeout)
- for (lease_id, vnode) in mappings:
- img.add_mapping(lease_id, vnode)
+ for (lease, vnode) in mappings:
+ img.add_mapping(lease, vnode)
self.get_node(pnode).add_reusable_image(img)
@@ -296,12 +296,12 @@
return img
- def add_mapping_to_existing_reusable_image(self, pnode_id, diskimage_id, lease_id, vnode, timeout):
- self.get_node(pnode_id).add_mapping_to_existing_reusable_image(diskimage_id, lease_id, vnode, timeout)
+ def add_mapping_to_existing_reusable_image(self, pnode_id, diskimage_id, lease, vnode, timeout):
+ self.get_node(pnode_id).add_mapping_to_existing_reusable_image(diskimage_id, lease, vnode, timeout)
def remove_diskimage(self, pnode_id, lease, vnode):
ResourcePool.remove_diskimage(self, pnode_id, lease, vnode)
- self.logger.debug("Removing cached images for L%iV%i in node %i" % (lease, vnode, pnode_id))
+ self.logger.debug("Removing cached images for L%iV%i in node %i" % (lease.id, vnode, pnode_id))
for img in self.get_node(pnode_id).get_reusable_images():
if (lease, vnode) in img.mappings:
img.mappings.remove((lease, vnode))
@@ -332,27 +332,27 @@
def add_reusable_image(self, f):
self.reusable_images.append(f)
- def add_mapping_to_existing_reusable_image(self, diskimage_id, lease_id, vnode, timeout):
+ def add_mapping_to_existing_reusable_image(self, diskimage_id, lease, vnode, timeout):
for f in self.reusable_images:
if f.diskimage_id == diskimage_id:
- f.add_mapping(lease_id, vnode)
+ f.add_mapping(lease, vnode)
f.update_timeout(timeout)
break # Ugh
self.print_files()
- def get_reusable_image(self, diskimage_id, after = None, lease_id=None, vnode=None):
+ def get_reusable_image(self, diskimage_id, after = None, lease=None, vnode=None):
images = [i for i in self.reusable_images if i.diskimage_id == diskimage_id]
if after != None:
images = [i for i in images if i.timeout >= after]
- if lease_id != None and vnode != None:
- images = [i for i in images if i.has_mapping(lease_id, vnode)]
+ if lease != None and vnode != None:
+ images = [i for i in images if i.has_mapping(lease, vnode)]
if len(images)>0:
return images[0]
else:
return None
- def exists_reusable_image(self, imagefile, after = None, lease_id=None, vnode=None):
- entry = self.get_reusable_image(imagefile, after = after, lease_id=lease_id, vnode=vnode)
+ def exists_reusable_image(self, imagefile, after = None, lease=None, vnode=None):
+ entry = self.get_reusable_image(imagefile, after = after, lease=lease, vnode=vnode)
if entry == None:
return False
else:
@@ -404,11 +404,11 @@
self.mappings = set([])
self.timeout = timeout
- def add_mapping(self, lease_id, vnode):
- self.mappings.add((lease_id, vnode))
+ def add_mapping(self, lease, vnode):
+ self.mappings.add((lease, vnode))
- def has_mapping(self, lease_id, vnode):
- return (lease_id, vnode) in self.mappings
+ def has_mapping(self, lease, vnode):
+ return (lease, vnode) in self.mappings
def has_mappings(self):
return len(self.mappings) > 0
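
A subtlety worth noting in resourcepool.py: file records now hold the Lease object, and lookups filter with f.lease == lease. Since Lease defines no __eq__, that comparison falls back to object identity, and leases stay hashable for the mappings set in ReusableDiskImageFile. A sketch showing the identity semantics (stub classes only):

    class StubLease(object):
        def __init__(self, id):
            self.id = id
        # No __eq__/__hash__ overrides: equality is identity, and the
        # object remains usable in sets such as ReusableDiskImageFile.mappings

    class StubDiskImageFile(object):
        def __init__(self, lease, vnode, diskimage_id):
            self.lease, self.vnode, self.diskimage_id = lease, vnode, diskimage_id

    def get_diskimage(files, lease, vnode, diskimage_id):
        image = [f for f in files
                 if f.diskimage_id == diskimage_id
                 and f.lease == lease            # identity comparison
                 and f.vnode == vnode]
        if len(image) == 0:
            return None
        return image[0]

    l1, l2 = StubLease(1), StubLease(1)          # same ID, distinct objects
    files = [StubDiskImageFile(l1, 0, "img")]
    assert get_diskimage(files, l1, 0, "img") is not None
    assert get_diskimage(files, l2, 0, "img") is None    # identity, not ID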
Modified: branches/1.1/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- branches/1.1/src/haizea/core/scheduler/vm_scheduler.py 2010-07-15 23:52:49 UTC (rev 830)
+++ branches/1.1/src/haizea/core/scheduler/vm_scheduler.py 2010-07-16 16:44:33 UTC (rev 831)
@@ -1502,7 +1502,7 @@
l.set_state(Lease.STATE_RESUMED_READY)
self.logger.info("Resumed lease %i" % (l.id))
for vnode, pnode in rr.vmrr.nodes.items():
- self.resourcepool.remove_ramfile(pnode, l.id, vnode)
+ self.resourcepool.remove_ramfile(pnode, l, vnode)
l.print_contents()
self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
@@ -1581,8 +1581,8 @@
dest = rr.transfers[vnode][1]
# Update RAM files
- self.resourcepool.remove_ramfile(origin, l.id, vnode)
- self.resourcepool.add_ramfile(dest, l.id, vnode, l.requested_resources[vnode].get_quantity(constants.RES_MEM))
+ self.resourcepool.remove_ramfile(origin, l, vnode)
+ self.resourcepool.add_ramfile(dest, l, vnode, l.requested_resources[vnode].get_quantity(constants.RES_MEM))
rr.state = ResourceReservation.STATE_DONE
l.print_contents()
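
The vm_scheduler.py hunks are the same mechanical substitution: RAM-file bookkeeping at resume end and during migration now hands the resource pool the lease itself. A sketch of the migration step (StubResourcePool is a hypothetical stand-in, not Haizea's ResourcePool):

    class StubLease(object):
        pass

    class StubResourcePool(object):
        def __init__(self):
            self.ramfiles = {}               # (pnode, lease, vnode) -> size
        def add_ramfile(self, pnode, lease, vnode, size):
            self.ramfiles[(pnode, lease, vnode)] = size
        def remove_ramfile(self, pnode, lease, vnode):
            del self.ramfiles[(pnode, lease, vnode)]

    def migrate_ramfile(pool, lease, vnode, origin, dest, mem):
        # Mirrors the handler: drop the file at the origin, recreate at dest
        pool.remove_ramfile(origin, lease, vnode)
        pool.add_ramfile(dest, lease, vnode, mem)

    pool, l = StubResourcePool(), StubLease()
    pool.add_ramfile(1, l, 0, 1024)
    migrate_ramfile(pool, l, 0, origin=1, dest=2, mem=1024)
    assert (2, l, 0) in pool.ramfiles and (1, l, 0) not in pool.ramfiles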