[haizea-commit] r447 - in trunk/src/haizea/resourcemanager: . deployment
haizea-commit at mailman.cs.uchicago.edu
Thu Jul 24 03:57:48 CDT 2008
Author: borja
Date: 2008-07-24 03:57:48 -0500 (Thu, 24 Jul 2008)
New Revision: 447
Modified:
trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
trunk/src/haizea/resourcemanager/scheduler.py
Log:
Minor fixes in deployment code: continue renaming camelCase methods and attributes to underscore style (appendRR -> append_rr, diskImageID -> diskimage_id, getLease -> get_lease, isPreemptible -> is_preemptible), pass the image node bandwidth explicitly to estimate_image_transfer_time, commit the VM reservation to the lease only after deployment scheduling succeeds, select the deployment backend after handler registration, and add an enact flag to _handle_end_vm for prematurely ended VMs.
Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py 2008-07-23 18:20:20 UTC (rev 446)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py 2008-07-24 08:57:48 UTC (rev 447)
@@ -62,10 +62,10 @@
end = lease.start.requested + lease.duration.requested
for (vnode, pnode) in nodeassignment.items():
lease_id = lease.id
- self.logger.debug("Scheduling image transfer of '%s' from vnode %i to physnode %i" % (lease.diskImageID, vnode, pnode), constants.SCHED)
+ self.logger.debug("Scheduling image transfer of '%s' from vnode %i to physnode %i" % (lease.diskimage_id, vnode, pnode), constants.SCHED)
if reusealg == constants.REUSE_IMAGECACHES:
- if self.resourcepool.isInPool(pnode, lease.diskImageID, start):
+ if self.resourcepool.isInPool(pnode, lease.diskimage_id, start):
self.logger.debug("No need to schedule an image transfer (reusing an image in pool)", constants.SCHED)
mustpool[vnode] = pnode
else:
@@ -94,7 +94,7 @@
lease.appendRR(filetransfer)
elif mechanism == constants.TRANSFER_MULTICAST:
filetransfer = self.scheduleImageTransferEDF(lease, musttransfer, nexttime)
- lease.appendRR(filetransfer)
+ lease.append_rr(filetransfer)
# No chance of scheduling exception at this point. It's safe
# to add entries to the pools
@@ -117,7 +117,7 @@
if reqtransfer == constants.REQTRANSFER_COWPOOL:
# Add to pool
self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode), constants.SCHED)
- self.resourcepool.addToPool(pnode, lease.diskImageID, lease.id, vnode, vmrr.end)
+ self.resourcepool.addToPool(pnode, lease.diskimage_id, lease.id, vnode, vmrr.end)
elif reqtransfer == constants.REQTRANSFER_PIGGYBACK:
# We can piggyback on an existing transfer
transferRR = earliest[pnode][2]
@@ -145,7 +145,7 @@
lease.state = constants.LEASE_STATE_DEPLOYED
lease.imagesavail = nexttime
for rr in transferRRs:
- lease.appendRR(rr)
+ lease.append_rr(rr)
def find_earliest_starting_times(self, lease_req, nexttime):
@@ -158,7 +158,7 @@
# Figure out starting time assuming we have to transfer the image
nextfifo = self.getNextFIFOTransferTime(nexttime)
- imgTransferTime=lease_req.estimateImageTransferTime()
+ imgTransferTime=lease_req.estimate_image_transfer_time(self.resourcepool.imagenode_bandwidth)
# Find worst-case earliest start time
if lease_req.numnodes == 1:
@@ -177,7 +177,7 @@
# Check if we can reuse images
if reusealg==constants.REUSE_IMAGECACHES:
- nodeswithimg = self.resourcepool.getNodesWithImgInPool(lease_req.diskImageID)
+ nodeswithimg = self.resourcepool.getNodesWithImgInPool(lease_req.diskimage_id)
for node in nodeswithimg:
earliest[node] = [nexttime, constants.REQTRANSFER_COWPOOL]
@@ -202,8 +202,8 @@
def scheduleImageTransferEDF(self, req, vnodes, nexttime):
# Estimate image transfer time
- imgTransferTime=req.estimateImageTransferTime()
bandwidth = self.resourcepool.imagenode_bandwidth
+ imgTransferTime=req.estimate_image_transfer_time(bandwidth)
# Determine start time
activetransfers = [t for t in self.transfersEDF if t.state == constants.RES_STATE_ACTIVE]
@@ -227,7 +227,7 @@
newtransfer = FileTransferResourceReservation(req, res)
newtransfer.deadline = req.start.requested
newtransfer.state = constants.RES_STATE_SCHEDULED
- newtransfer.file = req.diskImageID
+ newtransfer.file = req.diskimage_id
for vnode, pnode in vnodes.items():
newtransfer.piggyback(req.id, vnode, pnode)
newtransfers.append(newtransfer)
@@ -299,11 +299,11 @@
def scheduleImageTransferFIFO(self, req, reqtransfers, nexttime):
# Estimate image transfer time
- imgTransferTime=req.estimateImageTransferTime()
+ bandwidth = self.resourcepool.imagenode_bandwidth
+ imgTransferTime=req.estimate_image_transfer_time(bandwidth)
config = self.scheduler.rm.config
mechanism = config.get_transfer_mechanism()
startTime = self.getNextFIFOTransferTime(nexttime)
- bandwidth = self.resourcepool.imagenode_bandwidth
newtransfers = []
@@ -327,7 +327,7 @@
newtransfer.end = startTime+imgTransferTime
newtransfer.deadline = None
newtransfer.state = constants.RES_STATE_SCHEDULED
- newtransfer.file = req.diskImageID
+ newtransfer.file = req.diskimage_id
for vnode in reqtransfers:
physnode = reqtransfers[vnode]
newtransfer.piggyback(req.id, vnode, physnode)
@@ -391,7 +391,7 @@
# Update VM Image maps
for lease_id, v in vnodes:
- lease = sched.scheduledleases.getLease(lease_id)
+ lease = sched.scheduledleases.get_lease(lease_id)
lease.vmimagemap[v] = physnode
# Find out timeout of image. It will be the latest end time of all the
@@ -399,12 +399,12 @@
leases = [l for (l, v) in vnodes]
maxend=None
for lease_id in leases:
- l = sched.scheduledleases.getLease(lease_id)
- end = lease.getEnd()
+ l = sched.scheduledleases.get_lease(lease_id)
+ end = l.get_endtime()
if maxend==None or end>maxend:
maxend=end
# TODO: ENACTMENT: Verify the image was transferred correctly
- sched.rm.resourcepool.addImageToNode(physnode, rr.file, lease.diskImageSize, vnodes, timeout=maxend)
+ sched.rm.resourcepool.addImageToNode(physnode, rr.file, lease.diskimage_size, vnodes, timeout=maxend)
elif lease.state == constants.LEASE_STATE_SUSPENDED:
pass
# TODO: Migrating
@@ -434,5 +434,5 @@
else:
self.transfers[physnode] = [(lease_id, vnode)]
- def isPreemptible(self):
+ def is_preemptible(self):
return False
\ No newline at end of file
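A recurring change in this file replaces estimateImageTransferTime() with estimate_image_transfer_time(bandwidth), so the image node's bandwidth is passed in explicitly instead of being looked up by the lease itself. The estimator's body is not part of this diff; a minimal sketch of what it could look like, assuming diskimage_size is in megabytes, bandwidth in megabits per second, and a plain datetime.timedelta result (all three are assumptions, not taken from this commit):

    from datetime import timedelta

    def estimate_image_transfer_time(diskimage_size, bandwidth):
        # Hypothetical estimator: transfer time = image size / bandwidth.
        # Assumed units: diskimage_size in MB, bandwidth in Mbps.
        seconds = (diskimage_size * 8) / float(bandwidth)
        return timedelta(seconds=seconds)

For example, a 4096 MB image over a 100 Mbps link comes out to about 328 seconds.

The image-cache branch above calls self.resourcepool.isInPool(pnode, diskimage_id, start) before scheduling a transfer and addToPool(pnode, diskimage_id, lease_id, vnode, vmrr.end) after one. A dict-based sketch consistent with those call signatures (the real ResourcePool is more involved; the names and structure here are illustrative only):

    class ImagePool(object):
        """Hypothetical per-node image cache keyed on (pnode, image_id)."""
        def __init__(self):
            self.timeouts = {}  # (pnode, image_id) -> latest expiration time

        def addToPool(self, pnode, image_id, lease_id, vnode, timeout):
            # Keep a cached image around until the latest interested lease's
            # VM reservation ends. Per-(lease, vnode) tracking is omitted
            # in this sketch.
            key = (pnode, image_id)
            if key not in self.timeouts or timeout > self.timeouts[key]:
                self.timeouts[key] = timeout

        def isInPool(self, pnode, image_id, time):
            # The image is reusable if it is cached on the node and its
            # timeout has not passed by the requested start time.
            key = (pnode, image_id)
            return key in self.timeouts and self.timeouts[key] >= time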
Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py 2008-07-23 18:20:20 UTC (rev 446)
+++ trunk/src/haizea/resourcemanager/scheduler.py 2008-07-24 08:57:48 UTC (rev 447)
@@ -82,14 +82,6 @@
self.completedleases = ds.LeaseTable(self)
self.rejectedleases = ds.LeaseTable(self)
self.pending_leases = []
-
- deploy_type = self.rm.config.get_lease_deployment_type()
- if deploy_type == constants.DEPLOYMENT_UNMANAGED:
- self.deployment = UnmanagedDeployment(self)
- elif deploy_type == constants.DEPLOYMENT_PREDEPLOY:
- self.deployment = PredeployedImagesDeployment(self)
- elif deploy_type == constants.DEPLOYMENT_TRANSFER:
- self.deployment = ImageTransferDeployment(self)
self.handlers = {}
@@ -105,6 +97,14 @@
on_start = Scheduler._handle_start_resume,
on_end = Scheduler._handle_end_resume)
+ deploy_type = self.rm.config.get_lease_deployment_type()
+ if deploy_type == constants.DEPLOYMENT_UNMANAGED:
+ self.deployment = UnmanagedDeployment(self)
+ elif deploy_type == constants.DEPLOYMENT_PREDEPLOY:
+ self.deployment = PredeployedImagesDeployment(self)
+ elif deploy_type == constants.DEPLOYMENT_TRANSFER:
+ self.deployment = ImageTransferDeployment(self)
+
self.maxres = self.rm.config.getMaxReservations()
self.numbesteffortres = 0
@@ -174,6 +174,7 @@
"""Return True if there are any leases scheduled in the future"""
return not self.scheduledleases.is_empty()
+ # TODO: Replace this with a more general event handling system
def notify_premature_end_vm(self, l, rr):
self.rm.logger.info("LEASE-%i The VM has ended prematurely." % l.id, constants.SCHED)
self._handle_end_rr(l, rr)
@@ -184,7 +185,7 @@
self.slottable.removeReservation(r)
rr.oncomplete = constants.ONCOMPLETE_ENDLEASE
rr.end = self.rm.clock.get_time()
- self._handle_end_vm(l, rr)
+ self._handle_end_vm(l, rr, enact=False)
nexttime = self.rm.clock.get_next_schedulable_time()
if self.rm.config.isBackfilling():
# We need to reevaluate the schedule to see if there are any future
@@ -244,10 +245,9 @@
for lease in leases:
self.preempt(lease, time=start)
- # Add VM resource reservations
+ # Create VM resource reservations
vmrr = ds.VMResourceReservation(lease_req, start, end, nodeassignment, res, constants.ONCOMPLETE_ENDLEASE, False)
vmrr.state = constants.RES_STATE_SCHEDULED
- lease_req.append_rr(vmrr)
# Schedule deployment overhead
self.deployment.schedule(lease_req, vmrr, nexttime)
@@ -255,6 +255,7 @@
# Commit reservation to slot table
# (we don't do this until the very end because the deployment overhead
# scheduling could still throw an exception)
+ lease_req.append_rr(vmrr)
self.slottable.addReservation(vmrr)
except SlotFittingException, msg:
raise SchedException, "The requested AR lease is infeasible. Reason: %s" % msg
@@ -497,7 +498,9 @@
self.rm.logger.debug("LEASE-%i End of handleStartVM" % l.id, constants.SCHED)
self.rm.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()), constants.SCHED)
- def _handle_end_vm(self, l, rr):
+ # TODO: Replace enact with a saner way of handling leases that have failed or
+ # ended prematurely.
+ def _handle_end_vm(self, l, rr, enact=True):
self.rm.logger.debug("LEASE-%i Start of handleEndVM" % l.id, constants.SCHED)
self.rm.logger.edebug("LEASE-%i Before:" % l.id, constants.SCHED)
l.print_contents()
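The reordering in scheduler.py enforces a create-then-commit flow for AR leases: the VMResourceReservation is created first, deployment scheduling runs next (and can still raise SchedException), and only then is the reservation appended to the lease and added to the slot table, so a failed deployment leaves no dangling state behind. A condensed sketch of that ordering, with simplified hypothetical names rather than the full scheduling method from this file:

    def schedule_ar(lease_req, vmrr, deployment, slottable, nexttime):
        # 1. Deployment scheduling may raise a scheduling exception;
        #    nothing has been committed anywhere yet.
        deployment.schedule(lease_req, vmrr, nexttime)
        # 2. Only once deployment scheduling succeeds do we mutate shared
        #    state, so an exception above requires no rollback.
        lease_req.append_rr(vmrr)
        slottable.addReservation(vmrr)

The same defensive idea shows up in the new enact flag on _handle_end_vm: when a VM ends prematurely, notify_premature_end_vm calls the handler with enact=False, presumably so the bookkeeping runs without re-triggering enactment actions.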