[haizea-commit] r562 - in trunk: src/haizea/resourcemanager/scheduler src/haizea/resourcemanager/scheduler/preparation_schedulers tests
haizea-commit at mailman.cs.uchicago.edu
Tue Feb 3 16:57:49 CST 2009
Author: borja
Date: 2009-02-03 16:57:47 -0600 (Tue, 03 Feb 2009)
New Revision: 562
Modified:
trunk/src/haizea/resourcemanager/scheduler/lease_scheduler.py
trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/__init__.py
trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/imagetransfer.py
trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/unmanaged.py
trunk/tests/migrate.lwf
trunk/tests/preemption.lwf
trunk/tests/preemption_prematureend.lwf
trunk/tests/preemption_prematureend2.lwf
trunk/tests/reservation.lwf
trunk/tests/reservation_prematureend.lwf
trunk/tests/wait.lwf
Log:
- Minor fixes to tests.
- Fixes for bugs uncovered by those tests: preparation schedulers now
return their deployment RRs and a readiness flag for the lease scheduler
to commit, and clean up their disk images when a lease completes.
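The core of this change is a new contract between the lease scheduler and
the preparation schedulers: schedule() now returns its deployment RRs plus
a readiness flag instead of committing reservations and lease state itself.
A minimal sketch of the resulting call sequence (attribute and method names
are taken from the diff below; the enclosing method is illustrative):

    # Sketch, not part of the commit: how the lease scheduler drives a
    # preparation scheduler after r562.
    def schedule_lease(self, lease, vmrr, nexttime):
        # May raise SchedException; nothing has been committed yet if so.
        deploy_rrs, is_ready = self.preparation_scheduler.schedule(
            lease, vmrr, nexttime)

        # Commit point: only now do RRs touch the lease and slot table.
        for rr in deploy_rrs:
            lease.append_deployrr(rr)
        lease.append_vmrr(vmrr)
        for rr in deploy_rrs:
            self.slottable.addReservation(rr)
        self.slottable.addReservation(vmrr)

        lease.set_state(Lease.STATE_SCHEDULED)
        if is_ready:  # replaces the old preparation_scheduler.is_ready(lease)
            lease.set_state(Lease.STATE_READY)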
Modified: trunk/src/haizea/resourcemanager/scheduler/lease_scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler/lease_scheduler.py 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/src/haizea/resourcemanager/scheduler/lease_scheduler.py 2009-02-03 22:57:47 UTC (rev 562)
@@ -268,7 +268,6 @@
get_accounting().incr_counter(constants.COUNTER_ARACCEPTED, lease.id)
accepted = True
except SchedException, msg:
- raise
get_accounting().incr_counter(constants.COUNTER_ARREJECTED, lease.id)
self.logger.debug("LEASE-%i Scheduling exception: %s" % (lease.id, msg))
@@ -337,11 +336,17 @@
self.__preempt(l, preemption_time=vmrr.start)
# Schedule deployment overhead
- self.preparation_scheduler.schedule(lease, vmrr, nexttime)
+ deploy_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, nexttime)
# Commit reservation to slot table
# (we don't do this until the very end because the deployment overhead
# scheduling could still throw an exception)
+ for rr in deploy_rrs:
+ lease.append_deployrr(rr)
+
+ for rr in deploy_rrs:
+ self.slottable.addReservation(rr)
+
lease.append_vmrr(vmrr)
self.slottable.addReservation(vmrr)
@@ -351,7 +356,7 @@
lease.set_state(Lease.STATE_SCHEDULED)
- if self.preparation_scheduler.is_ready(lease):
+ if is_ready:
lease.set_state(Lease.STATE_READY)
except SchedException, msg:
raise SchedException, "The requested AR lease is infeasible. Reason: %s" % msg
@@ -381,21 +386,29 @@
(vmrr, in_future) = self.vm_scheduler.fit_asap(lease, nexttime, earliest, allow_reservation_in_future = canreserve)
# Schedule deployment
+ is_ready = False
+ deploy_rrs = []
if lease_state == Lease.STATE_SUSPENDED_QUEUED:
self.vm_scheduler.schedule_migration(lease, vmrr, nexttime)
else:
- self.preparation_scheduler.schedule(lease, vmrr, nexttime)
+ deploy_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, nexttime)
# At this point, the lease is feasible.
# Commit changes by adding RRs to lease and to slot table
+ # Add deployment RRs (if any) to lease
+ for rr in deploy_rrs:
+ lease.append_deployrr(rr)
+
# Add VMRR to lease
lease.append_vmrr(vmrr)
# Add resource reservations to slottable
- # TODO: deployment RRs should be added here, not in the preparation scheduler
+ # Deployment RRs (if any)
+ for rr in deploy_rrs:
+ self.slottable.addReservation(rr)
# Pre-VM RRs (if any)
for rr in vmrr.pre_rrs:
@@ -413,7 +426,7 @@
if lease_state == Lease.STATE_QUEUED:
lease.set_state(Lease.STATE_SCHEDULED)
- if self.preparation_scheduler.is_ready(lease):
+ if is_ready:
lease.set_state(Lease.STATE_READY)
elif lease_state == Lease.STATE_SUSPENDED_QUEUED:
lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)
@@ -524,6 +537,7 @@
l.set_state(Lease.STATE_DONE)
l.duration.actual = l.duration.accumulated
l.end = round_datetime(get_clock().get_time())
+ self.preparation_scheduler.cleanup(l)
self.completedleases.add(l)
self.leases.remove(l)
if isinstance(l, BestEffortLease):
Modified: trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/__init__.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/__init__.py 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/__init__.py 2009-02-03 22:57:47 UTC (rev 562)
@@ -25,8 +25,8 @@
self.resourcepool = resourcepool
self.deployment_enact = deployment_enact
self.logger = logging.getLogger("DEPLOY")
-
- def is_ready(self, lease):
+
+ def cleanup(self, lease):
abstract()
class PreparationSchedException(Exception):
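For reference, the preparation-scheduler interface this file defines now
looks roughly as follows. The constructor body and the cleanup() stub are
in the diff; the class name, the other signatures (inferred from their
call sites), and the abstract() stand-in are assumptions of this sketch:

    import logging

    def abstract():
        # Stand-in for the project's own abstract() helper.
        raise NotImplementedError("subclasses must override this method")

    class PreparationScheduler(object):
        def __init__(self, resourcepool, deployment_enact):
            self.resourcepool = resourcepool
            self.deployment_enact = deployment_enact
            self.logger = logging.getLogger("DEPLOY")

        def schedule(self, lease, vmrr, nexttime):
            # Must return (deploy_rrs, is_ready) and commit nothing.
            abstract()

        def cancel_deployment(self, lease):
            abstract()

        def cleanup(self, lease):
            # New in r562: called once when a lease completes.
            abstract()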
Modified: trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/imagetransfer.py 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/imagetransfer.py 2009-02-03 22:57:47 UTC (rev 562)
@@ -23,7 +23,7 @@
from haizea.resourcemanager.scheduler import ReservationEventHandler
from haizea.common.utils import estimate_transfer_time, get_config
from haizea.resourcemanager.scheduler.slottable import ResourceTuple
-from haizea.resourcemanager.scheduler import ReservationEventHandler, PreparationSchedException
+from haizea.resourcemanager.scheduler import ReservationEventHandler, PreparationSchedException, CriticalSchedException
import copy
@@ -59,15 +59,15 @@
def schedule(self, lease, vmrr, nexttime):
if isinstance(lease, ARLease):
- self.schedule_for_ar(lease, vmrr, nexttime)
+ return self.schedule_for_ar(lease, vmrr, nexttime)
elif isinstance(lease, BestEffortLease):
- self.schedule_for_besteffort(lease, vmrr, nexttime)
+ return self.schedule_for_besteffort(lease, vmrr, nexttime)
def cancel_deployment(self, lease):
if isinstance(lease, BestEffortLease):
self.__remove_from_fifo_transfers(lease.id)
- def is_ready(self, lease):
+ def is_ready(self, lease, vmrr):
return False
def schedule_for_ar(self, lease, vmrr, nexttime):
@@ -75,6 +75,7 @@
mechanism = config.get("transfer-mechanism")
reusealg = config.get("diskimage-reuse")
avoidredundant = config.get("avoid-redundant-transfers")
+ is_ready = False
if avoidredundant:
pass # TODO
@@ -100,26 +101,14 @@
musttransfer[vnode] = pnode
if len(musttransfer) == 0:
- lease.set_state(Lease.STATE_READY)
+ is_ready = True
else:
if mechanism == constants.TRANSFER_UNICAST:
- # Dictionary of transfer RRs. Key is the physical node where
- # the image is being transferred to
- transferRRs = {}
- for vnode, pnode in musttransfer:
- if transferRRs.has_key(pnode):
- # We've already scheduled a transfer to this node. Reuse it.
- self.logger.debug("No need to schedule an image transfer (reusing an existing transfer)")
- transferRR = transferRRs[pnode]
- transferRR.piggyback(lease_id, vnode, pnode, end)
- else:
- filetransfer = self.schedule_imagetransfer_edf(lease, {vnode:pnode}, nexttime)
- transferRRs[pnode] = filetransfer
- lease.appendRR(filetransfer)
+ pass
+ # TODO: Not supported
elif mechanism == constants.TRANSFER_MULTICAST:
try:
filetransfer = self.schedule_imagetransfer_edf(lease, musttransfer, nexttime)
- lease.append_deployrr(filetransfer)
except PreparationSchedException, msg:
raise
@@ -128,6 +117,9 @@
if reusealg == constants.REUSE_IMAGECACHES:
for (vnode, pnode) in mustpool.items():
self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.diskimage_id, lease.id, vnode, start)
+ self.resourcepool.add_diskimage(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
+
+ return [filetransfer], is_ready
def schedule_for_besteffort(self, lease, vmrr, nexttime):
config = get_config()
@@ -135,6 +127,7 @@
reusealg = config.get("diskimage-reuse")
avoidredundant = config.get("avoid-redundant-transfers")
earliest = self.find_earliest_starting_times(lease, nexttime)
+ is_ready = False
transferRRs = []
musttransfer = {}
@@ -145,6 +138,7 @@
# Add to pool
self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.diskimage_id, lease.id, vnode, vmrr.end)
+ self.resourcepool.add_diskimage(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
elif reqtransfer == constants.REQTRANSFER_PIGGYBACK:
# We can piggyback on an existing transfer
transferRR = earliest[pnode][2]
@@ -155,25 +149,23 @@
# Transfer
musttransfer[vnode] = pnode
self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode))
+
if len(musttransfer)>0:
transferRRs = self.schedule_imagetransfer_fifo(lease, musttransfer, nexttime)
endtransfer = transferRRs[-1].end
lease.imagesavail = endtransfer
- else:
- # TODO: Not strictly correct. Should mark the lease
- # as deployed when piggybacked transfers have concluded
- lease.set_state(Lease.STATE_READY)
+
if len(piggybacking) > 0:
endtimes = [t.end for t in piggybacking]
if len(musttransfer) > 0:
endtimes.append(endtransfer)
lease.imagesavail = max(endtimes)
+
if len(musttransfer)==0 and len(piggybacking)==0:
- lease.set_state(Lease.STATE_READY)
lease.imagesavail = nexttime
- for rr in transferRRs:
- lease.append_deployrr(rr)
-
+ is_ready = True
+
+ return transferRRs, is_ready
def find_earliest_starting_times(self, lease, nexttime):
nodIDs = [n.nod_id for n in self.resourcepool.get_nodes()]
@@ -315,7 +307,6 @@
# Make changes
for new_t in newtransfers:
if new_t == newtransfer:
- self.slottable.addReservation(new_t)
self.transfers_edf.append(new_t)
else:
t_original = transfermap[new_t]
@@ -361,7 +352,6 @@
for vnode in reqtransfers:
physnode = reqtransfers[vnode]
newtransfer.piggyback(req.id, vnode, physnode)
- self.slottable.addReservation(newtransfer)
newtransfers.append(newtransfer)
self.transfers_fifo += newtransfers
@@ -413,7 +403,7 @@
rr.state = ResourceReservation.STATE_ACTIVE
# TODO: Enactment
else:
- raise CriticalSchedException, "Lease is an inconsistent state (tried to start file transfer when state is %s)" % lease_state
+ raise CriticalSchedException, "Lease is in an inconsistent state (tried to start file transfer when state is %s)" % Lease.state_str[lease_state]
lease.print_contents()
sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id)
@@ -493,49 +483,17 @@
self.resourcepool.add_reusable_image(pnode_id, diskimage_id, diskimage_size, vnodes, timeout)
else:
# This just means we couldn't add the image
- # to the pool. We will have to create disk images to be used
- # only by these leases
- self.logger.debug("Unable to add to pool. Must create individual disk images instead.")
- for (lease_id, vnode) in vnodes:
- self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+ # to the pool. We will have to make do with just adding the tainted images.
+ self.logger.debug("Unable to add to pool. Must create individual disk images directly instead.")
+ # Besides adding the image to the cache, we need to create a separate image for
+ # this specific lease
+ for (lease_id, vnode) in vnodes:
+ self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+
pnode.print_files()
-
- def check(self, lease, vmrr):
- # Check that all the required disk images are available.
- # Note that it is the enactment module's responsibility to
- # mark an image as correctly deployed. The check we do here
- # is (1) to catch scheduling errors (i.e., the image transfer
- # was not scheduled) and (2) to create disk images if
- # we can reuse a reusable image in the node.
- # TODO: However, we're assuming CoW, which means the enactment
- # must support it too. If we can't assume CoW, we would have to
- # make a copy of the master image (which takes time), and should
- # be scheduled.
-
- for (vnode, pnode_id) in vmrr.nodes.items():
- pnode = self.resourcepool.get_node(pnode_id)
-
- diskimage = pnode.get_diskimage(lease.id, vnode, lease.diskimage_id)
- if self.reusealg == constants.REUSE_NONE:
- if diskimage == None:
- raise Exception, "ERROR: No image for L%iV%i is on node %i" % (lease.id, vnode, pnode)
- elif self.reusealg == constants.REUSE_IMAGECACHES:
- reusable_image = pnode.get_reusable_image(lease.diskimage_id, lease_id=lease.id, vnode=vnode)
- if reusable_image == None:
- # Not necessarily an error. Maybe the pool was full, and
- # we had to fall back on creating a tainted image right
- # when the image was transferred. We have to check this.
- if diskimage == None:
- raise Exception, "ERROR: Image for L%iV%i is not in pool on node %i, and there is no tainted image" % (lease.id, vnode, pnode_id)
- else:
- # Create tainted image
- self.resourcepool.add_diskimage(pnode_id, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
- # ENACTMENT
- # self.storage.createCopyFromCache(pnode, lease.diskImageSize)
-
- def cleanup(self, lease, vmrr):
+ def cleanup(self, lease):
for vnode, pnode in lease.diskimagemap.items():
self.resourcepool.remove_diskimage(pnode, lease.id, vnode)
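The image-cache fallback that the hunk above rewords reduces to the
following. The two resourcepool calls are the diff's own; pool_has_room is
a hypothetical stand-in for the real capacity check:

    if pool_has_room:
        # One cached, reusable image serves every (lease, vnode) mapped
        # to this physical node.
        self.resourcepool.add_reusable_image(pnode_id, diskimage_id,
                                             diskimage_size, vnodes, timeout)
    else:
        # Cache is full: fall back to a per-lease ("tainted") disk image
        # for each virtual node instead.
        for (lease_id, vnode) in vnodes:
            self.resourcepool.add_diskimage(pnode_id, diskimage_id,
                                            diskimage_size, lease_id, vnode)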
Modified: trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/unmanaged.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/unmanaged.py 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/src/haizea/resourcemanager/scheduler/preparation_schedulers/unmanaged.py 2009-02-03 22:57:47 UTC (rev 562)
@@ -29,9 +29,7 @@
def schedule(self, lease, vmrr, nexttime):
for (vnode, pnode) in vmrr.nodes.items():
self.resourcepool.add_diskimage(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
-
- def is_ready(self, lease):
- return True
+ return [], True
def find_earliest_starting_times(self, lease, nexttime):
nod_ids = [n.nod_id for n in self.resourcepool.get_nodes()]
@@ -40,24 +38,7 @@
def cancel_deployment(self, lease):
pass
-
- def check(self, lease, vmrr):
- # Check that all the required disk images are available,
- # and determine what their physical filenames are.
- # Note that it is the enactment module's responsibility to
- # mark an image as correctly deployed. The check we do here
- # is (1) to catch scheduling errors (i.e., the image transfer
- # was not scheduled).
-
- for (vnode, pnode) in vmrr.nodes.items():
- node = self.resourcepool.get_node(pnode)
-
- diskimage = node.get_diskimage(lease.id, vnode, lease.diskimage_id)
- if diskimage == None:
- raise Exception, "ERROR: No image for L%iV%i is on node %i" % (lease.id, vnode, pnode)
-
- return True
- def cleanup(self, lease, vmrr):
+ def cleanup(self, lease):
for vnode, pnode in lease.diskimagemap.items():
self.resourcepool.remove_diskimage(pnode, lease.id, vnode)
\ No newline at end of file
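Assembled from the hunks above, the unmanaged scheduler's entire contract
is now just this (class header and imports elided, as in the diff):

    def schedule(self, lease, vmrr, nexttime):
        # Images are assumed to already be in place, so record the
        # mappings and report the lease immediately ready, with no RRs.
        for (vnode, pnode) in vmrr.nodes.items():
            self.resourcepool.add_diskimage(pnode, lease.diskimage_id,
                                            lease.diskimage_size,
                                            lease.id, vnode)
        return [], True

    def cleanup(self, lease):
        # New hook (r562): drop this lease's disk images on completion.
        for vnode, pnode in lease.diskimagemap.items():
            self.resourcepool.remove_diskimage(pnode, lease.id, vnode)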
Modified: trunk/tests/migrate.lwf
===================================================================
--- trunk/tests/migrate.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/migrate.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -4,6 +4,6 @@
# one) to cold-migrate from nodes P3, P4 to nodes P1, P2
# (which become available earlier than expected due to the
# early end of the first AR lease)
-0 1800 3600 900 2 1 1024 0 foobar.img 1024
-0 0 3600 3600 2 1 1024 0 foobar.img 1024
-0 -1 3600 3600 2 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 1800 3600 900 2 1 1024 0 foobar1.img 1024
+0 90 3600 3600 2 1 1024 0 foobar2.img 1024
+0 -1 3600 3600 2 1 1024 0 foobar3.img 1024
\ No newline at end of file
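For readers unfamiliar with the .lwf trace format: the ten whitespace-
separated columns in these test files appear to be submit time, start time
(-1 for best-effort, otherwise an AR start), duration, real duration,
node count, CPUs, memory, disk, image name, and image size. That reading
is inferred from the values and comments in these tests, not from a format
spec, as is this hypothetical one-line reader:

    def parse_lwf_line(line):
        # Assumed column order; see the caveat above.
        f = line.split()
        return {"submit": int(f[0]),
                "start": int(f[1]),         # -1 = best-effort
                "duration": int(f[2]),
                "realduration": int(f[3]),  # < duration = premature end
                "numnodes": int(f[4]),
                "cpu": int(f[5]),
                "mem": int(f[6]),
                "disk": int(f[7]),
                "image": f[8],
                "imagesize": int(f[9])}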
Modified: trunk/tests/preemption.lwf
===================================================================
--- trunk/tests/preemption.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/preemption.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -2,5 +2,5 @@
# Description: A simple trace where an AR lease
# preempts a best-effort lease that is already
# running.
-0 -1 3600 3600 4 1 1024 0 foobar.img 1024
-0 1800 1800 1800 2 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 -1 3600 3600 4 1 1024 0 foobar1.img 1024
+0 1800 1800 1800 2 1 1024 0 foobar2.img 1024
\ No newline at end of file
Modified: trunk/tests/preemption_prematureend.lwf
===================================================================
--- trunk/tests/preemption_prematureend.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/preemption_prematureend.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -1,5 +1,5 @@
# Name: PREEMPT-PREMATUREEND
# Description: Same as PREEMPT, but with
# premature end time for the best-effort lease.
-0 -1 3600 2700 1 1 1024 0 foobar.img 1024
-900 1800 900 900 4 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 -1 3600 2700 1 1 1024 0 foobar1.img 1024
+900 1800 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
Modified: trunk/tests/preemption_prematureend2.lwf
===================================================================
--- trunk/tests/preemption_prematureend2.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/preemption_prematureend2.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -1,5 +1,5 @@
# Name: PREEMPT-PREMATUREEND2
# Description: Same as PREEMPT, but with
# premature end time for both leases.
-0 -1 3600 2700 1 1 1024 0 foobar.img 1024
-900 1800 900 600 4 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 -1 3600 2700 1 1 1024 0 foobar1.img 1024
+900 1800 900 600 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
Modified: trunk/tests/reservation.lwf
===================================================================
--- trunk/tests/reservation.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/reservation.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -3,5 +3,5 @@
# single-node lease, while the second one is a short wide lease
# using up all the nodes. With backfilling, the scheduler
# should make a future reservation for the second lease.
-0 -1 3600 3600 1 1 1024 0 foobar.img 1024
-900 -1 900 900 4 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 -1 3600 3600 1 1 1024 0 foobar1.img 1024
+900 -1 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
Modified: trunk/tests/reservation_prematureend.lwf
===================================================================
--- trunk/tests/reservation_prematureend.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/reservation_prematureend.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -4,5 +4,5 @@
# expected. The scheduler should reschedule the second lease
# (for which a reservation was made in the future)
# since it can now start earlier
-0 -1 3600 1800 1 1 1024 0 foobar.img 1024
-900 -1 900 900 4 1 1024 0 foobar.img 1024
\ No newline at end of file
+0 -1 3600 1800 1 1 1024 0 foobar1.img 1024
+900 -1 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
Modified: trunk/tests/wait.lwf
===================================================================
--- trunk/tests/wait.lwf 2009-02-03 01:55:26 UTC (rev 561)
+++ trunk/tests/wait.lwf 2009-02-03 22:57:47 UTC (rev 562)
@@ -1,11 +1,11 @@
# Name: WAIT
# Description: All best-effort requests, all of which (except the first)
# will have to wait in the queue before starting.
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
-#0 -1 900 900 4 1 1024 0 foobar.img 1024
+#0 -1 900 900 4 1 1024 0 foobar1.img 1024
+#0 -1 900 900 4 1 1024 0 foobar2.img 1024
+#0 -1 900 900 4 1 1024 0 foobar3.img 1024
+#0 -1 900 900 4 1 1024 0 foobar4.img 1024
+#0 -1 900 900 4 1 1024 0 foobar5.img 1024
+#0 -1 900 900 4 1 1024 0 foobar6.img 1024
+#0 -1 900 900 4 1 1024 0 foobar7.img 1024
+#0 -1 900 900 4 1 1024 0 foobar8.img 1024