[haizea-commit] r604 - in branches/TP2.0: src/haizea/cli src/haizea/core src/haizea/core/enact src/haizea/core/frontends src/haizea/core/scheduler tests
haizea-commit at mailman.cs.uchicago.edu
Tue Jul 14 10:27:41 CDT 2009
Author: borja
Date: 2009-07-14 10:27:35 -0500 (Tue, 14 Jul 2009)
New Revision: 604
Modified:
branches/TP2.0/src/haizea/cli/commands.py
branches/TP2.0/src/haizea/core/enact/simulated.py
branches/TP2.0/src/haizea/core/frontends/tracefile.py
branches/TP2.0/src/haizea/core/manager.py
branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
branches/TP2.0/src/haizea/core/scheduler/mapper.py
branches/TP2.0/src/haizea/core/scheduler/resourcepool.py
branches/TP2.0/src/haizea/core/scheduler/slottable.py
branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
branches/TP2.0/tests/base_config_simulator.conf
branches/TP2.0/tests/common.py
branches/TP2.0/tests/migrate.lwf
branches/TP2.0/tests/preemption.lwf
branches/TP2.0/tests/preemption_prematureend.lwf
branches/TP2.0/tests/preemption_prematureend2.lwf
branches/TP2.0/tests/reservation.lwf
branches/TP2.0/tests/reservation_prematureend.lwf
branches/TP2.0/tests/reuse1.lwf
branches/TP2.0/tests/reuse2.lwf
branches/TP2.0/tests/wait.lwf
Log:
Updated the test LWF files to the new XML format. Fixed many small bugs in the process of getting Haizea TP2.0 to pass the unit tests that rely on those files.
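As a rough guide to the format change, here is a minimal hypothetical sketch that maps one line of the old whitespace-delimited LWF format onto the new XML elements. It is not the converter in commands.py; the field meanings (submit time, start with -1 for best-effort, duration, real duration, node count, CPU with 1 scaling to 100, memory, an unused field, disk image and size) are inferred from the old/new file pairs in the diffs below.

import xml.etree.ElementTree as ET

def fmt(seconds):
    # Render seconds as the HH:MM:SS.00 strings used in the new files.
    s = int(seconds)
    return "%02d:%02d:%02d.00" % (s // 3600, (s % 3600) // 60, s % 60)

def old_line_to_xml(line, lease_id):
    f = line.split()
    submit, start, dur, realdur = int(f[0]), int(f[1]), int(f[2]), int(f[3])
    numnodes, cpu, mem = int(f[4]), int(f[5]), int(f[6])
    image, imagesize = f[8], f[9]
    req = ET.Element("lease-request", arrival=fmt(submit))
    if realdur != dur:
        ET.SubElement(req, "realduration", time=fmt(realdur))
    # Best-effort requests (start == -1) appear as preemptible leases with
    # an empty <start/>; AR requests carry an <exact> start time instead.
    lease = ET.SubElement(req, "lease", id=str(lease_id),
                          preemptible="true" if start == -1 else "false")
    nodes = ET.SubElement(lease, "nodes")
    ns = ET.SubElement(nodes, "node-set", numnodes=str(numnodes))
    ET.SubElement(ns, "res", amount=str(cpu * 100), type="CPU")
    ET.SubElement(ns, "res", amount=str(mem), type="Memory")
    start_el = ET.SubElement(lease, "start")
    if start != -1:
        ET.SubElement(start_el, "exact", time=fmt(start))
    ET.SubElement(lease, "duration", time=fmt(dur))
    software = ET.SubElement(lease, "software")
    ET.SubElement(software, "disk-image", id=image, size=imagesize)
    return req

print(ET.tostring(old_line_to_xml(
    "900 1800 1800 1800 4 1 1024 0 foobar2.img 1024", 2)))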
Modified: branches/TP2.0/src/haizea/cli/commands.py
===================================================================
--- branches/TP2.0/src/haizea/cli/commands.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/cli/commands.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -353,7 +353,7 @@
lease_request.set("arrival", str(TimeDelta(seconds=submit_time)))
if real_duration != duration:
realduration = ET.SubElement(lease_request, "realduration")
- realduration.set("time", str(TimeDelta(seconds=realduration)))
+ realduration.set("time", str(TimeDelta(seconds=real_duration)))
lease = ET.SubElement(lease_request, "lease")
lease.set("id", `id`)
Modified: branches/TP2.0/src/haizea/core/enact/simulated.py
===================================================================
--- branches/TP2.0/src/haizea/core/enact/simulated.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/enact/simulated.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -40,13 +40,13 @@
# Disk and network should be specified but, if not, we can
# just add arbitrarily large values.
if not "Disk" in site.resource_types:
- site.add_resource("Disk", 1000000)
+ site.add_resource("Disk", [1000000])
if not "Net-in" in site.resource_types:
- site.add_resource("Net-in", 1000000)
+ site.add_resource("Net-in", [1000000])
if not "Net-out" in site.resource_types:
- site.add_resource("Net-out", 1000000)
+ site.add_resource("Net-out", [1000000])
nodes = site.nodes.get_all_nodes()
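The bracketed values above suggest that Site.add_resource now takes a list of per-instance capacities rather than a scalar. A toy stand-in, purely to illustrate that calling convention under that assumption:

class Site(object):
    def __init__(self):
        self.resource_types = []
        self._capacity = {}

    def add_resource(self, name, quantities):
        # 'quantities' carries one entry per instance of the resource;
        # single-instance resources such as Disk pass a one-element list.
        self.resource_types.append(name)
        self._capacity[name] = list(quantities)

site = Site()
if not "Disk" in site.resource_types:
    site.add_resource("Disk", [1000000])  # one arbitrarily large instance
print(site._capacity["Disk"])             # [1000000]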
Modified: branches/TP2.0/src/haizea/core/frontends/tracefile.py
===================================================================
--- branches/TP2.0/src/haizea/core/frontends/tracefile.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/frontends/tracefile.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -40,7 +40,7 @@
self.requests = None
if tracefile.endswith(".swf"):
self.requests = tracereaders.SWF(tracefile, config)
- elif tracefile.endswith(".lwf"):
+ elif tracefile.endswith(".lwf") or tracefile.endswith(".xml"):
lease_workload = LeaseWorkload(starttime)
lease_workload.from_xml_file(tracefile)
self.requests = lease_workload.get_leases()
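Sketch of the dispatch above with the reader classes stubbed out: workloads named *.lwf or *.xml are both parsed as LeaseWorkload XML documents now.

def pick_reader(tracefile):
    # str.endswith accepts a tuple of suffixes, which would keep the
    # .lwf-or-.xml test in one place as formats accumulate.
    if tracefile.endswith(".swf"):
        return "SWF trace"
    elif tracefile.endswith((".lwf", ".xml")):
        return "LWF/XML lease workload"
    raise ValueError("unsupported trace format: %s" % tracefile)

print(pick_reader("wait.lwf"), "|", pick_reader("wait.xml"))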
Modified: branches/TP2.0/src/haizea/core/manager.py
===================================================================
--- branches/TP2.0/src/haizea/core/manager.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/manager.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -123,7 +123,7 @@
self.rpc_server = RPCServer(self)
resources = self.config.get("simul.resources")
-
+ print resources
if resources == "in-tracefile":
tracefile = self.config.get("tracefile")
site = Site.from_lwf_file(tracefile)
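Site.from_lwf_file itself is not shown in this diff. A minimal sketch of what resources = "in-tracefile" implies, pulling the <site> element out of a workload document shaped like the ones added below (toy parser, not Haizea's):

import xml.etree.ElementTree as ET

LWF = """<lease-workload name="example">
  <site>
    <resource-types names="CPU Memory"/>
    <nodes>
      <node-set numnodes="4">
        <res type="CPU" amount="100"/>
        <res type="Memory" amount="1024"/>
      </node-set>
    </nodes>
  </site>
</lease-workload>"""

def site_from_lwf_string(text):
    # The site description travels with the workload, so the simulator no
    # longer needs a separate nodes/resources pair in the config file.
    return ET.fromstring(text).find("site")

site = site_from_lwf_string(LWF)
print(site.find("nodes/node-set").get("numnodes"))  # 4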
Modified: branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/scheduler/lease_scheduler.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -379,8 +379,6 @@
res = list(self.vm_scheduler.future_reservations) # TODO: get through a function
for l in res:
vmrr = l.get_last_vmrr()
- # TODO: Check if it's scheduled on any of the nodes that
- # got freed up
self.vm_scheduler.cancel_vm(vmrr)
l.remove_vmrr(vmrr)
# TODO: This earliest is sure to change to something else
@@ -414,7 +412,7 @@
done = False
newqueue = Queue(self)
while not done and not self.is_queue_empty():
- if not self.vm_scheduler.can_reserve_in_future() and self.slottable.isFull(nexttime):
+ if not self.vm_scheduler.can_reserve_in_future() and self.slottable.is_full(nexttime, restype = constants.RES_CPU):
self.logger.debug("Used up all future reservations and slot table is full. Skipping rest of queue.")
done = True
else:
@@ -457,7 +455,7 @@
# This is a lease that is being rescheduled.
# TODO: The following is not really what has to be done
earliest = self.preparation_scheduler.find_earliest_starting_times(lease, nexttime)
- elif lease_state == Lease.STATE_SUSPENDED_QUEUED:
+ elif lease_state == Lease.STATE_SUSPENDED_QUEUED or lease_state == Lease.STATE_SUSPENDED_SCHEDULED:
# No need to transfer images from repository
# (only intra-node transfer)
migr_time = self.vm_scheduler.estimate_migration_time(lease)
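Both lease_scheduler.py hunks above widen a check from one suspended state to two; vm_scheduler.py below does the same with a tuple-membership test. A self-contained sketch of that pattern (stub class, made-up state values):

class Lease(object):
    # Stub constants; the real ones live in haizea.core.leases.
    STATE_SUSPENDED_QUEUED = 7
    STATE_SUSPENDED_SCHEDULED = 8

    def __init__(self, state):
        self._state = state

    def get_state(self):
        return self._state

SUSPENDED = (Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED)

# A suspended-scheduled lease now also takes the migration-estimate path:
lease = Lease(Lease.STATE_SUSPENDED_SCHEDULED)
print(lease.get_state() in SUSPENDED)  # True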
Modified: branches/TP2.0/src/haizea/core/scheduler/mapper.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/mapper.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/scheduler/mapper.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -48,7 +48,6 @@
leases = self.policy.sort_leases(lease, leases)
preemptable_leases = leases
preempting = []
-
done = False
while not done:
vnodes_pos = 0
Modified: branches/TP2.0/src/haizea/core/scheduler/resourcepool.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/resourcepool.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/scheduler/resourcepool.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -66,7 +66,7 @@
# Add memory image files
for vnode in rr.vnodes:
pnode = rr.vmrr.nodes[vnode]
- self.add_ramfile(pnode, lease.id, vnode, lease.requested_resources[vnode].get_by_type(constants.RES_MEM))
+ self.add_ramfile(pnode, lease.id, vnode, lease.requested_resources[vnode].get_quantity(constants.RES_MEM))
# Enact suspend
suspend_action = actions.VMEnactmentSuspendAction()
Modified: branches/TP2.0/src/haizea/core/scheduler/slottable.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/slottable.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/scheduler/slottable.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -109,7 +109,8 @@
r=""
for i, x in enumerate(self._res):
r += "%s:%i " % (i, x)
- r+= `self.multiinst`
+ if self.slottable.has_multiinst:
+ r+= `self.multiinst`
return r
def __eq__(self, res2):
@@ -263,7 +264,7 @@
def is_empty(self):
return (len(self.reservations_by_start) == 0)
- def is_full(self, time, restype = constants.RES_CPU):
+ def is_full(self, time, restype):
nodes = self.get_availability(time)
avail = sum([node.capacity.get_by_type(restype) for node in nodes.values()])
return (avail == 0)
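is_full now requires the caller to name the resource type that "full" refers to, as the lease_scheduler.py hunk above does with constants.RES_CPU. A toy version of the check, summing one type's availability across nodes:

RES_CPU, RES_MEM = "CPU", "Memory"  # stand-ins for haizea.common.constants

def is_full(availability, restype):
    # 'availability' maps node -> {restype: amount free at time t}.
    return sum(node[restype] for node in availability.values()) == 0

nodes = {1: {RES_CPU: 0, RES_MEM: 512}, 2: {RES_CPU: 0, RES_MEM: 0}}
print(is_full(nodes, RES_CPU))  # True: no CPU left anywhere
print(is_full(nodes, RES_MEM))  # False: node 1 still has memory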
@@ -583,8 +584,9 @@
self.nodes[node] = ChangepointNodeAvail(capacity)
class ChangepointNodeAvail(object):
- def __init__(self, available):
- self.available = ResourceTuple.copy(available)
+ def __init__(self, capacity):
+ self.capacity = capacity
+ self.available = ResourceTuple.copy(capacity)
self.leases = set()
self.available_if_preempting = {}
self.next_cp = None
@@ -601,9 +603,11 @@
self.available_if_preempting[lease].incr(capacity)
def get_avail_withpreemption(self, leases):
- avail = ResourceTuple.copy(available)
- for l in leases:
- avail.decr(self.available_if_preempting[lease])
+ avail = ResourceTuple.copy(self.capacity)
+ for l in self.available_if_preempting:
+ if not l in leases:
+ avail.decr(self.available_if_preempting[l])
+ return avail
class AvailEntry(object):
def __init__(self, available, until):
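The corrected get_avail_withpreemption starts from the node's full capacity and subtracts what every lease outside the preempted set still holds. The same arithmetic with plain numbers standing in for ResourceTuple:

def avail_with_preemption(capacity, held, preempted):
    # 'held' maps lease -> amount it holds on this node; leases in
    # 'preempted' are assumed gone, so only the others reduce availability.
    avail = capacity
    for lease, amount in held.items():
        if lease not in preempted:
            avail -= amount
    return avail

# Capacity 100; leases A and B hold 60 and 40. Preempting A frees its 60,
# leaving B's 40 in use, so 60 is available.
print(avail_with_preemption(100, {"A": 60, "B": 40}, {"A"}))  # 60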
@@ -616,10 +620,6 @@
def fits(self, capacity, until):
for avail in self.avail_list:
- print capacity
- print avail.until, avail.available
- print capacity.fits_in(avail.available)
- print "----------"
if avail.until == None or avail.until >= until:
return capacity.fits_in(avail.available)
@@ -686,7 +686,8 @@
pos2 = pos + 1
-
+ print rr.start, rr.end
+ print self.cp_list, pos
while self.cp_list[pos2] < rr.end:
cp = self.changepoints[self.cp_list[pos2]]
cp.leases.add(lease)
@@ -715,16 +716,15 @@
def get_availability_at_node(self, time, node, preempted_leases = []):
avails = []
-
node = self.changepoints[time].nodes[node]
prev_avail = None
prev_node = None
while node != None:
- available = ResourceTuple.copy(node.available)
- for l in preempted_leases:
- if node.available_if_preempting.has_key(l):
- available.incr(node.available_if_preempting[l])
+ if len(preempted_leases) == 0:
+ available = ResourceTuple.copy(node.available)
+ else:
+ available = node.get_avail_withpreemption(preempted_leases)
if prev_avail != None and available.fits_in(prev_avail.available):
availentry = AvailEntry(available, None)
Modified: branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/src/haizea/core/scheduler/vm_scheduler.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -18,7 +18,7 @@
import haizea.common.constants as constants
from haizea.common.utils import round_datetime_delta, round_datetime, estimate_transfer_time, pretty_nodemap, get_config, get_clock, get_policy
-from haizea.core.leases import Lease
+from haizea.core.leases import Lease, Capacity
from haizea.core.scheduler.slottable import ResourceReservation, ResourceTuple
from haizea.core.scheduler import ReservationEventHandler, RescheduleLeaseException, NormalEndLeaseException, EnactmentError, NotSchedulableException, InconsistentScheduleError, InconsistentLeaseStateError
from haizea.core.scheduler.mapper import GreedyMapper
@@ -123,7 +123,7 @@
def __schedule_asap(self, lease, nexttime, earliest, allow_reservation_in_future = None):
lease_id = lease.id
remaining_duration = lease.duration.get_remaining_duration()
- mustresume = (lease.get_state() == Lease.STATE_SUSPENDED_QUEUED)
+ mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
shutdown_time = self.__estimate_shutdown_time(lease)
if allow_reservation_in_future == None:
@@ -318,12 +318,12 @@
end = start + migr_time
res = {}
for (origin,dest) in m.values():
- resorigin = ResourceTuple.create_empty()
- resorigin.set_by_type(constants.RES_NETOUT, bandwidth)
- resdest = ResourceTuple.create_empty()
- resdest.set_by_type(constants.RES_NETIN, bandwidth)
- res[origin] = resorigin
- res[dest] = resdest
+ resorigin = Capacity([constants.RES_NETOUT])
+ resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
+ resdest = Capacity([constants.RES_NETIN])
+ resdest.set_quantity(constants.RES_NETIN, bandwidth)
+ res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
+ res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)
migr_rr = MigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
migr_rr.state = ResourceReservation.STATE_SCHEDULED
migr_rrs.append(migr_rr)
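The migration hunk above, and the suspension/resumption hunks further down, all follow the same pattern: build a Capacity naming only the resource types the reservation consumes, then let the slot table translate it into its internal tuple layout. A toy model of that conversion (our own Capacity/SlotTable stand-ins, not Haizea's classes):

class Capacity(object):
    def __init__(self, types):
        self._quantity = dict((t, 0) for t in types)

    def set_quantity(self, restype, amount):
        self._quantity[restype] = amount

    def get_quantity(self, restype):
        return self._quantity.get(restype, 0)

class SlotTable(object):
    def __init__(self, types):
        self.types = types  # canonical ordering of all site resource types

    def create_resource_tuple_from_capacity(self, capacity):
        # Types the Capacity does not name default to zero, so a migration
        # reservation can name just one network direction.
        return [capacity.get_quantity(t) for t in self.types]

RES_NETIN, RES_NETOUT = "Net-in", "Net-out"
slottable = SlotTable(["CPU", "Memory", RES_NETIN, RES_NETOUT])
resorigin = Capacity([RES_NETOUT])
resorigin.set_quantity(RES_NETOUT, 100)  # migration bandwidth on the source
print(slottable.create_resource_tuple_from_capacity(resorigin))
# [0, 0, 0, 100]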
@@ -341,7 +341,7 @@
# If there are any pre-RRs that are scheduled, remove them
for rr in vmrr.pre_rrs:
if rr.state == ResourceReservation.STATE_SCHEDULED:
- self.slottable.removeReservation(rr)
+ self.slottable.remove_reservation(rr)
# If there are any post RRs, remove them
for rr in vmrr.post_rrs:
@@ -470,7 +470,7 @@
t_prev = None
for vnode in vnodes_in_pnode[pnode]:
if override == None:
- mem = vmrr.lease.requested_resources[vnode].get_by_type(constants.RES_MEM)
+ mem = vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
op_time = self.__compute_suspend_resume_time(mem, rate)
else:
op_time = override
@@ -566,14 +566,15 @@
all_vnodes = []
for (pnode,vnodes) in node_mappings.items():
num_vnodes = len(vnodes)
- r = ResourceTuple.create_empty()
+ r = Capacity([constants.RES_MEM,constants.RES_DISK])
mem = 0
for vnode in vnodes:
- mem += vmrr.lease.requested_resources[vnode].get_by_type(constants.RES_MEM)
- r.set_by_type(constants.RES_MEM, mem * num_vnodes)
- r.set_by_type(constants.RES_DISK, mem * num_vnodes)
- suspres[pnode] = r
- all_vnodes += vnodes
+ mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+ r.set_quantity(constants.RES_MEM, mem * num_vnodes)
+ r.set_quantity(constants.RES_DISK, mem * num_vnodes)
+ suspres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)
+ all_vnodes += vnodes
+
susprr = SuspensionResourceReservation(vmrr.lease, start, end, suspres, all_vnodes, vmrr)
susprr.state = ResourceReservation.STATE_SCHEDULED
suspend_rrs.append(susprr)
@@ -610,13 +611,13 @@
all_vnodes = []
for (pnode,vnodes) in node_mappings.items():
num_vnodes = len(vnodes)
- r = ResourceTuple.create_empty()
+ r = Capacity([constants.RES_MEM,constants.RES_DISK])
mem = 0
for vnode in vnodes:
- mem += vmrr.lease.requested_resources[vnode].get_by_type(constants.RES_MEM)
- r.set_by_type(constants.RES_MEM, mem * num_vnodes)
- r.set_by_type(constants.RES_DISK, mem * num_vnodes)
- resmres[pnode] = r
+ mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+ r.set_quantity(constants.RES_MEM, mem * num_vnodes)
+ r.set_quantity(constants.RES_DISK, mem * num_vnodes)
+ resmres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)
all_vnodes += vnodes
resmrr = ResumptionResourceReservation(vmrr.lease, start, end, resmres, all_vnodes, vmrr)
resmrr.state = ResourceReservation.STATE_SCHEDULED
@@ -680,7 +681,7 @@
else:
bandwidth = self.resourcepool.info.get_migration_bandwidth()
if whattomigrate == constants.MIGRATE_MEM:
- mbtotransfer = lease.requested_resources.get_by_type(constants.RES_MEM)
+ mbtotransfer = lease.requested_resources.get_quantity(constants.RES_MEM)
elif whattomigrate == constants.MIGRATE_MEMDISK:
mbtotransfer = lease.diskimage_size + lease.requested_resources.get_by_type(constants.RES_MEM)
return estimate_transfer_time(mbtotransfer, bandwidth)
@@ -904,15 +905,17 @@
origin = rr.transfers[vnode][0]
dest = rr.transfers[vnode][1]
+ # Commenting for now
# Update VM image mappings
- self.resourcepool.remove_diskimage(origin, l.id, vnode)
- self.resourcepool.add_diskimage(dest, l.diskimage_id, l.diskimage_size, l.id, vnode)
- l.diskimagemap[vnode] = dest
+ #self.resourcepool.remove_diskimage(origin, l.id, vnode)
+ #self.resourcepool.add_diskimage(dest, l.diskimage_id, l.diskimage_size, l.id, vnode)
+ #l.diskimagemap[vnode] = dest
+ # Commenting for now
# Update RAM file mappings
- self.resourcepool.remove_ramfile(origin, l.id, vnode)
- self.resourcepool.add_ramfile(dest, l.id, vnode, l.requested_resources.get_by_type(constants.RES_MEM))
- l.memimagemap[vnode] = dest
+ #self.resourcepool.remove_ramfile(origin, l.id, vnode)
+ #self.resourcepool.add_ramfile(dest, l.id, vnode, l.requested_resources.get_by_type(constants.RES_MEM))
+ #l.memimagemap[vnode] = dest
rr.state = ResourceReservation.STATE_DONE
l.print_contents()
Modified: branches/TP2.0/tests/base_config_simulator.conf
===================================================================
--- branches/TP2.0/tests/base_config_simulator.conf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/base_config_simulator.conf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,5 +1,5 @@
[general]
-loglevel: VDEBUG
+loglevel: STATUS
mode: simulated
lease-preparation: unmanaged
datafile: /var/tmp/haizea/results.dat
@@ -8,8 +8,7 @@
[simulation]
clock: simulated
starttime: 2006-11-25 13:00:00
-nodes: 2
-resources: CPU,2;Mem,2048;Net (in),100;Net (out),100;Disk,20000
+resources: in-tracefile
imagetransfer-bandwidth: 100
#status-message-interval: 15
@@ -25,7 +24,7 @@
suspend-rate: 32
resume-rate: 32
migration: True
-what-to-migrate: mem+disk
+what-to-migrate: nothing
[deploy-imagetransfer]
Modified: branches/TP2.0/tests/common.py
===================================================================
--- branches/TP2.0/tests/common.py 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/common.py 2009-07-14 15:27:35 UTC (rev 604)
@@ -28,47 +28,47 @@
def test_preemption(self):
self.set_tracefile("preemption.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_preemption_prematureend(self):
self.set_tracefile("preemption_prematureend.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_preemption_prematureend2(self):
self.set_tracefile("preemption_prematureend2.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_reservation(self):
self.set_tracefile("reservation.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_reservation_prematureend(self):
self.set_tracefile("reservation_prematureend.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_migrate(self):
self.set_tracefile("migrate.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_reuse1(self):
self.set_tracefile("reuse1.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_reuse2(self):
self.set_tracefile("reuse2.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
def test_wait(self):
self.set_tracefile("wait.lwf")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
@@ -78,7 +78,7 @@
def do_test(self, db):
shutil.copyfile(db, "one.db")
- haizea = Haizea(HaizeaConfig(self.config))
+ haizea = Manager(HaizeaConfig(self.config))
haizea.start()
os.remove("one.db")
@@ -88,7 +88,7 @@
self.haizea_thread = None
def start(self):
- self.haizea = Haizea(HaizeaConfig(self.config))
+ self.haizea = Manager(HaizeaConfig(self.config))
self.haizea_thread = threading.Thread(target=self.haizea.start)
self.haizea_thread.start()
Modified: branches/TP2.0/tests/migrate.lwf
===================================================================
--- branches/TP2.0/tests/migrate.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/migrate.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,9 +1,72 @@
-# Name: MIGRATE
-# Description: Slightly convoluted combination of requests with
-# the ultimate purpose of forcing one of the leases (the third
-# one) to cold-migrate from nodes P3, P4 to nodes P1, P2
-# (which become available earlier than expected due to the
-# early end of the first AR lease)
-0 1800 3600 900 2 1 1024 0 foobar1.img 1024
-0 90 3600 3600 2 1 1024 0 foobar2.img 1024
-0 -1 3600 3600 2 1 1024 0 foobar3.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="migrate.lwf">
+ <description>
+ Slightly convoluted combination of requests with
+ the ultimate purpose of forcing one of the leases (the third
+ one) to cold-migrate from nodes P3, P4 to nodes P1, P2
+ (which become available earlier than expected due to the
+ early end of the first AR lease)
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="false">
+ <nodes>
+ <node-set numnodes="2">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:01:30.00"/>
+ </start>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="2" preemptible="false">
+ <nodes>
+ <node-set numnodes="2">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:30:00.00"/>
+ </start>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="3" preemptible="true">
+ <nodes>
+ <node-set numnodes="2">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar3.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/preemption.lwf
===================================================================
--- branches/TP2.0/tests/preemption.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/preemption.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,6 +1,52 @@
-# Name: PREEMPT
-# Description: A simple trace where an AR lease
-# preempts a best-effort lease that is already
-# running.
-0 -1 3600 3600 1 1 1024 0 foobar1.img 1024
-900 1800 1800 1800 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption">
+ <description>
+ A simple trace where an AR lease preempts a
+ best-effort lease that is already running.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:15:00.00">
+ <lease id="2" preemptible="false">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:30:00.00"/>
+ </start>
+ <duration time="00:30:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/preemption_prematureend.lwf
===================================================================
--- branches/TP2.0/tests/preemption_prematureend.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/preemption_prematureend.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,5 +1,52 @@
-# Name: PREEMPT-PREMATUREEND
-# Description: Same as PREEMPT, but with
-# premature end time for the best-effort lease.
-0 -1 3600 2700 1 1 1024 0 foobar1.img 1024
-900 1800 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption_prematureend">
+ <description>
+ Same as preempt.lwf, but with premature end time for the best-effort lease.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <realduration time="00:45:00.00"/>
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:15:00.00">
+ <lease id="2" preemptible="false">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:30:00.00"/>
+ </start>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/preemption_prematureend2.lwf
===================================================================
--- branches/TP2.0/tests/preemption_prematureend2.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/preemption_prematureend2.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,5 +1,53 @@
-# Name: PREEMPT-PREMATUREEND2
-# Description: Same as PREEMPT, but with
-# premature end time for both leases.
-0 -1 3600 2700 1 1 1024 0 foobar1.img 1024
-900 1800 900 600 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption_prematureend2">
+ <description>
+ Same as preempt.lwf, but with premature end time for both leases.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <realduration time="00:45:00.00"/>
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:15:00.00">
+ <realduration time="00:10:00.00"/>
+ <lease id="2" preemptible="false">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:30:00.00"/>
+ </start>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/reservation.lwf
===================================================================
--- branches/TP2.0/tests/reservation.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/reservation.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,7 +1,52 @@
-# Name: RESERVATION
-# Description: Two best-effort leases. The first one is a long
-# single-node lease, while the second one is a short wide lease
-# using up all the nodes. With backfilling, the scheduler
-# should make a future reservation for the second lease.
-0 -1 3600 3600 1 1 1024 0 foobar1.img 1024
-900 -1 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reservation">
+ <description>
+ Two best-effort leases. The first one is a long
+ single-node lease, while the second one is a short wide lease
+ using up all the nodes. With backfilling, the scheduler
+ should make a future reservation for the second lease.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:15:00.00">
+ <lease id="2" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/reservation_prematureend.lwf
===================================================================
--- branches/TP2.0/tests/reservation_prematureend.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/reservation_prematureend.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,8 +1,54 @@
-# Name: RESERVATION-PREMATUREEND
-# Description: Same as RESERVATION. However, the first lease
-# ends prematurely, freeing up resources earlier than
-# expected. The scheduler should reschedule the second lease
-# (for which a reservation was made in the future)
-# since it can now start earlier
-0 -1 3600 1800 1 1 1024 0 foobar1.img 1024
-900 -1 900 900 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reservation_prematureend">
+ <description>
+ Same as RESERVATION. However, the first lease
+ ends prematurely, freeing up resources earlier than
+ expected. The scheduler should reschedule the second lease
+ (for which a reservation was made in the future)
+ since it can now start earlier
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <realduration time="00:30:00.00"/>
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="01:00:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:15:00.00">
+ <lease id="2" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/reuse1.lwf
===================================================================
--- branches/TP2.0/tests/reuse1.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/reuse1.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,7 +1,54 @@
-# Name: REUSE-1
-# Description: Two lease requests, both requiring the same
-# disk image. The second (best-effort) should be able to avoid
-# doing an image transfer by reusing the cached image from the
-# first (AR) request.
-0 900 900 900 1 1 1024 0 foobar.img 1024
-2700 -1 900 900 1 1 1024 0 foobar.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reuse1">
+ <description>
+ Two lease requests, both requiring the same
+ disk image. The second (best-effort) should be able to avoid
+ doing an image transfer by reusing the cached image from the
+ first (AR) request.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="false">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:15:00.00"/>
+ </start>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:45:00.00">
+ <lease id="2" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/reuse2.lwf
===================================================================
--- branches/TP2.0/tests/reuse2.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/reuse2.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,10 +1,73 @@
-# Name: REUSE-2
-# Description: The first two lease requests are AR leases happening
-# at the same time, but with different images. The third one is
-# a best-effort one, using the image from the second AR request
-# (which should be scheduled on nodes P3, P4). The scheduler should
-# prefer scheduling the best-effort lease on P3, P4 since it
-# avoids an image transfer.
-0 900 900 900 2 1 1024 0 foobar1.img 1024
-0 900 900 900 2 1 1024 0 foobar2.img 1024
-2700 -1 900 900 1 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reuse2">
+ <description>
+ The first two lease requests are AR leases happening
+ at the same time, but with different images. The third one is
+ a best-effort one, using the image from the second AR request
+ (which should be scheduled on nodes P3, P4). The scheduler should
+ prefer scheduling the best-effort lease on P3, P4 since it
+ avoids an image transfer.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="false">
+ <nodes>
+ <node-set numnodes="2">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:15:00.00"/>
+ </start>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="2" preemptible="false">
+ <nodes>
+ <node-set numnodes="2">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start>
+ <exact time="00:15:00.00"/>
+ </start>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:45:00.00">
+ <lease id="3" preemptible="true">
+ <nodes>
+ <node-set numnodes="1">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>
Modified: branches/TP2.0/tests/wait.lwf
===================================================================
--- branches/TP2.0/tests/wait.lwf 2009-07-13 16:35:44 UTC (rev 603)
+++ branches/TP2.0/tests/wait.lwf 2009-07-14 15:27:35 UTC (rev 604)
@@ -1,11 +1,140 @@
-# Name: WAIT
-# Description: All best-effort requests, all of which (except the first)
-# will have to wait in the queue before starting.
-#0 -1 900 900 4 1 1024 0 foobar1.img 1024
-#0 -1 900 900 4 1 1024 0 foobar2.img 1024
-#0 -1 900 900 4 1 1024 0 foobar3.img 1024
-#0 -1 900 900 4 1 1024 0 foobar4.img 1024
-#0 -1 900 900 4 1 1024 0 foobar5.img 1024
-#0 -1 900 900 4 1 1024 0 foobar6.img 1024
-#0 -1 900 900 4 1 1024 0 foobar7.img 1024
-#0 -1 900 900 4 1 1024 0 foobar8.img 1024
+<?xml version="1.0"?>
+<lease-workload name="wait">
+ <description>
+ All best-effort requests, all of which (except the first)
+ will have to wait in the queue before starting.
+ </description>
+
+ <site>
+ <resource-types names="CPU Memory"/>
+ <nodes>
+ <node-set numnodes="4">
+ <res type="CPU" amount="100"/>
+ <res type="Memory" amount="1024"/>
+ </node-set>
+ </nodes>
+ </site>
+
+ <lease-requests>
+ <lease-request arrival="00:00:00.00">
+ <lease id="1" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar1.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="2" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar2.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="3" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar3.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="4" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar4.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="5" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar5.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="6" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar6.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="7" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar7.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ <lease-request arrival="00:00:00.00">
+ <lease id="8" preemptible="true">
+ <nodes>
+ <node-set numnodes="4">
+ <res amount="100" type="CPU"/>
+ <res amount="1024" type="Memory"/>
+ </node-set>
+ </nodes>
+ <start/>
+ <duration time="00:15:00.00"/>
+ <software>
+ <disk-image id="foobar8.img" size="1024"/>
+ </software>
+ </lease>
+ </lease-request>
+ </lease-requests>
+</lease-workload>