[haizea-commit] r632 - in trunk: bin doc/manual etc src/haizea src/haizea/cli src/haizea/common src/haizea/core src/haizea/core/enact src/haizea/core/frontends src/haizea/core/scheduler src/haizea/core/scheduler/preparation_schedulers src/haizea/policies tests traces/multi traces/undocumented

haizea-commit at mailman.cs.uchicago.edu
Wed Aug 5 06:12:04 CDT 2009


Author: borja
Date: 2009-08-05 06:12:04 -0500 (Wed, 05 Aug 2009)
New Revision: 632

Added:
   trunk/bin/haizea-lwf2xml
   trunk/src/haizea/common/opennebula_xmlrpc.py
   trunk/src/haizea/core/
   trunk/src/haizea/core/__init__.py
   trunk/src/haizea/core/accounting.py
   trunk/src/haizea/core/configfile.py
   trunk/src/haizea/core/enact/
   trunk/src/haizea/core/enact/__init__.py
   trunk/src/haizea/core/enact/actions.py
   trunk/src/haizea/core/enact/opennebula.py
   trunk/src/haizea/core/enact/simulated.py
   trunk/src/haizea/core/frontends/
   trunk/src/haizea/core/frontends/__init__.py
   trunk/src/haizea/core/frontends/opennebula.py
   trunk/src/haizea/core/frontends/rpc.py
   trunk/src/haizea/core/frontends/tracefile.py
   trunk/src/haizea/core/leases.py
   trunk/src/haizea/core/log.py
   trunk/src/haizea/core/manager.py
   trunk/src/haizea/core/rpcserver.py
   trunk/src/haizea/core/scheduler/
   trunk/src/haizea/core/scheduler/__init__.py
   trunk/src/haizea/core/scheduler/lease_scheduler.py
   trunk/src/haizea/core/scheduler/mapper.py
   trunk/src/haizea/core/scheduler/policy.py
   trunk/src/haizea/core/scheduler/preparation_schedulers/
   trunk/src/haizea/core/scheduler/preparation_schedulers/__init__.py
   trunk/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
   trunk/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py
   trunk/src/haizea/core/scheduler/resourcepool.py
   trunk/src/haizea/core/scheduler/slottable.py
   trunk/src/haizea/core/scheduler/vm_scheduler.py
   trunk/src/haizea/policies/
   trunk/src/haizea/policies/__init__.py
   trunk/src/haizea/policies/admission.py
   trunk/src/haizea/policies/host_selection.py
   trunk/src/haizea/policies/preemption.py
   trunk/tests/sample_slottables.py
   trunk/tests/test_mapper.py
   trunk/tests/test_slottable.py
   trunk/traces/undocumented/generators.py
Removed:
   trunk/src/haizea/resourcemanager/
   trunk/src/haizea/traces/
   trunk/tests/test_opennebula.py
Modified:
   trunk/doc/manual/gen_config_doc.py
   trunk/doc/manual/install.tex
   trunk/doc/manual/manual.tex
   trunk/doc/manual/opennebula.tex
   trunk/doc/manual/quickstart.tex
   trunk/doc/manual/simulation.tex
   trunk/doc/manual/title.tex
   trunk/etc/sample_multi.conf
   trunk/src/haizea/cli/commands.py
   trunk/src/haizea/cli/rpc_commands.py
   trunk/src/haizea/common/constants.py
   trunk/src/haizea/common/defaults.py
   trunk/src/haizea/common/utils.py
   trunk/tests/base_config_simulator.conf
   trunk/tests/common.py
   trunk/tests/migrate.lwf
   trunk/tests/preemption.lwf
   trunk/tests/preemption_prematureend.lwf
   trunk/tests/preemption_prematureend2.lwf
   trunk/tests/reservation.lwf
   trunk/tests/reservation_prematureend.lwf
   trunk/tests/reuse1.lwf
   trunk/tests/reuse2.lwf
   trunk/tests/test_xmlrpc.py
   trunk/tests/wait.lwf
   trunk/traces/multi/inj1.lwf
   trunk/traces/multi/inj2.lwf
   trunk/traces/multi/withoutprematureend.lwf
   trunk/traces/multi/withprematureend.lwf
Log:
Merged TP2.0/0.9 branch into trunk.


Added: trunk/bin/haizea-lwf2xml
===================================================================
--- trunk/bin/haizea-lwf2xml	                        (rev 0)
+++ trunk/bin/haizea-lwf2xml	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,7 @@
+#!/usr/bin/python
+
+from haizea.cli import commands
+import sys
+	
+c = commands.haizea_lwf2xml(sys.argv)
+c.run()
\ No newline at end of file
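
For reference, this new script is a thin wrapper around the haizea_lwf2xml
command added to src/haizea/cli/commands.py further down in this commit. Going
by the options that command defines (-i/--in and -o/--out), a typical
invocation would look roughly like the following; the file names are
hypothetical, and note that run() currently prints the converted XML to
standard output (the -o option is parsed but not yet used), so the result is
redirected to a file instead:

    haizea-lwf2xml -i old_workload.lwf > new_workload.lwf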

Modified: trunk/doc/manual/gen_config_doc.py
===================================================================
--- trunk/doc/manual/gen_config_doc.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/gen_config_doc.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,4 +1,4 @@
-from haizea.resourcemanager.configfile import HaizeaConfig
+from haizea.core.configfile import HaizeaConfig
 from haizea.common.config import OPTTYPE_INT, OPTTYPE_FLOAT, OPTTYPE_STRING, OPTTYPE_BOOLEAN, OPTTYPE_DATETIME, OPTTYPE_TIMEDELTA 
 from docutils.core import publish_string
 import re

Modified: trunk/doc/manual/install.tex
===================================================================
--- trunk/doc/manual/install.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/install.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -9,12 +9,11 @@
 \begin{itemize}
 \item Python 2.5. (\url{http://www.python.org/})
 \item mxDateTime 3.1.0 (\url{http://www.egenix.com/products/python/mxBase/mxDateTime/}), part of the eGenix.com mx Base Distribution).
-\item Optional: pysqlite (\url{http://oss.itsystementwicklung.de/trac/pysqlite/}). This package is only necessary if you want to use the OpenNebula modules.
 \item Optional: Mako Templates for Python 0.2.2 (\url{http://www.makotemplates.org/}). This package is only necessary if you want to automate running multiple simulation experiments (if this doesn't make any sense, you can skip this prerequisite for now; you will be pointed to this prerequisite again in the documentation when you get to running multiple experiments).
 \item Optional: Psyco 1.6 (\url{http://psyco.sourceforge.net/}). This package optimises the execution of Python code, resulting in the simulation code running much faster. You can skip this prerequisite if you are not going to use Haizea to run simulations, or if you are only going to run short simulations.
 \end{itemize}
 
-Note that mxDateTime, pysqlite, Mako, and Psyco are all available as packages (DEB, RPM, etc.) on most Linux distributions. If you don't install any of the optional dependencies, Haizea will still run fine, but some functionality may not be available, as noted above.
+Note that mxDateTime, Mako, and Psyco are all available as packages (DEB, RPM, etc.) on most Linux distributions. If you don't install any of the optional dependencies, Haizea will still run fine, but some functionality may not be available, as noted above.
 
 \section{Download Haizea}
 
@@ -72,7 +71,7 @@
 
 \begin{wideshellverbatim}
 [2006-11-25 13:00:00.00] TFILE   Loading tracefile /usr/share/haizea/traces/sample.lwf
-[2006-11-25 13:00:00.00] TFILE   Loaded workload with 0 requests (0 best-effort + 0 AR)
+[2006-11-25 13:00:00.00] TFILE   Loaded workload with 0 requests ()
 [2006-11-25 13:00:00.00] RM      Starting resource manager
 [2006-11-25 13:00:00.00] CLOCK   Starting simulated clock
 [2006-11-25 13:00:00.00] CLOCK   Simulated clock has stopped

Modified: trunk/doc/manual/manual.tex
===================================================================
--- trunk/doc/manual/manual.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/manual.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -61,6 +61,16 @@
 \input{analysing}
 
 
+\part{Customizing Haizea}
+
+\chapter{Writing your own policies}
+\label{chap:policies}
+\input{policies}
+
+
+\chapter{Writing your own resource mapper}
+\label{mapper}
+
 %\part{Advanced Topics}
 
 %\chapter{Haizea's resource and leasing model}
@@ -82,8 +92,16 @@
 \label{app:conffile}
 \input{appendix_conf}
 
-\chapter{LWF file format}
+\chapter{XML format reference}
 \label{app:lwf}
 \input{appendix_lwf}
 
+\section{Nodes format}
+
+\section{Lease format}
+
+\section{Site format}
+
+\section{LWF file format}
+
 \end{document}

Modified: trunk/doc/manual/opennebula.tex
===================================================================
--- trunk/doc/manual/opennebula.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/opennebula.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,16 +1,12 @@
-OpenNebula (\url{http://www.opennebula.org/}) is a virtual infrastructure manager that enables the dynamic deployment and re-allocation of virtual machines on a pool of physical resources. Haizea can be used to extend OpenNebula's scheduling capabilities, allowing it to support advance reservation of resources and queueing of best effort requests. OpenNebula and Haizea complement each other, since OpenNebula provides all the enactment muscle (OpenNebula can manage Xen and KVM VMs on a cluster, with VMWare support to follow shortly) and Haizea provides the scheduling brains. Using both of them together is simple, since Haizea acts as a drop-in replacement for OpenNebula's scheduling daemon. 
+OpenNebula (\url{http://www.opennebula.org/}) is a virtual infrastructure manager that enables the dynamic deployment and re-allocation of virtual machines on a pool of physical resources. Haizea can be used to extend OpenNebula's scheduling capabilities, allowing it to support advance reservation of resources and queueing of best effort requests. OpenNebula and Haizea complement each other, since OpenNebula provides all the enactment muscle (OpenNebula can manage Xen, KVM, and VMWare VMs on a cluster) and Haizea provides the scheduling brains. Using both of them together is simple, since Haizea acts as a drop-in replacement for OpenNebula's scheduling daemon. 
 
 This chapter explains how to use OpenNebula and Haizea together, and explains how to submit requests to OpenNebula to use Haizea's scheduling capabilities.
 
-\begin{warning}
-Please remember that, although we have tested Haizea considerably with OpenNebula 1.2, Haizea is still a technology preview and, thus, not a good choice for production environments (yet). There are a couple of known issues and limitations which are listed at the end of this document. If you need to use OpenNebula in a production environment, and don't need any of Haizea's scheduling features (advance reservations, queueing of requests, etc.), you may want to use OpenNebula's default scheduler instead.
-\end{warning}
-
 \section{Installing OpenNebula and Haizea}
 
-If you have not already done so, you will need to install OpenNebula 1.2 and the latest version of Haizea. Start by installing OpenNebula, and then installing Haizea.
+If you have not already done so, you will need to install OpenNebula 1.4 and the latest version of Haizea. Start by installing OpenNebula, and then installing Haizea.
 
-Before proceeding, you may want to follow the OpenNebula quickstart guide (\url{http://www.opennebula.org/doku.php?id=documentation:rel1.2:qg}) to verify that your OpenNebula installation is working fine. The rest of this document assumes that OpenNebula is correctly installed, and that you know what a \emph{virtual machine template} is (``VM templates'' is how VMs are requested to OpenNebula, so we'll be working with them quite a bit). You may also want to follow the Haizea Quickstart Guide (see Chapter~\ref{chap:quickstart}, to verify that Haizea is correctly installed.
+Before proceeding, you may want to follow the OpenNebula quickstart guide (\url{http://www.opennebula.org/doku.php?id=documentation:rel1.4:qg}) to verify that your OpenNebula installation is working fine. The rest of this document assumes that OpenNebula is correctly installed, and that you know what a \emph{virtual machine template} is (``VM templates'' is how VMs are requested to OpenNebula, so we'll be working with them quite a bit). You may also want to follow the Haizea Quickstart Guide (see Chapter~\ref{chap:quickstart}) to verify that Haizea is correctly installed.
 
 \section{Configuring Haizea}
 
@@ -23,19 +19,21 @@
 ...
 \end{wideshellverbatim}
 
-Next, you need to tell Haizea where the OpenNebula database and \texttt{onevm} command are located. This is done in the \texttt{opennebula} section:
+Haizea interacts with OpenNebula through its XML-RPC API, so you need to tell Haizea what host OpenNebula is on. This is done in the \texttt{opennebula} section:
 
 \begin{wideshellverbatim}
 [opennebula]
-# The following assumes that \$ONE_LOCATION is /opt/nebula/ONE
-# If you used a different \$ONE_LOCATION, modify the paths 
-# accordingly
-db: /usr/local/one/var/one.db
-onevm: /usr/local/one/bin/onevm
+# Typically, OpenNebula and Haizea will be installed
+# on the same host, so the following option should be
+# set to 'localhost'. If they're on different hosts,
+# make sure you modify this option accordingly.
+host: localhost
 \end{wideshellverbatim}
 
-There are some additional options described at the end of this chapter, but which you do not need to concern yourself with yet.
+Additionally, if OpenNebula is not listening on its default port (2633), you can use the \texttt{port} option in the \texttt{opennebula} section to specify a different port.
 
+There are also a couple of options in the \texttt{scheduling} section that are relevant to OpenNebula mode, but which you do not need to concern yourself with yet (they are described at the end of this chapter).
+
 \section{Running OpenNebula and Haizea together}
 
 Now that Haizea is configured to run alongside OpenNebula, running them is as simple as starting the OpenNebula daemon:
@@ -44,6 +42,8 @@
 oned
 \end{wideshellverbatim}
 
+%TODO: ONE_AUTH, explain distinction between hosts
+
 Followed by Haizea:
 
 \begin{wideshellverbatim}
@@ -59,13 +59,13 @@
 When Haizea starts up, it will print out something like this:
 
 \begin{wideshellverbatim}
-[2009-02-15 23:32:08.07] ENACT.ONE.INFO Fetched N nodes from ONE db
-[2009-02-15 23:32:08.07] RM      Starting resource manager
-[2009-02-15 23:32:08.07] RPCSERVER RPC server started on port 42493
-[2009-02-15 23:32:08.07] CLOCK   Starting clock
+[2009-07-30 18:36:54.06] ENACT.ONE.INFO Fetched N nodes from OpenNebula
+[2009-07-30 18:36:54.07] RM      Starting resource manager
+[2009-07-30 18:36:54.07] RPCSERVER RPC server started on port 42493
+[2009-07-30 18:36:54.07] CLOCK   Starting clock
 \end{wideshellverbatim}
 
-This means that Haizea has correctly started up, accessed OpenNebula's database and detected that there are N physical nodes (the value of N will depend, of course, on how many nodes you have in your system).
+This means that Haizea has correctly started up, contacted OpenNebula and detected that there are N physical nodes (the value of N will depend, of course, on how many nodes you have in your system).
 
 \begin{warning}
 Haizea is a drop-in replacement for OpenNebula's default scheduler (\texttt{mm\_sched}). Do not run Haizea and \texttt{mm\_sched} at the same time, or funny things will happen.
@@ -90,10 +90,10 @@
 Before you submit your request to OpenNebula, take a look at the Haizea log. You should see something like this repeating every minute:
 
 \begin{wideshellverbatim}
-[2008-07-21 11:49:00.63] CLOCK   Waking up to manage resources
-[2008-07-21 11:49:00.63] CLOCK   Wake-up time recorded as 2008-07-21 11:49:01.00
-[2008-07-21 11:49:00.63] CLOCK   Going back to sleep. 
-                                 Waking up at 2008-07-21 11:50:01.00 
+[2009-07-30 18:38:44.00] CLOCK   Waking up to manage resources
+[2009-07-30 18:38:44.00] CLOCK   Wake-up time recorded as 2009-07-30 18:38:44.00
+[2009-07-30 18:38:44.01] CLOCK   Going back to sleep. 
+                                 Waking up at 2009-07-30 18:38:54.00 
                                  to see if something interesting has happened by then.
 \end{wideshellverbatim}
 
@@ -106,24 +106,25 @@
 If you run \texttt{onevm list} to see the VMs managed by OpenNebula, you'll see that the request is in a \texttt{pending} state:
 
 \begin{wideshellverbatim}
-  ID     NAME STAT CPU     MEM        HOSTNAME        TIME
-----------------------------------------------------------
-  42     test pend   0       0                 00 00:00:04
+  ID     USER     NAME STAT CPU     MEM        HOSTNAME        TIME
+-------------------------------------------------------------------
+  42    borja     test pend   0       0                 00 00:00:02
 \end{wideshellverbatim}
 
 Next time Haizea wakes up, you should see something like this:
 
 \begin{wideshellverbatim}
-[2009-02-15 23:38:01.99] CLOCK   Waking up to manage resources
-[2009-02-15 23:38:01.99] CLOCK   Wake-up time recorded as 2009-02-15 23:38:02.00
-[2009-02-15 23:38:02.03] LSCHED  Lease #1 has been requested and is pending.
-[2009-02-15 23:38:02.03] LSCHED  Scheduling AR lease #1, 1 nodes 
-                                   from 2009-02-15 23:38:32.00 
-                                     to 2009-02-15 23:39:32.00.
-[2009-02-15 23:38:02.03] LSCHED  AR lease #1 has been accepted.
+[2009-07-30 18:41:49.16] CLOCK   Waking up to manage resources
+[2009-07-30 18:41:49.16] CLOCK   Wake-up time recorded as 2009-07-30 18:41:49.00
+[2009-07-30 18:41:49.19] LSCHED  Lease #1 has been requested.
+[2009-07-30 18:41:49.19] LSCHED  Lease #1 has been marked as pending.
+[2009-07-30 18:41:49.19] LSCHED  Scheduling AR lease #1, 1 nodes 
+                                     from 2009-07-30 18:42:15.00 
+                                       to 2009-07-30 18:43:15.00.
+[2009-07-30 18:41:49.19] LSCHED  AR lease #1 has been scheduled.
 
-[2009-02-15 23:38:02.03] CLOCK   Going back to sleep. 
-                                 Waking up at 2009-02-15 23:38:32.00 
+[2009-07-30 18:41:49.19] CLOCK   Going back to sleep. 
+                                 Waking up at 2009-07-30 18:42:15.00 
                                  to handle slot table event.
 \end{wideshellverbatim}
 
@@ -140,30 +141,37 @@
 When the VM is scheduled to start, you will see the following in the Haizea logs:
 
 \begin{wideshellverbatim}
-[2009-02-15 23:38:32.00] CLOCK   Waking up to manage resources
-[2009-02-15 23:38:32.00] CLOCK   Wake-up time recorded as 2009-02-15 23:38:32.00
-[2009-02-15 23:38:32.21] VMSCHED Started VMs for lease 1 on nodes [1]
-[2009-02-15 23:38:32.21] CLOCK   Going back to sleep. 
-                                 Waking up at 2009-02-15 23:39:32.00 
+[2009-07-30 18:42:15.02] CLOCK   Waking up to manage resources
+[2009-07-30 18:42:15.02] CLOCK   Wake-up time recorded as 2009-07-30 18:42:15.00
+[2009-07-30 18:42:15.04] VMSCHED Started VMs for lease 1 on nodes [2]
+[2009-07-30 18:42:15.09] CLOCK   Going back to sleep. 
+                                 Waking up at 2009-07-30 18:43:00.00 
                                  to handle slot table event.
 \end{wideshellverbatim}
 
 Haizea has instructed OpenNebula to start the VM for the advance reservation. If you run \texttt{onevm list}, the VM will now show up as running:
 
 \begin{wideshellverbatim}
-  ID     NAME STAT CPU     MEM        HOSTNAME        TIME
-----------------------------------------------------------
-  42     test runn   2  262144          ursa03 00 00:01:04
+  ID     USER     NAME STAT CPU     MEM        HOSTNAME        TIME
+-------------------------------------------------------------------
+  42    borja     test runn  10   65536       cluster05 00 00:00:52
 \end{wideshellverbatim}
 
 You should be able to access the VM (if you configured it with networking and SSH). However, since we requested the VM to run for just a minute, you will soon see the following in the Haizea logs:
 
 \begin{wideshellverbatim}
-[2009-02-15 23:39:32.00] CLOCK   Waking up to manage resources
-[2009-02-15 23:39:32.00] CLOCK   Wake-up time recorded as 2009-02-15 23:39:32.00
-[2009-02-15 23:39:32.00] VMSCHED Stopped VMs for lease 1 on nodes [1]
-[2009-02-15 23:39:32.12] CLOCK   Going back to sleep. 
-                                 Waking up at 2009-02-15 23:39:42.00 
+[2009-07-30 18:43:00.04] CLOCK   Waking up to manage resources
+[2009-07-30 18:43:00.04] CLOCK   Wake-up time recorded as 2009-07-30 18:43:00.00
+[2009-07-30 18:43:00.05] VMSCHED Stopped VMs for lease 1 on nodes [2]
+[2009-07-30 18:43:05.07] CLOCK   Going back to sleep. 
+                                 Waking up at 2009-07-30 18:43:15.00 
+                                 to handle slot table event.
+
+[2009-07-30 18:43:15.00] CLOCK   Waking up to manage resources
+[2009-07-30 18:43:15.00] CLOCK   Wake-up time recorded as 2009-07-30 18:43:15.00
+[2009-07-30 18:43:15.00] VMSCHED Lease 1's VMs have shutdown.
+[2009-07-30 18:43:15.01] CLOCK   Going back to sleep. 
+                                 Waking up at 2009-07-30 18:44:15.00 
                                  to see if something interesting has happened by then.
 \end{wideshellverbatim}
 
@@ -184,7 +192,7 @@
 \item \texttt{unlimited}: The lease will run forever, until explicitly stopped
 \item ISO-formatted time: i.e., \texttt{HH:MM:SS}
 \end{itemize}
-\item \texttt{preemptible}: This option can be either yes or no. Haizea currently uses a very simple priority scheme where VMs are either preemptible or non-preemptible (furthermore, a non-preemptible VM can preempt preemptible VMs, while preemptible VMs can't preempt anything). If a VM is preemptible, and a preempting VM needs its resources, then the preemptible VM will be suspended while the preempting VM is running. Future versions of Haizea will include better priority schemes.
+\item \texttt{preemptible}: This option can be either yes or no. %TODO: Refer to lease documentation
 \item \texttt{group}: This option can take on any string value, and allows you to schedule several VMs as a group (or, in Haizea terminology, as a single lease with multiple nodes). All OpenNebula VM templates with the same group name will be considered part of the same lease (i.e., all the VMs will be scheduled in a all-or-nothing fashion: all VMs must be able to start/stop at the same time). Future versions of OpenNebula will automatically manage this option, so users don't have to worry about manually setting this option in multiple VM templates (which can be error-prone). 
 \end{itemize}
 
@@ -242,7 +250,7 @@
 
 \section{Additional OpenNebula configuration options}
 
-When running Haizea with OpenNebula, you must specify at least the \texttt{db} and \texttt{onevm} options in the \texttt{[opennebula]} section of the configuration file. However, there are additional options in other sections that you can tweak:
+When running Haizea with OpenNebula, you must specify at least the \texttt{host} option in the \texttt{[opennebula]} section of the configuration file. However, there are additional options in other sections that you can tweak:
 
 \subsection{Wakeup interval}
 
@@ -294,8 +302,7 @@
 The following are known issues and limitations when using Haizea with OpenNebula:
 
 \begin{itemize}
-\item As pointed out in this guide, Haizea has to poll OpenNebula every minute to ask if there are any new requests. Additionally, OpenNebula has no way of notifying Haizea of a change of state in a VM (e.g., a VM that died, a suspend operation that finished before expected, etc.). An upcoming version of OpenNebula will add this feature, and Haizea (in turn) will support receiving events from OpenNebula (this includes being instantly notified of new requests, instead of having to poll OpenNebula periodically).
-\item If a command sent to OpenNebula fails, Haizea currently ignores this. Nonetheless, OpenNebula commands run from Haizea shouldn't fail unless you're running incredibly heavy loads, or if you manually shutdown a VM managed by Haizea.
+\item As pointed out in this guide, Haizea has to poll OpenNebula every minute to ask if there are any new requests.  Although OpenNebula 1.4 added a ``hook mechanism'' that allows actions to be carried out when certain events happen (such as sending Haizea notifications of a VM that has died, a suspend operation that finished before expected, etc.), Haizea currently does not use this hook mechanism.
 \item Haizea currently cannot do any image deployment with OpenNebula, and VM images are assumed to be predeployed on the physical nodes, or available on a shared NFS filesystem. Although OpenNebula includes support for interfacing with a \emph{transfer manager} to handle various VM deployment scenarios, Haizea currently does not access this functionality.
-\item Haizea cannot enact cold migrations in OpenNebula (i.e., migrating a suspended VM to a different node if resources become available earlier on a different node than the one where the VM was suspended on). Haizea actually has all the scheduling code for this, and only the enactment "glue" is missing (should be added in TP 1.3 or 1.4)
+\item Haizea cannot enact cold migrations in OpenNebula (i.e., migrating a suspended VM to a different node if resources become available earlier on a node other than the one the VM was suspended on). Haizea actually has all the scheduling code for this, and only the enactment "glue" is missing.
 \end{itemize}
\ No newline at end of file

Modified: trunk/doc/manual/quickstart.tex
===================================================================
--- trunk/doc/manual/quickstart.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/quickstart.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -22,11 +22,10 @@
 \begin{wideshellverbatim}
 [simulation]
 starttime: 2006-11-25 13:00:00
-nodes: 4
-resources: CPU,1;Mem,1024;Net (in),100;Net (out),100;Disk,20000
+resources: 4  CPU:100 Memory:1024
 \end{wideshellverbatim}
 
-These options are used to describe the characteristics of our simulated cluster. In particular, we're using a 4-node cluster, each node with 1 CPU, 1024 MB of memory, 20GB of disk space, and 100Mbps of inbound/outbound network bandwidth. In this document, we will represent this cluster over time like this:
+These options are used to describe the characteristics of our simulated cluster. In particular, we're using a 4-node cluster, each node with 1 CPU, 1024 MB of memory. In this document, we will represent this cluster over time like this:
 
 \begin{center}
 \includegraphics{images/quickstart_leasegraph1.png}
@@ -49,21 +48,34 @@
 tracefile: /usr/share/haizea/traces/sample.lwf 
 \end{wideshellverbatim}
 
-The default value is a sample tracefile included with Haizea. If you copy the file to a different location, make sure to update the \texttt{tracefile} option accordingly. The format of this file is LWF (Lease Workload Format), which is particular to Haizea. The sample file includes documentation on the file format, and several sample workloads. You can also find details on the LWF format in Appendix~\ref{app:lwf}. For now, we will focus on the first workload, "PREEMPT":
+The default value is a sample tracefile included with Haizea. If you copy the file to a different location, make sure to update the \texttt{tracefile} option accordingly. The format of this file is LWF (Lease Workload Format), an XML format which is particular to Haizea. For now, don't worry about parsing the trace format in detail; it is fairly human-readable and you can also find details on the LWF format in Appendix~\ref{app:lwf}.
 
 \begin{wideshellverbatim}
-# 0   -1   3600 3600 1 1 1024 0 foobar.img 1024
-# 900 1800 1800 1800 4 1 1024 0 foobar.img 1024
-\end{wideshellverbatim}
+<lease-workload name="sample">
+	<description>
+	A simple trace where an AR lease preempts a 
+	best-effort lease that is already running. 
+	</description>
 
-For now, don't worry about parsing the trace format in detail. The above represents two lease requests:
+	<lease-requests>
+	
+	<!-- The lease requests are initially commented out -->
+	
+	<!-- First lease request -->
+	<!--
+	...
+	-->
 
-\begin{itemize}
-\item The first line is a request for a best-effort lease, requested at time 0 (right at the start of the simulation), requiring 1 hour (3600 seconds), and only one node.
-\item The second line is an advance reservation (AR) lease, requested 15 minutes into the simulation (900 seconds), starting 30 minutes into the simulation (1800 seconds), requiring 30 minutes (1800 seconds), and all four nodes in the cluster. Since the start time of the simulation is set to 13:00, this means the lease request is received at 13:15, and that the lease must run from 13:30 to 14:00.
-\end{itemize}
+	<!-- Second lease request -->
+	<!--
+	...
+	-->
+	
+	</lease-requests>
+</lease-workload>
+\end{wideshellverbatim}
 
-Both leases require 1 CPU per node, 1024 MB of memory, a disk image called "foobar.img" (which uses up 1024MB of disk space). The \# characters are used to comment out the lease requests. Do not change this for now.
+As you can see, there are two lease requests in the file, but they are initially commented out. We will take a closer look at each of these requests next.
 
 \section{Running the simulator}
 
@@ -77,10 +89,11 @@
 
 \begin{wideshellverbatim}
 [2006-11-25 13:00:00.00] TFILE   Loading tracefile /usr/share/haizea/traces/sample.lwf
-[2006-11-25 13:00:00.00] TFILE   Loaded workload with 0 requests (0 best-effort + 0 AR)
+[2006-11-25 13:00:00.00] TFILE   Loaded workload with 0 requests ()
 [2006-11-25 13:00:00.00] RM      Starting resource manager
 [2006-11-25 13:00:00.00] CLOCK   Starting simulated clock
-[2006-11-25 13:00:00.00] CLOCK   Stopping simulated clock
+[2006-11-25 13:00:00.00] CLOCK   Simulated clock has stopped
+[2006-11-25 13:00:00.00] RM      Stopping resource manager gracefully...
 [2006-11-25 13:00:00.00] RM      --- Haizea status summary ---
 [2006-11-25 13:00:00.00] RM      Number of leases (not including completed): 0
 [2006-11-25 13:00:00.00] RM      Completed leases: 0
@@ -93,14 +106,35 @@
 [2006-11-25 13:00:00.00] RM      ---- End summary ----
 \end{wideshellverbatim}
 
-Now that you've seen the tracefile, you can see why the simulator starts up and immediately stops: all the lease requests in the tracefile are commented out, and there's nothing to schedule. Go ahead and uncomment only the first lease request and run Haizea again. You should now see the following:
+Now that you've seen the tracefile, you can see why the simulator starts up and immediately stops: all the lease requests in the tracefile are commented out, and there's nothing to schedule. Go ahead and uncomment the first lease request, which looks like this:
 
 \begin{wideshellverbatim}
+<lease-request arrival="00:00:00">
+<lease preemptible="true">
+	<nodes>
+		<node-set numnodes="1">
+			<res type="CPU" amount="100"/>
+			<res type="Memory" amount="1024"/>
+		</node-set>
+	</nodes>	
+	<start></start>
+	<duration time="01:00:00"/>
+	<software>
+		<disk-image id="foobar.img" size="1024"/>
+	</software>
+</lease>
+</lease-request>
+\end{wideshellverbatim}
+
+This is a request for a best-effort lease (notice how the starting time is left empty, meaning it's up to Haizea to determine the start time), requested at time 00:00:00 (right at the start of the simulation), requiring 1 hour, and only one node. Now run Haizea again. You should now see the following:
+
+\begin{wideshellverbatim}
 [2006-11-25 13:00:00.00] TFILE   Loading tracefile /usr/share/haizea/traces/sample.lwf
-[2006-11-25 13:00:00.00] TFILE   Loaded workload with 1 requests (1 best-effort + 0 AR)
+[2006-11-25 13:00:00.00] TFILE   Loaded workload with 1 requests (1 Best-effort)
 [2006-11-25 13:00:00.00] RM      Starting resource manager
 [2006-11-25 13:00:00.00] CLOCK   Starting simulated clock
-[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been requested and is pending.
+[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been requested.
+[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been marked as pending.
 [2006-11-25 13:00:00.00] LSCHED  Queued best-effort lease request #1, 1 nodes for 01:00:00.00.
 [2006-11-25 13:00:00.00] LSCHED  Next request in the queue is lease 1. Attempting to schedule...
 [2006-11-25 13:00:00.00] VMSCHED Lease #1 has been scheduled on nodes [1] 
@@ -131,8 +165,30 @@
 
 A best-effort request is received at 13:00 and, since the cluster is empty, it is scheduled immediately. Notice how the VMs for the lease start at 13:00 and stop at 14:00. For now, we're assuming that the disk images are predeployed on the physical nodes (we will modify this option in the next section).
 
-So, what would happen if we also added the AR lease? Since it requires all the cluster resources from 13:30 to 14:00, the best-effort lease will be unable to run in that time interval. Since the leases are implemented as VMs, Haizea will still schedule the best-effort lease to start at 13:00, but will suspend it before the AR lease starts, and will resume it once the AR lease has finished. In effect, we want the schedule to look like this:
+Now go ahead and uncomment the second lease request, which looks like this:
 
+\begin{wideshellverbatim}
+<lease-request arrival="00:15:00">
+<lease preemptible="false">
+	<nodes>
+		<node-set numnodes="4">
+			<res type="CPU" amount="100"/>
+			<res type="Memory" amount="1024"/>
+		</node-set>
+	</nodes>
+	<start>
+		<exact time="00:30:00"/>
+	</start>
+	<duration time="00:30:00"/>
+	<software>
+		<disk-image id="foobar.img" size="1024"/>
+	</software>
+</lease>
+</lease-request>
+\end{wideshellverbatim}
+
+This is a request for an advance reservation lease (notice how there is an exact starting time specified), requesting all four nodes for 30 minutes. So, what would happen if we also added this AR lease? Since it requires all the cluster resources from 13:30 to 14:00, the best-effort lease will be unable to run in that time interval. Since the leases are implemented as VMs, Haizea will still schedule the best-effort lease to start at 13:00, but will suspend it before the AR lease starts, and will resume it once the AR lease has finished. In effect, we want the schedule to look like this:
+
 \begin{center}
 \includegraphics{images/quickstart_leasegraph3.png}
 \end{center}
@@ -140,11 +196,12 @@
 Uncomment the AR lease request, and run Haizea again. You should now see the following:
 
 \begin{wideshellverbatim}
-[2006-11-25 13:00:00.00] TFILE   Loading tracefile /home/haizea/sample.lwf
-[2006-11-25 13:00:00.00] TFILE   Loaded workload with 2 requests (1 best-effort + 1 AR)
+[2006-11-25 13:00:00.00] TFILE   Loading tracefile /usr/share/haizea/traces/sample.lwf
+[2006-11-25 13:00:00.00] TFILE   Loaded workload with 2 requests (1 Best-effort + 1 AR)
 [2006-11-25 13:00:00.00] RM      Starting resource manager
 [2006-11-25 13:00:00.00] CLOCK   Starting simulated clock
-[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been requested and is pending.
+[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been requested.
+[2006-11-25 13:00:00.00] LSCHED  Lease #1 has been marked as pending.
 [2006-11-25 13:00:00.00] LSCHED  Queued best-effort lease request #1, 1 nodes for 01:00:00.00.
 [2006-11-25 13:00:00.00] LSCHED  Next request in the queue is lease 1. Attempting to schedule...
 [2006-11-25 13:00:00.00] VMSCHED Lease #1 has been scheduled on nodes [1] 
@@ -152,7 +209,8 @@
                                    to 2006-11-25 14:00:00.00
 [2006-11-25 13:00:00.00] VMSCHED Started VMs for lease 1 on nodes [1]
 
-[2006-11-25 13:15:00.00] LSCHED  Lease #2 has been requested and is pending.
+[2006-11-25 13:15:00.00] LSCHED  Lease #2 has been requested.
+[2006-11-25 13:15:00.00] LSCHED  Lease #2 has been marked as pending.
 [2006-11-25 13:15:00.00] LSCHED  Scheduling AR lease #2, 4 nodes
                                  from 2006-11-25 13:30:00.00 
                                    to 2006-11-25 14:00:00.00.
@@ -160,29 +218,29 @@
 [2006-11-25 13:15:00.00] LSCHED  Preempting lease #1...
 [2006-11-25 13:15:00.00] LSCHED  ... lease #1 will be suspended 
                                      at 2006-11-25 13:30:00.00.
-[2006-11-25 13:15:00.00] LSCHED  AR lease #2 has been accepted.
+[2006-11-25 13:15:00.00] LSCHED  AR lease #2 has been scheduled.
 
-[2006-11-25 13:29:39.00] VMSCHED Stopped VMs for lease 1 on nodes [1]
-[2006-11-25 13:29:39.00] VMSCHED Suspending lease 1...
+[2006-11-25 13:29:28.00] VMSCHED Stopped VMs for lease 1 on nodes [1]
+[2006-11-25 13:29:28.00] VMSCHED Suspending lease 1...
 
 [2006-11-25 13:30:00.00] VMSCHED Lease 1 suspended.
 [2006-11-25 13:30:00.00] VMSCHED Started VMs for lease 2 on nodes [2, 3, 4, 1]
 [2006-11-25 13:30:00.00] LSCHED  Next request in the queue is lease 1. Attempting to schedule...
 [2006-11-25 13:30:00.00] VMSCHED Lease #1 has been scheduled on nodes [1]
-                                 from 2006-11-25 14:00:21.00 (resuming) 
-                                   to 2006-11-25 14:30:42.00
+                                 from 2006-11-25 14:00:00.00 (resuming) 
+                                   to 2006-11-25 14:31:04.00
 
 [2006-11-25 14:00:00.00] VMSCHED Stopped VMs for lease 2 on nodes [2, 3, 4, 1]
 [2006-11-25 14:00:00.00] VMSCHED Resuming lease 1...
 [2006-11-25 14:00:00.00] VMSCHED Lease 2's VMs have shutdown.
 
-[2006-11-25 14:00:21.00] VMSCHED Resumed lease 1
-[2006-11-25 14:00:21.00] VMSCHED Started VMs for lease 1 on nodes [1]
+[2006-11-25 14:00:32.00] VMSCHED Resumed lease 1
+[2006-11-25 14:00:32.00] VMSCHED Started VMs for lease 1 on nodes [1]
 
-[2006-11-25 14:30:42.00] VMSCHED Stopped VMs for lease 1 on nodes [1]
-[2006-11-25 14:30:42.00] VMSCHED Lease 1's VMs have shutdown.
-[2006-11-25 14:30:42.00] CLOCK   Simulated clock has stopped
-[2006-11-25 14:30:42.00] RM      Stopping resource manager gracefully...
+[2006-11-25 14:31:04.00] VMSCHED Stopped VMs for lease 1 on nodes [1]
+[2006-11-25 14:31:04.00] VMSCHED Lease 1's VMs have shutdown.
+[2006-11-25 14:31:04.00] CLOCK   Simulated clock has stopped
+[2006-11-25 14:31:04.00] RM      Stopping resource manager gracefully...
 \end{wideshellverbatim}
 
 Notice how the above corresponds to the previous figure. In particular, notice the following:
@@ -233,7 +291,7 @@
 \begin{wideshellverbatim}
 [general]
 ...
-lease-deployment: imagetransfer
+lease-preparation: imagetransfer
 ...
 \end{wideshellverbatim}
 
@@ -300,6 +358,12 @@
                      -c 1 -m 512 -i foobar.img -z 600 
 \end{wideshellverbatim}
 
+Additionally, you can write a lease request using the XML format shown previously, save it to a file, and have the \texttt{haizea-request-lease} command parse it:
+
+\begin{wideshellverbatim}
+haizea-request-lease -f request.xml
+\end{wideshellverbatim}
+
 You can find more details on this command's parameters by running \texttt{haizea-request-lease -h} or taking a look at Appendix~\ref{app:cli}. Once you've submitted the lease, you should see the following:
 
 \begin{wideshellverbatim}
@@ -316,8 +380,8 @@
 You should see the following:
 
 \begin{wideshellverbatim}
- ID   Type  State      Starting time           Duration      Nodes  
- 1    AR    Scheduled  2008-09-24 14:24:47.00  00:10:00.00   1       
+ ID   Type          State      Starting time           Duration      Nodes  
+ 1    AR            Scheduled  2009-08-04 11:25:57.00  00:10:00.00   1        
 \end{wideshellverbatim}
 
 Note: You may not see your lease right away, since Haizea has to ``become aware'' of it (which won't happen until it wakes up to check if there are any new requests). Future versions of Haizea will enable it to be notified immediately of incoming requests.
@@ -325,15 +389,15 @@
 Remember that the lease has been requested one minute into the future, so it will remain in a ``Scheduled'' state for a couple seconds. If you run \texttt{haizea-list-leases} periodically, you should see it pass through a couple other states. If image transfers are still enabled, it will first transition to the ``Preparing'' state:
 
 \begin{wideshellverbatim}
- ID   Type  State      Starting time           Duration      Nodes  
- 1    AR    Preparing  2008-09-24 14:24:47.00  00:10:00.00   1       
+ ID   Type          State      Starting time           Duration      Nodes  
+ 1    AR            Preparing  2009-08-04 11:25:57.00  00:10:00.00   1       
 \end{wideshellverbatim}
 
 And then to the ``Active'' state:
 
 \begin{wideshellverbatim}
- ID   Type  State      Starting time           Duration      Nodes  
- 1    AR    Active     2008-09-24 14:24:47.00  00:10:00.00   1       
+ ID   Type          State      Starting time           Duration      Nodes  
+ 1    AR            Active     2009-08-04 11:25:57.00  00:10:00.00   1       
 \end{wideshellverbatim}
 
 Now let's request a best-effort lease:
@@ -346,23 +410,23 @@
 The list of leases will now look like this:
 
 \begin{wideshellverbatim}
- ID   Type  State      Starting time           Duration      Nodes  
- 1    AR    Active     2008-09-24 14:24:47.00  00:10:00.00   1       
- 2    BE    Scheduled  None                    00:10:00.00   4       
+ ID   Type          State      Starting time           Duration      Nodes  
+ 1    AR            Active     2009-08-04 11:25:57.00  00:10:00.00   1       
+ 2    Best-effort   Scheduled  Unspecified             00:10:00.00   4       
 \end{wideshellverbatim}
 
-Note how, for best-effort leases, the starting time is set to ``None'', which means this time is not specified by the user, but instead determined on a best-effort basis by the scheduler. Since the lease is in a ``Scheduled'' state, that means that it has been assigned a starting time (although that information is currently not available through the command-line interface; it can be seen in the Haizea log).
+Note how, for best-effort leases, the starting time is set to ``Unspecified'', which means this time is not specified by the user, but instead determined on a best-effort basis by the scheduler. Since the lease is in a ``Scheduled'' state, that means that it has been assigned a starting time (although that information is currently not available through the command-line interface; it can be seen in the Haizea log).
 
 Now try to rerun the \texttt{haizea-request-lease} command a couple times (i.e., lets submit a couple more best-effort requests). The scheduler won't be able to schedule them, since they require all the available nodes, and the AR lease is using up one of them. The previous best-effort lease was scheduled because Haizea's default behaviour is to schedule at most one best-effort lease in the future if resources cannot be found right away (this is due to Haizea's use of backfilling algorithms; for now, don't worry if you don't know what they are). Anyway, the list of leases should now look like this:
 
 \begin{wideshellverbatim}
  ID   Type  State      Starting time           Duration      Nodes  
- 1    AR    Active     2008-09-24 14:24:47.00  00:10:00.00   1       
- 2    BE    Scheduled  None                    00:10:00.00   4       
- 3    BE    Queued     None                    00:10:00.00   4       
- 4    BE    Queued     None                    00:10:00.00   4       
- 5    BE    Queued     None                    00:10:00.00   4       
- 6    BE    Queued     None                    00:10:00.00   4       
+ 1    AR            Active     2009-08-04 11:25:57.00  00:10:00.00   1       
+ 2    Best-effort   Scheduled  Unspecified             00:10:00.00   4       
+ 3    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 4    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 5    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 6    Best-effort   Queued     Unspecified             00:10:00.00   4       
 \end{wideshellverbatim}
 
 Notice how the extra best-effort requests have been queued. If you only want to see the contents of the queue, you can use the following command:
@@ -374,11 +438,11 @@
 This should show the following:
 
 \begin{wideshellverbatim}
- ID   Type  State      Sched. Start time       Duration      Nodes  
- 3    BE    Queued     None                    00:10:00.00   4       
- 4    BE    Queued     None                    00:10:00.00   4       
- 5    BE    Queued     None                    00:10:00.00   4       
- 6    BE    Queued     None                    00:10:00.00   4       
+ ID   Type          State      Starting time           Duration      Nodes  
+ 3    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 4    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 5    Best-effort   Queued     Unspecified             00:10:00.00   4       
+ 6    Best-effort   Queued     Unspecified             00:10:00.00   4       
 \end{wideshellverbatim}
 
 When you're done, you can shut Haizea down cleanly by running the following:
@@ -392,8 +456,6 @@
 
 At this point, we have seen how to run simple simulations with Haizea. However, there is a lot more that Haizea can do:
 
-When not running in simulation, Haizea runs as a daemon that can accept requests (e.g., through a command-line interface) and sends out enactment commands ("start VM", "stop VM", etc.) to the appropriate enactment module. For example, when running OpenNebula with Haizea as a scheduling backend, Haizea is in charge of processing the requests received by OpenNebula, coming up with a schedule, and then instructing OpenNebula on when VMs should be start/stop/suspend/resume on what physical nodes.
-
 \begin{description}
 \item[Run on real hardware] First and foremost, almost everything you just saw above in simulation can be done on real hardware. This is accomplished by using Haizea with the OpenNebula virtual infrastructure manager. So, if you have a Xen or KVM cluster, you can just install OpenNebula and Haizea to enable your users to request VM-based leases on your cluster. This is explained in Chapter~\ref{chap:opennebula}.
 \item[Run complex simulations] This chapter concerned itself mostly with scheduling two leases on a 4-node cluster during a span of roughly 2 hours. \emph{Boring}. Haizea can handle more complex simulations, and also provides the necessary tools for you to easily run multiple simulations with different profiles. For example, in the Haizea paper ``Combining Batch Execution and Leasing Using Virtual Machines'' (see the Haizea publication page: \url{http://haizea.cs.uchicago.edu/pubs.html}) we simulated running 72 30-day workloads in six different configurations, or 36 years of lease scheduling. Running multiple simulations is explained in Section~\ref{sec:multiplesim}

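The quickstart changes above introduce the new XML-based lease request format.
As a rough illustration of how that structure can be read (this is not
Haizea's own loader, which presumably lives in the new
src/haizea/core/leases.py; the file name below is hypothetical):

    # Illustrative sketch: walk the lease requests in an XML LWF file,
    # using only the elements shown in the quickstart excerpts above.
    import xml.etree.ElementTree as ET

    tree = ET.parse("sample.lwf")
    for request in tree.findall("lease-requests/lease-request"):
        lease = request.find("lease")
        numnodes = lease.find("nodes/node-set").get("numnodes")
        duration = lease.find("duration").get("time")
        exact = lease.find("start/exact")
        # An empty <start> means a best-effort lease; <exact> gives an AR start.
        start = exact.get("time") if exact is not None else "best-effort"
        print request.get("arrival"), numnodes, duration, start
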
Modified: trunk/doc/manual/simulation.tex
===================================================================
--- trunk/doc/manual/simulation.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/simulation.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -48,32 +48,59 @@
 
 \section{Specifying the simulated physical resources}
 
-The simulated physical resources are specified using the \texttt{nodes} and \texttt{resources} options in the \texttt{[simulation]} section:
+The simulated physical resources are specified using the \texttt{resources} option in the \texttt{[simulation]} section. This option can take two values, "in-tracefile", which means that the description of the simulated site is in the tracefile, or a string specifying the site's resources. For the former, see Appendix~\ref{app:lwf} for details on how the simulated site is specified in the tracefile. When using the latter, the format of the string is:
 
 \begin{wideshellverbatim}
+<numnodes> <resource_type>:<resource_quantity>[,<resource_type>:<resource_quantity>]*
+\end{wideshellverbatim}
+ 
+For example:
+
+\begin{wideshellverbatim}
 [simulation]
 ...
-nodes: 4
-resources: CPU,1;Mem,1024;Net (in),100;Net (out),100;Disk,20000
+resources: 4  CPU:100 Memory:1024
 ...
 \end{wideshellverbatim}
 
-Haizea currently only allows homogeneous resources to be specified. In other words, Haizea will manage a number of simulated physical machines, all with the same resources. The \texttt{resources} specifies the per-node resources using a semicolon-delimited list. Each entry in the list contains a pair: a resource name and its maximum capacity. Haizea currently recognises the following:
+The above describes a site with four nodes, each with one CPU and 1024 MB of memory. Note that you must always specify at least the ``CPU'' and ``Memory'' resource types.
 
-\begin{itemize}
-\item \texttt{CPU}: Number of processors per node.
-\item \texttt{Mem}: Memory (in MB)
-\item \texttt{Net (in)}: Inbound network bandwidth (in Mbps) 
-\item \texttt{Net (out)}: Outbound network bandwidth (in Mbps) 
-\item \texttt{Disk}: Disk space in MB (not counting space for disk image cache).
-\end{itemize}
-
-These five resources must always be specified, since Haizea depends on them for fundamental resource reservations (running VMs, suspension of VMs, etc.) which involve these five types of resources. Additional resource types can be specified, but Haizea's scheduling code would have to be modified for them to be taken into account when scheduling leases. In the future, it will be possible to specify additional resources in the simulated nodes and in the lease requests with less effort.
-
 \section{Scheduling options}
 
 The scheduling options control how leases are assigned to resources.
 
+\subsection{Scheduling policies}
+
+Haizea includes a policy decision module that supports ``pluggable policies'', allowing developers to write their own scheduling policies. This is described in more detail in Chapter~\ref{chap:policies}, and we describe here only the built-in policies that are included with Haizea.
+
+The first policy is lease admission, which controls what leases are accepted by Haizea. Take into account that this decision takes place before Haizea even attempts to schedule the lease (so, you can think of lease admission as ``eligibility to be scheduled''). The two built-in policies are to accept all leases, and to accept all leases \emph{except} advance reservations.
+
+\begin{wideshellverbatim}
+[scheduling]
+...
+policy-admission: accept-all | no-ARs | <custom policy>
+...
+\end{wideshellverbatim}
+
+The next policy is lease preemptability, or what leases can be preempted. The two built-in policies are to not allow any preemptions, and to allow all ARs to preempt other leases.
+
+\begin{wideshellverbatim}
+[scheduling]
+...
+policy-preemption: no-preemption | ar-preempts-everything | <custom policy>
+...
+\end{wideshellverbatim}
+
+Finally, the host selection policy controls how Haizea chooses what physical hosts to map VMs to. The two built-in policies are to choose nodes arbitrarily (i.e., ``no policy''), or to apply a greedy policy that tries to minimize the number of preemptions. Currently, you should choose the greedy policy unless you really know what you're doing.
+
+\begin{wideshellverbatim}
+[scheduling]
+...
+policy-host-selection: no-policy | greedy | <custom policy>
+...
+\end{wideshellverbatim}
+
+
 \subsection{Backfilling algorithms}
 
 \begin{warning}
@@ -138,19 +165,16 @@
 ...
 \end{wideshellverbatim}
 
-Lease migration can be allowed or not allowed. When allowed, we can specify whether a migration will involve transferring only the memory image of a VM (i.e., the file containing the contents of the VM when it was suspended), or will require transferring both the memory image and the disk image:
+Lease migration can be disallowed, allowed, or allowed without requiring any files to be transferred from one node to another:
 
 \begin{wideshellverbatim}
 [scheduling]
 ...
-migration: True | False
-what-to-migrate: nothing | mem | mem+disk
+migration: no | yes | yes-notransfer
 ...
 \end{wideshellverbatim}
 
-Setting \texttt{what-to-migrate} to \texttt{nothing} means that migration \emph{is} allowed, but does not involve transferring any files from one node to another.
 
-
 \subsection{Lease preparation scheduling}
 
 Before a lease can start, it may require some preparation, such as transferring a disk image from a repository to the physical node where a VM will be running. When no preparation is necessary (e.g., assuming that all required disk images are predeployed on the physical nodes), the \texttt{lease-preparation} option must be set to \texttt{unmanaged}:
@@ -183,16 +207,16 @@
 
 \subsubsection{Transfer mechanisms}
 
-The transfer mechanism specifies how the images will be transferred from the repository to the physical nodes. haizea currently only supports a multicast transfer mechanism:
+The transfer mechanism specifies how the images will be transferred from the repository to the physical nodes. Haizea supports a unicast or a multicast transfer mechanism:
 
 \begin{wideshellverbatim}
 [deploy-imagetransfer]
 ...
-transfer-mechanism: multicast
+transfer-mechanism: unicast | multicast
 ...
 \end{wideshellverbatim}
 
-This mechanism assumes that it is possible to multicast the same image from the repository node to more than one physical node at the same time.
+When using a unicast transfer mechanism, one image can only be transferred to one node at a time. When using multicast, it is possible to transfer the same image from the repository node to more than one physical node at the same time.
 
 \subsubsection{Avoiding redundant transfers}
 

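As a rough illustration of how the new "resources" option string documented
above breaks down (this is not Haizea's actual parser, which lives in the new
core code added by this commit; the examples in this commit use
whitespace-separated type:quantity pairs, e.g. "4  CPU:100 Memory:1024", so
that is what is assumed here):

    # Illustrative sketch: split a resources specification into the node
    # count and a per-node capacity dictionary.
    def parse_resources(spec):
        fields = spec.split()
        numnodes = int(fields[0])
        capacity = {}
        for pair in fields[1:]:
            rtype, quantity = pair.split(":")
            capacity[rtype] = int(quantity)
        return numnodes, capacity

    print parse_resources("4  CPU:100 Memory:1024")
    # -> (4, {'CPU': 100, 'Memory': 1024})
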
Modified: trunk/doc/manual/title.tex
===================================================================
--- trunk/doc/manual/title.tex	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/doc/manual/title.tex	2009-08-05 11:12:04 UTC (rev 632)
@@ -7,7 +7,7 @@
 % Title
 \HRule \\[0.4cm]
 \includegraphics[width=0.6\textwidth]{images/haizea.png}\\[1cm]
-\textsc{ \huge The Haizea Manual}\\{\large Technology Preview 1.3}\\{\large 2/16/09}\\[0.4cm]
+\textsc{ \huge The Haizea Manual}\\{\large 1.0 Beta 1}\\{\large 8/06/09}\\[0.4cm]
  
 \HRule \\[1.5cm]
 \url{http://haizea.cs.uchicago.edu/}

Modified: trunk/etc/sample_multi.conf
===================================================================
--- trunk/etc/sample_multi.conf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/etc/sample_multi.conf	2009-08-05 11:12:04 UTC (rev 632)
@@ -76,13 +76,12 @@
 [common:general]
 loglevel: STATUS
 mode: simulated
-lease-deployment: unmanaged
+lease-preparation: unmanaged
 
 [common:simulation]
 clock: simulated
 starttime: 2006-11-25 13:00:00
-nodes: 4
-resources: CPU,1;Mem,1024;Net (in),100;Net (out),100;Disk,20000
+resources: 4  CPU:100 Memory:1024
 imagetransfer-bandwidth: 100
 status-message-interval: 15
 
@@ -93,21 +92,20 @@
 [nobackfilling:scheduling]
 backfilling: off
 suspension: none
-migration: False
+migration: no
 
 [backfilling:scheduling]
 backfilling: aggressive
 suspension: none
-migration: False
+migration: no
 
 [backfilling+SR:scheduling]
 backfilling: aggressive
 suspension: all
-migration: False
+migration: no
 
 [backfilling+SR+M:scheduling]
 backfilling: aggressive
 suspension: all
-migration: True
-what-to-migrate: mem+disk
+migration: yes
 

Modified: trunk/src/haizea/cli/commands.py
===================================================================
--- trunk/src/haizea/cli/commands.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/src/haizea/cli/commands.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -16,12 +16,14 @@
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
 
-from haizea.resourcemanager.rm import ResourceManager
+from haizea.core.manager import Manager
 from haizea.common.utils import generate_config_name, unpickle
-from haizea.resourcemanager.configfile import HaizeaConfig, HaizeaMultiConfig
+from haizea.core.configfile import HaizeaConfig, HaizeaMultiConfig
 from haizea.common.config import ConfigException
 from haizea.cli.optionparser import OptionParser, Option
 from haizea.cli import Command
+from mx.DateTime import TimeDelta
+import xml.etree.ElementTree as ET
 import haizea.common.defaults as defaults
 import sys
 import os
@@ -108,9 +110,9 @@
                 
             daemon = not self.opt.foreground
         
-            rm = ResourceManager(config, daemon, pidfile)
+            manager = Manager(config, daemon, pidfile)
         
-            rm.start()
+            manager.start()
         elif self.opt.stop: # Stop Haizea
             # Based on code in:  http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
             try:
@@ -297,4 +299,113 @@
                 for lease_id in waitingtimes:
                     print ",".join([attrs, `lease_id`, `waitingtimes[lease_id].seconds`, `slowdowns[lease_id]`])
 
+class haizea_lwf2xml(Command):
+    """
+    Converts old Haizea LWF file into new XML-based LWF format
+    """
+    
+    name = "haizea-lwf2xml"
 
+    def __init__(self, argv):
+        Command.__init__(self, argv)
+        
+        self.optparser.add_option(Option("-i", "--in", action="store",  type="string", dest="inf",
+                                         help = """
+                                         Input file
+                                         """))
+        self.optparser.add_option(Option("-o", "--out", action="store", type="string", dest="outf",
+                                         help = """
+                                         Output file
+                                         """))
+                
+    def run(self):            
+        self.parse_options()
+
+        infile = self.opt.inf
+        outfile = self.opt.outf
+        
+        root = ET.Element("lease-workload")
+        root.set("name", infile)
+        description = ET.SubElement(root, "description")
+        time = TimeDelta(seconds=0)
+        id = 1
+        requests = ET.SubElement(root, "lease-requests")
+        
+        
+        infile = open(infile, "r")
+        for line in infile:
+            if line[0]!='#' and len(line.strip()) != 0:
+                fields = line.split()
+                submit_time = int(fields[0])
+                start_time = int(fields[1])
+                duration = int(fields[2])
+                real_duration = int(fields[3])
+                num_nodes = int(fields[4])
+                cpu = int(fields[5])
+                mem = int(fields[6])
+                disk = int(fields[7])
+                vm_image = fields[8]
+                vm_imagesize = int(fields[9])
+                
+                
+        
+                lease_request = ET.SubElement(requests, "lease-request")
+                lease_request.set("arrival", str(TimeDelta(seconds=submit_time)))
+                if real_duration != duration:
+                    realduration = ET.SubElement(lease_request, "realduration")
+                    realduration.set("time", str(TimeDelta(seconds=real_duration)))
+                
+                lease = ET.SubElement(lease_request, "lease")
+                lease.set("id", `id`)
+
+                
+                nodes = ET.SubElement(lease, "nodes")
+                node_set = ET.SubElement(nodes, "node-set")
+                node_set.set("numnodes", `num_nodes`)
+                res = ET.SubElement(node_set, "res")
+                res.set("type", "CPU")
+                # A full CPU is expressed as "100" in the new format;
+                # other values of the old "cpu" field are left unconverted here.
+                if cpu == 1:
+                    res.set("amount", "100")
+                res = ET.SubElement(node_set, "res")
+                res.set("type", "Memory")
+                res.set("amount", `mem`)
+                
+                start = ET.SubElement(lease, "start")
+                if start_time == -1:
+                    lease.set("preemptible", "true")
+                else:
+                    lease.set("preemptible", "false")
+                    exact = ET.SubElement(start, "exact")
+                    exact.set("time", str(TimeDelta(seconds=start_time)))
+
+                duration_elem = ET.SubElement(lease, "duration")
+                duration_elem.set("time", str(TimeDelta(seconds=duration)))
+
+                software = ET.SubElement(lease, "software")
+                diskimage = ET.SubElement(software, "disk-image")
+                diskimage.set("id", vm_image)
+                diskimage.set("size", `vm_imagesize`)
+                
+                    
+                id += 1
+        tree = ET.ElementTree(root)
+        if outfile != None:
+            # Write the converted workload to the file given with -o/--out
+            tree.write(outfile)
+        else:
+            print ET.tostring(root)
+        
+
+
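
For reference, this is roughly what the converter above emits for a single
old-format entry; the input line and its field values are hypothetical, and
the output is indented here for readability (ET.tostring() produces it on
one line, inside the <lease-workload>/<lease-requests> elements built above):

    # Hypothetical old-format line:
    #   submit start duration realduration nodes cpu mem disk image imagesize
    old_lwf_line = "0 -1 3600 3600 1 1 512 0 foobar.img 1024"

    # Approximate result (TimeDelta values are rendered by mx.DateTime):
    expected = """
    <lease-request arrival="00:00:00.00">
      <lease id="1" preemptible="true">
        <nodes>
          <node-set numnodes="1">
            <res type="CPU" amount="100"/>
            <res type="Memory" amount="512"/>
          </node-set>
        </nodes>
        <start/>
        <duration time="01:00:00.00"/>
        <software>
          <disk-image id="foobar.img" size="1024"/>
        </software>
      </lease>
    </lease-request>
    """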

Modified: trunk/src/haizea/cli/rpc_commands.py
===================================================================
--- trunk/src/haizea/cli/rpc_commands.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/src/haizea/cli/rpc_commands.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -16,13 +16,15 @@
 # limitations under the License.                                             #
 # -------------------------------------------------------------------------- #
 import haizea.common.defaults as defaults
-from haizea.resourcemanager.datastruct import Lease
+import haizea.common.constants as constants
+from haizea.core.leases import Lease, Capacity, Duration, Timestamp, DiskImageSoftwareEnvironment
+from haizea.common.utils import round_datetime
 from haizea.cli.optionparser import OptionParser, Option
 from haizea.cli import Command
 import xmlrpclib
 import sys
-from mx.DateTime import TimeDelta
-from mx.DateTime import ISO
+from mx.DateTime import TimeDelta, DateTimeDelta, ISO, now
+import xml.etree.ElementTree as ET
 
 class RPCCommand(Command):
     def __init__(self, argv):
@@ -41,10 +43,18 @@
     """
     
     name = "haizea-request-lease"
+
+    START_NOW = "now"
+    START_BESTEFFORT = "best_effort"
+    DURATION_UNLIMITED = "unlimited"    
     
     def __init__(self, argv):
         RPCCommand.__init__(self, argv)
         
+        self.optparser.add_option(Option("-f", "--file", action="store", type="string", dest="file",
+                                         help = """
+                                         File containing a lease description in XML.
+                                         """))
         self.optparser.add_option(Option("-t", "--start", action="store", type="string", dest="start",
                                          help = """
                                          Starting time. Can be an ISO timestamp, "best_effort", or "now"
@@ -65,9 +75,9 @@
                                          help = """
                                          Specifies a non-preemptible lease.
                                          """))
-        self.optparser.add_option(Option("-c", "--cpu", action="store", type="float", dest="cpu",
+        self.optparser.add_option(Option("-c", "--cpu", action="store", type="int", dest="cpu",
                                          help = """
-                                         Percentage of CPU (must be 0 < c <= 1.0)
+                                         Percentage of CPU (must be 0 < c <= 100)
                                          """))
         self.optparser.add_option(Option("-m", "--mem", action="store", type="int", dest="mem",
                                          help = """
@@ -85,16 +95,73 @@
     def run(self):
         self.parse_options()
         
-        if self.opt.preemptible == None:
-            preemptible = False
+        if self.opt.file != None:
+            lease_elem = ET.parse(self.opt.file).getroot()
+            # If a relative starting time is used, replace for an
+            # absolute starting time.
+            exact = lease_elem.find("start/exact")
+            if exact != None:
+                exact_time = exact.get("time")
+                exact.set("time", str(self.__absolute_time(exact_time)))            
+            lease_xml_str = ET.tostring(lease_elem)
         else:
-            preemptible = self.opt.preemptible
+            if self.opt.preemptible == None:
+                preemptible = False
+            else:
+                preemptible = self.opt.preemptible
             
+            capacity = Capacity([constants.RES_CPU, constants.RES_MEM])
+            capacity.set_quantity(constants.RES_CPU, int(self.opt.cpu)) # --cpu is already a percentage (100 = one full CPU)
+            capacity.set_quantity(constants.RES_MEM, int(self.opt.mem))    
+            requested_resources = dict([(i+1, capacity) for i in range(self.opt.numnodes)])    
+            if self.opt.duration == haizea_request_lease.DURATION_UNLIMITED:
+                # This is an interim solution (make it run for a century).
+                # TODO: Integrate concept of unlimited duration in the lease datastruct
+                duration = DateTimeDelta(36500)
+            else:
+                duration = ISO.ParseTimeDelta(self.opt.duration)
+    
+            if self.opt.start == haizea_request_lease.START_NOW:
+                lease = Lease(id = None,
+                              submit_time = None,
+                              requested_resources = requested_resources, 
+                              start = Timestamp(Timestamp.NOW),
+                              duration = Duration(duration),
+                              deadline = None, 
+                              preemptible=preemptible,
+                              software = DiskImageSoftwareEnvironment(self.opt.vmimage, self.opt.vmimagesize),
+                              state = None
+                              )
+            elif self.opt.start == haizea_request_lease.START_BESTEFFORT:
+                lease = Lease(id = None,
+                              submit_time = None,
+                              requested_resources = requested_resources, 
+                              start = Timestamp(Timestamp.UNSPECIFIED),
+                              duration = Duration(duration),
+                              deadline = None, 
+                              preemptible=preemptible,
+                              software = DiskImageSoftwareEnvironment(self.opt.vmimage, self.opt.vmimagesize),
+                              state = None
+                              )
+            else:
+                start = self.__absolute_time(self.opt.start)
+                lease = Lease(id = None,
+                              submit_time = None,
+                              requested_resources = requested_resources, 
+                              start = Timestamp(start),
+                              duration = Duration(duration),
+                              deadline = None, 
+                              preemptible=preemptible,
+                              software = DiskImageSoftwareEnvironment(self.opt.vmimage, self.opt.vmimagesize),
+                              state = None
+                              )
+
+            lease_xml_str = ET.tostring(lease.to_xml())
+
         server = self.create_rpc_proxy(self.opt.server)
         
         try:
-            lease_id = server.create_lease(self.opt.start, self.opt.duration, preemptible, self.opt.numnodes, 
-                                self.opt.cpu, self.opt.mem, self.opt.vmimage, self.opt.vmimagesize)
+            lease_id = server.create_lease(lease_xml_str)
             print "Lease submitted correctly."
             print "Lease ID: %i" % lease_id
         except xmlrpclib.Fault, err:
@@ -106,6 +173,14 @@
             if self.opt.debug:
                 raise
 
+    def __absolute_time(self, time_str):
+        if time_str[0] == "+":
+            # Relative time
+            time = round_datetime(now() + ISO.ParseTime(time_str[1:]))
+        else:
+            time = ISO.ParseDateTime(time_str)
+            
+        return time
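
For illustration, a hypothetical lease description that could be passed with
the new -f/--file option; the element names mirror the ones used elsewhere in
this commit, but the authoritative schema is the one parsed by
haizea.core.leases, so treat this only as a sketch:

    lease_xml = """
    <lease preemptible="false">
      <nodes>
        <node-set numnodes="1">
          <res type="CPU" amount="100"/>
          <res type="Memory" amount="512"/>
        </node-set>
      </nodes>
      <start>
        <exact time="+00:15:00"/>
      </start>
      <duration time="01:00:00"/>
      <software>
        <disk-image id="foobar.img" size="1024"/>
      </software>
    </lease>
    """
    # A relative starting time ("+HH:MM:SS") is converted to an absolute
    # timestamp by __absolute_time() before the request is submitted.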
         
 class haizea_cancel_lease(RPCCommand):
     """
@@ -155,7 +230,7 @@
         server = self.create_rpc_proxy(self.opt.server)
         
         fields = [("id","ID", 3),
-                  ("type","Type", 4),
+                  ("type","Type", 12),
                   ("state","State", 9),
                   ("start_req", "Starting time", 22),
                   ("duration_req", "Duration", 12),
@@ -163,7 +238,18 @@
         
         try:
             leases = server.get_leases()
-            console_table_printer(fields, leases)
+            leases_fields = []
+            for lease_xml in leases:
+                lease = Lease.from_xml_string(lease_xml)
+                lease_fields = {}
+                lease_fields["id"] = lease.id
+                lease_fields["type"] = Lease.type_str[lease.get_type()]
+                lease_fields["state"] = Lease.state_str[lease.get_state()]
+                lease_fields["start_req"] = lease.start.requested
+                lease_fields["duration_req"] = lease.duration.requested
+                lease_fields["numnodes"] = len(lease.requested_resources)
+                leases_fields.append(lease_fields)
+            console_table_printer(fields, leases_fields)
         except xmlrpclib.Fault, err:
             print >> sys.stderr, "XMLRPC fault: %s" % err.faultString
             if self.opt.debug:
@@ -222,9 +308,9 @@
         server = self.create_rpc_proxy(self.opt.server)
         
         fields = [("id","ID", 3),
-                  ("type","Type", 4),
+                  ("type","Type", 12),
                   ("state","State", 9),
-                  ("start_sched", "Sched. Start time", 22),
+                  ("start_req", "Starting time", 22),
                   ("duration_req", "Duration", 12),
                   ("numnodes", "Nodes", 3)]
         
@@ -233,7 +319,18 @@
             if len(leases) == 0:
                 print "Queue is empty."
             else:
-                console_table_printer(fields, leases)
+                leases_fields = []
+                for lease_xml in leases:
+                    lease = Lease.from_xml_string(lease_xml)
+                    lease_fields = {}
+                    lease_fields["id"] = lease.id
+                    lease_fields["type"] = Lease.type_str[lease.get_type()]
+                    lease_fields["state"] = Lease.state_str[lease.get_state()]
+                    lease_fields["start_req"] = lease.start.requested
+                    lease_fields["duration_req"] = lease.duration.requested
+                    lease_fields["numnodes"] = len(lease.requested_resources)
+                    leases_fields.append(lease_fields)
+                console_table_printer(fields, leases_fields)                
         except xmlrpclib.Fault, err:
             print >> sys.stderr, "XMLRPC fault: %s" % err.faultString
             if self.opt.debug:
@@ -253,18 +350,6 @@
     print "\33[0m"
     for v in values:
         for (name,pname,width) in fields:
-            value = pretty_print_rpcvalue(name, v[name])
             width = max(len(name),width)
-            print " %s" % str(value).ljust(width),
+            print " %s" % str(v[name]).ljust(width),
         print
-    
-def pretty_print_rpcvalue(name, value):
-    if name == "state":
-        value = Lease.state_str[value]
-    elif name == "duration_req":
-        value = TimeDelta(seconds=value)
-    elif name == "start_req":
-        if value != None:
-            value = ISO.ParseDateTime(value.value)
-
-    return value

Modified: trunk/src/haizea/common/constants.py
===================================================================
--- trunk/src/haizea/common/constants.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/src/haizea/common/constants.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -17,17 +17,12 @@
 # -------------------------------------------------------------------------- #
 
 # Types of resources
-RES_CPU = 0
-RES_MEM = 1
-RES_NETIN = 2
-RES_NETOUT = 3
-RES_DISK = 4
+RES_CPU = "CPU"
+RES_MEM = "Memory"
+RES_NETIN = "Net-in"
+RES_NETOUT = "Net-out"
+RES_DISK = "Disk"
 
-# Types of types of resources
-RESTYPE_FLOAT = 0
-RESTYPE_INT = 1
-
-
 COMMON_SEC="common"
 MULTI_SEC="multi"
 BASEDATADIR_OPT="basedatadir"
@@ -47,9 +42,9 @@
 SUSPRES_EXCLUSION_LOCAL="local"
 SUSPRES_EXCLUSION_GLOBAL="global"
 
-MIGRATE_NONE="nothing"
-MIGRATE_MEM="mem"
-MIGRATE_MEMDISK="mem+disk"
+MIGRATE_NO="no"
+MIGRATE_YES="yes"
+MIGRATE_YES_NOTRANSFER="yes-notransfer"
 
 TRANSFER_UNICAST="unicast"
 TRANSFER_MULTICAST="multicast"
@@ -72,12 +67,6 @@
 CLOCK_SIMULATED = "simulated"
 CLOCK_REAL = "real"
 
-# Transfer required in deployment
-REQTRANSFER_NO = 0
-REQTRANSFER_YES = 1
-REQTRANSFER_COWPOOL = 2
-REQTRANSFER_PIGGYBACK = 3
-
 # Misc
 BETTER = -1
 EQUAL = 0
@@ -90,7 +79,7 @@
 
 
 
-ENACT_PACKAGE="haizea.resourcemanager.enact"
+ENACT_PACKAGE="haizea.core.enact"
 
 COUNTER_ARACCEPTED="Accepted AR"
 COUNTER_ARREJECTED="Rejected AR"

Modified: trunk/src/haizea/common/defaults.py
===================================================================
--- trunk/src/haizea/common/defaults.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/src/haizea/common/defaults.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -8,4 +8,6 @@
 
 RPC_SERVER = "localhost"
 RPC_PORT = 42493
-RPC_URI = "http://%s:%i" % (RPC_SERVER, RPC_PORT)
\ No newline at end of file
+RPC_URI = "http://%s:%i" % (RPC_SERVER, RPC_PORT)
+
+OPENNEBULA_RPC_PORT = 2633
\ No newline at end of file

Added: trunk/src/haizea/common/opennebula_xmlrpc.py
===================================================================
--- trunk/src/haizea/common/opennebula_xmlrpc.py	                        (rev 0)
+++ trunk/src/haizea/common/opennebula_xmlrpc.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,259 @@
+import xmlrpclib
+import os
+import hashlib
+import xml.etree.ElementTree as ET
+
+class OpenNebulaXMLRPCClient(object):
+    def __init__(self, host, port, user, password):
+        uri = "http://%s:%i" % (host, port)
+        self.rpc = xmlrpclib.ServerProxy(uri)
+        try:
+            methods = self.rpc.system.listMethods()
+        except xmlrpclib.Fault, err:
+            raise Exception("Cannot connect to ONE XML RPC server at %s" % uri)        
+        
+        if not set(["one.hostpool.info", 
+                    "one.host.info",
+                    "one.vmpool.info", 
+                    "one.vm.info"]).issubset(set(methods)):
+            raise Exception("XML RPC server does not support required methods. OpenNebula 1.4 or higher is required.")
+    
+        passhash = hashlib.sha1(password).hexdigest()
+        
+        self.auth = "%s:%s" % (user, passhash)
+        
+    @staticmethod
+    def get_userpass_from_env():
+        if not os.environ.has_key("ONE_AUTH"):
+            return None
+        else:
+            auth = os.environ["ONE_AUTH"]
+            user, passw = auth.split(":")
+            return user, passw
+        
+    def hostpool_info(self):
+        try:
+            (rc, value) = self.rpc.one.hostpool.info(self.auth)
+            if rc == False:
+                raise Exception("ONE reported an error: %s" % value)
+            else:
+                hosts = OpenNebulaHost.from_hostpool_xml(value)
+                return hosts
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)
+        
+    def host_info(self, hid):
+        try:
+            (rc, value) = self.rpc.one.host.info(self.auth, hid)
+            if rc == False:
+                raise Exception("ONE reported an error: %s" % value)
+            else:
+                host = OpenNebulaHost.from_host_xml(value)
+                return host
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)     
+        
+    def vmpool_info(self):
+        try:
+            (rc, value) = self.rpc.one.vmpool.info(self.auth, -2) # -2: Get all VMs
+            if rc == False:
+                raise Exception("ONE reported an error: %s" % value)
+            else:
+                hosts = OpenNebulaVM.from_vmpool_xml(value)
+                return hosts
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)
+        
+    def vm_info(self, id):
+        try:
+            (rc, value) = self.rpc.one.vm.info(self.auth, id)
+            if rc == False:
+                raise Exception("ONE reported an error: %s" % value)
+            else:
+                host = OpenNebulaVM.from_vm_xml(value)
+                return host
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)     
+        
+    def vm_deploy(self, vid, hid):
+        try:
+            rv = self.rpc.one.vm.deploy(self.auth, vid, hid)
+            if rv[0] == False:
+                raise Exception("ONE reported an error: %s" % rv[1])
+            else:
+                return
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)                    
+
+    def vm_action(self, action, vid):
+        if not action in ["shutdown", "hold", "release", "stop", 
+                          "cancel", "suspend", "resume", "restart", 
+                          "finalize" ]:
+            raise Exception("%s is not a valid action" % action)
+        try:
+            rv = self.rpc.one.vm.action(self.auth, action, vid)
+            if rv[0] == False:
+                raise Exception("ONE reported an error: %s" % rv[1])
+            else:
+                return
+        except xmlrpclib.Fault, err:
+            raise Exception("XMLRPC fault: %s" % err.faultString)  
+        
+    def vm_shutdown(self, vid):
+        return self.vm_action("shutdown", vid)                  
+
+    def vm_hold(self, vid):
+        return self.vm_action("hold", vid)                  
+
+    def vm_release(self, vid):
+        return self.vm_action("release", vid)                  
+
+    def vm_stop(self, vid):
+        return self.vm_action("stop", vid)                  
+
+    def vm_cancel(self, vid):
+        return self.vm_action("cancel", vid)                  
+
+    def vm_suspend(self, vid):
+        return self.vm_action("suspend", vid)                  
+
+    def vm_resume(self, vid):
+        return self.vm_action("resume", vid)                  
+
+    def vm_restart(self, vid):
+        return self.vm_action("restart", vid)                  
+
+    def vm_finalize(self, vid):
+        return self.vm_action("finalize", vid)                  
+
+    
+class OpenNebulaHost(object):
+
+    STATE_INIT       = 0
+    STATE_MONITORING = 1
+    STATE_MONITORED  = 2
+    STATE_ERROR      = 3
+    STATE_DISABLED   = 4
+
+    
+    def __init__(self, host_element):
+        self.id = int(host_element.find("ID").text)
+        self.name = host_element.find("NAME").text
+        self.state = int(host_element.find("STATE").text)
+        self.im_mad = host_element.find("IM_MAD").text
+        self.vm_mad = host_element.find("VM_MAD").text
+        self.tm_mad = host_element.find("TM_MAD").text
+        self.last_mon_time = int(host_element.find("LAST_MON_TIME").text)
+        
+        host_share_element = host_element.find("HOST_SHARE")
+
+        self.disk_usage = int(host_share_element.find("DISK_USAGE").text)
+        self.mem_usage = int(host_share_element.find("MEM_USAGE").text)
+        self.cpu_usage = int(host_share_element.find("CPU_USAGE").text)
+        self.max_disk = int(host_share_element.find("MAX_DISK").text)
+        self.max_mem = int(host_share_element.find("MAX_MEM").text)
+        self.max_cpu = int(host_share_element.find("MAX_CPU").text)
+        self.free_disk = int(host_share_element.find("FREE_DISK").text)
+        self.free_mem = int(host_share_element.find("FREE_MEM").text)
+        self.free_cpu = int(host_share_element.find("FREE_CPU").text)
+        self.used_disk = int(host_share_element.find("USED_DISK").text)
+        self.used_mem = int(host_share_element.find("USED_MEM").text)
+        self.used_cpu = int(host_share_element.find("USED_CPU").text)
+        self.running_vms = int(host_share_element.find("RUNNING_VMS").text)
+        
+        self.template = parse_template(host_element.find("TEMPLATE"))
+           
+
+    @classmethod
+    def from_host_xml(cls, xmlstr):
+        host_element = ET.fromstring(xmlstr)
+        return cls(host_element)
+    
+    @classmethod
+    def from_hostpool_xml(cls, xmlstr):
+        hostpool_element = ET.fromstring(xmlstr)
+        host_elements = hostpool_element.findall("HOST")
+        return [cls(host_element) for host_element in host_elements]
+    
+class OpenNebulaVM(object):
+
+    STATE_INIT      = 0
+    STATE_PENDING   = 1
+    STATE_HOLD      = 2
+    STATE_ACTIVE    = 3
+    STATE_STOPPED   = 4
+    STATE_SUSPENDED = 5
+    STATE_DONE      = 6
+    STATE_FAILED    = 7
+    
+    LCMSTATE_LCM_INIT       = 0
+    LCMSTATE_PROLOG         = 1
+    LCMSTATE_BOOT           = 2
+    LCMSTATE_RUNNING        = 3
+    LCMSTATE_MIGRATE        = 4
+    LCMSTATE_SAVE_STOP      = 5
+    LCMSTATE_SAVE_SUSPEND   = 6
+    LCMSTATE_SAVE_MIGRATE   = 7
+    LCMSTATE_PROLOG_MIGRATE = 8
+    LCMSTATE_PROLOG_RESUME  = 9
+    LCMSTATE_EPILOG_STOP    = 10
+    LCMSTATE_EPILOG         = 11
+    LCMSTATE_SHUTDOWN       = 12
+    LCMSTATE_CANCEL         = 13
+    LCMSTATE_FAILURE        = 14
+    LCMSTATE_DELETE         = 15
+    LCMSTATE_UNKNOWN        = 16
+    
+
+    def __init__(self, vm_element):
+        self.id = int(vm_element.find("ID").text)
+        self.uid = int(vm_element.find("UID").text)
+        username_element = vm_element.find("USERNAME")
+        if username_element == None:
+            self.username = None
+        else:
+            self.username = username_element.text   
+        self.name = vm_element.find("NAME").text
+        self.last_poll = int(vm_element.find("LAST_POLL").text)
+        self.state = int(vm_element.find("STATE").text)
+        self.lcm_state = int(vm_element.find("LCM_STATE").text)
+        self.stime = int(vm_element.find("STIME").text)
+        self.etime = int(vm_element.find("ETIME").text)
+        deploy_id = vm_element.find("DEPLOY_ID").text
+        if deploy_id == None:
+            self.deploy_id = None
+        else:
+            self.deploy_id = deploy_id
+        self.memory = int(vm_element.find("MEMORY").text)
+        self.cpu = int(vm_element.find("CPU").text)
+        self.net_tx = int(vm_element.find("NET_TX").text)
+        self.net_rx = int(vm_element.find("NET_RX").text)
+
+        self.template = parse_template(vm_element.find("TEMPLATE"))
+
+    
+    @classmethod
+    def from_vm_xml(cls, xmlstr):
+        vm_element = ET.fromstring(xmlstr)
+        return cls(vm_element)
+    
+    @classmethod
+    def from_vmpool_xml(cls, xmlstr):
+        vmpool_element = ET.fromstring(xmlstr)
+        vm_elements = vmpool_element.findall("VM")
+        return [cls(vm_element) for vm_element in vm_elements]
+    
+def parse_template(template_element):
+    template = {}
+    if template_element != None:
+        for subelement in template_element:
+            name = subelement.tag
+            if len(subelement) == 0:
+                template[name] = subelement.text
+            else:
+                template[name] = {}
+                for subsubelement in subelement:
+                    template[name][subsubelement.tag] = subsubelement.text
+                    
+    return template
+    
\ No newline at end of file
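
A minimal usage sketch of the client above, assuming an OpenNebula frontend
listening on localhost (on the default port added to defaults.py in this
commit) and credentials in the ONE_AUTH environment variable (user:password):

    from haizea.common.opennebula_xmlrpc import OpenNebulaXMLRPCClient
    import haizea.common.defaults as defaults

    user, passw = OpenNebulaXMLRPCClient.get_userpass_from_env()
    one = OpenNebulaXMLRPCClient("localhost", defaults.OPENNEBULA_RPC_PORT, user, passw)

    for host in one.hostpool_info():
        print host.id, host.name, host.max_cpu, host.max_mem

    for vm in one.vmpool_info():
        print vm.id, vm.name, vm.state, vm.lcm_state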

Modified: trunk/src/haizea/common/utils.py
===================================================================
--- trunk/src/haizea/common/utils.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/src/haizea/common/utils.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -76,6 +76,10 @@
     LEASE_ID += 1
     return l
 
+def reset_lease_id_counter():
+    global LEASE_ID
+    LEASE_ID = 1
+
 def pretty_nodemap(nodes):
     pnodes = list(set(nodes.values()))
     normmap = [([y[0] for y in nodes.items() if y[1]==x], x) for x in pnodes]
@@ -96,6 +100,14 @@
     else:
         return value
     
+def import_class(fq_name):
+    fq_name = fq_name.split(".")
+    package_name = ".".join(fq_name[:-1])
+    class_name = fq_name[-1]
+    module = __import__(package_name, fromlist=[class_name])
+    cls = getattr(module, class_name)
+    return cls
+    
 class Singleton(object):
      """ 
      A singleton base class. 
@@ -116,17 +128,21 @@
 
  
 def get_config():
-    from haizea.resourcemanager.rm import ResourceManager
-    return ResourceManager.get_singleton().config
+    from haizea.core.manager import Manager
+    return Manager.get_singleton().config
 
 def get_accounting():
-    from haizea.resourcemanager.rm import ResourceManager
-    return ResourceManager.get_singleton().accounting
+    from haizea.core.manager import Manager
+    return Manager.get_singleton().accounting
 
 def get_clock():
-    from haizea.resourcemanager.rm import ResourceManager
-    return ResourceManager.get_singleton().clock
+    from haizea.core.manager import Manager
+    return Manager.get_singleton().clock
 
+def get_policy():
+    from haizea.core.manager import Manager
+    return Manager.get_singleton().policy
+
 class InvalidStateMachineTransition(Exception):
     pass
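
A short usage sketch for the new import_class() helper; the class name below
is hypothetical (the actual policy classes live in the modules added under
haizea/policies/):

    from haizea.common.utils import import_class

    policy_class = import_class("haizea.policies.admission.AcceptAllPolicy")
    print policy_class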
 


Property changes on: trunk/src/haizea/core
___________________________________________________________________
Added: svn:mergeinfo
   + 

Added: trunk/src/haizea/core/__init__.py
===================================================================
--- trunk/src/haizea/core/__init__.py	                        (rev 0)
+++ trunk/src/haizea/core/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,18 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+

Added: trunk/src/haizea/core/accounting.py
===================================================================
--- trunk/src/haizea/core/accounting.py	                        (rev 0)
+++ trunk/src/haizea/core/accounting.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,186 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import os
+import os.path
+import haizea.common.constants as constants
+from haizea.core.leases import Lease
+from haizea.common.utils import pickle, get_config, get_clock
+from errno import EEXIST
+
+class AccountingData(object):
+    def __init__(self):
+        # Counters
+        self.counters = {}
+        self.counter_lists = {}
+        self.counter_avg_type = {}
+        
+        # Lease data
+        self.leases = {}
+        
+        # Attributes
+        self.attrs = {}
+        
+        self.starttime = None
+        
+    def get_waiting_times(self):
+        waiting_times = {}
+        for lease_id in self.leases:
+            lease = self.leases[lease_id]
+            if lease.get_type() == Lease.BEST_EFFORT:
+                waiting_times[lease_id] = lease.get_waiting_time()
+        return waiting_times
+
+    def get_slowdowns(self):
+        slowdowns = {}
+        for lease_id in self.leases:
+            lease = self.leases[lease_id]
+            if lease.get_type() == Lease.BEST_EFFORT:
+                slowdowns[lease_id] = lease.get_slowdown()
+        return slowdowns
+    
+    def get_besteffort_end(self):
+        return max([l.end for l in self.leases.values() if l.get_type() == Lease.BEST_EFFORT])
+
+class AccountingDataCollection(object):
+    def __init__(self, manager, datafile):
+        self.data = AccountingData()
+        self.manager = manager
+        self.datafile = datafile
+        
+        attrs = get_config().get_attrs()
+        for attr in attrs:
+            self.data.attrs[attr] = get_config().get_attr(attr)
+
+    def create_counter(self, counter_id, avgtype, initial=0):
+        self.data.counters[counter_id] = initial
+        self.data.counter_lists[counter_id] = []
+        self.data.counter_avg_type[counter_id] = avgtype
+
+    def incr_counter(self, counter_id, lease_id = None):
+        time = get_clock().get_time()
+        self.append_stat(counter_id, self.data.counters[counter_id] + 1, lease_id, time)
+
+    def decr_counter(self, counter_id, lease_id = None):
+        time = get_clock().get_time()
+        self.append_stat(counter_id, self.data.counters[counter_id] - 1, lease_id, time)
+        
+    def append_stat(self, counter_id, value, lease_id = None, time = None):
+        if time == None:
+            time = get_clock().get_time()
+        if len(self.data.counter_lists[counter_id]) > 0:
+            prevtime = self.data.counter_lists[counter_id][-1][0]
+        else:
+            prevtime = None
+        self.data.counters[counter_id] = value
+        if time == prevtime:
+            self.data.counter_lists[counter_id][-1][2] = value
+        else:
+            self.data.counter_lists[counter_id].append([time, lease_id, value])
+
+        
+    def start(self, time):
+        self.data.starttime = time
+        
+        # Start the counters
+        for counter_id in self.data.counters:
+            initial = self.data.counters[counter_id]
+            self.append_stat(counter_id, initial, time = time)
+
+        
+    def stop(self):
+        time = get_clock().get_time()
+
+        # Stop the counters
+        for counter_id in self.data.counters:
+            self.append_stat(counter_id, self.data.counters[counter_id], time=time)
+        
+        # Add the averages
+        for counter_id in self.data.counters:
+            l = self.normalize_times(self.data.counter_lists[counter_id])
+            avgtype = self.data.counter_avg_type[counter_id]
+            if avgtype == constants.AVERAGE_NONE:
+                self.data.counter_lists[counter_id] = self.add_no_average(l)
+            elif avgtype == constants.AVERAGE_NORMAL:
+                self.data.counter_lists[counter_id] = self.add_average(l)
+            elif avgtype == constants.AVERAGE_TIMEWEIGHTED:
+                self.data.counter_lists[counter_id] = self.add_timeweighted_average(l)
+            
+    def normalize_times(self, data):
+        return [((v[0] - self.data.starttime).seconds, v[1], v[2]) for v in data]
+        
+    def add_no_average(self, data):
+        return [(v[0], v[1], v[2], None) for v in data]
+    
+    def add_timeweighted_average(self, data):
+        accum = 0
+        prev_time = None
+        prev_value = None
+        stats = []
+        for v in data:
+            time = v[0]
+            lease_id = v[1]
+            value = v[2]
+            if prev_time != None:
+                timediff = time - prev_time
+                weighted_value = prev_value*timediff
+                accum += weighted_value
+                avg = accum/time
+            else:
+                avg = value
+            stats.append((time, lease_id, value, avg))
+            prev_time = time
+            prev_value = value
+        
+        return stats        
+    
+    def add_average(self, data):
+        accum = 0
+        count = 0
+        stats = []
+        for v in data:
+            value = v[2]
+            accum += value
+            count += 1
+            avg = accum/count
+            stats.append((v[0], v[1], value, avg))
+        
+        return stats          
+    
+    def save_to_disk(self):
+        try:
+            dirname = os.path.dirname(self.datafile)
+            if not os.path.exists(dirname):
+                os.makedirs(dirname)
+        except OSError, e:
+            if e.errno != EEXIST:
+                raise e
+    
+        # Add lease data
+        leases = self.manager.scheduler.completed_leases.entries
+        # Remove some data that won't be necessary in the reporting tools
+        for l in leases.values():
+            l.clear_rrs()
+            l.logger = None
+            self.data.leases[l.id] = l
+
+        # Save data
+        pickle(self.data, self.datafile)
+
+                
+            
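
As a small worked example of the time-weighted average computed by
add_timeweighted_average() above, with hypothetical counter samples given as
(time, lease_id, value) tuples whose times are already normalized to seconds
since the start (the lease_id is irrelevant to the average):

    #   data = [(0, None, 0), (10, None, 4), (30, None, 2)]
    #
    #   t=0  : no previous sample, so avg = value        = 0
    #   t=10 : accum = 0*10            = 0,  avg = 0/10  = 0
    #   t=30 : accum = 0 + 4*(30-10)   = 80, avg = 80/30 = 2 (integer division)
    #
    # Each value is weighted by how long it stayed in effect, and the running
    # average is the weighted sum divided by the elapsed time.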

Added: trunk/src/haizea/core/configfile.py
===================================================================
--- trunk/src/haizea/core/configfile.py	                        (rev 0)
+++ trunk/src/haizea/core/configfile.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,880 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.config import ConfigException, Section, Option, Config, OPTTYPE_INT, OPTTYPE_FLOAT, OPTTYPE_STRING, OPTTYPE_BOOLEAN, OPTTYPE_DATETIME, OPTTYPE_TIMEDELTA 
+from haizea.common.utils import generate_config_name
+import haizea.common.constants as constants
+import haizea.common.defaults as defaults
+import sys
+from mx.DateTime import TimeDelta
+import ConfigParser
+
+class HaizeaConfig(Config):
+
+    sections = []
+    
+    # ============================= #
+    #                               #
+    #        GENERAL OPTIONS        #
+    #                               #
+    # ============================= #
+
+    general = Section("general", required=True,
+                      doc = "This section is used for general options affecting Haizea as a whole.")
+    general.options = \
+    [
+     Option(name        = "loglevel",
+            getter      = "loglevel",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "INFO",
+            valid       = ["STATUS","INFO","DEBUG","VDEBUG"],
+            doc         = """
+            Controls the level (and amount) of 
+            log messages. Valid values are:
+            
+             - STATUS: Only print status messages
+             - INFO: Slightly more verbose than STATUS
+             - DEBUG: Prints information useful for debugging the scheduler.
+             - VDEBUG: Prints very verbose information
+               on the scheduler's internal data structures. Use only
+               for short runs.        
+            """),
+
+     Option(name        = "logfile",
+            getter      = "logfile",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "/var/tmp/haizea.log",
+            doc         = """
+            When running Haizea as a daemon, this option specifies the file
+            that log messages should be written to.        
+            """),
+     
+     Option(name        = "mode",
+            getter      = "mode",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            valid       = ["simulated","opennebula"],
+            doc         = """
+            Sets the mode the scheduler will run in.
+            Currently the only valid values are "simulated" and
+            "opennebula". The "simulated" mode expects lease
+            requests to be provided through a trace file, and
+            all enactment is simulated. The "opennebula" mode
+            interacts with the OpenNebula virtual infrastructure
+            manager (http://www.opennebula.org/) to obtain lease
+            requests and to do enactment on physical resources.                
+            """),
+
+     Option(name        = "lease-preparation",
+            getter      = "lease-preparation",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.PREPARATION_UNMANAGED,
+            valid       = [constants.PREPARATION_UNMANAGED,
+                           constants.PREPARATION_TRANSFER],
+            doc         = """
+            Sets how the scheduler will handle the
+            preparation overhead of leases. Valid values are:
+            
+             - unmanaged: The scheduler can assume that there
+               is no deployment overhead, or that some
+               other entity is taking care of it (e.g., one
+               of the enactment backends)
+             - imagetransfer: A disk image has to be transferred
+               from a repository node before the lease can start.
+            """),
+
+     Option(name        = "lease-failure-handling",
+            getter      = "lease-failure-handling",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.ONFAILURE_CANCEL,
+            valid       = [constants.ONFAILURE_CANCEL,
+                           constants.ONFAILURE_EXIT,
+                           constants.ONFAILURE_EXIT_RAISE],
+            doc         = """
+            Sets how the scheduler will handle a failure in
+            a lease. Valid values are:
+            
+             - cancel: The lease is cancelled and marked as "FAILED"
+             - exit: Haizea will exit cleanly, printing relevant debugging
+               information to its log.
+             - exit-raise: Haizea will exit by raising an exception. This is
+               useful for debugging, as IDEs will recognize this as an exception
+               and will facilitate debugging it.
+            """),
+
+     Option(name        = "datafile",
+            getter      = "datafile",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = None,
+            doc         = """
+            This is the file where statistics on
+            the scheduler's run will be saved to (waiting time of leases,
+            utilization data, etc.). If omitted, no data will be saved.
+            """),
+
+     Option(name        = "attributes",
+            getter      = "attributes",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            doc         = """
+            This option is used internally by Haizea when using
+            multiconfiguration files. See the multiconfiguration
+            documentation for more details.        
+            """)
+    ]
+
+    sections.append(general)
+
+    # ============================= #
+    #                               #
+    #      SCHEDULING OPTIONS       #
+    #                               #
+    # ============================= #
+
+    scheduling = Section("scheduling", required=True,
+                         doc = "The options in this section control how Haizea schedules leases.")
+    scheduling.options = \
+    [
+     Option(name        = "mapper",
+            getter      = "mapper",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "greedy",
+            doc         = """
+            VM-to-physical node mapping algorithm used by Haizea. There is currently
+            only one mapper available (the greedy mapper).
+            """),
+
+     Option(name        = "policy-admission",
+            getter      = "policy.admission",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "accept-all",
+            doc         = """
+            Lease admission policy. This controls what leases are accepted by Haizea. 
+            Take into account that this decision takes place before Haizea even 
+            attempts to schedule the lease (so, you can think of lease admission as 
+            "eligibility to be scheduled"). 
+            
+            There are two built-in policies:
+            
+             - accept-all: Accept all leases.
+             - no-ARs: Accept all leases except advance reservations.
+             
+            See the Haizea documentation for details on how to write your own
+            policies.
+            """),
+
+     Option(name        = "policy-preemption",
+            getter      = "policy.preemption",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "no-preemption",
+            doc         = """
+            Lease preemption policy. Determines what leases can be preempted. There
+            are two built-in policies:
+            
+             - no-preemption: Do not allow any preemptions
+             - ar-preempts-everything: Allow all ARs to preempt other leases.
+            
+            See the Haizea documentation for details on how to write your own
+            policies.
+            """),
+            
+     Option(name        = "policy-host-selection",
+            getter      = "policy.host-selection",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = "greedy",
+            doc         = """
+            Physical host selection policy. Controls how Haizea chooses which physical hosts 
+            to map VMs to. This option is closely related to the mapper options 
+            (if the greedy mapper is used, then the greedy host selection policy
+            should be used, or unexpected results will happen). 
+            
+            The two built-in policies are:
+             - no-policy: Choose nodes arbitrarily
+             - greedy: Apply a greedy policy that tries to minimize the number
+               of preemptions.
+            
+            See the Haizea documentation for details on how to write your own
+            policies.
+            """),
+                        
+     Option(name        = "wakeup-interval",
+            getter      = "wakeup-interval",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            default     = TimeDelta(seconds=60),
+            doc         = """
+            Interval at which Haizea will wake up
+            to manage resources and process pending requests.
+            This option is not used when using a simulated clock,
+            since the clock will skip directly to the time where an
+            event is happening.
+            """),
+
+     Option(name        = "backfilling",
+            getter      = "backfilling",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = None,
+            valid       = [constants.BACKFILLING_OFF,
+                           constants.BACKFILLING_AGGRESSIVE,
+                           constants.BACKFILLING_CONSERVATIVE,
+                           constants.BACKFILLING_INTERMEDIATE],
+            doc         = """
+            Backfilling algorithm to use. Valid values are:
+            
+             - off: don't do backfilling
+             - aggressive: at most 1 reservation in the future
+             - conservative: unlimited reservations in the future
+             - intermediate: N reservations in the future (N is specified
+               in the backfilling-reservations option)
+            """),
+
+     Option(name        = "backfilling-reservations",
+            getter      = "backfilling-reservations",
+            type        = OPTTYPE_INT,
+            required    = False,
+            required_if = [(("scheduling","backfilling"),constants.BACKFILLING_INTERMEDIATE)],
+            doc         = """
+            Number of future reservations to allow when
+            using the "intermediate" backfilling option.
+            """),
+
+     Option(name        = "suspension",
+            getter      = "suspension",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            valid       = [constants.SUSPENSION_NONE,
+                           constants.SUSPENSION_SERIAL,
+                           constants.SUSPENSION_ALL],
+            doc         = """
+            Specifies what can be suspended. Valid values are:
+            
+             - none: suspension is never allowed
+             - serial-only: only 1-node leases can be suspended
+             - all: any lease can be suspended                
+            """),
+
+     Option(name        = "suspend-rate",
+            getter      = "suspend-rate",
+            type        = OPTTYPE_FLOAT,
+            required    = True,
+            doc         = """
+            Rate at which VMs are assumed to suspend (in MB of
+            memory per second)                
+            """),
+
+     Option(name        = "resume-rate",
+            getter      = "resume-rate",
+            type        = OPTTYPE_FLOAT,
+            required    = True,
+            doc         = """
+            Rate at which VMs are assumed to resume (in MB of
+            memory per second)                
+            """),
+
+     Option(name        = "suspendresume-exclusion",
+            getter      = "suspendresume-exclusion",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.SUSPRES_EXCLUSION_LOCAL,
+            valid       = [constants.SUSPRES_EXCLUSION_LOCAL,
+                           constants.SUSPRES_EXCLUSION_GLOBAL],
+            doc         = """
+            When suspending or resuming a VM, the VM's memory is dumped to a
+            file on disk. To correctly estimate the time required to suspend
+            a lease with multiple VMs, Haizea makes sure that no two 
+            suspensions/resumptions happen at the same time (e.g., if eight
+            memory files were being saved at the same time to disk, the disk's
+            performance would be reduced in a way that is not as easy to estimate
+            as if only one file were being saved at a time).
+            
+            Depending on whether the files are being saved to/read from a global
+            or local filesystem, this exclusion can be either global or local.                        
+            """),
+
+     Option(name        = "scheduling-threshold-factor",
+            getter      = "scheduling-threshold-factor",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = 1,
+            doc         = """
+            To avoid thrashing, Haizea will not schedule a lease unless all overheads
+            can be correctly scheduled (which includes image transfers, suspensions, etc.).
+            However, this can still result in situations where a lease is prepared,
+            and then immediately suspended because of a blocking lease in the future.
+            The scheduling threshold factor can be used to specify that a lease must
+            not be scheduled unless it is guaranteed to run for a minimum amount of
+            time (the rationale behind this is that you ideally don't want leases
+            to be scheduled if they're not going to be active for at least as much time
+            as was spent in overheads).
+            
+            The default value is 1, meaning that the lease will be active for at least
+            as much time T as was spent on overheads (e.g., if preparing the lease requires
+            60 seconds, and we know that it will have to be suspended, requiring 30 seconds,
+            Haizea won't schedule the lease unless it can run for at least 90 seconds).
+            In other words, a scheduling factor of F requires a minimum duration of 
+            F*T. A value of 0 could lead to thrashing, since Haizea could end up with
+            situations where a lease starts and immediately gets suspended.               
+            """),
+
+     Option(name        = "override-suspend-time",
+            getter      = "override-suspend-time",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = None,
+            doc         = """
+            Overrides the time it takes to suspend a VM to a fixed value
+            (i.e., not computed based on amount of memory, enactment overhead, etc.)
+            """),
+
+     Option(name        = "override-resume-time",
+            getter      = "override-resume-time",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = None,
+            doc         = """
+            Overrides the time it takes to resume a VM to a fixed value
+            (i.e., not computed based on amount of memory, enactment overhead, etc.)
+            """),
+
+     Option(name        = "force-scheduling-threshold",
+            getter      = "force-scheduling-threshold",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            doc         = """
+            This option can be used to force a specific scheduling threshold time
+            to be used, instead of calculating one based on overheads.                
+            """),
+
+     Option(name        = "migration",
+            getter      = "migration",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.MIGRATE_NO,          
+            valid       = [constants.MIGRATE_NO,
+                           constants.MIGRATE_YES,
+                           constants.MIGRATE_YES_NOTRANSFER],              
+            doc         = """
+            Specifies whether leases can be migrated from one
+            physical node to another. Valid values are: 
+            
+             - no
+             - yes
+             - yes-notransfer: migration is performed without
+               transferring any files. 
+            """),
+
+     Option(name        = "non-schedulable-interval",
+            getter      = "non-schedulable-interval",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            default     = TimeDelta(seconds=10),
+            doc         = """
+            The minimum amount of time that must pass between
+            when a request is scheduled to when it can actually start.
+            The default should be good for most configurations, but
+            may need to be increased if you're dealing with exceptionally
+            high loads.                
+            """),
+
+     Option(name        = "shutdown-time",
+            getter      = "shutdown-time",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            default     = TimeDelta(seconds=0),
+            doc         = """
+            The amount of time that will be allocated for a VM to shutdown.
+            When running in OpenNebula mode, it is advisable to set this to
+            a few seconds, so no operation gets scheduled right when a
+            VM is shutting down. The most common scenario is that a VM
+            will start resuming right when another VM shuts down. However,
+            since both these activities involve I/O, it can delay the resume
+            operation and affect Haizea's estimation of how long the resume
+            will take.
+            """),
+
+     Option(name        = "enactment-overhead",
+            getter      = "enactment-overhead",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            default     = TimeDelta(seconds=0),
+            doc         = """
+            The amount of time that is required to send
+            an enactment command. This value will affect suspend/resume
+            estimations and, in OpenNebula mode, will force a pause
+            of this much time between suspend/resume enactment
+            commands. When suspending/resuming many VMs at the same time
+            (which is likely to happen if suspendresume-exclusion is set
+            to "local"), it will take OpenNebula 1-2 seconds to process
+            each command (this is a small amount of time, but if 32 VMs
+            are being suspended at the same time, on in each physical node,
+            this time can compound up to 32-64 seconds, which has to be
+            taken into account when estimating when to start a suspend
+            operation that must be completed before another lease starts).
+            """)
+
+    ]
+    sections.append(scheduling)
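+    # A minimal, illustrative example of how some of the options above might
+    # appear in a configuration file (values are examples only; time values
+    # are assumed to be given in seconds):
+    #
+    #   [scheduling]
+    #   migration: no
+    #   shutdown-time: 5
+    #   enactment-overhead: 1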
+    
+    # ============================= #
+    #                               #
+    #      SIMULATION OPTIONS       #
+    #                               #
+    # ============================= #
+    
+    simulation = Section("simulation", required=False,
+                         required_if = [(("general","mode"),"simulated")],
+                         doc = "This section is used to specify options when Haizea runs in simulation" )
+    simulation.options = \
+    [
+     Option(name        = "clock",
+            getter      = "clock",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.CLOCK_REAL,
+            valid       = [constants.CLOCK_REAL,
+                           constants.CLOCK_SIMULATED],
+            doc         = """
+            Type of clock to use in simulation:
+            
+             - simulated: A simulated clock that fast-forwards through
+               time. Can only be used with the tracefile request
+               frontend.
+             - real: A real clock is used, but with simulated resources
+               and enactment actions. Can only be used with the RPC
+               request frontend.                
+            """),
+
+     Option(name        = "starttime",
+            getter      = "starttime",
+            type        = OPTTYPE_DATETIME,
+            required    = False,
+            required_if = [(("simulation","clock"),constants.CLOCK_SIMULATED)],
+            doc         = """
+            Time at which the simulated clock will start.                
+            """),             
+
+     Option(name        = "resources",
+            getter      = "simul.resources",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            doc         = """
+            Simulated resources. This option can take one of two forms:
+            the string "in-tracefile" (which means that the description of
+            the simulated site is in the tracefile), or a string 
+            specifying a site with homogeneous resources. 
+            The format of the latter is:
+        
+            <numnodes> [ <resource_type>:<resource_quantity>]+
+        
+            For example, "4  CPU:100 Memory:1024" describes a site
+            with four nodes, each with one CPU and 1024 MB of memory.
+            """),
+
+     Option(name        = "imagetransfer-bandwidth",
+            getter      = "imagetransfer-bandwidth",
+            type        = OPTTYPE_INT,
+            required    = True,
+            doc         = """
+            Bandwidth (in Mbps) available for image transfers.
+            This would correspond to the outbound network bandwidth of the
+            node where the images are stored.                
+            """),
+
+     Option(name        = "stop-when",
+            getter      = "stop-when",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.STOPWHEN_ALLDONE,
+            valid       = [constants.STOPWHEN_ALLDONE,
+                           constants.STOPWHEN_BESUBMITTED,
+                           constants.STOPWHEN_BEDONE],
+            doc         = """
+            When using the simulated clock, this specifies when the
+            simulation must end. Valid options are:
+            
+             - all-leases-done: All requested leases have been completed
+               and there are no queued/pending requests.
+             - besteffort-submitted: When all best-effort leases have been
+               submitted.
+             - besteffort-done: When all best-effort leases have been
+               completed.                
+            """),
+
+     Option(name        = "status-message-interval",
+            getter      = "status-message-interval",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = None,
+            doc         = """
+            If specified, the simulated clock will periodically print a
+            status message with some basic statistics. This is useful for
+            keeping track of long simulations. The interval is specified
+            in minutes.
+            """)
+
+    ]
+    sections.append(simulation)
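+    # Illustrative example of a [simulation] section (values are examples
+    # only; the resources string follows the format documented above):
+    #
+    #   [simulation]
+    #   clock: simulated
+    #   starttime: 2009-08-05 11:00:00
+    #   resources: 4  CPU:100 Memory:1024
+    #   imagetransfer-bandwidth: 100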
+    
+
+    # ============================= #
+    #                               #
+    #      DEPLOYMENT OPTIONS       #
+    #     (w/ image transfers)      #
+    #                               #
+    # ============================= #
+
+    imgtransfer = Section("deploy-imagetransfer", required=False,
+                         required_if = [(("general","lease-deployment"),"imagetransfer")],
+                         doc = """
+                         When lease deployment with disk image transfers is selected,
+                         this section is used to control image deployment parameters.""")
+    imgtransfer.options = \
+    [
+     Option(name        = "transfer-mechanism",
+            getter      = "transfer-mechanism",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            valid       = [constants.TRANSFER_UNICAST,
+                           constants.TRANSFER_MULTICAST],
+            doc         = """
+            Specifies how disk images are transferred. Valid values are:
+            
+             - unicast: A disk image can be transferred to just one node at a time
+             - multicast: A disk image can be multicast to multiple nodes at 
+               the same time.                
+            """),
+
+     Option(name        = "avoid-redundant-transfers",
+            getter      = "avoid-redundant-transfers",
+            type        = OPTTYPE_BOOLEAN,
+            required    = False,
+            default     = True,
+            doc         = """
+            Specifies whether the scheduler should take steps to
+            detect and avoid redundant transfers (e.g., if two leases are
+            scheduled on the same node, and they both require the same disk
+            image, don't transfer the image twice; allow one to "piggyback"
+            on the other). There is generally no reason to set this option
+            to False.
+            """),
+
+     Option(name        = "force-imagetransfer-time",
+            getter      = "force-imagetransfer-time",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            doc         = """
+            Forces the image transfer time to a specific amount.
+            This option is intended for testing purposes.                
+            """),
+            
+     Option(name        = "diskimage-reuse",
+            getter      = "diskimage-reuse",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            required_if = None,
+            default     = constants.REUSE_NONE,
+            valid       = [constants.REUSE_NONE,
+                           constants.REUSE_IMAGECACHES],
+            doc         = """
+            Specifies whether disk image caches should be created
+            on the nodes, so the scheduler can reduce the number of transfers
+            by reusing images. Valid values are:
+            
+             - none: No image reuse
+             - image-caches: Use the image caching algorithm described in
+               Haizea publications.
+            """),
+
+     Option(name        = "diskimage-cache-size",
+            getter      = "diskimage-cache-size",
+            type        = OPTTYPE_INT,
+            required    = False,
+            required_if = [(("deploy-imagetransfer","diskimage-reuse"),True)],
+            doc         = """
+            Specifies the size (in MB) of the disk image cache on
+            each physical node.                
+            """)
+    ]
+    sections.append(imgtransfer)
+
+    # ============================= #
+    #                               #
+    #      TRACEFILE OPTIONS        #
+    #                               #
+    # ============================= #
+
+    tracefile = Section("tracefile", required=False, 
+                        doc="""
+                        When reading in requests from a tracefile, this section is used
+                        to specify the tracefile and other parameters.""")
+    tracefile.options = \
+    [
+     Option(name        = "tracefile",
+            getter      = "tracefile",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            doc         = """
+            Path to tracefile to use.                
+            """),
+
+     Option(name        = "imagefile",
+            getter      = "imagefile",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            doc         = """
+            Path to list of images to append to lease requests.
+            If omitted, the images in the tracefile are used.                
+            """),
+
+     Option(name        = "injectionfile",
+            getter      = "injectionfile",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            doc         = """
+            Path to file with leases to "inject" into the tracefile.                
+            """),      
+               
+     Option(name        = "runtime-slowdown-overhead",
+            getter      = "runtime-slowdown-overhead",
+            type        = OPTTYPE_FLOAT,
+            required    = False,
+            default     = 0,
+            doc         = """
+            Adds a runtime overhead (in %) to the lease duration.                
+            """),
+
+     Option(name        = "add-overhead",
+            getter      = "add-overhead",
+            type        = OPTTYPE_STRING,
+            required    = False,
+            default     = constants.RUNTIMEOVERHEAD_NONE,
+            valid       = [constants.RUNTIMEOVERHEAD_NONE,
+                           constants.RUNTIMEOVERHEAD_ALL,
+                           constants.RUNTIMEOVERHEAD_BE],
+            doc         = """
+            Specifies what leases will have a runtime overhead added:
+            
+             - none: No runtime overhead will be added.
+             - besteffort: Add runtime overhead only to best-effort leases.
+             - all: Add runtime overhead to all leases.                
+            """),
+
+     Option(name        = "bootshutdown-overhead",
+            getter      = "bootshutdown-overhead",
+            type        = OPTTYPE_TIMEDELTA,
+            required    = False,
+            default     = TimeDelta(seconds=0),
+            doc         = """
+            Specifies how many seconds will be allotted to
+            booting and shutting down the lease.                
+            """),
+                  
+     Option(name        = "override-memory",
+            getter      = "override-memory",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = constants.NO_MEMORY_OVERRIDE,
+            doc         = """
+            Overrides memory requirements specified in tracefile.
+            """),
+    ]
+    sections.append(tracefile)
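+    # Illustrative example of a [tracefile] section (the path is a
+    # placeholder; point it at an actual LWF/XML workload file):
+    #
+    #   [tracefile]
+    #   tracefile: /path/to/workload.lwf
+    #   add-overhead: none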
+    
+    # ============================= #
+    #                               #
+    #      OPENNEBULA OPTIONS       #
+    #                               #
+    # ============================= #
+
+    opennebula = Section("opennebula", required=False,
+                         required_if = [(("general","mode"),"opennebula")],
+                         doc = """
+                         This section is used to specify OpenNebula parameters,
+                         necessary when using Haizea as an OpenNebula scheduling backend.""")
+    opennebula.options = \
+    [
+     Option(name        = "host",
+            getter      = "one.host",
+            type        = OPTTYPE_STRING,
+            required    = True,
+            doc         = """
+            Host where OpenNebula is running.
+            Typically, OpenNebula and Haizea will be installed
+            on the same host, in which case this option should be
+            set to 'localhost'. If they are on different hosts,
+            set this option to OpenNebula's hostname instead.             
+            """),
+
+     Option(name        = "port",
+            getter      = "one.port",
+            type        = OPTTYPE_INT,
+            required    = False,
+            default     = defaults.OPENNEBULA_RPC_PORT,
+            doc         = """
+            TCP port of OpenNebula's XML-RPC server.             
+            """),
+            
+     Option(name        = "stop-when-no-more-leases",
+            getter      = "stop-when-no-more-leases",
+            type        = OPTTYPE_BOOLEAN,
+            required    = False,
+            default     = False,
+            doc         = """
+            This option is useful for testing and running experiments.
+            If set to True, Haizea will stop when there are no more leases
+            to process (which allows you to run Haizea and OpenNebula unattended,
+            and count on Haizea stopping once all the leases have been handled).
+            For now, this only makes sense if you're seeding Haizea with requests from
+            the start (otherwise, it will start and immediately stop).
+            """),            
+
+     Option(name        = "dry-run",
+            getter      = "dry-run",
+            type        = OPTTYPE_BOOLEAN,
+            required    = False,
+            default     = False,
+            doc         = """
+            This option is useful for testing.
+            If set to True, Haizea will fast-forward through time (note that this is
+            different than using the simulated clock, which has to be used with a tracefile;
+            with a Haizea/OpenNebula dry run, you will have to seed OpenNebula with requests
+            before starting Haizea). You will generally want to set stop-when-no-more-leases
+            when doing a dry run.
+            
+            IMPORTANT: Haizea will still send out enactment commands to OpenNebula. Make
+            sure you replace onevm with a dummy command that does nothing (or that reacts
+            in some way you want to test; e.g., by emulating a deployment failure, etc.)
+            """),            
+
+    ]
+    sections.append(opennebula)
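+    # Illustrative example of an [opennebula] section (2633 is the usual
+    # OpenNebula XML-RPC port; check your OpenNebula installation):
+    #
+    #   [opennebula]
+    #   host: localhost
+    #   port: 2633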
+    
+    def __init__(self, config):
+        Config.__init__(self, config, self.sections)
+        
+        self.attrs = {}
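+        # The "attributes" option is a comma-separated list of key=value
+        # pairs (for example, "profile=base,tracefile=trace1.lwf"); parse
+        # it into a dictionary.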
+        if self._options["attributes"] != None:
+            self.attrs = {}
+            attrs = self._options["attributes"].split(",")
+            for attr in attrs:
+                (k,v) = attr.split("=")
+                self.attrs[k] = v
+        
+    def get_attr(self, attr):
+        return self.attrs[attr]
+        
+    def get_attrs(self):
+        return self.attrs.keys()
+
+
+class HaizeaMultiConfig(Config):
+    
+    MULTI_SEC = "multi"
+    COMMON_SEC = "common"
+    TRACEDIR_OPT = "tracedir"
+    TRACEFILES_OPT = "tracefiles"
+    INJDIR_OPT = "injectiondir"
+    INJFILES_OPT = "injectionfiles"
+    DATADIR_OPT = "datadir"
+    
+    def __init__(self, config):
+        # TODO: Define "multi" section as a Section object
+        Config.__init__(self, config, [])
+        
+    def get_profiles(self):
+        sections = set([s.split(":")[0] for s in self.config.sections()])
+        # Remove multi and common sections
+        sections.difference_update([self.COMMON_SEC, self.MULTI_SEC])
+        return list(sections)
+
+    def get_trace_files(self):
+        dir = self.config.get(self.MULTI_SEC, self.TRACEDIR_OPT)
+        traces = self.config.get(self.MULTI_SEC, self.TRACEFILES_OPT).split()
+        return [dir + "/" + t for t in traces]
+
+    def get_inject_files(self):
+        dir = self.config.get(self.MULTI_SEC, self.INJDIR_OPT)
+        inj = self.config.get(self.MULTI_SEC, self.INJFILES_OPT).split()
+        inj = [dir + "/" + i for i in inj]
+        inj.append(None)
+        return inj
+    
+    def get_configs(self):
+        profiles = self.get_profiles()
+        tracefiles = self.get_trace_files()
+        injectfiles = self.get_inject_files()
+
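+        # One configuration is generated per (profile, tracefile,
+        # injectionfile) combination; get_inject_files() appends None, so
+        # each profile/tracefile pair is also run without an injection file.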
+        configs = []
+        for profile in profiles:
+            for tracefile in tracefiles:
+                for injectfile in injectfiles:
+                    profileconfig = ConfigParser.ConfigParser()
+                    commonsections = [s for s in self.config.sections() if s.startswith("common:")]
+                    profilesections = [s for s in self.config.sections() if s.startswith(profile +":")]
+                    sections = commonsections + profilesections
+                    for s in sections:
+                        s_noprefix = s.split(":")[1]
+                        items = self.config.items(s)
+                        if not profileconfig.has_section(s_noprefix):
+                            profileconfig.add_section(s_noprefix)
+                        for item in items:
+                            profileconfig.set(s_noprefix, item[0], item[1])
+                            
+                    # The tracefile section may have not been created
+                    if not profileconfig.has_section("tracefile"):
+                        profileconfig.add_section("tracefile")
+
+                    # Add tracefile option
+                    profileconfig.set("tracefile", "tracefile", tracefile)
+                    
+                    # Add injected file option
+                    if injectfile != None:
+                        profileconfig.set("tracefile", "injectionfile", injectfile)
+
+                    # Add datafile option
+                    datadir = self.config.get(self.MULTI_SEC, self.DATADIR_OPT)
+                    datafilename = generate_config_name(profile, tracefile, injectfile)
+                    datafile = datadir + "/" + datafilename + ".dat"
+                    profileconfig.set("general", "datafile", datafile)
+                    
+                    # Set "attributes" option (only used internally)
+                    attrs = {"profile":profile,"tracefile":tracefile,"injectfile":injectfile}
+                    # TODO: Load additional attributes from trace/injfiles
+                    attrs_str = ",".join(["%s=%s" % (k,v) for (k,v) in attrs.items()])
+                    if profileconfig.has_option("general", "attributes"):
+                        attrs_str += ",%s" % profileconfig.get("general", "attributes")
+                    profileconfig.set("general", "attributes", attrs_str)
+                    
+                    try:
+                        c = HaizeaConfig(profileconfig)
+                    except ConfigException, msg:
+                        print >> sys.stderr, "Error in configuration file:"
+                        print >> sys.stderr, msg
+                        exit(1)
+                    configs.append(c)
+        
+        return configs
\ No newline at end of file

Added: trunk/src/haizea/core/enact/__init__.py
===================================================================
--- trunk/src/haizea/core/enact/__init__.py	                        (rev 0)
+++ trunk/src/haizea/core/enact/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,46 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.utils import abstract
+
+class ResourcePoolInfo(object):
+    def __init__(self):
+        pass
+
+    def get_nodes(self): 
+        """ Returns the nodes in the resource pool. """
+        abstract()
+        
+    def get_resource_types(self):
+        abstract()
+        
+class VMEnactment(object):
+    def __init__(self):
+        pass
+        
+    def start(self, vms): abstract()
+    
+    def stop(self, vms): abstract()
+    
+    def suspend(self, vms): abstract()
+    
+    def resume(self, vms): abstract()
+    
+class DeploymentEnactment(object):
+    def __init__(self):
+        pass

Added: trunk/src/haizea/core/enact/actions.py
===================================================================
--- trunk/src/haizea/core/enact/actions.py	                        (rev 0)
+++ trunk/src/haizea/core/enact/actions.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+class EnactmentAction(object):
+    def __init__(self):
+        self.lease_haizea_id = None
+        self.lease_enactment_info = None
+            
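+    # Populates the action with lease-level information taken from a
+    # resource reservation (rr): the lease's Haizea ID and its
+    # enactment information.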
+    def from_rr(self, rr):
+        self.lease_haizea_id = rr.lease.id
+        self.lease_enactment_info = rr.lease.enactment_info
+        
+class VNode(object):
+    def __init__(self, enactment_info):
+        self.enactment_info = enactment_info
+        self.pnode = None
+        self.resources = None
+        self.diskimage = None
+        
+class VMEnactmentAction(EnactmentAction):
+    def __init__(self):
+        EnactmentAction.__init__(self)
+        self.vnodes = {}
+    
+    def from_rr(self, rr):
+        EnactmentAction.from_rr(self, rr)
+        self.vnodes = dict([(vnode, VNode(info)) for (vnode, info) in rr.lease.vnode_enactment_info.items()])
+
+class VMEnactmentStartAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+class VMEnactmentStopAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+class VMEnactmentSuspendAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+    def from_rr(self, rr):
+        VMEnactmentAction.from_rr(self, rr)
+        self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])
+
+class VMEnactmentResumeAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+    def from_rr(self, rr):
+        VMEnactmentAction.from_rr(self, rr)
+        self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])
+
+class VMEnactmentConfirmSuspendAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+    def from_rr(self, rr):
+        VMEnactmentAction.from_rr(self, rr)
+        self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])
+
+class VMEnactmentConfirmResumeAction(VMEnactmentAction):
+    def __init__(self):
+        VMEnactmentAction.__init__(self)
+
+    def from_rr(self, rr):
+        VMEnactmentAction.from_rr(self, rr)
+        self.vnodes = dict([(k, v) for (k,v) in self.vnodes.items() if k in rr.vnodes])

Added: trunk/src/haizea/core/enact/opennebula.py
===================================================================
--- trunk/src/haizea/core/enact/opennebula.py	                        (rev 0)
+++ trunk/src/haizea/core/enact/opennebula.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,233 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.core.scheduler import EnactmentError
+from haizea.core.leases import Capacity
+from haizea.core.scheduler.resourcepool import ResourcePoolNode
+from haizea.core.scheduler.slottable import ResourceTuple
+from haizea.core.enact import ResourcePoolInfo, VMEnactment, DeploymentEnactment
+from haizea.common.utils import get_config
+from haizea.common.opennebula_xmlrpc import OpenNebulaXMLRPCClient, OpenNebulaVM, OpenNebulaHost
+import haizea.common.constants as constants
+import logging
+from time import sleep
+
+one_rpc = None
+
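+# Lazily creates (on first use) and caches a single OpenNebula XML-RPC
+# client, so all enactment classes in this module share one connection.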
+def get_one_xmlrpcclient():
+    global one_rpc
+    if one_rpc == None:
+        host = get_config().get("one.host")
+        port = get_config().get("one.port")
+        user, passw = OpenNebulaXMLRPCClient.get_userpass_from_env()
+        one_rpc = OpenNebulaXMLRPCClient(host, port, user, passw)
+    return one_rpc
+
+class OpenNebulaEnactmentError(EnactmentError):
+    def __init__(self, method, msg):
+        self.method = method
+        self.msg = msg
+        self.message = "Error when invoking '%s': %s" % (method, msg)
+
+class OpenNebulaResourcePoolInfo(ResourcePoolInfo):
+    
+    def __init__(self):
+        ResourcePoolInfo.__init__(self)
+        self.logger = logging.getLogger("ENACT.ONE.INFO")
+
+        self.rpc = get_one_xmlrpcclient()
+
+        # Get information about nodes from OpenNebula
+        self.nodes = {}
+        hosts = self.rpc.hostpool_info()
+        for (i, host) in enumerate(hosts):
+            if not host.state in (OpenNebulaHost.STATE_ERROR, OpenNebulaHost.STATE_DISABLED):
+                nod_id = i+1
+                enact_id = host.id
+                hostname = host.name
+                capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
+                
+                # CPU
+                # OpenNebula reports each CPU as "100"
+                # (so, a 4-core machine is reported as "400")
+                # We need to convert this to a multi-instance
+                # resource type in Haizea
+                cpu = host.max_cpu
+                ncpu = cpu / 100
+                capacity.set_ninstances(constants.RES_CPU, ncpu)
+                for i in range(ncpu):
+                    capacity.set_quantity_instance(constants.RES_CPU, i+1, 100)            
+                
+                # Memory. Must divide by 1024 to obtain quantity in MB
+                capacity.set_quantity(constants.RES_MEM, host.max_mem / 1024.0)
+                
+                # Disk
+                # OpenNebula doesn't report this correctly yet.
+                # We set it to an arbitrarily high value.
+                capacity.set_quantity(constants.RES_DISK, 80000)
+    
+                node = ResourcePoolNode(nod_id, hostname, capacity)
+                node.enactment_info = enact_id
+                self.nodes[nod_id] = node
+            
+        self.resource_types = []
+        self.resource_types.append((constants.RES_CPU,1))
+        self.resource_types.append((constants.RES_MEM,1))
+        self.resource_types.append((constants.RES_DISK,1))
+            
+        self.logger.info("Fetched %i nodes from OpenNebula" % len(self.nodes))
+        for n in self.nodes.values():
+            self.logger.debug("%i %s %s" % (n.id, n.hostname, n.capacity))
+        
+    def get_nodes(self):
+        return self.nodes
+    
+    def get_resource_types(self):
+        return self.resource_types
+
+    def get_bandwidth(self):
+        return 0
+
+class OpenNebulaVMEnactment(VMEnactment):
+    def __init__(self):
+        VMEnactment.__init__(self)
+        self.logger = logging.getLogger("ENACT.ONE.VM")
+        self.rpc = get_one_xmlrpcclient()
+
+    def start(self, action):
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            hid = action.vnodes[vnode].pnode
+            
+            self.logger.debug("Sending request to start VM for L%iV%i (ONE: vid=%i, hid=%i)"
+                         % (action.lease_haizea_id, vnode, vid, hid))
+
+            try:
+                self.rpc.vm_deploy(vid, hid)
+                self.logger.debug("Request succesful.")
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.deploy", msg)
+            
+    def stop(self, action):
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            
+            self.logger.debug("Sending request to shutdown VM for L%iV%i (ONE: vid=%i)"
+                         % (action.lease_haizea_id, vnode, vid))
+
+            try:
+                self.rpc.vm_shutdown(vid)
+                self.logger.debug("Request succesful.")
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.shutdown", msg)
+            
+            # Space out commands to avoid saturating OpenNebula
+            # TODO: We should spawn a thread to do this, so Haizea doesn't
+            # block until all these commands end
+            interval = get_config().get("enactment-overhead").seconds
+            sleep(interval)
+
+    def suspend(self, action):
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            
+            self.logger.debug("Sending request to suspend VM for L%iV%i (ONE: vid=%i)"
+                         % (action.lease_haizea_id, vnode, vid))
+
+            try:
+                self.rpc.vm_suspend(vid)
+                self.logger.debug("Request succesful.")
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.suspend", msg)
+            
+            # Space out commands to avoid saturating OpenNebula
+            # TODO: We should spawn a thread to do this, so Haizea doesn't
+            # block until all these commands end
+            interval = get_config().get("enactment-overhead").seconds
+            sleep(interval)
+        
+    def resume(self, action):
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            
+            self.logger.debug("Sending request to resume VM for L%iV%i (ONE: vid=%i)"
+                         % (action.lease_haizea_id, vnode, vid))
+
+            try:
+                self.rpc.vm_resume(vid)
+                self.logger.debug("Request succesful.")
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.resume", msg)
+            
+            # Space out commands to avoid saturating OpenNebula
+            # TODO: We should spawn a thread to do this, so Haizea doesn't
+            # block until all these commands end
+            interval = get_config().get("enactment-overhead").seconds
+            sleep(interval)
+
+    def verify_suspend(self, action):
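+        # Returns 0 if every VM in the action reports the SUSPENDED state,
+        # and 1 if any VM has not finished suspending yet.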
+        result = 0
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            
+            try:
+                vm = self.rpc.vm_info(vid)   
+                state = vm.state
+                if state == OpenNebulaVM.STATE_SUSPENDED:
+                    self.logger.debug("Suspend of L%iV%i correct (ONE vid=%i)." % (action.lease_haizea_id, vnode, vid))
+                else:
+                    self.logger.warning("ONE did not complete suspend of L%iV%i on time. State is %i. (ONE vid=%i)" % (action.lease_haizea_id, vnode, state, vid))
+                    result = 1
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.info", msg)
+
+        return result
+        
+    def verify_resume(self, action):
+        result = 0
+        for vnode in action.vnodes:
+            # Unpack action
+            vid = action.vnodes[vnode].enactment_info
+            
+            try:
+                vm = self.rpc.vm_info(vid)   
+                state = vm.state
+                if state == OpenNebulaVM.STATE_ACTIVE:
+                    self.logger.debug("Resume of L%iV%i correct (ONE vid=%i)." % (action.lease_haizea_id, vnode, vid))
+                else:
+                    self.logger.warning("ONE did not complete resume of L%iV%i on time. State is %i. (ONE vid=%i)" % (action.lease_haizea_id, vnode, state, vid))
+                    result = 1
+            except Exception, msg:
+                raise OpenNebulaEnactmentError("vm.info", msg)
+
+        return result        
+
+class OpenNebulaDummyDeploymentEnactment(DeploymentEnactment):    
+    def __init__(self):
+        DeploymentEnactment.__init__(self)
+            
+    def get_aux_nodes(self):
+        return [] 
+            
+    def resolve_to_file(self, lease_id, vnode, diskimage_id):
+        return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease_id, vnode)
\ No newline at end of file

Added: trunk/src/haizea/core/enact/simulated.py
===================================================================
--- trunk/src/haizea/core/enact/simulated.py	                        (rev 0)
+++ trunk/src/haizea/core/enact/simulated.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,125 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.core.leases import Capacity
+from haizea.core.scheduler.resourcepool import ResourcePoolNode
+from haizea.core.enact import ResourcePoolInfo, VMEnactment, DeploymentEnactment
+import haizea.common.constants as constants
+from haizea.common.utils import get_config
+import logging
+
+class SimulatedResourcePoolInfo(ResourcePoolInfo):
+    def __init__(self, site):
+        ResourcePoolInfo.__init__(self)
+        self.logger = logging.getLogger("ENACT.SIMUL.INFO")
+        config = get_config()
+                
+        if not ("CPU" in site.resource_types and "Memory" in site.resource_types):
+            # CPU and Memory must be specified
+            # TODO: raise a more specific exception type
+            raise Exception("Simulated site must specify at least CPU and Memory resource types")
+        
+        # Disk and network should be specified but, if not, we can
+        # just add arbitrarily large values.
+        if not "Disk" in site.resource_types:
+            site.add_resource("Disk", [1000000])
+
+        if not "Net-in" in site.resource_types:
+            site.add_resource("Net-in", [1000000])
+
+        if not "Net-out" in site.resource_types:
+            site.add_resource("Net-out", [1000000])
+        
+        self.resource_types = site.get_resource_types()        
+        
+        nodes = site.nodes.get_all_nodes()
+        
+        self.nodes = dict([(id, ResourcePoolNode(id, "simul-%i" % id, capacity)) for (id, capacity) in nodes.items()])
+        for node in self.nodes.values():
+            node.enactment_info = node.id      
+        
+    def get_nodes(self):
+        return self.nodes
+    
+    def get_resource_types(self):
+        return self.resource_types
+
+    def get_migration_bandwidth(self):
+        return 100 # TODO: Get from config file
+
+class SimulatedVMEnactment(VMEnactment):
+    def __init__(self):
+        VMEnactment.__init__(self)
+        self.logger = logging.getLogger("ENACT.SIMUL.VM")
+        
+    def start(self, action):
+        for vnode in action.vnodes:
+            # Unpack action
+            pnode = action.vnodes[vnode].pnode
+            image = action.vnodes[vnode].diskimage
+            cpu = 100 #action.vnodes[vnode].resources.get_by_type(constants.RES_CPU)
+            memory = 1024 #action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
+            self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
+                         % (action.lease_haizea_id, vnode, pnode, image, cpu, memory))
+    
+    def stop(self, action):
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to stop VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode))
+
+    def suspend(self, action):
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to suspend VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode))
+
+    def resume(self, action):
+        for vnode in action.vnodes:
+            self.logger.debug("Received request to resume VM for L%iV%i"
+                         % (action.lease_haizea_id, vnode))
+
+    def verify_suspend(self, action):
+        return 0
+    
+    def verify_resume(self, action):
+        return 0
+    
+class SimulatedDeploymentEnactment(DeploymentEnactment):    
+    def __init__(self):
+        DeploymentEnactment.__init__(self)
+        self.logger = logging.getLogger("ENACT.SIMUL.INFO")
+        config = get_config()
+                
+        self.bandwidth = config.get("imagetransfer-bandwidth")
+        
+        imgcapacity = Capacity([constants.RES_NETOUT])
+        imgcapacity.set_quantity(constants.RES_NETOUT, self.bandwidth)
+
+        # TODO: Determine node number based on site
+        self.imagenode = ResourcePoolNode(1000, "image_node", imgcapacity)
+        
+    def get_imagenode(self):
+        return self.imagenode
+        
+    def get_aux_nodes(self):
+        return [self.imagenode] 
+    
+    def get_bandwidth(self):
+        return self.bandwidth
+        
+    def resolve_to_file(self, lease_id, vnode, diskimage_id):
+        return "/var/haizea/images/%s-L%iV%i" % (diskimage_id, lease_id, vnode)
\ No newline at end of file

Added: trunk/src/haizea/core/frontends/__init__.py
===================================================================
--- trunk/src/haizea/core/frontends/__init__.py	                        (rev 0)
+++ trunk/src/haizea/core/frontends/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,27 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.utils import abstract
+
+class RequestFrontend(object):
+    def __init__(self, manager):
+        self.manager = manager
+    
+    def get_accumulated_requests(self): abstract()
+    
+    def exists_more_requests(self): abstract()
\ No newline at end of file

Added: trunk/src/haizea/core/frontends/opennebula.py
===================================================================
--- trunk/src/haizea/core/frontends/opennebula.py	                        (rev 0)
+++ trunk/src/haizea/core/frontends/opennebula.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,186 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import haizea.common.constants as constants
+from haizea.core.leases import Lease, Capacity, Timestamp, Duration, UnmanagedSoftwareEnvironment
+from haizea.core.frontends import RequestFrontend
+from haizea.core.scheduler.slottable import ResourceTuple
+from haizea.common.utils import UNIX2DateTime, round_datetime, get_config, get_clock
+from haizea.common.opennebula_xmlrpc import OpenNebulaXMLRPCClient, OpenNebulaVM
+from mx.DateTime import DateTimeDelta, TimeDelta, ISO
+
+import operator
+import logging
+
+one_rpc = None
+
+def get_one_xmlrpcclient():
+    global one_rpc
+    if one_rpc == None:
+        host = get_config().get("one.host")
+        port = get_config().get("one.port")
+        user, passw = OpenNebulaXMLRPCClient.get_userpass_from_env()
+        one_rpc = OpenNebulaXMLRPCClient(host, port, user, passw)
+    return one_rpc
+
+class OpenNebulaHaizeaVM(object):
+    HAIZEA_PARAM = "HAIZEA"
+    HAIZEA_START = "START"
+    HAIZEA_START_NOW = "now"
+    HAIZEA_START_BESTEFFORT = "best_effort"
+    HAIZEA_DURATION = "DURATION"
+    HAIZEA_DURATION_UNLIMITED = "unlimited"
+    HAIZEA_PREEMPTIBLE = "PREEMPTIBLE"
+    HAIZEA_PREEMPTIBLE_YES = "yes"
+    HAIZEA_PREEMPTIBLE_NO = "no"
+    HAIZEA_GROUP = "GROUP"
+  
+    
+    def __init__(self, opennebula_vm):                        
+        # If there is no HAIZEA parameter, the default is to treat the
+        # request as an immediate request with unlimited duration
+        if not opennebula_vm.template.has_key(OpenNebulaHaizeaVM.HAIZEA_PARAM):
+            self.start = OpenNebulaHaizeaVM.HAIZEA_START_NOW
+            self.duration = OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED
+            self.preemptible = OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_NO
+            self.group = None
+        else:
+            self.start = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_START]
+            self.duration = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_DURATION]
+            self.preemptible = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE]
+            if opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM].has_key(OpenNebulaHaizeaVM.HAIZEA_GROUP):
+                self.group = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_GROUP]
+            else:
+                self.group = None
+                
+        self.submit_time = UNIX2DateTime(opennebula_vm.stime)
+                
+        # Create Timestamp object
+        if self.start == OpenNebulaHaizeaVM.HAIZEA_START_NOW:
+            self.start = Timestamp(Timestamp.NOW)
+        elif self.start == OpenNebulaHaizeaVM.HAIZEA_START_BESTEFFORT:
+            self.start = Timestamp(Timestamp.UNSPECIFIED)
+        elif self.start[0] == "+":
+            # Relative time
+            self.start = Timestamp(round_datetime(self.submit_time + ISO.ParseTime(self.start[1:])))
+        else:
+            self.start = Timestamp(ISO.ParseDateTime(self.start))
+            
+        # Create Duration object
+        if self.duration == OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED:
+            # This is an interim solution (make it run for a century).
+            # TODO: Integrate concept of unlimited duration in the lease datastruct
+            self.duration = Duration(DateTimeDelta(36500))
+        else:
+            self.duration = Duration(ISO.ParseTimeDelta(self.duration))
+            
+
+        self.preemptible = (self.preemptible == OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_YES)
+
+    
+        self.capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
+        
+        # CPU
+        # CPUs in VMs are not reported the same as in hosts.
+        # THere are two template values: CPU and VCPU.
+        # CPU reports the percentage of the CPU needed by the VM.
+        # VCPU, which is optional, reports how many CPUs are needed.
+        cpu = int(float(opennebula_vm.template["CPU"]) * 100)
+        if opennebula_vm.template.has_key("VCPU"):
+            ncpu = int(opennebula_vm.template["VCPU"])
+        else:
+            ncpu = 1
+        self.capacity.set_ninstances(constants.RES_CPU, ncpu)
+        for i in range(ncpu):
+            self.capacity.set_quantity_instance(constants.RES_CPU, i+1, cpu)            
+        
+        # Memory. Unlike hosts, memory is reported directly in MBs
+        self.capacity.set_quantity(constants.RES_MEM, int(opennebula_vm.template["MEMORY"]))
+
+        self.one_id = opennebula_vm.id
+        
+    
+class OpenNebulaFrontend(RequestFrontend):    
+    
+    def __init__(self, manager):
+        self.manager = manager
+        self.processed = []
+        self.logger = logging.getLogger("ONEREQ")
+        self.rpc = get_one_xmlrpcclient()
+
+        
+    def get_accumulated_requests(self):
+        vms = self.rpc.vmpool_info()
+
+        # Extract the pending OpenNebula VMs
+        pending_vms = [] 
+        for vm in vms:
+            if not vm.id  in self.processed and vm.state == OpenNebulaVM.STATE_PENDING:
+                vm_detailed = self.rpc.vm_info(vm.id)        
+                pending_vms.append(OpenNebulaHaizeaVM(vm_detailed))
+                self.processed.append(vm.id)
+            
+        grouped = [vm for vm in pending_vms if vm.group != None]
+        not_grouped = [vm for vm in pending_vms if vm.group == None]
+        
+        # Extract VM groups
+        group_ids = set([vm.group for vm in grouped])
+        groups = {}
+        for group_id in group_ids:
+            groups[group_id] = [vm for vm in grouped if vm.group == group_id]
+            
+        lease_requests = []
+        for group_id, opennebula_vms in groups.items():
+            lease_requests.append(self.__ONEreqs_to_lease(opennebula_vms, group_id))
+
+        for opennebula_vm in not_grouped:
+            lease_requests.append(self.__ONEreqs_to_lease([opennebula_vm]))
+        
+        lease_requests.sort(key=operator.attrgetter("submit_time"))
+        return lease_requests
+
+    def exists_more_requests(self):
+        return True
+
+    
+    def __ONEreqs_to_lease(self, opennebula_vms, group_id=None):
+        # The first VM in the group (vm_with_params) is used to extract
+        # the HAIZEA parameters (i.e., the lease-wide attributes).
+        vm_with_params = opennebula_vms[0]
+
+        # Per-lease attributes
+        start = vm_with_params.start
+        duration = vm_with_params.duration
+        preemptible = vm_with_params.preemptible
+        submit_time = vm_with_params.submit_time
+
+        # Per-vnode attributes
+        requested_resources = dict([(i+1,vm.capacity) for i, vm in enumerate(opennebula_vms)])
+
+        lease = Lease.create_new(submit_time = submit_time, 
+                                 requested_resources = requested_resources, 
+                                 start = start, 
+                                 duration = duration, 
+                                 deadline = None,
+                                 preemptible = preemptible, 
+                                 software = UnmanagedSoftwareEnvironment())
+     
+        lease.enactment_info = group_id
+        lease.vnode_enactment_info = dict([(i+1,vm.one_id) for i, vm in enumerate(opennebula_vms)])
+        return lease
+

Added: trunk/src/haizea/core/frontends/rpc.py
===================================================================
--- trunk/src/haizea/core/frontends/rpc.py	                        (rev 0)
+++ trunk/src/haizea/core/frontends/rpc.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,51 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+import haizea.common.constants as constants
+from haizea.core.scheduler.slottable import ResourceTuple
+from haizea.core.leases import Lease
+from haizea.core.frontends import RequestFrontend
+from haizea.common.utils import round_datetime, get_config, get_clock, get_lease_id
+from mx.DateTime import DateTimeDelta, TimeDelta, ISO
+import logging
+
+class RPCFrontend(RequestFrontend):
+    def __init__(self, manager):
+        self.manager = manager
+        self.logger = logging.getLogger("RPCREQ")
+        self.accumulated = []
+        config = get_config()
+        self.manager.rpc_server.register_rpc(self.create_lease)
+
+    def get_accumulated_requests(self):
+        acc = self.accumulated
+        self.accumulated = []
+        return acc
+    
+    def exists_more_requests(self): 
+        return True
+
+    def create_lease(self, lease_xml_str):     
+        lease = Lease.from_xml_string(lease_xml_str)
+        lease.id = get_lease_id()
+        self.accumulated.append(lease)        
+        return lease.id
+        
+        
+    
+    
+            

Added: trunk/src/haizea/core/frontends/tracefile.py
===================================================================
--- trunk/src/haizea/core/frontends/tracefile.py	                        (rev 0)
+++ trunk/src/haizea/core/frontends/tracefile.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,113 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import haizea.common.constants as constants
+from haizea.common.utils import get_clock
+from haizea.core.frontends import RequestFrontend
+from haizea.core.leases import LeaseWorkload, Lease, DiskImageSoftwareEnvironment
+import operator
+import logging
+
+class TracefileFrontend(RequestFrontend):
+    def __init__(self, manager, starttime):
+        RequestFrontend.__init__(self, manager)
+        self.logger = logging.getLogger("TFILE")
+        config = manager.config
+
+        tracefile = config.get("tracefile")
+        injectfile = config.get("injectionfile")
+        imagefile = config.get("imagefile")
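+        # Illustrative configuration sketch (the option names match the
+        # config.get() calls above; the section name is an assumption, so
+        # consult the sample configuration files shipped with Haizea):
+        #
+        #   [tracefile]
+        #   tracefile: /usr/share/haizea/traces/sample.lwf
+        #   # injectionfile and imagefile are optional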
+        
+        # Read trace file
+        # Requests is a list of lease requests
+        self.logger.info("Loading tracefile %s" % tracefile)
+        self.requests = None
+        if tracefile.endswith(".swf"):
+            self.requests = LeaseWorkload.from_swf_file(tracefile, starttime)
+        elif tracefile.endswith(".lwf") or tracefile.endswith(".xml"):
+            lease_workload = LeaseWorkload.from_xml_file(tracefile, starttime)
+            self.requests = lease_workload.get_leases()
+    
+        if injectfile != None:
+            self.logger.info("Loading injection file %s" % injectfile)
+            inj_lease_workload = LeaseWorkload.from_xml_file(injectfile, starttime)
+            inj_leases = inj_lease_workload.get_leases()
+            self.requests += inj_leases
+            self.requests.sort(key=operator.attrgetter("submit_time"))
+
+        if imagefile != None:
+            self.logger.info("Loading image file %s" % imagefile)
+            file = open(imagefile, "r")
+            imagesizes = {}
+            images = []
+            state = 0  # 0 -> Reading image sizes  1 -> Reading image sequence
+            for line in file:
+                if line[0]=='#':
+                    state = 1
+                elif state == 0:
+                    image, size = line.split()
+                    imagesizes[image] = int(size)
+                elif state == 1:
+                    images.append(line.strip())            
+            for lease, image_id in zip(self.requests, images):
+                lease.software = DiskImageSoftwareEnvironment(image_id, imagesizes[image_id])
+        
+        # Add runtime overhead, if necessary
+        add_overhead = config.get("add-overhead")
+        
+        if add_overhead != constants.RUNTIMEOVERHEAD_NONE:
+            slowdown_overhead = config.get("runtime-slowdown-overhead")
+            boot_overhead = config.get("bootshutdown-overhead")
+            for r in self.requests:
+                if add_overhead == constants.RUNTIMEOVERHEAD_ALL or (add_overhead == constants.RUNTIMEOVERHEAD_BE and r.get_type() == Lease.BEST_EFFORT):
+                   if slowdown_overhead != 0:
+                       r.add_runtime_overhead(slowdown_overhead)
+                   r.add_boot_overhead(boot_overhead)
+
+        # Override requested memory, if necessary
+        memory = config.get("override-memory")
+        if memory != constants.NO_MEMORY_OVERRIDE:
+            for r in self.requests:
+                r.requested_resources.set_by_type(constants.RES_MEM, memory)            
+            
+        types = {}
+        for r in self.requests:
+            types[r.get_type()] = types.setdefault(r.get_type(), 0) + 1
+        types_str = " + ".join(["%i %s" % (types[t],Lease.type_str[t]) for t in types])
+
+        self.logger.info("Loaded workload with %i requests (%s)" % (len(self.requests), types_str))
+        
+        
+    def get_accumulated_requests(self):
+        # When reading from a trace file, there are no
+        # "accumulated requests". Rather, we just take whatever
+        # requests are in the trace up to the current time
+        # reported by the resource manager
+        time = get_clock().get_time()
+        nowreq = [r for r in self.requests if r.submit_time <= time]
+        self.requests = [r for r in self.requests if r.submit_time > time]   
+        return nowreq              
+
+    def exists_more_requests(self):
+        return len(self.requests) != 0
+
+    def get_next_request_time(self):
+        if self.exists_more_requests():
+            return self.requests[0].submit_time
+        else:
+            return None
\ No newline at end of file

Added: trunk/src/haizea/core/leases.py
===================================================================
--- trunk/src/haizea/core/leases.py	                        (rev 0)
+++ trunk/src/haizea/core/leases.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,1341 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides the lease data structures:
+
+* Lease: Represents a lease
+* LeaseStateMachine: A state machine to keep track of a lease's state
+* Capacity: Used to represent a quantity of resources
+* Timestamp: An exact moment in time
+* Duration: A duration
+* SoftwareEnvironment, UnmanagedSoftwareEnvironment, DiskImageSoftwareEnvironment:
+  Used to represent a lease's required software environment.
+* LeaseWorkload: Represents a collection of lease requests submitted
+  in a specific order.
+* Site: Represents the site with leasable resources.
+* Nodes: Represents a collection of machines ("nodes"). This is used
+  both when specifying a site and when specifying the machines
+  needed by a lease.
+"""
+
+from haizea.common.constants import LOGLEVEL_VDEBUG
+from haizea.common.utils import StateMachine, round_datetime, round_datetime_delta, get_lease_id, pretty_nodemap, xmlrpc_marshall_singlevalue
+from haizea.core.scheduler.slottable import ResourceReservation
+
+from mx.DateTime import DateTime, TimeDelta, Parser
+
+import logging
+import xml.etree.ElementTree as ET
+
+
+
+
+class Lease(object):
+    """A resource lease
+    
+    This is one of the main data structures used in Haizea. A lease
+    is "a negotiated and renegotiable agreement between a resource 
+    provider and a resource consumer, where the former agrees to make 
+    a set of resources available to the latter, based on a set of 
+    lease terms presented by the resource consumer". All the gory
+    details on what this means can be found on the Haizea website
+    and on the Haizea publications.
+    
+    See the __init__ method for a description of the information that
+    is contained in a lease.
+    
+    """
+    
+    # Lease states
+    STATE_NEW = 0
+    STATE_PENDING = 1
+    STATE_REJECTED = 2
+    STATE_SCHEDULED = 3
+    STATE_QUEUED = 4
+    STATE_CANCELLED = 5
+    STATE_PREPARING = 6
+    STATE_READY = 7
+    STATE_ACTIVE = 8
+    STATE_SUSPENDING = 9
+    STATE_SUSPENDED_PENDING = 10
+    STATE_SUSPENDED_QUEUED = 11
+    STATE_SUSPENDED_SCHEDULED = 12
+    STATE_MIGRATING = 13
+    STATE_RESUMING = 14
+    STATE_RESUMED_READY = 15
+    STATE_DONE = 16
+    STATE_FAIL = 17
+    
+    # String representation of lease states
+    state_str = {STATE_NEW : "New",
+                 STATE_PENDING : "Pending",
+                 STATE_REJECTED : "Rejected",
+                 STATE_SCHEDULED : "Scheduled",
+                 STATE_QUEUED : "Queued",
+                 STATE_CANCELLED : "Cancelled",
+                 STATE_PREPARING : "Preparing",
+                 STATE_READY : "Ready",
+                 STATE_ACTIVE : "Active",
+                 STATE_SUSPENDING : "Suspending",
+                 STATE_SUSPENDED_PENDING : "Suspended-Pending",
+                 STATE_SUSPENDED_QUEUED : "Suspended-Queued",
+                 STATE_SUSPENDED_SCHEDULED : "Suspended-Scheduled",
+                 STATE_MIGRATING : "Migrating",
+                 STATE_RESUMING : "Resuming",
+                 STATE_RESUMED_READY: "Resumed-Ready",
+                 STATE_DONE : "Done",
+                 STATE_FAIL : "Fail"}
+    
+    # Lease types
+    BEST_EFFORT = 1
+    ADVANCE_RESERVATION = 2
+    IMMEDIATE = 3
+    UNKNOWN = -1
+    
+    # String representation of lease types    
+    type_str = {BEST_EFFORT: "Best-effort",
+                ADVANCE_RESERVATION: "AR",
+                IMMEDIATE: "Immediate",
+                UNKNOWN: "Unknown"}
+    
+    def __init__(self, id, submit_time, requested_resources, start, duration, 
+                 deadline, preemptible, software, state):
+        """Constructs a lease.
+        
+        The arguments are the fundamental attributes of a lease.
+        To create a lease with an autoincremented ID and an initial
+        state of "NEW", use the create_new or create_new_from_xml_element
+        class methods instead of calling this constructor directly.
+        A lease also has several bookkeeping attributes that are
+        only meant to be consumed by other Haizea objects.
+        
+        Arguments:
+        id -- Unique identifier for the lease, or None.
+        submit_time -- The time at which the lease was submitted
+        requested_resources -- A dictionary (int -> Capacity) mapping
+          each requested node to a capacity (i.e., the amount of
+          resources requested for that node)
+        start -- A Timestamp object containing the requested time.
+        duration -- A Duration object containing the requested duration.
+        deadline -- A Timestamp object containing the deadline by which
+          this lease must be completed.
+        preemptible -- A boolean indicating whether this lease can be
+          preempted or not.
+        software -- A SoftwareEnvironment object specifying the
+          software environment required by the lease.
+        state -- The lease's initial state. If None, the lease
+          starts out in state "NEW".
+        """        
+        # Lease ID (read only)
+        self.id = id
+        
+        # Lease attributes
+        self.submit_time = submit_time
+        self.requested_resources = requested_resources
+        self.start = start
+        self.duration = duration
+        self.deadline = deadline
+        self.preemptible = preemptible
+        self.software = software
+
+        # Bookkeeping attributes:
+
+        # Lease state
+        if state == None:
+            state = Lease.STATE_NEW
+        self.state = LeaseStateMachine(initial_state = state)
+
+        # End of lease (recorded when the lease ends)
+        self.end = None
+        
+        # Number of nodes requested in the lease
+        self.numnodes = len(requested_resources)
+        
+        # The following two lists contain all the resource reservations
+        # (or RRs) associated to this lease. These two lists are
+        # basically the link between the lease and Haizea's slot table.
+        
+        # The preparation RRs are reservations that have to be
+        # completed before a lease can first transition into a
+        # READY state (e.g., image transfers)
+        self.preparation_rrs = []
+        # The VM RRs are reservations for the VMs that implement
+        # the lease.
+        self.vm_rrs = []
+
+        # Enactment information. Should only be manipulated by enactment module
+        self.enactment_info = None
+        self.vnode_enactment_info = dict([(n, None) for n in self.requested_resources.keys()])
+        
+        self.logger = logging.getLogger("LEASES")
+        
+        
+    @classmethod
+    def create_new(cls, submit_time, requested_resources, start, duration, 
+                 deadline, preemptible, software):
+        id = get_lease_id()
+        state = Lease.STATE_NEW
+        return cls(id, submit_time, requested_resources, start, duration, 
+                 deadline, preemptible, software, state)
+        
+    @classmethod
+    def create_new_from_xml_element(cls, element):
+        lease = cls.from_xml_element(element)
+        lease.id = get_lease_id()
+        lease.state = LeaseStateMachine(initial_state = Lease.STATE_NEW)
+        return lease
+
+    @classmethod
+    def from_xml_file(cls, xml_file):
+        """Constructs a lease from an XML file.
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        Argument:
+        xml_file -- XML file containing the lease in XML format.
+        """        
+        return cls.from_xml_element(ET.parse(xml_file).getroot())
+
+    @classmethod
+    def from_xml_string(cls, xml_str):
+        """Constructs a lease from an XML string.
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        Argument:
+        xml_str -- String containing the lease in XML format.
+        """        
+        return cls.from_xml_element(ET.fromstring(xml_str))
+        
+    @classmethod
+    def from_xml_element(cls, element):
+        """Constructs a lease from an ElementTree element.
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        Argument:
+        element -- Element object containing a "<lease>" element.
+        """        
+        
+        id = element.get("id")
+        if id != None:
+            id = int(id)
+
+        state = element.get("state")
+        if state != None:
+            state = int(state)
+
+        submit_time = element.get("submit-time")
+        if submit_time != None:
+            submit_time = Parser.DateTimeFromString(submit_time)
+        
+        nodes = Nodes.from_xml_element(element.find("nodes"))
+        
+        requested_resources = nodes.get_all_nodes()
+        
+        start = element.find("start")
+        if len(start.getchildren()) == 0:
+            start = Timestamp(Timestamp.UNSPECIFIED)
+        else:
+            child = start[0]
+            if child.tag == "now":
+                start = Timestamp(Timestamp.NOW)
+            elif child.tag == "exact":
+                start = Timestamp(Parser.DateTimeFromString(child.get("time")))
+        
+        duration = Duration(Parser.DateTimeDeltaFromString(element.find("duration").get("time")))
+
+        deadline = None
+        
+        preemptible = element.get("preemptible").capitalize()
+        if preemptible == "True":
+            preemptible = True
+        elif preemptible == "False":
+            preemptible = False
+        
+        software = element.find("software")
+        
+        if software.find("none") != None:
+            software = UnmanagedSoftwareEnvironment()
+        elif software.find("disk-image") != None:
+            disk_image = software.find("disk-image")
+            image_id = disk_image.get("id")
+            image_size = int(disk_image.get("size"))
+            software = DiskImageSoftwareEnvironment(image_id, image_size)
+        
+        return Lease(id, submit_time, requested_resources, start, duration, 
+                     deadline, preemptible, software, state)
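+
+    # For illustration, a minimal lease element consistent with the parsing
+    # code above (see the Haizea documentation for the authoritative format;
+    # the resource type names are only examples):
+    #
+    #   <lease preemptible="false">
+    #     <nodes>
+    #       <node-set numnodes="1">
+    #         <res type="CPU" amount="100"/>
+    #         <res type="Memory" amount="1024"/>
+    #       </node-set>
+    #     </nodes>
+    #     <start><exact time="2009-08-05 13:00:00"/></start>
+    #     <duration time="01:00:00"/>
+    #     <software><none/></software>
+    #   </lease>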
+
+
+    def to_xml(self):
+        """Returns an ElementTree XML representation of the lease
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        """        
+        lease = ET.Element("lease")
+        if self.id != None:
+            lease.set("id", str(self.id))
+        lease.set("state", str(self.get_state()))
+        lease.set("preemptible", str(self.preemptible))
+        if self.submit_time != None:
+            lease.set("submit-time", str(self.submit_time))
+        
+        capacities = {}
+        for capacity in self.requested_resources.values():
+            key = capacity
+            for c in capacities:
+                if capacity == c:
+                    key = c
+                    break
+            capacities[key] = capacities.get(key, 0) + 1
+        
+        nodes = Nodes([(numnodes,c) for c,numnodes in capacities.items()])
+        lease.append(nodes.to_xml())
+        
+        start = ET.SubElement(lease, "start")
+        if self.start.requested == Timestamp.UNSPECIFIED:
+            pass # empty start element
+        elif self.start.requested == Timestamp.NOW:
+            ET.SubElement(start, "now") #empty now element
+        else:
+            exact = ET.SubElement(start, "exact")
+            exact.set("time", str(self.start.requested))
+            
+        duration = ET.SubElement(lease, "duration")
+        duration.set("time", str(self.duration.requested))
+        
+        software = ET.SubElement(lease, "software")
+        if isinstance(self.software, UnmanagedSoftwareEnvironment):
+            ET.SubElement(software, "none")
+        elif isinstance(self.software, DiskImageSoftwareEnvironment):
+            imagetransfer = ET.SubElement(software, "disk-image")
+            imagetransfer.set("id", self.software.image_id)
+            imagetransfer.set("size", str(self.software.image_size))
+            
+        return lease
+
+    def to_xml_string(self):
+        """Returns a string XML representation of the lease
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        """   
+        return ET.tostring(self.to_xml())
+
+    def get_type(self):
+        """Determines the type of lease
+        
+        Based on the lease's attributes, determines the lease's type.
+        Can return Lease.BEST_EFFORT, Lease.ADVANCE_RESERVATION, or
+        Lease.IMMEDIATE
+        
+        """
+        if self.start.requested == Timestamp.UNSPECIFIED:
+            return Lease.BEST_EFFORT
+        elif self.start.requested == Timestamp.NOW:
+            return Lease.IMMEDIATE            
+        else:
+            return Lease.ADVANCE_RESERVATION
+        
+    def get_state(self):
+        """Returns the lease's state.
+                
+        """        
+        return self.state.get_state()
+    
+    def set_state(self, state):
+        """Changes the lease's state.
+                
+        The state machine will throw an exception if the 
+        requested transition is illegal.
+        
+        Argument:
+        state -- The new state
+        """        
+        self.state.change_state(state)
+        
+    def print_contents(self, loglevel=LOGLEVEL_VDEBUG):
+        """Prints the lease's attributes to the log.
+                
+        Argument:
+        loglevel -- The loglevel at which to print the information
+        """           
+        self.logger.log(loglevel, "__________________________________________________")
+        self.logger.log(loglevel, "Lease ID       : %i" % self.id)
+        self.logger.log(loglevel, "Type           : %s" % Lease.type_str[self.get_type()])
+        self.logger.log(loglevel, "Submission time: %s" % self.submit_time)
+        self.logger.log(loglevel, "Start          : %s" % self.start)
+        self.logger.log(loglevel, "Duration       : %s" % self.duration)
+        self.logger.log(loglevel, "State          : %s" % Lease.state_str[self.get_state()])
+        self.logger.log(loglevel, "Resource req   : %s" % self.requested_resources)
+        self.logger.log(loglevel, "Software       : %s" % self.software)
+        self.print_rrs(loglevel)
+        self.logger.log(loglevel, "--------------------------------------------------")
+
+    def print_rrs(self, loglevel=LOGLEVEL_VDEBUG):
+        """Prints the lease's resource reservations to the log.
+                
+        Argument:
+        loglevel -- The loglevel at which to print the information
+        """              
+        if len(self.preparation_rrs) > 0:
+            self.logger.log(loglevel, "DEPLOYMENT RESOURCE RESERVATIONS")
+            self.logger.log(loglevel, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
+            for r in self.preparation_rrs:
+                r.print_contents(loglevel)
+                self.logger.log(loglevel, "##")
+        self.logger.log(loglevel, "VM RESOURCE RESERVATIONS")
+        self.logger.log(loglevel, "~~~~~~~~~~~~~~~~~~~~~~~~")
+        for r in self.vm_rrs:
+            r.print_contents(loglevel)
+            self.logger.log(loglevel, "##")
+
+    def get_active_vmrrs(self, time):
+        """Returns the active VM resource reservations at a given time
+                
+        Argument:
+        time -- Time to look for active reservations
+        """        
+        return [r for r in self.vm_rrs if r.start <= time and time <= r.end and r.state == ResourceReservation.STATE_ACTIVE]
+
+    def get_scheduled_reservations(self):
+        """Returns all scheduled reservations
+                
+        """           
+        return [r for r in self.preparation_rrs + self.vm_rrs if r.state == ResourceReservation.STATE_SCHEDULED]
+
+    def get_last_vmrr(self):
+        """Returns the last VM reservation for this lease.
+                        
+        """            
+        return self.vm_rrs[-1]    
+
+    def get_endtime(self):
+        """Returns the time at which the last VM reservation 
+        for this lease ends.
+        
+        Note that this is not necessarily the time at which the lease
+        will end, just the time at which the last currently scheduled
+        VM will end.
+                
+        """        
+        vmrr = self.get_last_vmrr()
+        return vmrr.end
+    
+    def append_vmrr(self, vmrr):
+        """Adds a VM resource reservation to the lease.
+        
+        Argument:
+        vmrr -- The VM RR to add.
+        """             
+        self.vm_rrs.append(vmrr)
+        
+    def remove_vmrr(self, vmrr):
+        """Removes a VM resource reservation from the lease.
+        
+        Argument:
+        vmrr -- The VM RR to remove.
+        """           
+        if not vmrr in self.vm_rrs:
+            raise Exception, "Tried to remove an VM RR not contained in this lease"
+        else:
+            self.vm_rrs.remove(vmrr)
+                    
+    def append_preparationrr(self, preparation_rr):
+        """Adds a preparation resource reservation to the lease.
+        
+        Argument:
+        preparation_rr -- The preparation RR to add.
+        """             
+        self.preparation_rrs.append(preparation_rr)
+        
+    def remove_preparationrr(self, preparation_rr):
+        """Removes a preparation resource reservation from the lease.
+        
+        Argument:
+        preparation_rr -- The preparation RR to remove.
+        """        
+        if not preparation_rr in self.preparation_rrs:
+            raise Exception, "Tried to remove a preparation RR not contained in this lease"
+        else:
+            self.preparation_rrs.remove(preparation_rr)        
+
+    def clear_rrs(self):
+        """Removes all resource reservations for this lease
+        (both preparation and VM)
+        
+        """            
+        self.preparation_rrs = []
+        self.vm_rrs = []
+
+    def get_waiting_time(self):
+        """Gets the waiting time for this lease.
+        
+        The waiting time is the difference between the submission
+        time and the time at which the lease starts. This method
+        mostly makes sense for best-effort leases, where the
+        starting time is determined by Haizea.
+        
+        """          
+        return self.start.actual - self.submit_time
+        
+    def get_slowdown(self, bound=10):
+        """Determines the bounded slowdown for this lease.
+        
+        Slowdown is a normalized measure of how much time a
+        request takes to make it through a queue (thus, like
+        get_waiting_time, the slowdown makes sense mostly for
+        best-effort leases). Slowdown is equal to the time the
+        lease took to run on a loaded system (i.e., a system where
+        it had to compete with other leases for resources)
+        divided by the time it would take if it just had the
+        system all to itself (i.e., starts running immediately
+        without having to wait in a queue and without the
+        possibility of being preempted).
+        
+        "Bounded" slowdown is one where leases with very short
+        durations are rounded up to a bound, to prevent the
+        metric to be affected by reasonable but disproportionate
+        waiting times (e.g., a 5-second lease with a 15 second
+        waiting time -an arguably reasonable waiting time- has a 
+        slowdown of 4, the same as 10 hour lease having to wait 
+        30 hours for resources).
+        
+        Argument:
+        bound -- The bound, specified in seconds.
+        All leases with a duration less than this
+        parameter are rounded up to the bound.
+        """          
+        time_on_dedicated = self.duration.original
+        time_on_loaded = self.end - self.submit_time
+        bound = TimeDelta(seconds=bound)
+        if time_on_dedicated < bound:
+            time_on_dedicated = bound
+        return time_on_loaded / time_on_dedicated
+        
+    def add_boot_overhead(self, t):
+        """Adds a boot overhead to the lease.
+        
+        Increments the requested duration to account for the fact 
+        that some time will be spent booting up the resources.
+        
+        Argument:
+        t -- Time to add
+        """          
+        self.duration.incr(t)        
+
+    def add_runtime_overhead(self, percent):
+        """Adds a runtime overhead to the lease.
+        
+        This method is mostly meant for simulations. Since VMs
+        run slower than physical hardware, this increments the
+        duration of a lease by a percent to observe the effect
+        of having all the leases run slower on account of
+        running on a VM.
+        
+        Note: the whole "runtime overhead" problem is becoming
+        increasingly moot as people have lost their aversion to
+        VMs thanks to the cloud computing craze. Anecdotal evidence
+        suggests that most people don't care that VMs will run
+        X % slower (compared to a physical machine) because they
+        know full well that what they're getting is a virtual
+        machine (the same way a user of an HPC system would know
+        that he/she's getting processors with speed X as opposed to
+        those on some other site, with speed X*0.10)
+        
+        Argument:
+        percent -- Runtime overhead (in percent of requested
+        duration) to add to the lease.
+        """            
+        self.duration.incr_by_percent(percent)
+            
+        
+class LeaseStateMachine(StateMachine):
+    """A lease state machine
+    
+    A child of StateMachine, this class simply specifies the valid
+    states and transitions for a lease (the actual state machine code
+    is in StateMachine).
+    
+    See the Haizea documentation for a description of states and
+    valid transitions.
+    
+    """    
+    transitions = {Lease.STATE_NEW:                 [(Lease.STATE_PENDING,    "")],
+                   
+                   Lease.STATE_PENDING:             [(Lease.STATE_SCHEDULED,  ""),
+                                                     (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_REJECTED,   "")],
+                                                     
+                   Lease.STATE_SCHEDULED:           [(Lease.STATE_PREPARING,  ""),
+                                                     (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_PENDING,     ""),
+                                                     (Lease.STATE_READY,      ""),
+                                                     (Lease.STATE_CANCELLED,  "")],
+                                                     
+                   Lease.STATE_QUEUED:              [(Lease.STATE_SCHEDULED,  ""),
+                                                     (Lease.STATE_CANCELLED,  "")],
+                                                     
+                   Lease.STATE_PREPARING:           [(Lease.STATE_READY,      ""),
+                                                     (Lease.STATE_PENDING,     ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_READY:               [(Lease.STATE_ACTIVE,     ""),
+                                                     (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_PENDING,     ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_ACTIVE:              [(Lease.STATE_SUSPENDING, ""),
+                                                     (Lease.STATE_QUEUED,     ""),
+                                                     (Lease.STATE_DONE,       ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_SUSPENDING:          [(Lease.STATE_SUSPENDED_PENDING,  ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_SUSPENDED_PENDING:   [(Lease.STATE_SUSPENDED_QUEUED,     ""),
+                                                     (Lease.STATE_SUSPENDED_SCHEDULED,  ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_SUSPENDED_QUEUED:    [(Lease.STATE_SUSPENDED_SCHEDULED,  ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_SUSPENDED_SCHEDULED: [(Lease.STATE_SUSPENDED_QUEUED,     ""),
+                                                     (Lease.STATE_SUSPENDED_PENDING,  ""),
+                                                     (Lease.STATE_MIGRATING,  ""),
+                                                     (Lease.STATE_RESUMING,   ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_MIGRATING:           [(Lease.STATE_SUSPENDED_SCHEDULED,  ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_RESUMING:            [(Lease.STATE_RESUMED_READY, ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                                                     
+                   Lease.STATE_RESUMED_READY:       [(Lease.STATE_ACTIVE,     ""),
+                                                     (Lease.STATE_CANCELLED,  ""),
+                                                     (Lease.STATE_FAIL,       "")],
+                   
+                   # Final states
+                   Lease.STATE_DONE:          [],
+                   Lease.STATE_CANCELLED:     [],
+                   Lease.STATE_FAIL:          [],
+                   Lease.STATE_REJECTED:      [],
+                   }
+    
+    def __init__(self, initial_state):
+        StateMachine.__init__(self, initial_state, LeaseStateMachine.transitions, Lease.state_str)
+
+
+class Capacity(object):
+    """A quantity of resources
+    
+    This class is used to represent a quantity of resources, such
+    as those required by a lease. For example, if a lease needs a
+    single node with 1 CPU and 1024 MB of memory, a single Capacity
+    object would be used containing that information. 
+    
+    Resources in a Capacity object can be multi-instance, meaning
+    that several instances of the same type of resources can be
+    specified. For example, if a node requires 2 CPUs, then this is
+    represented as two instances of the same type of resource. Most
+    resources, however, will be "single instance" (e.g., a physical
+    node only has "one" memory).
+    
+    Note: This class is similar to, but distinct from, the ResourceTuple
+    class in the slottable module. The ResourceTuple class can contain
+    the same information, but uses a different internal representation
+    (which is optimized for long-running simulations) and is tightly
+    coupled to the SlotTable class. The Capacity and ResourceTuple
+    classes are kept separate so that the slottable module remains
+    independent from the rest of Haizea (in case we want to switch
+    to a different slottable implementation in the future).
+    
+    """        
+    def __init__(self, types):
+        """Constructs an empty Capacity object.
+        
+        All resource types are initially set to be single-instance,
+        with a quantity of 0 for each resource.
+        
+        Argument:
+        types -- List of resource types. e.g., ["CPU", "Memory"]
+        """          
+        self.ninstances = dict([(type, 1) for type in types])
+        self.quantity = dict([(type, [0]) for type in types])
+        
+    def get_ninstances(self, type):
+        """Gets the number of instances for a resource type
+                
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        """               
+        return self.ninstances[type]
+           
+    def get_quantity(self, type):
+        """Gets the quantity of a single-instance resource
+                
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        """               
+        return self.get_quantity_instance(type, 1)
+    
+    def get_quantity_instance(self, type, instance):
+        """Gets the quantity of a specific instance of a 
+        multi-instance resource.
+                        
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        instance -- The instance. Note that instances are numbered
+        from 1.
+        """               
+        return self.quantity[type][instance-1]
+
+    def set_quantity(self, type, amount):
+        """Sets the quantity of a single-instance resource
+                
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        amount -- The amount to set the resource to.
+        """            
+        self.set_quantity_instance(type, 1, amount)
+    
+    def set_quantity_instance(self, type, instance, amount):
+        """Sets the quantity of a specific instance of a 
+        multi-instance resource.
+                        
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        instance -- The instance. Note that instances are numbered
+        from 1.
+        amount -- The amount to set the instance of the resource to.
+        """        
+        self.quantity[type][instance-1] = amount
+    
+    def set_ninstances(self, type, ninstances):
+        """Changes the number of instances of a resource type.
+                        
+        Note that changing the number of instances will initialize
+        all the instances' amounts to zero. This method should
+        only be called right after constructing a Capacity object.
+        
+        Argument:
+        type -- The type of resource (using the same name passed
+        when constructing the Capacity object)
+        ninstances -- The number of instances
+        """                
+        self.ninstances[type] = ninstances
+        self.quantity[type] = [0 for i in range(ninstances)]
+       
+    def get_resource_types(self):
+        """Returns the types of resources in this capacity.
+                        
+        """            
+        return self.quantity.keys()
+    
+    def __eq__(self, other):
+        """Tests if two capacities are the same
+                        
+        """        
+        for type in self.quantity:
+            if not other.quantity.has_key(type):
+                return False
+            if self.ninstances[type] != other.ninstances[type]:
+                return False
+            if self.quantity[type] != other.quantity[type]:
+                return False
+        return True
+
+    def __ne__(self, other):
+        """Tests if two capacities are not the same
+                        
+        """        
+        return not self == other
+            
+    def __repr__(self):
+        """Returns a string representation of the Capacity"""
+        return "  |  ".join("%s: %i" % (type,q[0]) for type, q in self.quantity.items())
+            
+
+class Timestamp(object):
+    """An exact point in time.
+    
+    This class is just a wrapper around three DateTimes. When
+    dealing with timestamps in Haizea (such as the requested
+    starting time for a lease), we want to keep track not just
+    of the requested timestamp, but also the scheduled timestamp
+    (which could differ from the requested one) and the
+    actual timestamp (which could differ from the scheduled one).
+    """        
+    
+    UNSPECIFIED = "Unspecified"
+    NOW = "Now"
+    
+    def __init__(self, requested):
+        """Constructor
+                        
+        Argument:
+        requested -- The requested timestamp
+        """        
+        self.requested = requested
+        self.scheduled = None
+        self.actual = None
+
+    def __repr__(self):
+        """Returns a string representation of the Duration"""
+        return "REQ: %s  |  SCH: %s  |  ACT: %s" % (self.requested, self.scheduled, self.actual)
+        
+class Duration(object):
+    """A duration
+    
+    This class is just a wrapper around five DateTimes. When
+    dealing with durations in Haizea (such as the requested
+    duration for a lease), we want to keep track of the following:
+    
+    - The requested duration
+    - The accumulated duration (when the entire duration of
+    the lease can't be scheduled without interruption, this
+    keeps track of how much duration has been fulfilled so far)
+    - The actual duration (which might not be the same as the
+    requested duration)
+    
+    For the purposes of simulation, we also want to keep track
+    of the "original" duration (since the requested duration
+    can be modified to simulate certain overheads) and the
+    "known" duration (when simulating lease workloads, this is
+    the actual duration of the lease, which is known a posteriori).
+    """  
+    
+    def __init__(self, requested, known=None):
+        """Constructor
+                        
+        Argument:
+        requested -- The requested duration
+        known -- The known duration (ONLY in simulation)
+        """              
+        self.original = requested
+        self.requested = requested
+        self.accumulated = TimeDelta()
+        self.actual = None
+        # The following is ONLY used in simulation
+        self.known = known
+        
+    def incr(self, t):
+        """Increments the requested duration by an amount.
+                        
+        Argument:
+        t -- The time to add to the requested duration.
+        """               
+        self.requested += t
+        if self.known != None:
+            self.known += t
+            
+    def incr_by_percent(self, pct):
+        """Increments the requested duration by a percentage.
+                        
+        Argument:
+        pct -- The percentage of the requested duration to add.
+        """          
+        factor = 1 + float(pct)/100
+        self.requested = round_datetime_delta(self.requested * factor)
+        if self.known != None:
+            self.known = round_datetime_delta(self.known * factor)
+        
+    def accumulate_duration(self, t):
+        """Increments the accumulated duration by an amount.
+                        
+        Argument:
+        t -- The time to add to the accumulated duration.
+        """        
+        self.accumulated += t
+            
+    def get_remaining_duration(self):
+        """Returns the amount of time required to fulfil the entire
+        requested duration of the lease.
+                        
+        """         
+        return self.requested - self.accumulated
+
+    def get_remaining_known_duration(self):
+        """Returns the amount of time required to fulfil the entire
+        known duration of the lease.
+              
+        ONLY for simulations.
+        """           
+        return self.known - self.accumulated
+            
+    def __repr__(self):
+        """Returns a string representation of the Duration"""
+        return "REQ: %s  |  ACC: %s  |  ACT: %s  |  KNW: %s" % (self.requested, self.accumulated, self.actual, self.known)
+    
+class SoftwareEnvironment(object):
+    """The base class for a lease's software environment"""
+    
+    def __init__(self):
+        """Constructor.
+        
+        Does nothing."""
+        pass
+
+class UnmanagedSoftwareEnvironment(SoftwareEnvironment):
+    """Represents an "unmanaged" software environment.
+    
+    When a lease has an unmanaged software environment,
+    Haizea does not need to perform any actions to prepare
+    a lease's software environment (it assumes that this
+    task is carried out by an external entity, and that
+    software environments can be assumed to be ready
+    when a lease has to start; e.g., if VM disk images are
+    predeployed on all physical nodes)."""
+    
+    def __init__(self):
+        """Constructor.
+        
+        Does nothing."""        
+        pass
+
+class DiskImageSoftwareEnvironment(SoftwareEnvironment):
+    """Reprents a software environment encapsulated in a disk image.
+    
+    When a lease's software environment is contained in a disk image,
+    this disk image must be deployed to the physical nodes the lease
+    is mapped to before the lease can start. This means that the
+    preparation for this lease must be handled by a preparation
+    scheduler (see documentation in lease_scheduler) capable of
+    handling a DiskImageSoftwareEnvironment.
+    """
+    def __init__(self, image_id, image_size):
+        """Constructor.
+        
+        Arguments:
+        image_id -- A unique identifier for the disk image required
+        by the lease.
+        image_size -- The size, in MB, of the disk image. """         
+        self.image_id = image_id
+        self.image_size = image_size
+
+    
+class LeaseWorkload(object):
+    """Reprents a sequence of lease requests.
+    
+    A lease workload is a sequence of lease requests with a specific
+    arrival time for each lease. This class is currently only used
+    to load LWF (Lease Workload File) files. See the Haizea documentation 
+    for details on the LWF format.
+    """    
+    def __init__(self, leases):
+        """Constructor.
+        
+        Arguments:
+        leases -- An ordered list (by arrival time) of leases in the workload
+        """                 
+        self.leases = leases
+        
+
+    def get_leases(self):
+        """Returns the leases in the workload.
+        
+        """  
+        return self.leases
+    
+    @classmethod
+    def from_xml_file(cls, xml_file, inittime = DateTime(0)):
+        """Constructs a lease workload from an XML file.
+        
+        See the Haizea documentation for details on the
+        lease workload XML format.
+        
+        Argument:
+        xml_file -- XML file containing the lease in XML format.
+        inittime -- The starting time of the lease workload. All relative
+        times in the XML file will be converted to absolute times by
+        adding them to inittime. If inittime is not specified, it will
+        arbitrarily be 0000/01/01 00:00:00.
+        """        
+        return cls.__from_xml_element(ET.parse(xml_file).getroot(), inittime)
+
+    # TODO: need to adapt the old SWF trace reading code to new Lease
+    # data structures
+#    @classmethod
+#    def from_swf_file(cls, swf_file, inittime = DateTime(0)):
+#        file = open (tracefile, "r")
+#        requests = []
+#        inittime = config.get("starttime")
+#        for line in file:
+#            if line[0]!=';':
+#                req = None
+#                fields = line.split()
+#                reqtime = float(fields[8])
+#                runtime = int(fields[3]) # 3: RunTime
+#                waittime = int(fields[2])
+#                status = int(fields[10])
+#                
+#                if reqtime > 0:
+#                    tSubmit = int(fields[1]) # 1: Submission time
+#                    tSubmit = inittime + TimeDelta(seconds=tSubmit) 
+#                    vmimage = "NOIMAGE"
+#                    vmimagesize = 600 # Arbitrary
+#                    numnodes = int(fields[7]) # 7: reqNProcs
+#                    resreq = ResourceTuple.create_empty()
+#                    resreq.set_by_type(constants.RES_CPU, 1) # One CPU per VM, should be configurable
+#                    resreq.set_by_type(constants.RES_MEM, 1024) # Should be configurable
+#                    resreq.set_by_type(constants.RES_DISK, vmimagesize + 0) # Should be configurable
+#                    maxdur = TimeDelta(seconds=reqtime)
+#                    if runtime < 0 and status==5:
+#                        # This is a job that got cancelled while waiting in the queue
+#                        continue
+#                    else:
+#                        if runtime == 0:
+#                            runtime = 1 # Runtime of 0 is <0.5 rounded down.
+#                        realdur = TimeDelta(seconds=runtime) # 3: RunTime
+#                    if realdur > maxdur:
+#                        realdur = maxdur
+#                    preemptible = True
+#                    req = BestEffortLease(tSubmit, maxdur, vmimage, vmimagesize, numnodes, resreq, preemptible, realdur)
+#                    requests.append(req)
+#        return requests
+
+    @classmethod
+    def __from_xml_element(cls, element, inittime):
+        """Constructs a lease from an ElementTree element.
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        Argument:
+        element -- Element object containing a "<lease-workload>" element.
+        inittime -- The starting time of the lease workload. All relative
+        times in the XML file will be converted to absolute times by
+        adding them to inittime.  
+        """                
+        reqs = element.findall("lease-requests/lease-request")
+        leases = []
+        for r in reqs:
+            lease = r.find("lease")
+            # Add time lease is submitted
+            submittime = inittime + Parser.DateTimeDeltaFromString(r.get("arrival"))
+            lease.set("submit-time", str(submittime))
+            
+            # If an exact starting time is specified, add the init time
+            exact = lease.find("start/exact")
+            if exact != None:
+                start = inittime + Parser.DateTimeDeltaFromString(exact.get("time"))
+                exact.set("time", str(start))
+                
+            lease = Lease.create_new_from_xml_element(lease)
+            
+            realduration = r.find("realduration")
+            if realduration != None:
+                lease.duration.known = Parser.DateTimeDeltaFromString(realduration.get("time"))
+
+            leases.append(lease)
+            
+        return cls(leases)
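+
+    # Illustrative sketch of the structure parsed above (the Haizea
+    # documentation describes the full LWF format):
+    #
+    #   <lease-workload>
+    #     <lease-requests>
+    #       <lease-request arrival="00:10:00">
+    #         <realduration time="00:45:00"/>
+    #         <lease preemptible="true"> ... </lease>
+    #       </lease-request>
+    #     </lease-requests>
+    #   </lease-workload>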
+        
+class Site(object):
+    """Represents a site containing machines ("nodes").
+    
+    This class is used to load site descriptions in XML format or
+    using a "resources string". Site descriptions can appear in two places:
+    in a LWF file (where the site required for the lease workload is
+    embedded in the LWF file) or in the Haizea configuration file. In both
+    cases, the site description is only used in simulation (in OpenNebula mode,
+    the available nodes and resources are obtained by querying OpenNebula). 
+    
+    Note that this class is distinct from the ResourcePool class, even though
+    both are used to represent "collections of nodes". The Site class is used
+    purely as a convenient way to load site information from an XML file
+    and to manipulate that information elsewhere in Haizea, while the
+    ResourcePool class is responsible for sending enactment commands
+    to nodes, monitoring nodes, etc.
+    """        
+    def __init__(self, nodes, resource_types, attr_types):
+        """Constructor.
+        
+        Arguments:
+        nodes -- A Nodes object
+        resource_types -- A list of valid resource types in this site.
+        attr_types -- A list of valid attribute types in this site
+        """             
+        self.nodes = nodes
+        self.resource_types = resource_types
+        self.attr_types = attr_types
+        
+    @classmethod
+    def from_xml_file(cls, xml_file):
+        """Constructs a site from an XML file.
+        
+        See the Haizea documentation for details on the
+        site XML format.
+        
+        Argument:
+        xml_file -- XML file containing the site in XML format.
+        """                
+        return cls.__from_xml_element(ET.parse(xml_file).getroot())        
+
+    @classmethod
+    def from_lwf_file(cls, lwf_file):
+        """Constructs a site from an LWF file.
+        
+        LWF files can have site information embedded in them. This method
+        loads this site information from an LWF file. See the Haizea 
+        documentation for details on the LWF format.
+        
+        Argument:
+        lwf_file -- LWF file.
+        """                
+        return cls.__from_xml_element(ET.parse(lwf_file).getroot().find("site"))        
+        
+    @classmethod
+    def __from_xml_element(cls, element):     
+        """Constructs a site from an ElementTree element.
+        
+        See the Haizea documentation for details on the
+        site XML format.
+        
+        Argument:
+        element -- Element object containing a "<site>" element.
+        """     
+        resource_types = element.find("resource-types")
+        resource_types = resource_types.get("names").split()
+       
+        # TODO: Attributes
+        attrs = []
+        
+        nodes = Nodes.from_xml_element(element.find("nodes"))
+
+        # Validate nodes
+        for node_set in nodes.node_sets:
+            capacity = node_set[1]
+            for resource_type in capacity.get_resource_types():
+                if resource_type not in resource_types:
+                    # TODO: Raise something more meaningful
+                    raise Exception
+
+        return cls(nodes, resource_types, attrs)
+    
+    @classmethod
+    def from_resources_string(cls, resource_str):
+        """Constructs a site from a "resources string"
+        
+        A "resources string" is a shorthand way of specifying a site
+        with homogeneous resources and no attributes. The format is:
+        
+        <numnodes> <resource_type>:<resource_quantity>[,<resource_type>:<resource_quantity>]*
+        
+        For example: 4 CPU:100,Memory:1024
+        
+        Argument:
+        resource_str -- resources string
+        """    
+
+        resource_str = resource_str.split()
+        numnodes = int(resource_str[0])
+        resources = resource_str[1:]
+        res = {}
+        
+        for r in resources:
+            type, amount = r.split(":")
+            res[type] = int(amount)
+            
+        capacity = Capacity(res.keys())
+        for (type,amount) in res.items():
+            capacity.set_quantity(type, amount)
+        
+        nodes = Nodes([(numnodes,capacity)])
+
+        return cls(nodes, res.keys(), [])
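+
+    # Illustrative usage (a sketch): the example string from the docstring
+    # yields a site with four identical nodes.
+    #
+    #   site = Site.from_resources_string("4 CPU:100 Memory:1024")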
+            
+    def add_resource(self, name, amounts):
+        """Adds a new resource to all nodes in the site.
+                
+        Argument:
+        name -- Name of the resource type
+        amounts -- A list with the amounts of the resource to add to each
+        node. If the resource is single-instance, then this will just
+        be a list with a single element. If multi-instance, each element
+        of the list represent the amount of an instance of the resource.
+        """            
+        self.resource_types.append(name)
+        self.nodes.add_resource(name, amounts)
+    
+    def get_resource_types(self):
+        """Returns the resource types in this site.
+        
+        This method returns a list, each item being a pair with
+        1. the name of the resource type and 2. the maximum number of
+        instances for that resource type across all nodes.
+                
+        """               
+        max_ninstances = dict((rt, 1) for rt in self.resource_types)
+        for node_set in self.nodes.node_sets:
+            capacity = node_set[1]
+            for resource_type in capacity.get_resource_types():
+                if capacity.ninstances[resource_type] > max_ninstances[resource_type]:
+                    max_ninstances[resource_type] = capacity.ninstances[resource_type]
+                    
+        max_ninstances = [(rt,max_ninstances[rt]) for rt in self.resource_types]
+
+        return max_ninstances
+    
+
+
+class Nodes(object):
+    """Represents a collection of machines ("nodes")
+    
+    This class is used to load descriptions of nodes from an XML
+    file. These nodes can appear in two places: in a site description
+    (which, in turn, is loaded by the Site class) or in a lease's
+    resource requirements (describing what nodes, with what resources,
+    are required by the lease).
+    
+    Nodes are stored as one or more "node sets". Each node set has nodes
+    with the exact same resources. So, for example, a lease requiring 100
+    nodes (all identical, except 50 have 1024MB of memory and the other 50
+    have 512MB of memory) doesn't need to enumerate all 100 nodes. Instead,
+    it just has to describe the two "node sets" (indicating that there are
+    50 nodes of one type and 50 of the other). See the Haizea documentation
+    for more details on the XML format.
+    
+    Like the Site class, this class is distinct from the ResourcePool class, even
+    though they both represent a "collection of nodes". See the 
+    Site class documentation for more details.
+    """            
+    def __init__(self, node_sets):
+        """Constructor.
+        
+        Arguments:
+        node_sets -- A list of (n,c) pairs (where n is the number of nodes
+        in the set and c is a Capacity object; all nodes in the set have
+        capacity c).
+        """                 
+        self.node_sets = node_sets
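+    # A sketch of the "node set" idea from the class docstring (hypothetical
+    # values, using the Capacity calls seen elsewhere in this module):
+    #
+    #   c1 = Capacity(["CPU", "Memory"])
+    #   c1.set_quantity("CPU", 100)
+    #   c1.set_quantity("Memory", 1024)
+    #   c2 = Capacity(["CPU", "Memory"])
+    #   c2.set_quantity("CPU", 100)
+    #   c2.set_quantity("Memory", 512)
+    #   nodes = Nodes([(50, c1), (50, c2)])   # 100 nodes, two node sets
+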
+
+    @classmethod
+    def from_xml_element(cls, nodes_element):
+        """Constructs a node collection from an ElementTree element.
+        
+        See the Haizea documentation for details on the
+        <nodes> XML format.
+        
+        Argument:
+        nodes_element -- Element object containing a "<nodes>" element.
+        """           
+        nodesets = []
+        nodesets_elems = nodes_element.findall("node-set")
+        for nodeset_elem in nodesets_elems:
+            r = Capacity([])
+            resources = nodeset_elem.findall("res")
+            for res in resources:
+                type = res.get("type")
+                if len(res.getchildren()) == 0:
+                    amount = int(res.get("amount"))
+                    r.set_ninstances(type, 1)
+                    r.set_quantity(type, amount)
+                else:
+                    instances = res.findall("instance")
+                    r.set_ninstances(type, len(instances))
+                    for i, instance in enumerate(instances):
+                        amount = int(instance.get("amount"))
+                        r.set_quantity_instance(type, i+1, amount)
+                                     
+            numnodes = int(nodeset_elem.get("numnodes"))
+
+            nodesets.append((numnodes,r))
+            
+        return cls(nodesets)
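+    # The <nodes> format parsed above can be inferred from the code; a minimal
+    # (hypothetical) example would be:
+    #
+    #   <nodes>
+    #     <node-set numnodes="2">
+    #       <res type="CPU" amount="100"/>
+    #       <res type="Memory" amount="1024"/>
+    #     </node-set>
+    #   </nodes>
+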
+    
+    def to_xml(self):
+        """Returns an ElementTree XML representation of the nodes
+        
+        See the Haizea documentation for details on the
+        lease XML format.
+        
+        """   
+        nodes = ET.Element("nodes")
+        for (numnodes, capacity) in self.node_sets:
+            nodeset = ET.SubElement(nodes, "node-set")
+            nodeset.set("numnodes", str(numnodes))
+            for type in capacity.get_resource_types():
+                res = ET.SubElement(nodeset, "res")
+                res.set("type", type)
+                ninstances = capacity.get_ninstances(type)
+                if ninstances == 1:
+                    res.set("amount", str(capacity.get_quantity(type)))                
+            
+        return nodes
+    
+    def get_all_nodes(self):
+        """Returns a dictionary mapping individual nodes to capacities
+        
+        """              
+        nodes = {}
+        nodenum = 1
+        for node_set in self.node_sets:
+            numnodes = node_set[0]
+            r = node_set[1]
+            for i in range(numnodes):
+                nodes[nodenum] = r
+                nodenum += 1     
+        return nodes   
+                
+    def add_resource(self, name, amounts):
+        """Adds a new resource to all the nodes
+                
+        Arguments:
+        name -- Name of the resource type
+        amounts -- A list with the amounts of the resource to add to each
+        node. If the resource is single-instance, this will just be a list
+        with a single element. If it is multi-instance, each element of the
+        list represents the amount of one instance of the resource.
+        """              
+        for node_set in self.node_sets:
+            r = node_set[1]
+            r.set_ninstances(name, len(amounts))
+            for ninstance, amount in enumerate(amounts):
+                r.set_quantity_instance(name, ninstance+1, amount)
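+    # For example (hypothetical resource name), adding a multi-instance
+    # resource with two instances of 50 units each to every node:
+    #
+    #   nodes.add_resource("Net-out", [50, 50])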
+

Added: trunk/src/haizea/core/log.py
===================================================================
--- trunk/src/haizea/core/log.py	                        (rev 0)
+++ trunk/src/haizea/core/log.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,44 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import logging
+from haizea.common.utils import get_clock
+from haizea.common.constants import LOGLEVEL_VDEBUG, LOGLEVEL_STATUS
+
+logging.addLevelName(LOGLEVEL_VDEBUG, "VDEBUG")
+logging.addLevelName(LOGLEVEL_STATUS, "STATUS")
+
+# Custom logger that uses our log record
+class HaizeaLogger(logging.Logger):
+    
+    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra):
+        # Inject the current Haizea time into the record through the "extra"
+        # parameter (note that this overrides any "extra" passed by the caller)
+        haizeatime = get_clock().get_time()
+        extra = { "haizeatime" : haizeatime}
+        return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra)
+    
+    def status(self, msg):
+        self.log(logging.getLevelName("STATUS"), msg)
+
+    def vdebug(self, msg):
+        # Since there is such a huge number of vdebug messages, we check the
+        # log level manually to decide whether to call the log function at all.
+        # (This saves quite a few cycles that would otherwise be spent in
+        # logging functions only to determine that the message doesn't have
+        # to be printed.)
+        if self.getEffectiveLevel() == LOGLEVEL_VDEBUG:
+            self.log(logging.getLevelName("VDEBUG"), msg)
\ No newline at end of file

Added: trunk/src/haizea/core/manager.py
===================================================================
--- trunk/src/haizea/core/manager.py	                        (rev 0)
+++ trunk/src/haizea/core/manager.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,867 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""The manager (resource manager) module is the root of Haizea. If you want to
+see where the ball starts rolling, look at the following two functions:
+
+* manager.Manager.__init__()
+* manager.Manager.start()
+
+This module provides the following classes:
+
+* Manager: Haizea itself. Pretty much everything else
+  is contained in this class.
+* Clock: A base class for Haizea's clock.
+* SimulatedClock: A clock for simulations.
+* RealClock: A clock that advances in realtime.
+"""
+ 
+import haizea.core.accounting as accounting
+import haizea.common.constants as constants
+from haizea.core.scheduler.preparation_schedulers.unmanaged import UnmanagedPreparationScheduler
+from haizea.core.scheduler.preparation_schedulers.imagetransfer import ImageTransferPreparationScheduler
+from haizea.core.enact.opennebula import OpenNebulaResourcePoolInfo, OpenNebulaVMEnactment, OpenNebulaDummyDeploymentEnactment
+from haizea.core.enact.simulated import SimulatedResourcePoolInfo, SimulatedVMEnactment, SimulatedDeploymentEnactment
+from haizea.core.frontends.tracefile import TracefileFrontend
+from haizea.core.frontends.opennebula import OpenNebulaFrontend
+from haizea.core.frontends.rpc import RPCFrontend
+from haizea.core.scheduler import UnrecoverableError
+from haizea.core.scheduler.lease_scheduler import LeaseScheduler
+from haizea.core.scheduler.vm_scheduler import VMScheduler
+from haizea.core.scheduler.mapper import class_mappings as mapper_mappings
+from haizea.core.scheduler.slottable import SlotTable
+from haizea.core.scheduler.policy import PolicyManager
+from haizea.core.scheduler.resourcepool import ResourcePool, ResourcePoolWithReusableImages
+from haizea.core.leases import Lease, Site
+from haizea.core.log import HaizeaLogger
+from haizea.core.rpcserver import RPCServer
+from haizea.common.utils import abstract, round_datetime, Singleton, import_class
+from haizea.policies import admission_class_mappings, preemption_class_mappings, host_class_mappings 
+
+import operator
+import logging
+import signal
+import sys, os
+import traceback
+from time import sleep
+from math import ceil
+from mx.DateTime import now, TimeDelta
+
+DAEMON_STDOUT = DAEMON_STDIN = "/dev/null"
+DAEMON_STDERR = "/var/tmp/haizea.err"
+DEFAULT_LOGFILE = "/var/tmp/haizea.log"
+
+class Manager(Singleton):
+    """The root of Haizea
+    
+    This class is the root of Haizea. Pretty much everything else (scheduler,
+    enactment modules, etc.) is contained in this class. The Manager
+    class is meant to be a singleton.
+    
+    """
+    
+    def __init__(self, config, daemon=False, pidfile=None):
+        """Initializes the manager.
+        
+        Arguments:
+        config -- a populated instance of haizea.common.config.RMConfig
+        daemon -- True if Haizea must run as a daemon, False if it must
+                  run in the foreground
+        pidfile -- When running as a daemon, file to save pid to
+        """
+        self.config = config
+        
+        # Create the RM components
+        
+        mode = config.get("mode")
+        
+        self.daemon = daemon
+        self.pidfile = pidfile
+
+        if mode == "simulated":
+            # Simulated-time simulations always run in the foreground
+            clock = self.config.get("clock")
+            if clock == constants.CLOCK_SIMULATED:
+                self.daemon = False
+        elif mode == "opennebula":
+            clock = constants.CLOCK_REAL        
+        
+        self.init_logging()
+                
+        if clock == constants.CLOCK_SIMULATED:
+            starttime = self.config.get("starttime")
+            self.clock = SimulatedClock(self, starttime)
+            self.rpc_server = None
+        elif clock == constants.CLOCK_REAL:
+            wakeup_interval = self.config.get("wakeup-interval")
+            non_sched = self.config.get("non-schedulable-interval")
+            if mode == "opennebula":
+                fastforward = self.config.get("dry-run")
+            else:
+                fastforward = False
+            self.clock = RealClock(self, wakeup_interval, non_sched, fastforward)
+            if fastforward:
+                # No need for an RPC server when doing a dry run
+                self.rpc_server = None
+            else:
+                self.rpc_server = RPCServer(self)
+                    
+        # Enactment modules
+        if mode == "simulated":
+            resources = self.config.get("simul.resources")
+            if resources == "in-tracefile":
+                tracefile = self.config.get("tracefile")
+                site = Site.from_lwf_file(tracefile)
+            elif resources.startswith("file:"):
+                sitefile = resources.split(":", 1)[1]
+                site = Site.from_xml_file(sitefile)
+            else:
+                site = Site.from_resources_string(resources)
+    
+            info_enact = SimulatedResourcePoolInfo(site)
+            vm_enact = SimulatedVMEnactment()
+            deploy_enact = SimulatedDeploymentEnactment()
+        elif mode == "opennebula":
+            # Enactment modules
+            info_enact = OpenNebulaResourcePoolInfo()
+            vm_enact = OpenNebulaVMEnactment()
+            # No deployment in OpenNebula. Using dummy one for now.
+            deploy_enact = OpenNebulaDummyDeploymentEnactment()            
+
+        if mode == "simulated":
+            preparation_type = self.config.get("lease-preparation")
+        elif mode == "opennebula":
+            # No deployment in OpenNebula.
+            preparation_type = constants.PREPARATION_UNMANAGED
+
+        # Resource pool
+        if preparation_type == constants.PREPARATION_TRANSFER:
+            if self.config.get("diskimage-reuse") == constants.REUSE_IMAGECACHES:
+                resourcepool = ResourcePoolWithReusableImages(info_enact, vm_enact, deploy_enact)
+            else:
+                resourcepool = ResourcePool(info_enact, vm_enact, deploy_enact)
+        else:
+            resourcepool = ResourcePool(info_enact, vm_enact, deploy_enact)
+    
+        # Slot table
+        slottable = SlotTable(info_enact.get_resource_types())
+        for n in resourcepool.get_nodes() + resourcepool.get_aux_nodes():
+            rt = slottable.create_resource_tuple_from_capacity(n.capacity)
+            slottable.add_node(n.id, rt)
+
+        # Policy manager
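+        # Each policy is specified in the config file by name. The name is
+        # first looked up in the corresponding *_class_mappings dictionary
+        # (which maps short names to fully-qualified class names), then the
+        # class is imported with import_class and instantiated with the
+        # slot table.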
+        admission = self.config.get("policy.admission")
+        admission = admission_class_mappings.get(admission, admission)
+        admission = import_class(admission)
+        admission = admission(slottable)
+        
+        preemption = self.config.get("policy.preemption")
+        preemption = preemption_class_mappings.get(preemption, preemption)
+        preemption = import_class(preemption)
+        preemption = preemption(slottable)
+
+        host_selection = self.config.get("policy.host-selection")
+        host_selection = host_class_mappings.get(host_selection, host_selection)
+        host_selection = import_class(host_selection)
+        host_selection = host_selection(slottable)
+
+        self.policy = PolicyManager(admission, preemption, host_selection)
+
+        # Preparation scheduler
+        if preparation_type == constants.PREPARATION_UNMANAGED:
+            preparation_scheduler = UnmanagedPreparationScheduler(slottable, resourcepool, deploy_enact)
+        elif preparation_type == constants.PREPARATION_TRANSFER:
+            preparation_scheduler = ImageTransferPreparationScheduler(slottable, resourcepool, deploy_enact)    
+    
+        # VM mapper and scheduler
+        mapper = self.config.get("mapper")
+        mapper = mapper_mappings.get(mapper, mapper)
+        mapper = import_class(mapper)
+        mapper = mapper(slottable, self.policy)
+        vm_scheduler = VMScheduler(slottable, resourcepool, mapper)
+    
+        # Lease Scheduler
+        self.scheduler = LeaseScheduler(vm_scheduler, preparation_scheduler, slottable)
+        
+        # Lease request frontends
+        if mode == "simulated":
+            if clock == constants.CLOCK_SIMULATED:
+                # In pure simulation, we can only use the tracefile frontend
+                self.frontends = [TracefileFrontend(self, self.clock.get_start_time())]
+            elif clock == constants.CLOCK_REAL:
+                # In simulation with a real clock, only the RPC frontend can be used
+                self.frontends = [RPCFrontend(self)]             
+        elif mode == "opennebula":
+            self.frontends = [OpenNebulaFrontend(self)]
+
+        # Statistics collection 
+        self.accounting = accounting.AccountingDataCollection(self, self.config.get("datafile"))
+        
+        self.logger = logging.getLogger("RM")
+
+
+    def init_logging(self):
+        """Initializes logging
+        
+        """
+
+        logger = logging.getLogger("")
+        if self.daemon:
+            handler = logging.FileHandler(self.config.get("logfile"))
+        else:
+            handler = logging.StreamHandler()
+        formatter = logging.Formatter('[%(haizeatime)s] %(name)-7s %(message)s')
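+        # (%(haizeatime)s is not a standard logging attribute; it is injected
+        # into every record by HaizeaLogger.makeRecord in haizea.core.log)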
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        level = logging.getLevelName(self.config.get("loglevel"))
+        logger.setLevel(level)
+        logging.setLoggerClass(HaizeaLogger)
+
+        
+    def daemonize(self):
+        """Daemonizes the Haizea process.
+        
+        Based on code in:  http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
+        
+        """
+        # First fork
+        try:
+            pid = os.fork()
+            if pid > 0: 
+                # Exit first parent
+                sys.exit(0) 
+        except OSError, e:
+            sys.stderr.write("Failed to daemonize Haizea: (%d) %s\n" % (e.errno, e.strerror))
+            sys.exit(1)
+    
+        # Decouple from parent environment.
+        os.chdir(".")
+        os.umask(0)
+        os.setsid()
+    
+        # Second fork
+        try:
+            pid = os.fork()
+            if pid > 0: 
+                # Exit second parent.
+                sys.exit(0) 
+        except OSError, e:
+            sys.stderr.write("Failed to daemonize Haizea: (%d) %s\n" % (e.errno, e.strerror))
+            sys.exit(2)
+            
+        # Open file descriptors and print start message
+        si = file(DAEMON_STDIN, 'r')
+        so = file(DAEMON_STDOUT, 'a+')
+        se = file(DAEMON_STDERR, 'a+', 0)
+        pid = os.getpid()
+        sys.stderr.write("\nStarted Haizea daemon with pid %i\n\n" % pid)
+        sys.stderr.flush()
+        file(self.pidfile,'w+').write("%i\n" % pid)
+        
+        # Redirect standard file descriptors.
+        os.close(sys.stdin.fileno())
+        os.close(sys.stdout.fileno())
+        os.close(sys.stderr.fileno())
+        os.dup2(si.fileno(), sys.stdin.fileno())
+        os.dup2(so.fileno(), sys.stdout.fileno())
+        os.dup2(se.fileno(), sys.stderr.fileno())
+
+    def start(self):
+        """Starts the resource manager"""
+        self.logger.info("Starting resource manager")
+
+        # Create counters to keep track of interesting data.
+        self.accounting.create_counter(constants.COUNTER_ARACCEPTED, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_ARREJECTED, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_IMACCEPTED, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_IMREJECTED, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_BESTEFFORTCOMPLETED, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_QUEUESIZE, constants.AVERAGE_TIMEWEIGHTED)
+        self.accounting.create_counter(constants.COUNTER_DISKUSAGE, constants.AVERAGE_NONE)
+        self.accounting.create_counter(constants.COUNTER_UTILIZATION, constants.AVERAGE_NONE)
+        
+        if self.daemon:
+            self.daemonize()
+        if self.rpc_server:
+            self.rpc_server.start()
+            
+        # Start the clock
+        try:
+            self.clock.run()
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+
+    def stop(self):
+        """Stops the resource manager by stopping the clock"""
+        self.clock.stop()
+        
+    def graceful_stop(self):
+        """Stops the resource manager gracefully and exits"""
+        
+        self.logger.status("Stopping resource manager gracefully...")
+        
+        # Stop collecting data (this finalizes counters)
+        self.accounting.stop()
+        
+        # TODO: When gracefully stopping mid-scheduling, we need to figure out what to
+        #       do with leases that are still running.
+
+        self.print_status()
+        
+        # In debug mode, dump the lease descriptors.
+        for lease in self.scheduler.completed_leases.entries.values():
+            lease.print_contents()
+            
+        # Write all collected data to disk
+        self.accounting.save_to_disk()
+        
+        # Stop RPC server
+        if self.rpc_server != None:
+            self.rpc_server.stop()
+                    
+    def process_requests(self, nexttime):
+        """Process any new requests in the request frontend
+        
+        Checks the request frontend to see if there are any new requests that
+        have to be processed. AR leases are sent directly to the scheduler.
+        Best-effort leases are queued.
+        
+        Arguments:
+        nexttime -- The next time at which the scheduler can allocate resources.
+                    This is meant to be provided by the clock simply as a sanity
+                    measure when running in real time (to avoid scheduling something
+                    "now" and having "now" already be in the past by the time the
+                    scheduling function returns). I.e., nexttime has nothing to do
+                    with whether there are resources available at that time or not.
+        
+        """        
+        
+        # Get requests from frontend
+        requests = []
+        for frontend in self.frontends:
+            requests += frontend.get_accumulated_requests()
+        requests.sort(key=operator.attrgetter("submit_time"))
+        
+        # Request leases and run the scheduling function.
+        try:
+            self.logger.vdebug("Requesting leases")
+            for req in requests:
+                self.scheduler.request_lease(req)
+
+            self.logger.vdebug("Running scheduling function")
+            self.scheduler.schedule(nexttime)
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+
+    def process_starting_reservations(self, time):
+        """Process reservations starting/stopping at specified time"""
+        
+        # The lease scheduler takes care of this.
+        try:
+            self.scheduler.process_starting_reservations(time)
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+
+    def process_ending_reservations(self, time):
+        """Process reservations starting/stopping at specified time"""
+        
+        # The lease scheduler takes care of this.
+        try:
+            self.scheduler.process_ending_reservations(time)
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+         
+    def get_utilization(self, nowtime):
+        """ Gather utilization information at a given time.
+        
+        Each time we process reservations, we report resource utilization 
+        to the accounting module. This utilization information shows what 
+        portion of the physical resources is used by each type of reservation 
+        (e.g., 70% are running a VM, 5% are doing suspensions, etc.) See the 
+        accounting module for details on how this data is stored.
+        Currently we only collect utilization from the VM Scheduler 
+        (in the future, information may also be gathered from the preparation 
+        scheduler).
+        """
+        util = self.scheduler.vm_scheduler.get_utilization(nowtime)
+        self.accounting.append_stat(constants.COUNTER_UTILIZATION, util)             
+             
+    def notify_event(self, lease_id, event):
+        """Notifies an asynchronous event to Haizea.
+        
+        Arguments:
+        lease_id -- ID of lease that is affected by event
+        event -- Event (currently, only the constants.EVENT_END_VM event is supported)
+        """
+        try:
+            lease = self.scheduler.get_lease_by_id(lease_id)
+            self.scheduler.notify_event(lease, event)
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+        
+    def cancel_lease(self, lease_id):
+        """Cancels a lease.
+        
+        Arguments:
+        lease_id -- ID of lease to cancel
+        """    
+        try:
+            lease = self.scheduler.get_lease_by_id(lease_id)
+            self.scheduler.cancel_lease(lease)
+        except UnrecoverableError, exc:
+            self.__unrecoverable_error(exc)
+        except Exception, exc:
+            self.__unexpected_exception(exc)
+            
+    def get_next_changepoint(self):
+        """Return next changepoint in the slot table"""
+        return self.scheduler.slottable.get_next_changepoint(self.clock.get_time())
+   
+    def exists_more_leases(self):
+        """Return True if there are any leases still "in the system" """
+        return self.scheduler.exists_scheduled_leases() or not self.scheduler.is_queue_empty()
+
+    def print_status(self):
+        """Prints status summary."""
+        
+        leases = self.scheduler.leases.get_leases()
+        completed_leases = self.scheduler.completed_leases.get_leases()
+        self.logger.status("--- Haizea status summary ---")
+        self.logger.status("Number of leases (not including completed): %i" % len(leases))
+        self.logger.status("Completed leases: %i" % len(completed_leases))
+        self.logger.status("Completed best-effort leases: %i" % self.accounting.data.counters[constants.COUNTER_BESTEFFORTCOMPLETED])
+        self.logger.status("Queue size: %i" % self.accounting.data.counters[constants.COUNTER_QUEUESIZE])
+        self.logger.status("Accepted AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARACCEPTED])
+        self.logger.status("Rejected AR leases: %i" % self.accounting.data.counters[constants.COUNTER_ARREJECTED])
+        self.logger.status("Accepted IM leases: %i" % self.accounting.data.counters[constants.COUNTER_IMACCEPTED])
+        self.logger.status("Rejected IM leases: %i" % self.accounting.data.counters[constants.COUNTER_IMREJECTED])
+        self.logger.status("---- End summary ----")        
+
+    def __unrecoverable_error(self, exc):
+        """Handles an unrecoverable error.
+        
+        This method prints information on the unrecoverable error and makes Haizea panic.
+        """
+        self.logger.error("An unrecoverable error has happened.")
+        self.logger.error("Original exception:")
+        self.__print_exception(exc.exc, exc.get_traceback())
+        self.logger.error("Unrecoverable error traceback:")
+        self.__print_exception(exc, sys.exc_info()[2])
+        self.__panic()
+
+    def __unexpected_exception(self, exc):
+        """Handles an unrecoverable error.
+        
+        This method prints information on the unrecoverable error and makes Haizea panic.
+        """
+        self.logger.error("An unexpected exception has happened.")
+        self.__print_exception(exc, sys.exc_info()[2])
+        self.__panic()
+            
+    def __print_exception(self, exc, exc_traceback):
+        """Prints an exception's traceback to the log."""
+        tb = traceback.format_tb(exc_traceback)
+        for line in tb:
+            self.logger.error(line)
+        self.logger.error("Message: %s" % exc)
+
+    
+    def __panic(self):
+        """Makes Haizea crash and burn in a panicked frenzy"""
+        
+        self.logger.status("Panicking...")
+
+        # Stop RPC server
+        if self.rpc_server != None:
+            self.rpc_server.stop()
+
+        # Dump state
+        self.print_status()
+        self.logger.error("Next change point (in slot table): %s" % self.get_next_changepoint())
+
+        # Print lease descriptors
+        leases = self.scheduler.leases.get_leases()
+        if len(leases)>0:
+            self.logger.vdebug("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")
+            for lease in leases:
+                lease.print_contents()
+            self.logger.vdebug("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")        
+
+        # Exit
+        treatment = self.config.get("lease-failure-handling")
+        if treatment == constants.ONFAILURE_EXIT_RAISE:
+            raise
+        else:
+            sys.exit(1)
+
+            
+class Clock(object):
+    """Base class for the resource manager's clock.
+    
+    The clock is in charge of periodically waking the resource manager so it
+    will process new requests and handle existing reservations. This is a
+    base class defining abstract methods.
+    
+    """
+    def __init__(self, manager):
+        self.manager = manager
+        self.done = False
+    
+    def get_time(self): 
+        """Return the current time"""
+        return abstract()
+
+    def get_start_time(self): 
+        """Return the time at which the clock started ticking"""
+        return abstract()
+    
+    def get_next_schedulable_time(self): 
+        """Return the next time at which resources could be scheduled.
+        
+        The "next schedulable time" server sanity measure when running 
+        in real time (to avoid scheduling something "now" to actually 
+        have "now" be in the past once the scheduling function returns. 
+        i.e., the "next schedulable time" has nothing to do with whether 
+        there are resources available at that time or not.
+        """
+        return abstract()
+    
+    def run(self):
+        """Start and run the clock. This function is, in effect,
+        the main loop of the resource manager."""
+        return abstract()     
+
+    def stop(self):
+        """Stop the clock.
+        
+        Stopping the clock makes Haizea exit.
+        """
+        self.done = True    
+    
+        
+class SimulatedClock(Clock):
+    """Simulates the passage of time... really fast.
+    
+    The simulated clock steps through time to produce an ideal schedule.
+    See the run() function for a description of how time is incremented
+    exactly in the simulated clock.
+    
+    """
+    
+    def __init__(self, manager, starttime):
+        """Initialize the simulated clock, starting at the provided starttime"""
+        Clock.__init__(self, manager)
+        self.starttime = starttime
+        self.time = starttime
+        self.logger = logging.getLogger("CLOCK")
+        self.statusinterval = self.manager.config.get("status-message-interval")
+       
+    def get_time(self):
+        """See docstring in base Clock class."""
+        return self.time
+    
+    def get_start_time(self):
+        """See docstring in base Clock class."""
+        return self.starttime
+
+    def get_next_schedulable_time(self):
+        """See docstring in base Clock class."""
+        return self.time    
+    
+    def run(self):
+        """Runs the simulated clock through time.
+        
+        The clock starts at the provided start time. At each point in time,
+        it wakes up the resource manager and then skips to the next time
+        where "something" is happening (see __get_next_time for a more
+        rigorous description of this).
+        
+        The clock stops when there is nothing left to do (no pending or 
+        queued requests, and no future reservations).
+        
+        The simulated clock can only work in conjunction with the
+        tracefile request frontend.
+        """
+        self.logger.status("Starting simulated clock")
+        self.manager.accounting.start(self.get_start_time())
+        prevstatustime = self.time
+        
+        # Main loop
+        while not self.done:
+            # Check to see if there are any leases which are ending prematurely.
+            # Note that this is unique to simulation.
+            prematureends = self.manager.scheduler.slottable.get_prematurely_ending_res(self.time)
+            
+            # Notify the resource manager about the premature ends
+            for rr in prematureends:
+                self.manager.notify_event(rr.lease.id, constants.EVENT_END_VM)
+                
+            # Process reservations starting/stopping at the current time and
+            # check if there are any new requests.
+            self.manager.process_ending_reservations(self.time)
+            self.manager.process_starting_reservations(self.time)
+            self.manager.process_requests(self.time)
+            
+            # Since processing requests may have resulted in new reservations
+            # starting now, we process reservations again.
+            self.manager.process_starting_reservations(self.time)
+            # And one final call to deal with nil-duration reservations
+            self.manager.process_ending_reservations(self.time)
+            
+            
+            # Print a status message
+            if self.statusinterval != None and (self.time - prevstatustime).minutes >= self.statusinterval:
+                self.manager.print_status()
+                prevstatustime = self.time
+                
+            # Skip to next point in time.
+            self.time, self.done = self.__get_next_time()
+                    
+        self.logger.status("Simulated clock has stopped")
+
+        # Stop the resource manager
+        self.manager.graceful_stop()
+        
+    
+    def __get_next_time(self):
+        """Determines what is the next point in time to skip to.
+        
+        At a given point in time, the next time is the earliest of the following:
+        * The arrival of the next lease request
+        * The start or end of a reservation (a "changepoint" in the slot table)
+        * A premature end of a lease
+        """
+        
+        # Determine candidate next times
+        tracefrontend = self.__get_trace_frontend()
+        nextchangepoint = self.manager.get_next_changepoint()
+        nextprematureend = self.manager.scheduler.slottable.get_next_premature_end(self.time)
+        nextreqtime = tracefrontend.get_next_request_time()
+        self.logger.debug("Next change point (in slot table): %s" % nextchangepoint)
+        self.logger.debug("Next request time: %s" % nextreqtime)
+        self.logger.debug("Next premature end: %s" % nextprematureend)
+        
+        # The previous time is now
+        prevtime = self.time
+        
+        # We initialize the next time to now too, to detect if
+        # we've been unable to determine what the next time is.
+        newtime = self.time
+        
+        # Find the earliest of the three, accounting for None values
+        if nextchangepoint != None and nextreqtime == None:
+            newtime = nextchangepoint
+        elif nextchangepoint == None and nextreqtime != None:
+            newtime = nextreqtime
+        elif nextchangepoint != None and nextreqtime != None:
+            newtime = min(nextchangepoint, nextreqtime)
+            
+        if nextprematureend != None:
+            newtime = min(nextprematureend, newtime)
+                        
+        # If there are no more leases in the system and no more pending
+        # requests, then we're done.
+        if not self.manager.exists_more_leases() and not tracefrontend.exists_more_requests():
+            self.done = True
+        
+        # We can also be done if we've specified that we want to stop when
+        # the best-effort requests are all done or when they've all been submitted.
+        stopwhen = self.manager.config.get("stop-when")
+        besteffort = self.manager.scheduler.leases.get_leases(type = Lease.BEST_EFFORT)
+        pendingbesteffort = [r for r in tracefrontend.requests if r.get_type() == Lease.BEST_EFFORT]
+        if stopwhen == constants.STOPWHEN_BEDONE:
+            if self.manager.scheduler.is_queue_empty() and len(besteffort) + len(pendingbesteffort) == 0:
+                self.done = True
+        elif stopwhen == constants.STOPWHEN_BESUBMITTED:
+            if len(pendingbesteffort) == 0:
+                self.done = True
+                
+        # If we didn't arrive at a new time, and we're not done, we've fallen into
+        # an infinite loop. This is A Bad Thing(tm).
+        if newtime == prevtime and self.done != True:
+            raise Exception, "Simulated clock has fallen into an infinite loop."
+        
+        return newtime, self.done
+
+    def __get_trace_frontend(self):
+        """Gets the tracefile frontend from the resource manager"""
+        frontends = self.manager.frontends
+        tracef = [f for f in frontends if isinstance(f, TracefileFrontend)]
+        if len(tracef) != 1:
+            raise Exception, "The simulated clock can only work with a tracefile request frontend."
+        else:
+            return tracef[0] 
+        
+        
+class RealClock(Clock):
+    """A realtime clock.
+    
+    The real clock wakes up periodically to, in turn, tell the resource manager
+    to wake up. The real clock can also be run in a "fastforward" mode for
+    debugging purposes (however, unlike the simulated clock, the clock will
+    always skip a fixed amount of time into the future).
+    """
+    def __init__(self, manager, quantum, non_sched, fastforward = False):
+        """Initializes the real clock.
+        
+        Arguments:
+        manager -- the resource manager
+        quantum -- interval (in seconds) between clock wakeups
+        non_sched -- interval (in seconds) after each wakeup during which
+                     no resources can be scheduled (used to compute the
+                     next schedulable time)
+        fastforward -- if True, the clock won't actually sleep
+                       for the duration of the quantum."""
+        Clock.__init__(self, manager)
+        self.fastforward = fastforward
+        if not self.fastforward:
+            self.lastwakeup = None
+        else:
+            self.lastwakeup = round_datetime(now())
+        self.logger = logging.getLogger("CLOCK")
+        self.starttime = self.get_time()
+        self.nextschedulable = None
+        self.nextperiodicwakeup = None
+        self.quantum = TimeDelta(seconds=quantum)
+        self.non_sched = TimeDelta(seconds=non_sched)
+               
+    def get_time(self):
+        """See docstring in base Clock class."""
+        if not self.fastforward:
+            return now()
+        else:
+            return self.lastwakeup
+    
+    def get_start_time(self):
+        """See docstring in base Clock class."""
+        return self.starttime
+
+    def get_next_schedulable_time(self):
+        """See docstring in base Clock class."""
+        return self.nextschedulable    
+    
+    def run(self):
+        """Runs the real clock through time.
+        
+        The clock starts when run() is called. In each iteration of the main loop
+        it will do the following:
+        - Wake up the resource manager
+        - Determine if there will be anything to do before the next
+          time the clock will wake up (after the quantum has passed). Note
+          that this information is readily available on the slot table.
+          If so, set next-wakeup-time to (now + time until slot table
+          event). Otherwise, set it to (now + quantum)
+        - Sleep until next-wake-up-time
+        
+        The clock keeps on tickin' until a SIGINT signal (Ctrl-C if running in the
+        foreground) or a SIGTERM signal is received.
+        """
+        self.logger.status("Starting clock")
+        self.manager.accounting.start(self.get_start_time())
+        
+        try:
+            signal.signal(signal.SIGINT, self.signalhandler_gracefulstop)
+            signal.signal(signal.SIGTERM, self.signalhandler_gracefulstop)
+        except ValueError, exc:
+            # This means Haizea is not the main thread, which will happen
+            # when running it as part of a py.test. We simply ignore this
+            # to allow the test to continue.
+            pass
+        
+        # Main loop
+        while not self.done:
+            self.logger.status("Waking up to manage resources")
+            
+            # Save the waking time. We want to use a consistent time in the 
+            # resource manager operations (if we use now(), we'll get a different
+            # time every time)
+            if not self.fastforward:
+                self.lastwakeup = round_datetime(self.get_time())
+            self.logger.status("Wake-up time recorded as %s" % self.lastwakeup)
+                
+            # Next schedulable time
+            self.nextschedulable = round_datetime(self.lastwakeup + self.non_sched)
+            
+            # Wake up the resource manager
+            self.manager.process_ending_reservations(self.lastwakeup)
+            self.manager.process_starting_reservations(self.lastwakeup)
+            # TODO: Compute nextschedulable here, before processing requests
+            self.manager.process_requests(self.nextschedulable)
+            
+            # Next wakeup time
+            time_now = now()
+            if self.lastwakeup + self.quantum <= time_now:
+                quantums = (time_now - self.lastwakeup) / self.quantum
+                quantums = int(ceil(quantums)) * self.quantum
+                self.nextperiodicwakeup = round_datetime(self.lastwakeup + quantums)
+            else:
+                self.nextperiodicwakeup = round_datetime(self.lastwakeup + self.quantum)
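+            # E.g., with a 60-second quantum: if 130 seconds have passed since
+            # the last wakeup, ceil(130/60) = 3 quantums, so the next periodic
+            # wakeup is lastwakeup + 180 seconds.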
+            
+            # Determine if there's anything to do before the next wakeup time
+            nextchangepoint = self.manager.get_next_changepoint()
+            if nextchangepoint != None and nextchangepoint <= self.nextperiodicwakeup:
+                # We need to wake up earlier to handle a slot table event
+                nextwakeup = nextchangepoint
+                self.logger.status("Going back to sleep. Waking up at %s to handle slot table event." % nextwakeup)
+            else:
+                # Nothing to do before waking up
+                nextwakeup = self.nextperiodicwakeup
+                self.logger.status("Going back to sleep. Waking up at %s to see if something interesting has happened by then." % nextwakeup)
+            
+            # The only exit condition for the real clock is if the "stop-when-no-more-leases"
+            # option is set to True and there's no more work left to do.
+            # TODO: This first check is a kludge. Code should only interact with
+            # options through the configfile's get method. The "stop-when-no-more-leases"
+            # option is currently OpenNebula-specific (while the real clock isn't; it can
+            # be used by both the simulator and the OpenNebula mode). This has to be
+            # fixed.            
+            if self.manager.config._options.has_key("stop-when-no-more-leases"):
+                stop_when_no_more_leases = self.manager.config.get("stop-when-no-more-leases")
+                if stop_when_no_more_leases and not self.manager.exists_more_leases():
+                    self.done = True
+            
+            # Sleep
+            if not self.done:
+                if not self.fastforward:
+                    sleep((nextwakeup - now()).seconds)
+                else:
+                    self.lastwakeup = nextwakeup
+
+        self.logger.status("Real clock has stopped")
+
+        # Stop the resource manager
+        self.manager.graceful_stop()
+    
+    def signalhandler_gracefulstop(self, signum, frame):
+        """Handler for SIGTERM and SIGINT. Allows Haizea to stop gracefully."""
+        
+        sigstr = ""
+        if signum == signal.SIGTERM:
+            sigstr = " (SIGTERM)"
+        elif signum == signal.SIGINT:
+            sigstr = " (SIGINT)"
+        self.logger.status("Received signal %i%s" %(signum, sigstr))
+        self.done = True
+

Added: trunk/src/haizea/core/rpcserver.py
===================================================================
--- trunk/src/haizea/core/rpcserver.py	                        (rev 0)
+++ trunk/src/haizea/core/rpcserver.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,90 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import threading
+import logging 
+from SimpleXMLRPCServer import SimpleXMLRPCServer
+
+DEFAULT_HAIZEA_PORT = 42493
+
+class StoppableSimpleXMLRPCServer(SimpleXMLRPCServer):
+    allow_reuse_address = True
+
+    def serve_forever(self):
+        self.run = True
+        self.socket.settimeout(1)
+        while self.run:
+            self.handle_request()
+
+    def stop(self):
+        self.run = False
+        self.socket.close()
+
+class RPCServer(object):
+    def __init__(self, manager):
+        self.manager = manager
+        self.logger = logging.getLogger("RPCSERVER")
+        self.port = DEFAULT_HAIZEA_PORT
+        self.server = StoppableSimpleXMLRPCServer(("localhost", self.port), allow_none=True)
+        self.server_thread = None
+        self.register_rpc(self.test_func)
+        self.register_rpc(self.cancel_lease)
+        self.register_rpc(self.get_leases)
+        self.register_rpc(self.get_lease)
+        self.register_rpc(self.get_queue)
+        self.register_rpc(self.get_hosts)
+        self.register_rpc(self.notify_event)
+
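+    # Client-side sketch (not part of this module): the registered methods can
+    # be invoked over XML-RPC with the standard library, e.g.:
+    #
+    #   import xmlrpclib
+    #   haizea = xmlrpclib.ServerProxy("http://localhost:42493", allow_none=True)
+    #   print haizea.get_leases()
+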
+    def start(self):
+        # Start the XML-RPC server
+        self.server_thread = threading.Thread( target = self.serve )
+        self.server_thread.start()
+        
+    def stop(self):
+        self.server.stop()
+        self.server_thread.join()
+        
+    def register_rpc(self, func):
+        self.server.register_function(func)
+        
+    def serve(self):
+        self.logger.info("RPC server started on port %i" % self.port)
+        self.server.serve_forever()        
+        
+    def test_func(self):
+        self.logger.info("Test RPC function called")
+        return 0
+    
+    def cancel_lease(self, lease_id):
+        self.manager.cancel_lease(lease_id)
+        return 0
+
+    def get_leases(self):
+        return [l.to_xml_string() for l in self.manager.scheduler.leases.get_leases()]
+
+    def get_lease(self, lease_id):
+        return 0
+
+    def get_queue(self):
+        return [l.to_xml_string() for l in self.manager.scheduler.queue]
+
+    def get_hosts(self):
+        return [h.xmlrpc_marshall() for h in self.manager.scheduler.vm_scheduler.resourcepool.nodes.values()]
+
+    def notify_event(self, lease_id, enactment_id, event):
+        pass
\ No newline at end of file

Added: trunk/src/haizea/core/scheduler/__init__.py
===================================================================
--- trunk/src/haizea/core/scheduler/__init__.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,102 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.core.scheduler.slottable import ResourceReservation
+import haizea.common.constants as constants
+import sys
+
+class SchedException(Exception):
+    """The base class for scheduling exceptions"""
+    pass
+
+class NotSchedulableException(SchedException):
+    """A simple exception class used when a lease cannot be scheduled
+    
+    This exception must be raised when a lease cannot be scheduled
+    """
+    
+    def __init__(self, reason):
+        self.reason = reason
+
+class CancelLeaseException(SchedException):
+    pass
+
+class NormalEndLeaseException(SchedException):
+    pass
+
+class RescheduleLeaseException(SchedException):
+    pass
+
+
+class SchedulingError(Exception):
+    """The base class for scheduling errors"""
+    pass
+
+class InconsistentScheduleError(SchedulingError):
+    pass
+
+class InconsistentLeaseStateError(SchedulingError):
+    def __init__(self, lease, doing):
+        self.lease = lease
+        self.doing = doing
+        
+        self.message = "Lease %i is in an inconsistent state (%i) when %s" % (lease.id, lease.get_state(), doing)
+
+class EnactmentError(SchedulingError):
+    pass
+
+class UnrecoverableError(SchedulingError):
+    def __init__(self, exc):
+        self.exc = exc
+        self.exc_info = sys.exc_info()
+        
+    def get_traceback(self):
+        return self.exc_info[2]
+
+
+class ReservationEventHandler(object):
+    """A wrapper for reservation event handlers.
+    
+    Reservations (in the slot table) can start and they can end. This class
+    provides a convenient wrapper around the event handlers for these two
+    events (see Scheduler.__register_handler for details on event handlers)
+    """
+    def __init__(self, sched, on_start, on_end):
+        self.sched = sched
+        self.on_start_method = on_start
+        self.on_end_method = on_end
+        
+    def on_start(self, lease, rr):
+        self.on_start_method(self.sched, lease, rr)
+        
+    def on_end(self, lease, rr):
+        self.on_end_method(self.sched, lease, rr)        
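+    # For example, a VM scheduler might register its handlers roughly as
+    # follows (hypothetical sketch; the handler names appear in the
+    # lease_scheduler module below):
+    #
+    #   handlers[VMResourceReservation] = ReservationEventHandler(
+    #       sched    = vm_scheduler,
+    #       on_start = VMScheduler._handle_start_vm,
+    #       on_end   = VMScheduler._handle_end_vm)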
+        
+class EarliestStartingTime(object):
+    EARLIEST_NOPREPARATION = 0
+    EARLIEST_MIGRATION = 1
+    
+    def __init__(self, time, type):
+        self.time = time
+        self.type = type  
+        
+class MigrationResourceReservation(ResourceReservation):
+    def __init__(self, lease, start, end, res, vmrr, transfers):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.vmrr = vmrr
+        self.transfers = transfers         
\ No newline at end of file

Added: trunk/src/haizea/core/scheduler/lease_scheduler.py
===================================================================
--- trunk/src/haizea/core/scheduler/lease_scheduler.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/lease_scheduler.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,783 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+
+"""This module provides the main classes for Haizea's lease scheduler, particularly
+the LeaseScheduler class. This module does *not* contain VM scheduling code (i.e.,
+the code that decides what physical hosts a VM should be mapped to), which is
+located in the vm_scheduler module. Lease preparation code (e.g., image transfer 
+scheduling) is located in the preparation_schedulers package. In fact, the
+main purpose of the lease scheduler is to orchestrate these preparation and VM
+schedulers.
+
+This module also includes a Queue class and a LeaseTable class, which are used
+by the lease scheduler.
+"""
+
+import haizea.common.constants as constants
+from haizea.common.utils import round_datetime, get_config, get_accounting, get_clock, get_policy
+from haizea.core.leases import Lease
+from haizea.core.scheduler import RescheduleLeaseException, NormalEndLeaseException, InconsistentLeaseStateError, EnactmentError, UnrecoverableError, NotSchedulableException, EarliestStartingTime
+from haizea.core.scheduler.slottable import ResourceReservation
+from haizea.core.scheduler.vm_scheduler import VMResourceReservation
+from operator import attrgetter
+
+import logging
+
+class LeaseScheduler(object):
+    """The Haizea Lease Scheduler
+    
+    This is the main scheduling class in Haizea. It handles lease scheduling which,
+    in turn, involves VM scheduling, preparation scheduling (such as transferring
+    a VM image), and numerous bookkeeping operations. All these operations are
+    handled by other classes, so this class acts mostly as an orchestrator that
+    coordinates all the different operations involved in scheduling a lease.    
+    """
+    
+    def __init__(self, vm_scheduler, preparation_scheduler, slottable):
+        """Constructor
+        
+        The constructor does little more than create the lease scheduler's
+        attributes. However, it does expect (in the arguments) a fully-constructed 
+        VMScheduler, PreparationScheduler, and SlotTable (these are 
+        constructed in the Manager's constructor). 
+        
+        Arguments:
+        vm_scheduler -- VM scheduler
+        preparation_scheduler -- Preparation scheduler
+        slottable -- Slottable
+        """
+        
+        # Logger
+        self.logger = logging.getLogger("LSCHED")
+        
+        # Assign schedulers and slottable
+        self.vm_scheduler = vm_scheduler
+        self.preparation_scheduler = preparation_scheduler
+        self.slottable = slottable
+
+        # Create other data structures
+        self.queue = Queue()
+        self.leases = LeaseTable()
+        self.completed_leases = LeaseTable()
+
+        # Handlers are callback functions that get called whenever a type of
+        # resource reservation starts or ends. Each scheduler publishes the
+        # handlers it supports through its "handlers" attribute. For example,
+        # the VMScheduler provides _handle_start_vm and _handle_end_vm, which
+        # must be called when a VMResourceReservation start or end is encountered
+        # in the slot table.
+        #
+        # Handlers are called from the process_reservations method of this class
+        self.handlers = {}
+        for (type, handler) in self.vm_scheduler.handlers.items():
+            self.handlers[type] = handler
+
+        for (type, handler) in self.preparation_scheduler.handlers.items():
+            self.handlers[type] = handler
+
+
+    def request_lease(self, lease):
+        """Requests a leases. This is the entry point of leases into the scheduler.
+        
+        Request a lease. The decision on whether to accept or reject a
+        lease is deferred to the policy manager (through its admission
+        control policy). 
+        
+        If the policy determines the lease can be
+        accepted, it is marked as "Pending". This still doesn't
+        guarantee that the lease will be scheduled (e.g., an AR lease
+        could still be rejected if the scheduler determines there are no
+        resources for it; but that is a *scheduling* decision, not an admission
+        control policy decision). The ultimate fate of the lease is determined
+        the next time the scheduling function is called.
+        
+        If the policy determines the lease cannot be accepted, it is marked
+        as rejected.
+
+        Arguments:
+        lease -- Lease object. Its state must be STATE_NEW.
+        """
+        self.logger.info("Lease #%i has been requested." % lease.id)
+        if lease.submit_time == None:
+            lease.submit_time = round_datetime(get_clock().get_time())
+        lease.print_contents()
+        lease.set_state(Lease.STATE_PENDING)
+        if get_policy().accept_lease(lease):
+            self.logger.info("Lease #%i has been marked as pending." % lease.id)
+            self.leases.add(lease)
+        else:
+            self.logger.info("Lease #%i has not been accepted" % lease.id)
+            lease.set_state(Lease.STATE_REJECTED)
+            self.completed_leases.add(lease)
+
+        
+    def schedule(self, nexttime):
+        """ The main scheduling function
+        
+        The scheduling function looks at all pending requests and schedules them.
+        Note that most of the actual scheduling code is contained in the
+        __schedule_lease method and in the VMScheduler and PreparationScheduler classes.
+        
+        Arguments:
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """
+        
+        # Get pending leases
+        pending_leases = self.leases.get_leases_by_state(Lease.STATE_PENDING)  
+        ar_leases = [req for req in pending_leases if req.get_type() == Lease.ADVANCE_RESERVATION]
+        im_leases = [req for req in pending_leases if req.get_type() == Lease.IMMEDIATE]
+        be_leases = [req for req in pending_leases if req.get_type() == Lease.BEST_EFFORT]
+        
+        # Queue best-effort leases
+        for lease in be_leases:
+            self.__enqueue(lease)
+            lease.set_state(Lease.STATE_QUEUED)
+            self.logger.info("Queued best-effort lease request #%i, %i nodes for %s." % (lease.id, lease.numnodes, lease.duration.requested))
+
+        # Schedule immediate leases
+        for lease in im_leases:
+            self.logger.info("Scheduling immediate lease #%i (%i nodes)" % (lease.id, lease.numnodes))
+            lease.print_contents()
+       
+            try:
+                self.__schedule_lease(lease, nexttime=nexttime)
+                self.logger.info("Immediate lease #%i has been scheduled." % lease.id)
+                get_accounting().incr_counter(constants.COUNTER_IMACCEPTED, lease.id)
+                lease.print_contents()
+            except NotSchedulableException, exc:
+                get_accounting().incr_counter(constants.COUNTER_IMREJECTED, lease.id)
+                self.logger.info("Immediate lease request #%i cannot be scheduled: %s" % (lease.id, exc.reason))
+                lease.set_state(Lease.STATE_REJECTED)
+                self.completed_leases.add(lease)
+                self.leases.remove(lease)            
+
+        # Schedule AR requests
+        for lease in ar_leases:
+            self.logger.info("Scheduling AR lease #%i, %i nodes from %s to %s." % (lease.id, lease.numnodes, lease.start.requested, lease.start.requested + lease.duration.requested))
+            lease.print_contents()
+            
+            try:
+                self.__schedule_lease(lease, nexttime)
+                self.logger.info("AR lease #%i has been scheduled." % lease.id)
+                get_accounting().incr_counter(constants.COUNTER_ARACCEPTED, lease.id)
+                lease.print_contents()
+            except NotSchedulableException, exc:
+                get_accounting().incr_counter(constants.COUNTER_ARREJECTED, lease.id)
+                self.logger.info("AR lease request #%i cannot be scheduled: %s" % (lease.id, exc.reason))
+                lease.set_state(Lease.STATE_REJECTED)
+                self.completed_leases.add(lease)
+                self.leases.remove(lease)            
+            
+        # Process queue (i.e., traverse queue in search of leases that can be scheduled)
+        self.__process_queue(nexttime)
+        
+    
+    def process_starting_reservations(self, nowtime):
+        """Processes starting reservations
+        
+        This method checks the slottable to see if there are any reservations that are
+        starting at "nowtime". If so, the appropriate handler is called.
+
+        Arguments:
+        nowtime -- Time at which to check for starting reservations.
+        """
+
+        # Find starting reservations
+        starting = self.slottable.get_reservations_starting_at(nowtime)
+        starting = [res for res in starting if res.state == ResourceReservation.STATE_SCHEDULED]
+        
+        # Process starting reservations
+        for rr in starting:
+            lease = rr.lease
+            # Call the appropriate handler, and catch exceptions and errors.
+            try:
+                self.handlers[type(rr)].on_start(lease, rr)
+                
+            # An InconsistentLeaseStateError is raised when the lease is in an inconsistent
+            # state. This is usually indicative of a programming error, but not necessarily
+            # one that affects all leases, so we just fail this lease. Note that Haizea can also
+            # be configured to stop immediately when a lease fails.
+            except InconsistentLeaseStateError, exc:
+                self.fail_lease(lease, exc)
+            # An EnactmentError is raised when the handler had to perform an enactment action
+            # (e.g., stopping a VM), and that enactment action failed. This is currently treated
+            # as a non-recoverable error for the lease, and the lease is failed.
+            except EnactmentError, exc:
+                self.fail_lease(lease, exc)
+
+            # Other exceptions are not expected, and generally indicate a programming error.
+            # Thus, they are propagated upwards to the Manager where they will make
+            # Haizea crash and burn.
+
+    def process_ending_reservations(self, nowtime):
+        """Processes ending reservations
+        
+        This method checks the slottable to see if there are any reservations that are
+        ending at "nowtime". If so, the appropriate handler is called.
+
+        Arguments:
+        nowtime -- Time at which to check for ending reservations.
+        """
+
+        # Find ending reservations
+        ending = self.slottable.get_reservations_ending_at(nowtime)
+        ending = [res for res in ending if res.state == ResourceReservation.STATE_ACTIVE]
+
+        # Process ending reservations
+        for rr in ending:
+            lease = rr.lease
+            self._handle_end_rr(rr)
+            
+            # Call the appropriate handler, and catch exceptions and errors.
+            try:
+                self.handlers[type(rr)].on_end(lease, rr)
+                
+            # A RescheduleLeaseException indicates that the lease has to be rescheduled
+            except RescheduleLeaseException, exc:
+                # Currently, the only leases that get rescheduled are best-effort leases,
+                # once they've been suspended.
+                if rr.lease.get_type() == Lease.BEST_EFFORT:
+                    if lease.get_state() == Lease.STATE_SUSPENDED_PENDING:
+                        # Put back in the queue, in the same order it arrived
+                        self.__enqueue_in_order(lease)
+                        lease.set_state(Lease.STATE_SUSPENDED_QUEUED)
+                    else:
+                        raise InconsistentLeaseStateError(lease, doing = "rescheduling best-effort lease")
+                    
+            # A NormalEndLeaseException indicates that the end of this reservation marks
+            # the normal end of the lease.
+            except NormalEndLeaseException, msg:
+                self._handle_end_lease(lease)
+                
+            # An InconsistentLeaseStateError is raised when the lease is in an inconsistent
+            # state. This is usually indicative of a programming error, but not necessarily
+            # one that affects all leases, so we just fail this lease. Note that Haizea can also
+            # be configured to stop immediately when a lease fails.
+            except InconsistentLeaseStateError, exc:
+                self.fail_lease(lease, exc)
+                
+            # An EnactmentError is raised when the handler had to perform an enactment action
+            # (e.g., stopping a VM), and that enactment action failed. This is currently treated
+            # as a non-recoverable error for the lease, and the lease is failed.
+            except EnactmentError, exc:
+                self.fail_lease(lease, exc)
+                
+            # Other exceptions are not expected, and generally indicate a programming error.
+            # Thus, they are propagated upwards to the Manager where they will make
+            # Haizea crash and burn.
+
+    def get_lease_by_id(self, lease_id):
+        """Gets a lease with the given ID
+        
+        This method is useful for UIs (like the CLI) that operate on the lease ID.
+        If no lease with a given ID is found, None is returned.
+
+        Arguments:
+        lease_id -- The ID of the lease
+        """
+        if not self.leases.has_lease(lease_id):
+            return None
+        else:
+            return self.leases.get_lease(lease_id)
+
+    def cancel_lease(self, lease):
+        """Cancels a lease.
+        
+        Arguments:
+        lease -- Lease to cancel
+        """
+        time = get_clock().get_time()
+        
+        self.logger.info("Cancelling lease %i..." % lease.id)
+            
+        lease_state = lease.get_state()
+        
+        if lease_state == Lease.STATE_PENDING:
+            # If a lease is pending, we just need to change its state and
+            # remove it from the lease table. Since this is done at the
+            # end of this method, we do nothing here.
+            pass
+
+        elif lease_state == Lease.STATE_ACTIVE:
+            # If a lease is active, that means we have to shut down its VMs to cancel it.
+            self.logger.info("Lease %i is active. Stopping active reservation..." % lease.id)
+            vmrr = lease.get_active_vmrrs(time)[0]
+            self.vm_scheduler._handle_unscheduled_end_vm(lease, vmrr)
+
+        elif lease_state in [Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED, Lease.STATE_READY, Lease.STATE_RESUMED_READY]:
+            # If a lease is scheduled or ready, we just need to cancel all future reservations
+            # for that lease
+            self.logger.info("Lease %i is scheduled. Cancelling reservations." % lease.id)
+            rrs = lease.get_scheduled_reservations()
+            for r in rrs:
+                self.slottable.remove_reservation(r)
+            
+        elif lease_state in [Lease.STATE_QUEUED, Lease.STATE_SUSPENDED_QUEUED]:
+            # If a lease is in the queue, waiting to be scheduled, cancelling
+            # just requires removing it from the queue
+            
+            self.logger.info("Lease %i is in the queue. Removing..." % lease.id)
+            self.queue.remove_lease(lease)
+        else:
+            # Cancelling in any of the other states is currently unsupported
+            raise InconsistentLeaseStateError(lease, doing = "cancelling the VM")
+            
+        # Change state, and remove from lease table
+        lease.set_state(Lease.STATE_CANCELLED)
+        self.completed_leases.add(lease)
+        self.leases.remove(lease)
+
+    
+    def fail_lease(self, lease, exc=None):
+        """Transitions a lease to a failed state, and does any necessary cleaning up
+        
+        Arguments:
+        lease -- Lease to fail
+        exc -- The exception that made the lease fail
+        """
+        treatment = get_config().get("lease-failure-handling")
+        
+        if treatment == constants.ONFAILURE_CANCEL:
+            # In this case, a lease failure is handled by cancelling the lease,
+            # but allowing Haizea to continue to run normally.
+            rrs = lease.get_scheduled_reservations()
+            for r in rrs:
+                self.slottable.remove_reservation(r)
+            lease.set_state(Lease.STATE_FAIL)
+            self.completed_leases.add(lease)
+            self.leases.remove(lease)
+        elif treatment == constants.ONFAILURE_EXIT or treatment == constants.ONFAILURE_EXIT_RAISE:
+            # In this case, a lease failure makes Haizea exit. This is useful when debugging,
+            # so we can immediately know about any errors.
+            raise UnrecoverableError(exc)
+            
+    
+    def notify_event(self, lease, event):
+        """Notifies an event that affects a lease.
+        
+        This is the entry point of asynchronous events into the scheduler. Currently,
+        the only supported event is the premature end of a VM (i.e., before its
+        scheduled end). Other events will emerge when we integrate Haizea with OpenNebula 1.4,
+        since that version will support sending asynchronous events to Haizea.
+        
+        Arguments:
+        lease -- Lease the event refers to
+        event -- Event type
+        """
+        time = get_clock().get_time()
+        if event == constants.EVENT_END_VM:
+            vmrr = lease.get_last_vmrr()
+            self._handle_end_rr(vmrr)
+            # TODO: Exception handling
+            self.vm_scheduler._handle_unscheduled_end_vm(lease, vmrr)
+            self._handle_end_lease(lease)
+            nexttime = get_clock().get_next_schedulable_time()
+            # We need to reevaluate the schedule to see if there are any 
+            # leases scheduled in the future that could be rescheduled
+            # to start earlier
+            self.reevaluate_schedule(nexttime)
+
+
+    def reevaluate_schedule(self, nexttime):
+        """Reevaluates the schedule.
+        
+        This method can be called whenever resources are freed up
+        unexpectedly (e.g., a lease that ends earlier than expected)
+        to check if any leases scheduled in the future could be
+        rescheduled to start earlier on the freed up resources.
+        
+        Currently, this method only checks if best-effort leases
+        scheduled in the future (using a backfilling algorithm)
+        can be rescheduled.
+        
+        Arguments:
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """        
+        future = self.vm_scheduler.get_future_reschedulable_leases()
+        for l in future:
+            # We can only reschedule leases in the following four states
+            if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):
+                # For each reschedulable lease already scheduled in the
+                # future, we cancel the lease's preparation and
+                # the last scheduled VM.
+                vmrr = l.get_last_vmrr()
+                self.preparation_scheduler.cancel_preparation(l)
+                self.vm_scheduler.cancel_vm(vmrr)
+                l.remove_vmrr(vmrr)
+                if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):
+                    l.set_state(Lease.STATE_PENDING)
+                elif l.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
+                    l.set_state(Lease.STATE_SUSPENDED_PENDING)
+
+                # At this point, the lease just looks like a regular
+                # pending lease that can be handed off directly to the
+                # __schedule_lease method.
+                # TODO: We should do exception handling here. However,
+                # since we can only reschedule best-effort leases that were
+                # originally scheduled in the future, the scheduling function 
+                # should always be able to schedule the lease (worst-case 
+                # scenario is that it simply replicates the previous schedule)
+                self.__schedule_lease(l, nexttime)
+
+
+    def is_queue_empty(self):
+        """Return True is the queue is empty, False otherwise"""
+        return self.queue.is_empty()
+
+    
+    def exists_scheduled_leases(self):
+        """Return True if there are any leases scheduled in the future"""
+        return not self.slottable.is_empty()    
+
+            
+    def __process_queue(self, nexttime):
+        """ Traverses the queue in search of leases that can be scheduled.
+        
+        This method processes the queue in order, but takes into account that
+        it may be possible to schedule leases in the future (using a 
+        backfilling algorithm).
+        
+        Arguments:
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """        
+        
+        done = False
+        newqueue = Queue()
+        while not done and not self.is_queue_empty():
+            if not self.vm_scheduler.can_schedule_in_future() and self.slottable.is_full(nexttime, restype = constants.RES_CPU):
+                self.logger.debug("Used up all future reservations and slot table is full. Skipping rest of queue.")
+                done = True
+            else:
+                lease = self.queue.dequeue()
+                try:
+                    self.logger.info("Next request in the queue is lease %i. Attempting to schedule..." % lease.id)
+                    lease.print_contents()
+                    self.__schedule_lease(lease, nexttime)
+                    get_accounting().decr_counter(constants.COUNTER_QUEUESIZE, lease.id)
+                except NotSchedulableException, msg:
+                    # Put back on queue
+                    newqueue.enqueue(lease)
+                    self.logger.info("Lease %i could not be scheduled at this time." % lease.id)
+                    if get_config().get("backfilling") == constants.BACKFILLING_OFF:
+                        done = True
+        
+        for lease in self.queue:
+            newqueue.enqueue(lease)
+        
+        self.queue = newqueue 
+    
+
+    def __schedule_lease(self, lease, nexttime):            
+        """ Schedules a lease.
+        
+        This method orchestrates the preparation and VM scheduler to
+        schedule a lease.
+        
+        Arguments:
+        lease -- Lease to schedule.
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """       
+                
+        lease_state = lease.get_state()
+        migration = get_config().get("migration")
+        
+        # Determine earliest start time in each node
+        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
+            # This lease might require preparation. Ask the preparation
+            # scheduler for the earliest starting time.
+            earliest = self.preparation_scheduler.find_earliest_starting_times(lease, nexttime)
+        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
+            # This lease may have to be migrated.
+            # We have to ask both the preparation scheduler and the VM
+            # scheduler what would be the earliest possible starting time
+            # on each node, assuming we have to transfer files between
+            # nodes.
+
+            node_ids = self.slottable.nodes.keys()
+            earliest = {}
+            if migration == constants.MIGRATE_NO:
+                # If migration is disabled, the earliest starting time
+                # is simply nexttime.
+                for node in node_ids:
+                    earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
+            else:
+                # Otherwise, we ask the preparation scheduler and the VM
+                # scheduler how long it would take them to migrate the
+                # lease state.
+                prep_migr_time = self.preparation_scheduler.estimate_migration_time(lease)            
+                vm_migr_time = self.vm_scheduler.estimate_migration_time(lease)
+                for node in node_ids:
+                    earliest[node] = EarliestStartingTime(nexttime + prep_migr_time + vm_migr_time, EarliestStartingTime.EARLIEST_MIGRATION)
+        else:
+            raise InconsistentLeaseStateError(lease, doing = "scheduling a best-effort lease")
+
+        # Now, we give the lease to the VM scheduler, along with the
+        # earliest possible starting times. If the VM scheduler can
+        # schedule VMs for this lease, it will return a resource reservation
+        # that we can add to the slot table, along with a list of
+        # leases that have to be preempted.
+        # If the VM scheduler can't schedule the VMs, it will throw an
+        # exception (we don't catch it here, and it is just thrown up
+        # to the calling method).
+        (vmrr, preemptions) = self.vm_scheduler.schedule(lease, nexttime, earliest)
+                                
+        # If scheduling the lease involves preempting other leases,
+        # go ahead and preempt them.
+        if len(preemptions) > 0:
+            self.logger.info("Must preempt leases %s to make room for lease #%i" % ([l.id for l in preemptions], lease.id))
+            for l in preemptions:
+                self.__preempt_lease(l, preemption_time=vmrr.start)
+                
+        # Schedule lease preparation
+        is_ready = False
+        preparation_rrs = []
+        if lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration != constants.MIGRATE_NO:
+            # The lease might require migration
+            migr_rrs = self.preparation_scheduler.schedule_migration(lease, vmrr, nexttime)
+            if len(migr_rrs) > 0:
+                end_migr = migr_rrs[-1].end
+            else:
+                end_migr = nexttime
+            migr_rrs += self.vm_scheduler.schedule_migration(lease, vmrr, end_migr)
+            migr_rrs.reverse()
+            for migr_rr in migr_rrs:
+                vmrr.pre_rrs.insert(0, migr_rr)
+            if len(migr_rrs) == 0:
+                is_ready = True
+        elif lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration == constants.MIGRATE_NO:
+            # No migration means the lease is ready
+            is_ready = True
+        elif lease_state in (Lease.STATE_PENDING, Lease.STATE_QUEUED):
+            # The lease might require initial preparation
+            preparation_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, earliest)
+
+        # At this point, the lease is feasible.
+        # Commit changes by adding RRs to lease and to slot table
+        
+        # Add preparation RRs (if any) to lease
+        for rr in preparation_rrs:
+            lease.append_preparationrr(rr)
+        
+        # Add VMRR to lease
+        lease.append_vmrr(vmrr)
+        
+
+        # Add resource reservations to slottable
+        
+        # Preparation RRs (if any)
+        for rr in preparation_rrs:
+            self.slottable.add_reservation(rr)
+        
+        # Pre-VM RRs (if any)
+        for rr in vmrr.pre_rrs:
+            self.slottable.add_reservation(rr)
+            
+        # VM
+        self.slottable.add_reservation(vmrr)
+        
+        # Post-VM RRs (if any)
+        for rr in vmrr.post_rrs:
+            self.slottable.add_reservation(rr)
+          
+        # Change lease state
+        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
+            lease.set_state(Lease.STATE_SCHEDULED)
+            if is_ready:
+                lease.set_state(Lease.STATE_READY)
+        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
+            lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)
+
+        lease.print_contents()
+
+        
+    def __preempt_lease(self, lease, preemption_time):
+        """ Preempts a lease.
+        
+        This method preempts a lease such that any resources allocated
+        to that lease after a given time are freed up. This may require
+        scheduling the lease to suspend before that time, or cancelling
+        the lease altogether.
+        
+        Arguments:
+        lease -- Lease to preempt.
+        preemption_time -- Time at which lease must be preempted
+        """       
+        
+        self.logger.info("Preempting lease #%i..." % (lease.id))
+        self.logger.vdebug("Lease before preemption:")
+        lease.print_contents()
+        vmrr = lease.get_last_vmrr()
+        
+        if vmrr.state == ResourceReservation.STATE_SCHEDULED and vmrr.start >= preemption_time:
+            self.logger.debug("Lease was set to start in the middle of the preempting lease.")
+            must_cancel_and_requeue = True
+        else:
+            susptype = get_config().get("suspension")
+            if susptype == constants.SUSPENSION_NONE:
+                must_cancel_and_requeue = True
+            else:
+                can_suspend = self.vm_scheduler.can_suspend_at(lease, preemption_time)
+                if not can_suspend:
+                    self.logger.debug("Suspending the lease does not meet scheduling threshold.")
+                    must_cancel_and_requeue = True
+                else:
+                    if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:
+                        self.logger.debug("Can't suspend lease because only suspension of single-node leases is allowed.")
+                        must_cancel_and_requeue = True
+                    else:
+                        self.logger.debug("Lease can be suspended")
+                        must_cancel_and_requeue = False
+                    
+        if must_cancel_and_requeue:
+            self.logger.info("... lease #%i has been cancelled and requeued." % lease.id)
+            self.preparation_scheduler.cancel_preparation(lease)
+            self.vm_scheduler.cancel_vm(vmrr)
+            lease.remove_vmrr(vmrr)
+            # TODO: Take into account other states
+            if lease.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
+                lease.set_state(Lease.STATE_SUSPENDED_QUEUED)
+            else:
+                lease.set_state(Lease.STATE_QUEUED)
+            self.__enqueue_in_order(lease)
+        else:
+            self.logger.info("... lease #%i will be suspended at %s." % (lease.id, preemption_time))
+            self.vm_scheduler.preempt_vm(vmrr, preemption_time)            
+            
+        self.logger.vdebug("Lease after preemption:")
+        lease.print_contents()
+                
+  
+    def __enqueue(self, lease):
+        """Queues a best-effort lease request
+        
+        Arguments:
+        lease -- Lease to be queued
+        """
+        get_accounting().incr_counter(constants.COUNTER_QUEUESIZE, lease.id)
+        self.queue.enqueue(lease)
+
+
+    def __enqueue_in_order(self, lease):
+        """Queues a lease in order (currently, time of submission)
+        
+        Arguments:
+        lease -- Lease to be queued
+        """
+        get_accounting().incr_counter(constants.COUNTER_QUEUESIZE, lease.id)
+        self.queue.enqueue_in_order(lease)
+
+
+    def _handle_end_rr(self, rr):
+        """Performs actions that have to be done each time a reservation ends.
+        
+        Arguments:
+        rr -- Reservation that ended
+        """
+        self.slottable.remove_reservation(rr)
+        
+
+    def _handle_end_lease(self, l):
+        """Performs actions that have to be done each time a lease ends.
+        
+        Arguments:
+        lease -- Lease that has ended
+        """
+        l.set_state(Lease.STATE_DONE)
+        l.duration.actual = l.duration.accumulated
+        l.end = round_datetime(get_clock().get_time())
+        self.preparation_scheduler.cleanup(l)
+        self.completed_leases.add(l)
+        self.leases.remove(l)
+        if l.get_type() == Lease.BEST_EFFORT:
+            get_accounting().incr_counter(constants.COUNTER_BESTEFFORTCOMPLETED, l.id)
+        
+
+class Queue(object):
+    """A simple queue for leases
+    
+    This class is a simple queue container for leases, with some
+    extra syntactic sugar added for convenience.    
+    """    
+
+    def __init__(self):
+        self.__q = []
+        
+    def is_empty(self):
+        return len(self.__q)==0
+    
+    def enqueue(self, r):
+        self.__q.append(r)
+    
+    def dequeue(self):
+        return self.__q.pop(0)
+    
+    def enqueue_in_order(self, r):
+        self.__q.append(r)
+        self.__q.sort(key=attrgetter("submit_time"))
+
+    def length(self):
+        return len(self.__q)
+    
+    def has_lease(self, lease_id):
+        return (1 == len([l for l in self.__q if l.id == lease_id]))
+    
+    def get_lease(self, lease_id):
+        return [l for l in self.__q if l.id == lease_id][0]
+    
+    def remove_lease(self, lease):
+        self.__q.remove(lease)
+    
+    def __iter__(self):
+        return iter(self.__q)
+        
+class LeaseTable(object):
+    """A simple container for leases
+    
+    This class is a simple dictionary-like container for leases, with some
+    extra syntactic sugar added for convenience.    
+    """    
+    
+    def __init__(self):
+        self.entries = {}
+        
+    def has_lease(self, lease_id):
+        return self.entries.has_key(lease_id)
+        
+    def get_lease(self, lease_id):
+        return self.entries[lease_id]
+    
+    def is_empty(self):
+        return len(self.entries)==0
+    
+    def remove(self, lease):
+        del self.entries[lease.id]
+        
+    def add(self, lease):
+        self.entries[lease.id] = lease
+        
+    def get_leases(self, type=None):
+        if type==None:
+            return self.entries.values()
+        else:
+            return [e for e in self.entries.values() if e.get_type() == type]
+
+    def get_leases_by_state(self, state):
+        return [e for e in self.entries.values() if e.get_state() == state]
+ 
+        
\ No newline at end of file

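The handler mechanism described in LeaseScheduler above (each scheduler publishes a "handlers" dictionary keyed by resource reservation class, and the lease scheduler looks up type(rr) to call on_start/on_end) can be illustrated with a minimal sketch. All names below (ReservationHandler, FakeVMRR, and the two callbacks) are hypothetical stand-ins, not Haizea classes:

    class ReservationHandler(object):
        """Pairs the callbacks invoked when a reservation starts or ends."""
        def __init__(self, on_start, on_end):
            self.on_start = on_start
            self.on_end = on_end

    class FakeVMRR(object):
        """Stand-in for VMResourceReservation."""
        def __init__(self, lease):
            self.lease = lease

    def handle_start_vm(lease, rr):
        print("Starting VMs of %s" % lease)

    def handle_end_vm(lease, rr):
        print("Stopping VMs of %s" % lease)

    # Each scheduler publishes its handlers keyed by reservation class...
    handlers = {FakeVMRR: ReservationHandler(handle_start_vm, handle_end_vm)}

    # ...and the lease scheduler dispatches on the reservation's concrete type.
    rr = FakeVMRR(lease="lease-42")
    handlers[type(rr)].on_start(rr.lease, rr)
    handlers[type(rr)].on_end(rr.lease, rr)
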
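The queue traversal in LeaseScheduler.__process_queue follows a simple pattern: leases are dequeued in order, leases that cannot be scheduled yet are kept (in their original order) for the next scheduling pass, and, when backfilling is disabled, the first lease that does not fit stops the traversal. A minimal sketch of that control flow, assuming a hypothetical try_schedule callable in place of __schedule_lease:

    def process_queue(queue, try_schedule, backfilling_enabled):
        """Return the leases that could not be scheduled, in their original order."""
        requeued = []
        for lease in queue:
            if requeued and not backfilling_enabled:
                # Without backfilling we stop at the first lease that does not
                # fit and keep everything behind it in the queue.
                requeued.append(lease)
                continue
            if not try_schedule(lease):
                requeued.append(lease)
        return requeued

    # Example: only leases needing at most 4 nodes fit right now.
    pending = [("L1", 2), ("L2", 8), ("L3", 1)]
    fits_now = lambda lease: lease[1] <= 4
    print(process_queue(pending, fits_now, backfilling_enabled=True))   # [('L2', 8)]
    print(process_queue(pending, fits_now, backfilling_enabled=False))  # [('L2', 8), ('L3', 1)]
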
Added: trunk/src/haizea/core/scheduler/mapper.py
===================================================================
--- trunk/src/haizea/core/scheduler/mapper.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/mapper.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,266 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides the base class for writing custom "mappers" and the
+default greedy mapper used in Haizea. A mapper is a class with a single function
+"map" that takes a set of requested resources (typically corresponding to
+VMs) and maps them to physical nodes (if such a mapping exists).
+"""
+
+from haizea.common.utils import abstract
+from haizea.core.scheduler.slottable import ResourceTuple, AvailabilityWindow
+import haizea.common.constants as constants
+import operator
+
+# This dictionary provides a shorthand notation for any mappers
+# included in this module (this shorthand notation can be used in
+# the configuration file)
+class_mappings = {"greedy": "haizea.core.scheduler.mapper.GreedyMapper"}
+
+class Mapper(object):
+    """Base class for mappers
+    
+    """
+    
+    def __init__(self, slottable, policy):
+        """Constructor
+        
+        Arguments
+        slottable -- A fully constructed SlotTable
+        policy -- A fully constructed PolicyManager
+        """
+        self.slottable = slottable
+        self.policy = policy
+    
+    
+    def map(self, lease, requested_resources, start, end, strictend, onlynodes = None):
+        """The mapping function
+        
+        The mapping function takes a set of requested resources and maps
+        them to physical resources (based on the availability 
+        in the slot table) in a specified time interval. The mapper
+        may return a mapping that only satisfies part of the specified
+        time interval.
+        
+        Arguments:
+        lease -- Lease whose nodes are being mapped
+        requested_resources -- A dictionary mapping lease nodes (integers) to
+        ResourceTuples (representing the desired amount of resources for
+        that lease node)
+        start -- Starting time of the interval during which the resources
+        are required
+        end -- Ending time of the interval
+        strictend -- If True, the only valid mappings are those that span
+        the entire requested interval. If False, the mapper is allowed to
+        return mappings that only span part of the interval (this reduced
+        interval must always start at "start"; the earlier end time is
+        returned as a return value)
+        onlynodes -- List of physical nodes. Only look for a mapping in
+        these nodes.
+        
+        Returns:
+        mapping -- A dictionary mapping lease nodes to physical nodes
+        maxend -- The end of the interval for which a mapping was found.
+        As noted in argument "strictend", this return value might not
+        be the same as "end"
+        preempting -- Leases that would have to be preempted for the
+        mapping to be valid.
+        
+        If no mapping is found, the three return values are set to None
+        """
+        abstract()
+
+
+class GreedyMapper(Mapper):
+    """Haizea's default greedy mapper
+    
+    Haizea uses a greedy algorithm to determine how VMs are mapped to
+    physical resources at a specific point in time (determining that point
+    in time, when using best-effort scheduling, is determined in the lease
+    and VM scheduling classes). 
+    
+    The way the algorithm works is by, first, greedily ordering the
+    physical nodes from "most desirable" to "least desirable". For example,
+    a physical node with no leases scheduled on it in the future is preferable
+    to one with leases (since this reduces the probability of having to
+    preempt leases to obtain a mapping). This ordering, however, is done by the 
+    policy engine (see the GreedyPolicy class in the host_selection module) so, 
+    to be a truly greedy algorithm, this mapper must be used in conjunction with 
+    the "greedy" host selection policy).
+    
+    Then, the algorithm traverses the list of nodes and tries to map as many
+    lease nodes into each physical node before moving on to the next. If
+    the list of physical nodes is exhausted without finding a mapping for all
+    the lease nodes, then the algorithm tries to find a mapping by preempting
+    other leases.
+    
+    Before doing this, the mapper must first determine what leases could be
+    preempted. This decision is delegated to the policy engine, which returns
+    a list of leases ordered from "most preemptable" to "least preemptable".
+    The mapper attempts a mapping assuming that the first lease is going
+    to be preempted, then assuming the first and the second, etc.
+    
+    If no mapping is found with preemption, then there is no mapping at the
+    requested time.
+    
+    """
+    
+    def __init__(self, slottable, policy):
+        """Constructor
+        
+        Arguments
+        slottable -- A fully constructed SlotTable
+        policy -- A fully constructed PolicyManager
+        """        
+        Mapper.__init__(self, slottable, policy)
+        
+    def map(self, lease, requested_resources, start, end, strictend, onlynodes=None):
+        """The mapping function
+        
+        See documentation in Mapper for more details
+        """        
+        
+        # Generate an availability window at time "start"
+        aw = self.slottable.get_availability_window(start)
+
+        nodes = aw.get_nodes_at(start)     
+        if onlynodes != None:
+            nodes = list(set(nodes) & onlynodes)
+
+        # Get an ordered list of physical nodes
+        pnodes = self.policy.sort_hosts(nodes, start, lease)
+        
+        # Get an ordered list of lease nodes
+        vnodes = self.__sort_vnodes(requested_resources)
+        
+        # Get the leases that intersect with the requested interval.
+        leases = aw.get_leases_until(end)
+        # Ask the policy engine to sort the leases based on their
+        # preemptability
+        leases = self.policy.sort_leases(lease, leases, start)
+        
+        preemptable_leases = leases
+        preempting = []
+        
+        # Try to find a mapping. Each iteration of this loop goes through
+        # all the lease nodes and tries to find a mapping. The first
+        # iteration assumes no leases can be preempted, and each successive
+        # iteration assumes one more lease can be preempted.
+        mapping = {}
+        done = False
+        while not done:
+            # Start at the first lease node
+            vnodes_pos = 0
+            cur_vnode = vnodes[vnodes_pos]
+            cur_vnode_capacity = requested_resources[cur_vnode]
+            maxend = end 
+            
+            # Go through all the physical nodes.
+            # In each iteration, we try to map as many lease nodes
+            # as possible into the physical nodes.
+            # "cur_vnode_capacity" holds the capacity of the vnode we are currently
+            # trying to map. "need_to_map" is the amount of resources we are 
+            # trying to map into the current physical node (which might be
+            # more than one lease node).
+            for pnode in pnodes:
+                # need_to_map is initialized to the capacity of whatever
+                # lease node we are trying to map now.
+                need_to_map = self.slottable.create_empty_resource_tuple()
+                need_to_map.incr(cur_vnode_capacity)
+                avail=aw.get_availability_at_node(start, pnode, preempted_leases = preempting)
+                
+                # Try to fit as many lease nodes as we can into this physical node
+                pnode_done = False
+                while not pnode_done:
+                    if avail.fits(need_to_map, until = maxend):
+                        # In this case, we can fit "need_to_map" into the
+                        # physical node.
+                        mapping[cur_vnode] = pnode
+                        vnodes_pos += 1
+                        if vnodes_pos >= len(vnodes):
+                            # No more lease nodes to map, we're done.
+                            done = True
+                            break
+                        else:
+                            # Advance to the next lease node, and add its
+                            # capacity to need_to_map
+                            cur_vnode = vnodes[vnodes_pos]
+                            cur_vnode_capacity = requested_resources[cur_vnode]
+                            need_to_map.incr(cur_vnode_capacity)
+                    else:
+                        # We couldn't fit the lease node. If we need to
+                        # find a mapping that spans the entire requested
+                        # interval, then we're done checking this physical node.
+                        if strictend:
+                            pnode_done = True
+                        else:
+                            # Otherwise, check what the longest interval
+                            # we could fit in this physical node
+                            latest = avail.latest_fit(need_to_map)
+                            if latest == None:
+                                pnode_done = True
+                            else:
+                                maxend = latest
+                    
+                if done:
+                    break
+
+            # If there's no more leases that we could preempt,
+            # we're done.
+            if len(preemptable_leases) == 0:
+                done = True
+            elif not done:
+                # Otherwise, add another lease to the list of
+                # leases we are preempting
+                preempting.append(preemptable_leases.pop())
+
+        if len(mapping) != len(requested_resources):
+            # No mapping found
+            return None, None, None
+        else:
+            return mapping, maxend, preempting
+
+    def __sort_vnodes(self, requested_resources):
+        """Sorts the lease nodes
+        
+        Greedily sorts the lease nodes so the mapping algorithm
+        will first try to map those that require the highest
+        capacity.
+        """            
+        
+        # Find the maximum requested resources for each resource type
+        max_res = self.slottable.create_empty_resource_tuple()
+        for res in requested_resources.values():
+            for i in range(len(res._res)):
+                if res._res[i] > max_res._res[i]:
+                    max_res._res[i] = res._res[i]
+                    
+        # Normalize the capacities of the lease nodes (divide each
+        # requested amount of a resource type by the maximum amount)
+        norm_res = {}
+        for k,v in requested_resources.items():
+            norm_capacity = 0
+            for i in range(len(max_res._res)):
+                if max_res._res[i] > 0:
+                    norm_capacity += v._res[i] / float(max_res._res[i])
+            norm_res[k] = norm_capacity
+             
+        vnodes = norm_res.items()
+        vnodes.sort(key=operator.itemgetter(1), reverse = True)
+        vnodes = [k for k,v in vnodes]
+        return vnodes      
+                    

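The normalization done in GreedyMapper.__sort_vnodes (divide each requested amount by the per-type maximum, add up the normalized amounts, and place the highest-capacity lease nodes first) can be reproduced with plain lists of numbers standing in for ResourceTuple objects. This is an illustrative sketch, not the Haizea implementation:

    def sort_vnodes(requested_resources):
        """requested_resources maps a lease node id to a list of per-type amounts."""
        ntypes = len(next(iter(requested_resources.values())))
        # Maximum requested amount for each resource type.
        max_res = [max(res[i] for res in requested_resources.values())
                   for i in range(ntypes)]
        # Normalized capacity of each lease node.
        norm = {}
        for vnode, res in requested_resources.items():
            norm[vnode] = sum(res[i] / float(max_res[i])
                              for i in range(ntypes) if max_res[i] > 0)
        # Highest-capacity lease nodes first.
        return sorted(norm, key=norm.get, reverse=True)

    # Lease node 2 requests the most CPU and memory, so it is mapped first.
    requested = {1: [1.0, 512], 2: [2.0, 1024], 3: [1.0, 256]}
    print(sort_vnodes(requested))   # [2, 1, 3]
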
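The outer loop of GreedyMapper.map described above (try a mapping with no preemptions; if that fails, assume the most preemptable lease is preempted; then the two most preemptable, and so on) can be shown with a deliberately simplified capacity model that uses a single node count instead of per-slot availability. Everything below is a hypothetical sketch, not Haizea code:

    def map_with_preemptions(needed_nodes, free_nodes, preemptable_leases):
        """Return (mapped_nodes, preempted_lease_ids), or (None, None) if impossible.

        preemptable_leases is a list of (lease_id, nodes_freed_if_preempted)
        tuples ordered from most to least preemptable.
        """
        candidates = list(preemptable_leases)
        preempting = []
        while True:
            available = free_nodes + sum(n for _, n in preempting)
            if available >= needed_nodes:
                return needed_nodes, [lease_id for lease_id, _ in preempting]
            if not candidates:
                # Even preempting every candidate lease is not enough.
                return None, None
            # Assume one more lease (the most preemptable remaining) is preempted.
            preempting.append(candidates.pop(0))

    print(map_with_preemptions(4, 2, [("L7", 1), ("L3", 2)]))  # (4, ['L7', 'L3'])
    print(map_with_preemptions(6, 2, [("L7", 1), ("L3", 2)]))  # (None, None)
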
Added: trunk/src/haizea/core/scheduler/policy.py
===================================================================
--- trunk/src/haizea/core/scheduler/policy.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/policy.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,290 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""Haizea uses a policy manager that allows certain scheduling decisions to
+be delegated to pluggable policies. This is done so scheduling policies
+can be (1) modified without having to modify core components of Haizea, and
+(2) implemented by writing a single Python class that implements a given
+interface for pluggable policies.
+
+Three policies are currently pluggable: lease preemptability ("Can lease X
+preempt lease Y?"), host selection ("I want to deploy a VM, what host should
+I select for this?") and lease admission ("Should I accept/reject this lease
+request?"). Haizea provides several simple policy modules in the
+haizea.policies package. The policy to use is selected in the configuration
+file. See the Haizea Documentation for more details on how this is done.
+
+This module provides Haizea's policy manager and the base classes for
+pluggable policies.  
+"""
+
+
+from haizea.common.utils import abstract
+from haizea.core.leases import Lease
+from mx.DateTime import DateTimeDelta
+import operator
+
+class PolicyManager(object):
+    """The Policy Manager
+    
+    This class manages the policy modules and provides methods to
+    access these modules.
+    
+    """    
+    def __init__(self, admission, preemption, host_selection):
+        """Constructor
+        
+        Expects fully-constructed policies (these are currently
+        loaded in the Manager class, based on the config file).
+        
+        Arguments:
+        admission -- A child of LeaseAdmissionPolicy
+        preemption -- A child of PreemptabilityPolicy
+        host_selection -- A child of HostSelectionPolicy
+        
+        """
+        self.admission = admission
+        self.preemption = preemption
+        self.host_selection = host_selection
+    
+    def sort_leases(self, preemptor, preemptees, time):
+        """Sorts a list of leases by their preemptability
+        
+        Takes a list of leases (the "preemptees"), determines their preemptability
+        by another lease (the "preemptor"), and returns a list with the
+        leases sorted by decreasing preemptability score (most preemptable
+        leases first)
+        
+        See documentation of PreemptabilityPolicy.get_lease_preemptability_score
+        for more details on the preemptability score.
+        
+        Arguments:
+        preemptor -- Preemptor lease
+        preemptees -- List of preemptee leases
+        time -- Time at which preemption would take place        
+        """              
+        leases_score = [(preemptee, self.get_lease_preemptability_score(preemptor,preemptee, time)) for preemptee in preemptees]
+        leases_score = [(preemptee,score) for preemptee,score in leases_score if score != -1]
+        leases_score.sort(key=operator.itemgetter(1), reverse=True)
+        return [preemptee for preemptee,score in leases_score]
+
+
+    def sort_hosts(self, nodes, time, lease):
+        """Sorts a list of hosts by their score
+        
+        Takes a list of hosts, determines their score, and sorts them in
+        order of decreasing score (most desirable hosts first)
+        
+        See documentation of HostSelectionPolicy.get_host_score for more details.
+        
+        Arguments:
+        nodes -- List of physical node (the integer identifier used in the slot table)
+        time -- Time at which the lease might be scheduled
+        lease -- Lease that is being scheduled.
+        """        
+        nodes_score = [(node, self.get_host_score(node, time, lease)) for node in nodes]
+        nodes_score.sort(key=operator.itemgetter(1), reverse=True)
+        return [node for node,score in nodes_score]
+    
+    
+    def accept_lease(self, lease):
+        """Lease admission function
+        
+        Returns True if the lease can be accepted, False if it should be rejected.
+        
+        Argument
+        lease -- Lease request
+        """        
+        return self.admission.accept_lease(lease)
+    
+    
+    def get_lease_preemptability_score(self, preemptor, preemptee, time):
+        """Computes the lease preemptability score
+        
+        See documentation of PreemptabilityPolicy.get_lease_preemptability_score
+        for more details.
+        
+        Arguments:
+        preemptor -- Preemptor lease
+        preemptee -- Preemptee lease
+        time -- Time at which preemption would take place
+        """                
+        return self.preemption.get_lease_preemptability_score(preemptor, preemptee, time)
+
+
+    def get_host_score(self, node, time, lease):
+        """Computes the score of a host
+        
+        See documentation of HostSelectionPolicy.get_host_score for more details.
+        
+        Arguments:
+        node -- Physical node (the integer identifier used in the slot table)
+        time -- Time at which the lease might be scheduled
+        lease -- Lease that is being scheduled.
+        """               
+        return self.host_selection.get_host_score(node, time, lease)
+    
+    
+
+class LeaseAdmissionPolicy(object):
+    """Lease Admission policy
+    
+    This is the parent class of lease admission policies. A lease admission
+    policy determines whether a given lease request should be accepted or not
+    by Haizea. Note that this is distinct from whether the lease can be
+    scheduled or not (although this could certainly be a part of the
+    policy); the policy simply decides whether the lease can be considered for
+    scheduling or not. For example, a user could submit an AR lease that must
+    start in 5 hours, but the policy could dictate that all ARs must be requested
+    at least 24 hours in advance (and the lease would be rejected, regardless of
+    whether there were resources available for it in 5 hours). Similarly, an
+    AR lease could be requested 48 hours in advance, be accepted by the lease
+    admission policy, but then be rejected by the scheduler if there are no
+    resources available.
+    
+    """       
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """
+        self.slottable = slottable
+    
+    
+    def accept_lease(self, lease):
+        """Lease admission function
+        
+        Returns True if the lease can be accepted, False if it should be rejected.
+        
+        Argument
+        lease -- Lease request
+        """        
+        abstract()
+    
+    
+    
+class PreemptabilityPolicy(object):
+    """Lease Preemptability policy
+    
+    This is the parent class of lease preemptability policies. This type of
+    policy is used to determine whether a lease can be preempted by another
+    lease at a given time. However, the policy doesn't return True or False but,
+    rather, a "preemptability score" (see get_lease_preemptability_score for
+    more details)
+    
+    """           
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """        
+        self.slottable = slottable
+    
+    
+    def get_lease_preemptability_score(self, preemptor, preemptee, time):
+        """Computes the lease preemptability score
+        
+        Given a lease that needs to preempt resources (the "preemptor"),
+        another lease (the "preemptee") that may be preempted by it, and a time,
+        this method determines the preemptability score of the preemptee or
+        "how preemptable is the preemptee by the preemptor at the given time".
+        The score can be the following:
+        
+        -1 : Cannot be preempted under any circumstances
+        0.0 <= x <= 1.0: Lease can be preempted. The higher the score,
+        the "more preemptable" it is (this is a relative measure; the score
+        should be used to determine which of several leases is a better
+        candidate for preemption)
+        
+        Arguments:
+        preemptor -- Preemptor lease
+        preemptee -- Preemptee lease
+        time -- Time at which preemption would take place
+        """             
+        abstract()    
+
+
+    def _get_aging_factor(self, lease, time):
+        """Returns an aging factor for the preemptability score
+        
+        This is a convenience function that can be used to "age" a
+        preemptability score (allowing leases that have been submitted
+        long ago to avoid preemption). The method returns a factor
+        between 0 and 1 that can be multiplied by the score, reducing
+        the score based on the lease's "age".
+        
+        Currently, this method uses hard-coded values: any lease submitted
+        more than 7 days ago cannot be preempted (a factor of -1 is returned),
+        and younger leases are assigned a factor that decreases linearly
+        with their age, normalized against a 31-day horizon.
+        
+        Arguments:
+        lease -- Lease that is going to be preempted
+        time -- Time at which preemption would take place        
+        """            
+        # TODO: Make horizon configurable
+        horizon = time - DateTimeDelta(7)
+        if lease.submit_time <= horizon:
+            return -1
+        else:
+            seconds = (time - lease.submit_time).seconds
+            horizon_seconds = DateTimeDelta(31).seconds
+            return float(horizon_seconds - seconds) / horizon_seconds        
+        
+        
+class HostSelectionPolicy(object):
+    """Host Selection policy
+    
+    This is the parent class of host selection policies. When mapping VMs
+    to physical hosts, this policy determines what hosts are more desirable.
+    For example, an energy-saving policy might value hosts that already have
+    VMs running (to leave as many empty machines as possible, which could then
+    be turned off), whereas another policy might prefer empty hosts to make
+    sure that VMs are spread out across nodes.
+    
+    To do this, the policy will assign a score to each host. See the documentation
+    for get_host_score for more details.
+        
+    """             
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """        
+        self.slottable = slottable
+    
+    
+    def get_host_score(self, node, time, lease):
+        """Computes the score of a host
+        
+        Given a physical host, a time, and a lease we would like to
+        schedule at that time, this method returns a score indicating
+        how desirable that host is for that lease at that time.
+        The score can be between 0.0 and 1.0. The higher the score,
+        the "more desirable" the physical host is (this is a relative measure; 
+        the score should be used to determine which of several physical hosts
+        is more desirable for this lease).
+        
+        Arguments:
+        node -- Physical node (the integer identifier used in the slot table)
+        time -- Time at which the lease might be scheduled
+        lease -- Lease that is being scheduled.
+        """               
+        abstract()    
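+
+# Illustrative sketch only (not one of the policies shipped in haizea.policies):
+# a concrete host selection policy that packs leases onto low-numbered hosts
+# by giving them higher scores. It assumes physical node identifiers start
+# at 1, as used throughout the slot table.
+class PackLowestNodesExample(HostSelectionPolicy):
+    def get_host_score(self, node, time, lease):
+        # 1.0 for node 1, decreasing towards 0 for higher-numbered nodes,
+        # so VMs tend to fill the first hosts and leave the rest empty.
+        return 1.0 / node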


Property changes on: trunk/src/haizea/core/scheduler/preparation_schedulers
___________________________________________________________________
Added: svn:mergeinfo
   + 

Added: trunk/src/haizea/core/scheduler/preparation_schedulers/__init__.py
===================================================================
--- trunk/src/haizea/core/scheduler/preparation_schedulers/__init__.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/preparation_schedulers/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,30 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import logging
+from haizea.common.utils import abstract
+
+class PreparationScheduler(object):
+    def __init__(self, slottable, resourcepool, deployment_enact):
+        self.slottable = slottable
+        self.resourcepool = resourcepool
+        self.deployment_enact = deployment_enact
+        self.logger = logging.getLogger("DEPLOY")
+        
+    def cleanup(self, lease):
+        abstract() 
\ No newline at end of file

Added: trunk/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py
===================================================================
--- trunk/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/preparation_schedulers/imagetransfer.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,592 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import haizea.common.constants as constants
+from haizea.core.scheduler.preparation_schedulers import PreparationScheduler
+from haizea.core.scheduler.slottable import ResourceReservation
+from haizea.core.scheduler import MigrationResourceReservation
+from haizea.core.leases import Lease, Capacity, UnmanagedSoftwareEnvironment
+from haizea.core.scheduler import ReservationEventHandler, NotSchedulableException, EarliestStartingTime, InconsistentLeaseStateError
+from haizea.common.utils import estimate_transfer_time, get_config
+from haizea.core.scheduler.slottable import ResourceTuple
+from mx.DateTime import TimeDelta
+
+import copy
+import bisect
+
+
+class ImageTransferPreparationScheduler(PreparationScheduler):
+    def __init__(self, slottable, resourcepool, deployment_enact):
+        PreparationScheduler.__init__(self, slottable, resourcepool, deployment_enact)
+        
+        self.imagenode = self.deployment_enact.get_imagenode()
+        
+        self.transfers = []
+        self.completed_transfers = []
+
+        config = get_config()
+        self.reusealg = config.get("diskimage-reuse")
+        if self.reusealg == constants.REUSE_IMAGECACHES:
+            self.maxcachesize = config.get("diskimage-cache-size")
+        else:
+            self.maxcachesize = None
+        
+        self.imagenode_bandwidth = self.deployment_enact.get_bandwidth()
+        
+        self.handlers = {}
+        self.handlers[FileTransferResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = ImageTransferPreparationScheduler._handle_start_filetransfer,
+                                on_end   = ImageTransferPreparationScheduler._handle_end_filetransfer)
+
+        self.handlers[DiskImageMigrationResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = ImageTransferPreparationScheduler._handle_start_migrate,
+                                on_end   = ImageTransferPreparationScheduler._handle_end_migrate)
+
+    def schedule(self, lease, vmrr, earliest):
+        if type(lease.software) == UnmanagedSoftwareEnvironment:
+            return [], True
+        if lease.get_type() == Lease.ADVANCE_RESERVATION:
+            return self.__schedule_deadline(lease, vmrr, earliest)
+        elif lease.get_type() in (Lease.BEST_EFFORT, Lease.IMMEDIATE):
+            return self.__schedule_asap(lease, vmrr, earliest)
+
+    def schedule_migration(self, lease, vmrr, nexttime):
+        if type(lease.software) == UnmanagedSoftwareEnvironment:
+            return []
+        
+        # This code is the same as the one in vm_scheduler
+        # Should be factored out
+        last_vmrr = lease.get_last_vmrr()
+        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes])
+        
+        mustmigrate = False
+        for vnode in vnode_migrations:
+            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
+                mustmigrate = True
+                break
+            
+        if not mustmigrate:
+            return []
+
+        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
+            start = nexttime
+            end = nexttime
+            res = {}
+            migr_rr = DiskImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            return [migr_rr]
+
+        # Figure out what migrations can be done simultaneously
+        migrations = []
+        while len(vnode_migrations) > 0:
+            pnodes = set()
+            migration = {}
+            for vnode in vnode_migrations:
+                origin = vnode_migrations[vnode][0]
+                dest = vnode_migrations[vnode][1]
+                if not origin in pnodes and not dest in pnodes:
+                    migration[vnode] = vnode_migrations[vnode]
+                    pnodes.add(origin)
+                    pnodes.add(dest)
+            for vnode in migration:
+                del vnode_migrations[vnode]
+            migrations.append(migration)
+        
+        # Create migration RRs
+        start = max(last_vmrr.post_rrs[-1].end, nexttime)
+        bandwidth = self.resourcepool.info.get_migration_bandwidth()
+        migr_rrs = []
+        for m in migrations:
+            mb_to_migrate = lease.software.image_size * len(m.keys())
+            migr_time = estimate_transfer_time(mb_to_migrate, bandwidth)
+            end = start + migr_time
+            res = {}
+            for (origin,dest) in m.values():
+                resorigin = Capacity([constants.RES_NETOUT])
+                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
+                resdest = Capacity([constants.RES_NETIN])
+                resdest.set_quantity(constants.RES_NETIN, bandwidth)
+                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
+                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
+            migr_rr = DiskImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            migr_rrs.append(migr_rr)
+            start = end
+        
+        return migr_rrs
+
+    def estimate_migration_time(self, lease):
+        migration = get_config().get("migration")
+        if migration == constants.MIGRATE_YES:
+            vmrr = lease.get_last_vmrr()
+            images_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
+            for (vnode,pnode) in vmrr.nodes.items():
+                images_in_pnode[pnode] += lease.software.image_size
+            max_to_transfer = max(images_in_pnode.values())
+            bandwidth = self.resourcepool.info.get_migration_bandwidth()
+            return estimate_transfer_time(max_to_transfer, bandwidth)
+        elif migration == constants.MIGRATE_YES_NOTRANSFER:
+            return TimeDelta(seconds=0)
+
+    def find_earliest_starting_times(self, lease, nexttime):
+        node_ids = [node.id for node in self.resourcepool.get_nodes()]  
+        config = get_config()
+        mechanism = config.get("transfer-mechanism")
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
+        
+        if type(lease.software) == UnmanagedSoftwareEnvironment:
+            earliest = {}
+            for node in node_ids:
+                earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
+            return earliest
+        
+        # Figure out earliest times assuming we have to transfer the images
+        transfer_duration = self.__estimate_image_transfer_time(lease, self.imagenode_bandwidth)
+        if mechanism == constants.TRANSFER_UNICAST:
+            transfer_duration *= lease.numnodes
+        start = self.__get_next_transfer_slot(nexttime, transfer_duration)
+        earliest = {}
+        for node in node_ids:
+            earliest[node] = ImageTransferEarliestStartingTime(start + transfer_duration, ImageTransferEarliestStartingTime.EARLIEST_IMAGETRANSFER)
+            earliest[node].transfer_start = start
+                
+        # Check if we can reuse images
+        if reusealg == constants.REUSE_IMAGECACHES:
+            nodeswithimg = self.resourcepool.get_nodes_with_reusable_image(lease.software.image_id)
+            for node in nodeswithimg:
+                earliest[node].time = nexttime
+                earliest[node].type = ImageTransferEarliestStartingTime.EARLIEST_REUSE
+        
+                
+        # Check if we can avoid redundant transfers
+        if avoidredundant:
+            if mechanism == constants.TRANSFER_UNICAST:
+                # Piggybacking not supported if unicasting 
+                # each individual image
+                pass
+            if mechanism == constants.TRANSFER_MULTICAST:                
+                # We can only piggyback on transfers that haven't started yet
+                transfers = [t for t in self.transfers if t.state == ResourceReservation.STATE_SCHEDULED]
+                for t in transfers:
+                    if t.file == lease.software.image_id:
+                        start = t.end
+                        if start > nexttime:
+                            for n in earliest:
+                                if start < earliest[n].time:
+                                    earliest[n].time = start
+                                    earliest[n].type = ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK
+                                    earliest[n].piggybacking_on = t
+
+        return earliest
+            
+    def cancel_preparation(self, lease):
+        toremove = self.__remove_transfers(lease)     
+        for t in toremove:
+            t.lease.remove_preparationrr(t)
+            self.slottable.remove_reservation(t)
+        self.__remove_files(lease)
+        
+    def cleanup(self, lease):                
+        self.__remove_files(lease)
+  
+        
+    def __schedule_deadline(self, lease, vmrr, earliest):
+        config = get_config()
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
+        is_ready = False
+        transfer_rrs = []
+            
+        musttransfer = {}
+        mustpool = {}
+        nodeassignment = vmrr.nodes
+        start = lease.start.requested
+        end = lease.start.requested + lease.duration.requested
+        for (vnode, pnode) in nodeassignment.items():
+            lease_id = lease.id
+            self.logger.debug("Scheduling image transfer of '%s' for vnode %i to physnode %i" % (lease.software.image_id, vnode, pnode))
+
+            if reusealg == constants.REUSE_IMAGECACHES:
+                if self.resourcepool.exists_reusable_image(pnode, lease.software.image_id, start):
+                    self.logger.debug("No need to schedule an image transfer (reusing an image in pool)")
+                    mustpool[vnode] = pnode                            
+                else:
+                    self.logger.debug("Need to schedule a transfer.")
+                    musttransfer[vnode] = pnode
+            else:
+                self.logger.debug("Need to schedule a transfer.")
+                musttransfer[vnode] = pnode
+
+        if len(musttransfer) == 0:
+            is_ready = True
+        else:
+            transfer_rrs = self.__schedule_imagetransfer_edf(lease, musttransfer, earliest)
+ 
+        # No chance of scheduling exception at this point. It's safe
+        # to add entries to the pools
+        if reusealg == constants.REUSE_IMAGECACHES:
+            for (vnode, pnode) in mustpool.items():
+                self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease.id, vnode, start)
+                self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease.id, vnode)
+                
+        return transfer_rrs, is_ready
+
+
+    def __schedule_asap(self, lease, vmrr, earliest):
+        config = get_config()
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
+
+        is_ready = False
+
+        transfer_rrs = []
+        musttransfer = {}
+        piggybacking = []
+        for (vnode, pnode) in vmrr.nodes.items():
+            earliest_type = earliest[pnode].type
+            if earliest_type == ImageTransferEarliestStartingTime.EARLIEST_REUSE:
+                # Add to pool
+                self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
+                self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease.id, vnode, vmrr.end)
+                self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease.id, vnode)
+            elif earliest_type == ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK:
+                # We can piggyback on an existing transfer
+                transfer_rr = earliest[pnode].piggybacking_on
+                transfer_rr.piggyback(lease.id, vnode, pnode)
+                self.logger.debug("Piggybacking transfer for V%i->P%i on existing transfer in lease %i." % (vnode, pnode, transfer_rr.lease.id))
+                piggybacking.append(transfer_rr)
+            else:
+                # Transfer
+                musttransfer[vnode] = pnode
+                self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode))
+
+        if len(musttransfer)>0:
+            transfer_rrs = self.__schedule_imagetransfer_fifo(lease, musttransfer, earliest)
+            
+        if len(musttransfer)==0 and len(piggybacking)==0:
+            is_ready = True
+            
+        return transfer_rrs, is_ready
+
+
+    def __schedule_imagetransfer_edf(self, lease, musttransfer, earliest):
+        # Estimate image transfer time 
+        bandwidth = self.deployment_enact.get_bandwidth()
+        config = get_config()
+        mechanism = config.get("transfer-mechanism")
+        transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
+        if mechanism == constants.TRANSFER_UNICAST:
+            transfer_duration *= len(musttransfer)
+
+        # Determine start time
+        start = self.__get_last_transfer_slot(lease.start.requested, transfer_duration)
+
+        res = {}
+        resimgnode = Capacity([constants.RES_NETOUT])
+        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
+        resnode = Capacity([constants.RES_NETIN])
+        resnode.set_quantity(constants.RES_NETIN, bandwidth)
+        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
+        for pnode in musttransfer.values():
+            res[pnode] = self.slottable.create_resource_tuple_from_capacity(resnode)
+        
+        newtransfer = FileTransferResourceReservation(lease, res)
+        newtransfer.deadline = lease.start.requested
+        newtransfer.state = ResourceReservation.STATE_SCHEDULED
+        newtransfer.file = lease.software.image_id
+        newtransfer.start = start
+        newtransfer.end = start + transfer_duration
+        for vnode, pnode in musttransfer.items():
+            newtransfer.piggyback(lease.id, vnode, pnode)
+        
+        bisect.insort(self.transfers, newtransfer)
+        
+        return [newtransfer]
+    
+    def __schedule_imagetransfer_fifo(self, lease, musttransfer, earliest):
+        # Estimate image transfer time 
+        bandwidth = self.imagenode_bandwidth
+        config = get_config()
+        mechanism = config.get("transfer-mechanism")
+        
+        # The starting time is the first available slot, which was
+        # included in the "earliest" dictionary.
+        pnodes = musttransfer.values()
+        start = earliest[pnodes[0]].transfer_start
+        transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
+        
+        res = {}
+        resimgnode = Capacity([constants.RES_NETOUT])
+        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
+        resnode = Capacity([constants.RES_NETIN])
+        resnode.set_quantity(constants.RES_NETIN, bandwidth)
+        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
+        for n in musttransfer.values():
+            res[n] = self.slottable.create_resource_tuple_from_capacity(resnode)
+         
+        newtransfer = FileTransferResourceReservation(lease, res)
+        newtransfer.start = start
+        if mechanism == constants.TRANSFER_UNICAST:
+            newtransfer.end = start + (len(musttransfer) * transfer_duration)
+        if mechanism == constants.TRANSFER_MULTICAST:
+            newtransfer.end = start + transfer_duration
+        
+        newtransfer.deadline = None
+        newtransfer.state = ResourceReservation.STATE_SCHEDULED
+        newtransfer.file = lease.software.image_id
+        for vnode, pnode in musttransfer.items():
+            newtransfer.piggyback(lease.id, vnode, pnode)
+            
+        bisect.insort(self.transfers, newtransfer)
+        
+        return [newtransfer]
+    
+    
+    def __estimate_image_transfer_time(self, lease, bandwidth):
+        config = get_config()
+        force_transfer_time = config.get("force-imagetransfer-time")
+        if force_transfer_time != None:
+            return force_transfer_time
+        else:      
+            return estimate_transfer_time(lease.software.image_size, bandwidth)    
+    
+    
+    def __get_next_transfer_slot(self, nexttime, required_duration):
+        # This can probably be optimized by using one of the many
+        # "list of holes" algorithms out there
+        if len(self.transfers) == 0:
+            return nexttime
+        elif nexttime + required_duration <= self.transfers[0].start:
+            return nexttime
+        else:
+            for i in xrange(len(self.transfers) - 1):
+                if self.transfers[i].end != self.transfers[i+1].start:
+                    hole_duration = self.transfers[i+1].start - self.transfers[i].end
+                    if hole_duration >= required_duration:
+                        return self.transfers[i].end
+            return self.transfers[-1].end
+        
+        
+    def __get_last_transfer_slot(self, deadline, required_duration):
+        # This can probably be optimized by using one of the many
+        # "list of holes" algorithms out there
+        if len(self.transfers) == 0:
+            return deadline - required_duration
+        elif self.transfers[-1].end + required_duration <= deadline:
+            return deadline - required_duration
+        else:
+            for i in xrange(len(self.transfers) - 1, 0, -1):
+                if self.transfers[i].start != self.transfers[i-1].end:
+                    hole_duration = self.transfers[i].start - self.transfers[i-1].end
+                    if hole_duration >= required_duration:
+                        return self.transfers[i].start - required_duration
+            return self.transfers[0].start - required_duration
+
+    def __remove_transfers(self, lease):
+        toremove = []
+        for t in self.transfers:
+            for pnode in t.transfers:
+                leases = [l for l, v in t.transfers[pnode]]
+                if lease in leases:
+                    newtransfers = [(l, v) for l, v in t.transfers[pnode] if l!=lease]
+                    t.transfers[pnode] = newtransfers
+            # Cancel the transfer if no vnodes are left piggybacked on it
+            remaining = sum([len(vnodes) for vnodes in t.transfers.values()])
+            if remaining == 0:
+                toremove.append(t)
+        for t in toremove:
+            self.transfers.remove(t)
+            
+        return toremove
+    
+    def __remove_files(self, lease):
+        for vnode, pnode in lease.get_last_vmrr().nodes.items():
+            self.resourcepool.remove_diskimage(pnode, lease.id, vnode)         
+
+    @staticmethod
+    def _handle_start_filetransfer(sched, lease, rr):
+        sched.logger.debug("LEASE-%i Start of handleStartFileTransfer" % lease.id)
+        lease.print_contents()
+        lease_state = lease.get_state()
+        if lease_state == Lease.STATE_SCHEDULED or lease_state == Lease.STATE_READY:
+            lease.set_state(Lease.STATE_PREPARING)
+            rr.state = ResourceReservation.STATE_ACTIVE
+            # TODO: Enactment
+        else:
+            raise InconsistentLeaseStateError(lease, doing = "starting a file transfer")
+            
+        # TODO: Check for piggybacking
+        
+        lease.print_contents()
+        sched.logger.debug("LEASE-%i End of handleStartFileTransfer" % lease.id)
+        sched.logger.info("Starting image transfer for lease %i" % (lease.id))
+
+    @staticmethod
+    def _handle_end_filetransfer(sched, lease, rr):
+        sched.logger.debug("LEASE-%i Start of handleEndFileTransfer" % lease.id)
+        lease.print_contents()
+        lease_state = lease.get_state()
+        if lease_state == Lease.STATE_PREPARING:
+            lease.set_state(Lease.STATE_READY)
+            rr.state = ResourceReservation.STATE_DONE
+            for physnode in rr.transfers:
+                vnodes = rr.transfers[physnode]
+ 
+#                # Find out timeout of image. It will be the latest end time of all the
+#                # leases being used by that image.
+#                leases = [l for (l, v) in vnodes]
+#                maxend=None
+#                for lease_id in leases:
+#                    l = sched.leases.get_lease(lease_id)
+#                    end = lease.get_endtime()
+#                    if maxend==None or end>maxend:
+#                        maxend=end
+                maxend = None
+                # TODO: ENACTMENT: Verify the image was transferred correctly
+                sched._add_diskimages(physnode, rr.file, lease.software.image_size, vnodes, timeout=maxend)
+        else:
+            raise InconsistentLeaseStateError(lease, doing = "ending a file transfer")
+
+        sched.transfers.remove(rr)
+        lease.print_contents()
+        sched.logger.debug("LEASE-%i End of handleEndFileTransfer" % lease.id)
+        sched.logger.info("Completed image transfer for lease %i" % (lease.id))
+
+    def _handle_start_migrate(self, l, rr):
+        self.logger.debug("LEASE-%i Start of handleStartMigrate" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_ACTIVE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleStartMigrate" % l.id)
+        self.logger.info("Migrating lease %i..." % (l.id))
+
+    def _handle_end_migrate(self, l, rr):
+        self.logger.debug("LEASE-%i Start of handleEndMigrate" % l.id)
+        l.print_contents()
+
+        for vnode in rr.transfers:
+            origin = rr.transfers[vnode][0]
+            dest = rr.transfers[vnode][1]
+            
+            self.resourcepool.remove_diskimage(origin, l.id, vnode)
+            self.resourcepool.add_diskimage(dest, l.software.image_id, l.software.image_size, l.id, vnode)
+        
+        rr.state = ResourceReservation.STATE_DONE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndMigrate" % l.id)
+        self.logger.info("Migrated lease %i..." % (l.id))
+        
+    def _add_diskimages(self, pnode_id, diskimage_id, diskimage_size, vnodes, timeout):
+        self.logger.debug("Adding image for leases=%s in nod_id=%i" % (vnodes, pnode_id))
+
+        pnode = self.resourcepool.get_node(pnode_id)
+
+        if self.reusealg == constants.REUSE_NONE:
+            for (lease_id, vnode) in vnodes:
+                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+        elif self.reusealg == constants.REUSE_IMAGECACHES:
+            # Sometimes we might find that the image is already deployed
+            # (although unused). In that case, don't add another copy to
+            # the pool. Just "reactivate" it.
+            if pnode.exists_reusable_image(diskimage_id):
+                for (lease_id, vnode) in vnodes:
+                    pnode.add_mapping_to_existing_reusable_image(diskimage_id, lease_id, vnode, timeout)
+            else:
+                if self.maxcachesize == constants.CACHESIZE_UNLIMITED:
+                    can_add_to_cache = True
+                else:
+                    # We may have to remove images from the cache
+                    cachesize = pnode.get_reusable_images_size()
+                    reqsize = cachesize + diskimage_size
+                    if reqsize > self.maxcachesize:
+                        # Have to shrink cache
+                        desiredsize = self.maxcachesize - diskimage_size
+                        self.logger.debug("Adding the image would make the size of pool in node %i = %iMB. Will try to bring it down to %i" % (pnode_id, reqsize, desiredsize))
+                        pnode.print_files()
+                        success = pnode.purge_downto(desiredsize)
+                        if not success:
+                            can_add_to_cache = False
+                        else:
+                            can_add_to_cache = True
+                    else:
+                        can_add_to_cache = True
+                        
+                if can_add_to_cache:
+                    self.resourcepool.add_reusable_image(pnode_id, diskimage_id, diskimage_size, vnodes, timeout)
+                else:
+                    # This just means we couldn't add the image
+                    # to the pool. We will have to make do with just adding the tainted images.
+                    self.logger.debug("Unable to add to pool. Must create individual disk images directly instead.")
+                    
+            # Besides adding the image to the cache, we need to create a separate image for
+            # this specific lease
+            for (lease_id, vnode) in vnodes:
+                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease_id, vnode)
+                    
+        pnode.print_files()
+
+class FileTransferResourceReservation(ResourceReservation):
+    def __init__(self, lease, res, start=None, end=None):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.deadline = None
+        self.file = None
+        # Dictionary of  physnode -> [ (lease_id, vnode)* ]
+        self.transfers = {}
+
+    def print_contents(self, loglevel="VDEBUG"):
+        ResourceReservation.print_contents(self, loglevel)
+        self.logger.log(loglevel, "Type           : FILE TRANSFER")
+        self.logger.log(loglevel, "Deadline       : %s" % self.deadline)
+        self.logger.log(loglevel, "File           : %s" % self.file)
+        self.logger.log(loglevel, "Transfers      : %s" % self.transfers)
+        
+    def piggyback(self, lease_id, vnode, physnode):
+        if self.transfers.has_key(physnode):
+            self.transfers[physnode].append((lease_id, vnode))
+        else:
+            self.transfers[physnode] = [(lease_id, vnode)]
+            
+    def is_preemptible(self):
+        return False       
+    
+    def __cmp__(self, rr):
+        return cmp(self.start, rr.start)
+    
+class ImageTransferEarliestStartingTime(EarliestStartingTime):
+    EARLIEST_IMAGETRANSFER = 2
+    EARLIEST_REUSE = 3
+    EARLIEST_PIGGYBACK = 4
+    
+    def __init__(self, time, type):
+        EarliestStartingTime.__init__(self, time, type)
+        self.transfer_start = None
+        self.piggybacking_on = None
+
+class DiskImageMigrationResourceReservation(MigrationResourceReservation):
+    def __init__(self, lease, start, end, res, vmrr, transfers):
+        MigrationResourceReservation.__init__(self, lease, start, end, res, vmrr, transfers)
+
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Type           : DISK IMAGE MIGRATION")
+        self.logger.log(loglevel, "Transfers      : %s" % self.transfers)
+        ResourceReservation.print_contents(self, loglevel)     
\ No newline at end of file

Added: trunk/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py
===================================================================
--- trunk/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/preparation_schedulers/unmanaged.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,53 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.core.leases import Lease
+from haizea.core.scheduler import EarliestStartingTime
+from haizea.core.scheduler.preparation_schedulers import PreparationScheduler
+import haizea.common.constants as constants
+from mx.DateTime import TimeDelta
+
+class UnmanagedPreparationScheduler(PreparationScheduler):
+    def __init__(self, slottable, resourcepool, deployment_enact):
+        PreparationScheduler.__init__(self, slottable, resourcepool, deployment_enact)
+        self.handlers = {}
+    
+    def schedule(self, lease, vmrr, nexttime):
+        # Nothing to do
+        return [], True
+    
+    def find_earliest_starting_times(self, lease, nexttime):
+        # The earliest starting time is "nexttime" on all nodes.
+        node_ids = [node.id for node in self.resourcepool.get_nodes()]
+        earliest = {}
+        for node in node_ids:
+            earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
+        return earliest
+            
+    def estimate_migration_time(self, lease):
+        return TimeDelta(seconds=0)     
+            
+    def schedule_migration(self, lease, vmrr, nexttime):
+        return []
+                
+    def cancel_preparation(self, lease):
+        self.cleanup(lease)
+
+    def cleanup(self, lease):
+        # Nothing to clean up.
+        pass
\ No newline at end of file

Added: trunk/src/haizea/core/scheduler/resourcepool.py
===================================================================
--- trunk/src/haizea/core/scheduler/resourcepool.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/resourcepool.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,424 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.utils import vnodemapstr, get_accounting
+import haizea.common.constants as constants
+import haizea.core.enact.actions as actions
+from haizea.core.scheduler import EnactmentError
+import logging 
+
+
+class ResourcePool(object):
+    def __init__(self, info_enact, vm_enact, deploy_enact):
+        self.logger = logging.getLogger("RPOOL")
+                
+        self.info = info_enact
+        self.vm = vm_enact
+        # TODO: Ideally, deployment enactment shouldn't be here, especially since
+        # it already "hangs" below the deployment modules. For now,
+        # it does no harm, though.
+        self.deployment = deploy_enact
+        
+        self.nodes = self.info.get_nodes()
+        
+    def start_vms(self, lease, rr):
+        start_action = actions.VMEnactmentStartAction()
+        start_action.from_rr(rr)
+                
+        for (vnode, pnode) in rr.nodes.items():
+            node = self.get_node(pnode)
+            #diskimage = node.get_diskimage(lease.id, vnode, lease.diskimage_id)
+            start_action.vnodes[vnode].pnode = node.enactment_info
+            #start_action.vnodes[vnode].diskimage = diskimage.filename
+            start_action.vnodes[vnode].resources = rr.resources_in_pnode[pnode]
+
+        try:
+            self.vm.start(start_action)
+        except EnactmentError, exc:
+            self.logger.error("Enactment of start VM failed: %s" % exc.message)
+            raise
+        
+    def stop_vms(self, lease, rr):
+        stop_action = actions.VMEnactmentStopAction()
+        stop_action.from_rr(rr)
+        try:
+            self.vm.stop(stop_action)
+        except EnactmentError, exc:
+            self.logger.error("Enactment of end VM failed: %s" % exc.message)
+            raise
+         
+    def suspend_vms(self, lease, rr):
+        # Add memory image files
+        for vnode in rr.vnodes:
+            pnode = rr.vmrr.nodes[vnode]
+            self.add_ramfile(pnode, lease.id, vnode, lease.requested_resources[vnode].get_quantity(constants.RES_MEM))
+
+        # Enact suspend
+        suspend_action = actions.VMEnactmentSuspendAction()
+        suspend_action.from_rr(rr)
+        try:
+            self.vm.suspend(suspend_action)
+        except EnactmentError, exc:
+            self.logger.error("Enactment of suspend VM failed: %s" % exc.message)
+            raise
+    
+    def verify_suspend(self, lease, rr):
+        verify_suspend_action = actions.VMEnactmentConfirmSuspendAction()
+        verify_suspend_action.from_rr(rr)
+        self.vm.verify_suspend(verify_suspend_action)
+    
+    def resume_vms(self, lease, rr):
+        # Remove memory image files
+        for vnode in rr.vnodes:
+            pnode = rr.vmrr.nodes[vnode]
+            self.remove_ramfile(pnode, lease.id, vnode)
+
+        # Enact resume
+        resume_action = actions.VMEnactmentResumeAction()
+        resume_action.from_rr(rr)
+        try:
+            self.vm.resume(resume_action)
+        except EnactmentError, exc:
+            self.logger.error("Enactment of resume VM failed: %s" % exc.message)
+            raise
+    
+    def verify_resume(self, lease, rr):
+        verify_resume_action = actions.VMEnactmentConfirmResumeAction()
+        verify_resume_action.from_rr(rr)
+        self.vm.verify_resume(verify_resume_action)    
+    
+    def get_nodes(self):
+        return self.nodes.values()
+    
+    # An auxiliary node is a host whose resources are going to be scheduled, but
+    # where no VMs are actually going to run. For example, a disk image repository node.
+    def get_aux_nodes(self):
+        # TODO: We're only asking the deployment enactment module for auxiliary nodes.
+        # There might be a scenario where the info enactment module also reports
+        # auxiliary nodes.
+        return self.deployment.get_aux_nodes()
+
+    def get_num_nodes(self):
+        return len(self.nodes)
+        
+    def get_node(self, node_id):
+        return self.nodes[node_id]
+        
+    def add_diskimage(self, pnode, diskimage_id, imagesize, lease_id, vnode):
+        self.logger.debug("Adding disk image for L%iV%i in pnode=%i" % (lease_id, vnode, pnode))
+        
+        self.logger.vdebug("Files BEFORE:")
+        self.get_node(pnode).print_files()
+        
+        imagefile = self.deployment.resolve_to_file(lease_id, vnode, diskimage_id)
+        img = DiskImageFile(imagefile, imagesize, lease_id, vnode, diskimage_id)
+        self.get_node(pnode).add_file(img)
+
+        self.logger.vdebug("Files AFTER:")
+        self.get_node(pnode).print_files()
+        
+        get_accounting().append_stat(constants.COUNTER_DISKUSAGE, self.get_max_disk_usage())
+        return img
+            
+    def remove_diskimage(self, pnode, lease, vnode):
+        node = self.get_node(pnode)
+        node.print_files()
+
+        self.logger.debug("Removing disk image for L%iV%i in node %i" % (lease, vnode, pnode))
+        node.remove_diskimage(lease, vnode)
+
+        node.print_files()
+        
+        get_accounting().append_stat(constants.COUNTER_DISKUSAGE, self.get_max_disk_usage())    
+        
+    def add_ramfile(self, pnode, lease_id, vnode, size):
+        node = self.get_node(pnode)
+        self.logger.debug("Adding RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
+        node.print_files()
+        f = RAMImageFile("RAM_L%iV%i" % (lease_id, vnode), size, lease_id, vnode)
+        node.add_file(f)        
+        node.print_files()
+        get_accounting().append_stat(constants.COUNTER_DISKUSAGE, self.get_max_disk_usage())
+
+    def remove_ramfile(self, pnode, lease_id, vnode):
+        node = self.get_node(pnode)
+        self.logger.debug("Removing RAM file for L%iV%i in node %i" % (lease_id, vnode, pnode))
+        node.print_files()
+        node.remove_ramfile(lease_id, vnode)
+        node.print_files()
+        get_accounting().append_stat(constants.COUNTER_DISKUSAGE, self.get_max_disk_usage())
+        
+    def get_max_disk_usage(self):
+        return max([n.get_disk_usage() for n in self.nodes.values()])
+    
+class ResourcePoolNode(object):
+    def __init__(self, node_id, hostname, capacity):
+        self.logger = logging.getLogger("RESOURCEPOOL")
+        self.id = node_id
+        self.hostname = hostname
+        self.capacity = capacity
+        self.files = []
+
+        # enactment-specific information
+        self.enactment_info = None
+        
+    def get_capacity(self):
+        return self.capacity
+           
+    def add_file(self, f):
+        self.files.append(f)
+        
+    def get_diskimage(self, lease_id, vnode, diskimage_id):
+        image = [f for f in self.files if isinstance(f, DiskImageFile) and 
+                 f.diskimage_id == diskimage_id and 
+                 f.lease_id == lease_id and
+                 f.vnode == vnode]
+        if len(image) == 0:
+            return None
+        elif len(image) == 1:
+            return image[0]
+        elif len(image) > 1:
+            self.logger.warning("More than one tainted image for L%iV%i on node %i" % (lease_id, vnode, self.nod_id))
+            return image[0]
+
+    def remove_diskimage(self, lease_id, vnode):
+        image = [f for f in self.files if isinstance(f, DiskImageFile) and 
+                 f.lease_id == lease_id and
+                 f.vnode == vnode]
+        if len(image) > 0:
+            image = image[0]
+            self.files.remove(image)
+            
+    def remove_ramfile(self, lease_id, vnode):
+        ramfile = [f for f in self.files if isinstance(f, RAMImageFile) and f.lease_id==lease_id and f.vnode==vnode]
+        if len(ramfile) > 0:
+            ramfile = ramfile[0]
+            self.files.remove(ramfile)
+                
+        
+    def get_disk_usage(self):
+        return sum([f.filesize for f in self.files])
+
+
+    def get_diskimages(self):
+        return [f for f in self.files if isinstance(f, DiskImageFile)]
+        
+    def print_files(self):
+        images = ""
+        if len(self.files) > 0:
+            images = ", ".join([str(img) for img in self.files])
+        self.logger.vdebug("Node %i files: %iMB %s" % (self.id, self.get_disk_usage(), images))
+
+    def xmlrpc_marshall(self):
+        # Convert to something we can send through XMLRPC
+        h = {}
+        h["id"] = self.id
+        h["hostname"] = self.hostname
+        h["cpu"] = self.capacity.get_quantity(constants.RES_CPU)
+        h["mem"] = self.capacity.get_quantity(constants.RES_MEM)
+                
+        return h
+        
+
+        
+class File(object):
+    def __init__(self, filename, filesize):
+        self.filename = filename
+        self.filesize = filesize
+        
+class DiskImageFile(File):
+    def __init__(self, filename, filesize, lease_id, vnode, diskimage_id):
+        File.__init__(self, filename, filesize)
+        self.lease_id = lease_id
+        self.vnode = vnode
+        self.diskimage_id = diskimage_id
+                
+    def __str__(self):
+        return "(DISK L%iv%i %s %s)" % (self.lease_id, self.vnode, self.diskimage_id, self.filename)
+
+
+class RAMImageFile(File):
+    def __init__(self, filename, filesize, lease_id, vnode):
+        File.__init__(self, filename, filesize)
+        self.lease_id = lease_id
+        self.vnode = vnode
+                
+    def __str__(self):
+        return "(RAM L%iv%i %s)" % (self.lease_id, self.vnode, self.filename)
+    
+class ResourcePoolWithReusableImages(ResourcePool):
+    def __init__(self, info_enact, vm_enact, deploy_enact):
+        ResourcePool.__init__(self, info_enact, vm_enact, deploy_enact)
+        
+        self.nodes = dict([(id,ResourcePoolNodeWithReusableImages.from_node(node)) for id, node in self.nodes.items()])
+    
+    def add_reusable_image(self, pnode, diskimage_id, imagesize, mappings, timeout):
+        self.logger.debug("Adding reusable image for %s in pnode=%i" % (mappings, pnode))
+        
+        self.logger.vdebug("Files BEFORE:")
+        self.get_node(pnode).print_files()
+        
+        imagefile = "reusable-%s" % diskimage_id
+        img = ReusableDiskImageFile(imagefile, imagesize, diskimage_id, timeout)
+        for (lease_id, vnode) in mappings:
+            img.add_mapping(lease_id, vnode)
+
+        self.get_node(pnode).add_reusable_image(img)
+
+        self.logger.vdebug("Files AFTER:")
+        self.get_node(pnode).print_files()
+        
+        get_accounting().append_stat(constants.COUNTER_DISKUSAGE, self.get_max_disk_usage())
+        return img
+    
+    def add_mapping_to_existing_reusable_image(self, pnode_id, diskimage_id, lease_id, vnode, timeout):
+        self.get_node(pnode_id).add_mapping_to_existing_reusable_image(diskimage_id, lease_id, vnode, timeout)
+    
+    def remove_diskimage(self, pnode_id, lease, vnode):
+        ResourcePool.remove_diskimage(self, pnode_id, lease, vnode)
+        self.logger.debug("Removing cached images for L%iV%i in node %i" % (lease, vnode, pnode_id))
+        for img in self.get_node(pnode_id).get_reusable_images():
+            if (lease, vnode) in img.mappings:
+                img.mappings.remove((lease, vnode))
+            self.get_node(pnode_id).print_files()
+            # Keep image around, even if it isn't going to be used
+            # by any VMs. It might be reused later on.
+            # It will be purged if space has to be made available
+            # for other images
+        
+    def get_nodes_with_reusable_image(self, diskimage_id, after = None):
+        return [n.id for n in self.get_nodes() if n.exists_reusable_image(diskimage_id, after=after)]
+
+    def exists_reusable_image(self, pnode_id, diskimage_id, after):
+        return self.get_node(pnode_id).exists_reusable_image(diskimage_id, after = after)
+    
+    
+class ResourcePoolNodeWithReusableImages(ResourcePoolNode):
+    def __init__(self, node_id, hostname, capacity):
+        ResourcePoolNode.__init__(self, node_id, hostname, capacity)
+        self.reusable_images = []
+
+    @classmethod
+    def from_node(cls, n):
+        node = cls(n.id, n.hostname, n.capacity)
+        node.enactment_info = n.enactment_info
+        return node
+    
+    def add_reusable_image(self, f):
+        self.reusable_images.append(f)
+
+    def add_mapping_to_existing_reusable_image(self, diskimage_id, lease_id, vnode, timeout):
+        for f in self.reusable_images:
+            if f.diskimage_id == diskimage_id:
+                f.add_mapping(lease_id, vnode)
+                f.update_timeout(timeout)
+                break  # Ugh
+        self.print_files()
+            
+    def get_reusable_image(self, diskimage_id, after = None, lease_id=None, vnode=None):
+        images = [i for i in self.reusable_images if i.diskimage_id == diskimage_id]
+        if after != None:
+            images = [i for i in images if i.timeout >= after]
+        if lease_id != None and vnode != None:
+            images = [i for i in images if i.has_mapping(lease_id, vnode)]
+        if len(images)>0:
+            return images[0]
+        else:
+            return None
+        
+    def exists_reusable_image(self, imagefile, after = None, lease_id=None, vnode=None):
+        entry = self.get_reusable_image(imagefile, after = after, lease_id=lease_id, vnode=vnode)
+        if entry == None:
+            return False
+        else:
+            return True
+
+    def get_reusable_images(self):
+        return self.reusable_images
+
+    def get_reusable_images_size(self):
+        return sum([f.filesize for f in self.reusable_images])
+    
+    def purge_oldest_unused_image(self):
+        unused = [img for img in self.reusable_images if not img.has_mappings()]
+        if len(unused) == 0:
+            return 0
+        else:
+            i = iter(unused)
+            oldest = i.next()
+            for img in i:
+                if img.timeout < oldest.timeout:
+                    oldest = img
+            self.reusable_images.remove(oldest)
+            return 1
+    
+    def purge_downto(self, target):
+        done = False
+        while not done:
+            removed = self.purge_oldest_unused_image()
+            if removed==0:
+                done = True
+                success = False
+            elif removed == 1:
+                if self.get_reusable_images_size() <= target:
+                    done = True
+                    success = True
+        return success
+
+    def print_files(self):
+        ResourcePoolNode.print_files(self)
+        images = ""
+        if len(self.reusable_images) > 0:
+            images = ", ".join([str(img) for img in self.reusable_images])
+        self.logger.vdebug("Node %i reusable images: %iMB %s" % (self.id, self.get_reusable_images_size(), images))
+
+class ReusableDiskImageFile(File):
+    def __init__(self, filename, filesize, diskimage_id, timeout):
+        File.__init__(self, filename, filesize)
+        self.diskimage_id = diskimage_id
+        self.mappings = set([])
+        self.timeout = timeout
+        
+    def add_mapping(self, lease_id, vnode):
+        self.mappings.add((lease_id, vnode))
+        
+    def has_mapping(self, lease_id, vnode):
+        return (lease_id, vnode) in self.mappings
+    
+    def has_mappings(self):
+        return len(self.mappings) > 0
+        
+    def update_timeout(self, timeout):
+        if timeout > self.timeout:
+            self.timeout = timeout
+        
+    def is_expired(self, curTime):
+        if self.timeout == None:
+            return False
+        else:
+            return curTime > self.timeout
+        
+    def __str__(self):
+        if self.timeout == None:
+            timeout = "NOTIMEOUT"
+        else:
+            timeout = self.timeout
+        return "(REUSABLE %s %s %s %s)" % (vnodemapstr(self.mappings), self.diskimage_id, str(timeout), self.filename)
+

Added: trunk/src/haizea/core/scheduler/slottable.py
===================================================================
--- trunk/src/haizea/core/scheduler/slottable.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/slottable.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,733 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+import haizea.common.constants as constants
+from haizea.common.utils import xmlrpc_marshall_singlevalue
+from math import floor
+import bisect
+import logging
+from operator import attrgetter
+
+"""This module provides an in-memory slot table data structure. 
+
+A slot table is essentially just a collection of resource reservations.
+See the documentation for ResourceReservation, SlotTable, and AvailabilityWindow
+for additional implementation details.
+
+
+
+
+"""
+
+
+class ResourceTuple(object):
+    """A resource tuple
+    
+    A resource tuple represents a quantity of each resource type known to
+    the slot table (e.g., CPU and memory). Single-instance types are stored
+    as one value per type; multi-instance types, if the slot table uses any,
+    are stored as a list of per-instance quantities. Resource tuples support
+    the comparisons and arithmetic the schedulers need, such as fits_in,
+    incr, and decr.
+    
+    """    
+    SINGLE_INSTANCE = 1
+    MULTI_INSTANCE = 2
+    
+    def __init__(self, slottable, res):
+        self.slottable = slottable
+        self._res = res
+        if self.slottable.has_multiinst:
+            self.multiinst = dict([(i,[]) for i in range(self.slottable.rtuple_len, self.slottable.rtuple_nres)])
+
+    @classmethod
+    def copy(cls, rt):
+        rt2 = cls(rt.slottable, rt._res[:])
+        if rt.slottable.has_multiinst:
+            rt2.multiinst = dict([(i, l[:]) for (i,l) in rt.multiinst.items()])
+        return rt2 
+        
+    def fits_in(self, res2):
+        for i in xrange(self.slottable.rtuple_len):
+            if self._res[i] > res2._res[i]:
+                return False
+        if self.slottable.has_multiinst:
+            multiinst2 = dict([(i, l[:]) for (i,l) in res2.multiinst.items()])
+            for (pos, l) in self.multiinst.items():
+                insts = multiinst2[pos]
+                for quantity in l:
+                    fits = False
+                    for i in range(len(insts)):
+                        if quantity <= insts[i]:
+                            fits = True
+                            insts[i] -= quantity
+                            break
+                    if fits == False:
+                        return False
+        return True
+    
+    def any_less(self, res2):
+        for i in xrange(self.slottable.rtuple_len):
+            if self._res[i] < res2._res[i]:
+                return True
+        return False    
+   
+    def min(self, res2):
+        for i in xrange(self.slottable.rtuple_len):
+            self._res[i] = min(self._res[i], res2._res[i])
+    
+    def decr(self, res2):
+        for slottype in xrange(self.slottable.rtuple_len):
+            self._res[slottype] -= res2._res[slottype]
+        if self.slottable.has_multiinst:
+            for (pos, l) in res2.multiinst.items():
+                insts = self.multiinst[pos]
+                for quantity in l:
+                    fits = False
+                    for i in range(len(insts)):
+                        if quantity <= insts[i]:
+                            fits = True
+                            insts[i] -= quantity
+                            break
+                    if fits == False:
+                        raise Exception, "Can't decrease"
+                    
+    def incr(self, res2):
+        for slottype in xrange(self.slottable.rtuple_len):
+            self._res[slottype] += res2._res[slottype]
+        if self.slottable.has_multiinst:
+            for (pos, l) in res2.multiinst.items():
+                self.multiinst[pos] += l[:]
+        
+    def get_by_type(self, restype):
+        return self._res[self.slottable.rtuple_restype2pos[restype]]        
+        
+    def is_zero_or_less(self):
+        return sum([v for v in self._res]) <= 0
+    
+    def __repr__(self):
+        r=""
+        for i, x in enumerate(self._res):
+            r += "%s:%i " % (i, x)
+        if self.slottable.has_multiinst:
+            r+= `self.multiinst`
+        return r
+
+    def __eq__(self, res2):
+        return self._res == res2._res
+
+    def __cmp__(self, res2):
+        return cmp(self._res, res2._res)
+
+class ResourceReservation(object):
+    """A resource reservation
+    
+    A resource reservation (or RR) is a data structure specifying that certain 
+    quantities of resources (represented as a ResourceTuple) are reserved across 
+    several nodes (each node can have a different resource tuple; e.g., 1 CPU and 
+    512 MB of memory in node 1 and 2 CPUs and 1024 MB of memory in node 2). An RR 
+    has a specific start and end time for all the nodes. Thus, if some nodes are 
+    reserved for an interval of time, and other nodes are reserved for a different 
+    interval (even if these reservations are for the same lease), two separate RRs 
+    would have to be added to the slot table.
+    
+    """    
+    
+    # Resource reservation states
+    STATE_SCHEDULED = 0
+    STATE_ACTIVE = 1
+    STATE_DONE = 2
+
+    # Mapping from state to a descriptive string
+    state_str = {STATE_SCHEDULED : "Scheduled",
+                 STATE_ACTIVE : "Active",
+                 STATE_DONE : "Done"}
+    
+    def __init__(self, lease, start, end, res):
+        self.lease = lease
+        self.start = start
+        self.end = end
+        self.state = None
+        self.resources_in_pnode = res # pnode -> ResourceTuple
+        self.logger = logging.getLogger("LEASES")
+                        
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Start          : %s" % self.start)
+        self.logger.log(loglevel, "End            : %s" % self.end)
+        self.logger.log(loglevel, "State          : %s" % ResourceReservation.state_str[self.state])
+        self.logger.log(loglevel, "Resources      : \n                         %s" % "\n                         ".join(["N%i: %s" %(i, x) for i, x in self.resources_in_pnode.items()])) 
+                
+    def xmlrpc_marshall(self):
+        # Convert to something we can send through XMLRPC
+        rr = {}                
+        rr["start"] = xmlrpc_marshall_singlevalue(self.start)
+        rr["end"] = xmlrpc_marshall_singlevalue(self.end)
+        rr["state"] = self.state
+        return rr
+
+class Node(object):
+    def __init__(self, capacity):
+        self.capacity = ResourceTuple.copy(capacity)
+
+        
+class KeyValueWrapper(object):
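+    """Wraps a key (a start or end time) and a value (a resource reservation)
+    so that reservations can be kept in time-ordered lists and searched with
+    the bisect module, which compares the wrappers by key only."""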
+    def __init__(self, key, value):
+        self.key = key
+        self.value = value
+        
+    def __cmp__(self, other):
+        return cmp(self.key, other.key)
+
+
+class SlotTable(object):
+    """Slot Table 
+    
+    The slot table is, by far, the most heavily used data structure in Haizea, so
+    access to the reservations it contains needs to be fast. In particular, we will
+    often need to quickly access reservations starting or ending at a specific time
+    (or in an interval of time). The current slot table implementation stores the
+    RRs in two ordered lists: one by starting time and another by ending time.
+    Access is done by binary search in O(log n) time. Insertion and removal require
+    O(n) time, since lists are implemented internally as arrays in CPython. We could
+    improve these times in the future by using a tree structure (which Python
+    doesn't have natively, so we'd have to include our own tree implementation),
+    although slot table accesses far outweigh insertion and removal operations.
+    The slot table is implemented with the classes SlotTable, Node, and
+    KeyValueWrapper.
+    
+    """
+    
+    def __init__(self, resource_types):
+        self.logger = logging.getLogger("SLOT")
+        self.nodes = {}
+        self.resource_types = resource_types
+        self.reservations_by_start = []
+        self.reservations_by_end = []
+        self.__dirty()
+
+        # Resource tuple fields
+        res_singleinstance = [rt for rt,ninst in resource_types if ninst == ResourceTuple.SINGLE_INSTANCE]
+        self.rtuple_len = len(res_singleinstance)
+        self.rtuple_nres = len(resource_types)
+        res_multiinstance = [(rt,ninst) for rt,ninst in resource_types if ninst == ResourceTuple.MULTI_INSTANCE]
+        self.has_multiinst = len(res_multiinstance) > 0
+        self.rtuple_restype2pos = dict([(rt,i) for (i,rt) in enumerate(res_singleinstance)])
+        pos = self.rtuple_len
+        for rt, ninst in res_multiinstance:
+            self.rtuple_restype2pos[rt] = pos
+            pos = pos + 1
+
+    def add_node(self, node_id, resourcetuple):
+        self.nodes[node_id] = Node(resourcetuple)
+
+    def create_empty_resource_tuple(self):
+        return ResourceTuple(self, [0] * self.rtuple_len)
+    
+    def create_resource_tuple_from_capacity(self, capacity):
+        rt = ResourceTuple(self, [0] * self.rtuple_len)
+        for restype in capacity.get_resource_types():
+            pos = self.rtuple_restype2pos[restype]
+            if pos < self.rtuple_len:
+                rt._res[pos] = capacity.get_quantity(restype)
+            else:
+                ninst = capacity.ninstances[restype]
+                for i in range(ninst):
+                    rt.multiinst[pos].append(capacity.get_quantity_instance(restype, i))
+                    
+        return rt
+
+    def is_empty(self):
+        return (len(self.reservations_by_start) == 0)
+
+    def is_full(self, time, restype):
+        nodes = self.get_availability(time)
+        avail = sum([node.capacity.get_by_type(restype) for node in nodes.values()])
+        return (avail == 0)
+
+    def get_total_capacity(self, restype):
+        return sum([n.capacity.get_by_type(restype) for n in self.nodes.values()])        
+
+    def get_reservations_at(self, time):
+        item = KeyValueWrapper(time, None)
+        startpos = bisect.bisect_right(self.reservations_by_start, item)
+        bystart = set([x.value for x in self.reservations_by_start[:startpos]])
+        endpos = bisect.bisect_right(self.reservations_by_end, item)
+        byend = set([x.value for x in self.reservations_by_end[endpos:]])
+        res = bystart & byend
+        return list(res)
+    
+    def get_reservations_starting_between(self, start, end):
+        startitem = KeyValueWrapper(start, None)
+        enditem = KeyValueWrapper(end, None)
+        startpos = bisect.bisect_left(self.reservations_by_start, startitem)
+        endpos = bisect.bisect_right(self.reservations_by_start, enditem)
+        res = [x.value for x in self.reservations_by_start[startpos:endpos]]
+        return res
+
+    def get_reservations_starting_after(self, start):
+        startitem = KeyValueWrapper(start, None)
+        startpos = bisect.bisect_right(self.reservations_by_start, startitem)
+        res = [x.value for x in self.reservations_by_start[startpos:]]
+        return res
+
+    def get_reservations_ending_after(self, end):
+        startitem = KeyValueWrapper(end, None)
+        startpos = bisect.bisect_right(self.reservations_by_end, startitem)
+        res = [x.value for x in self.reservations_by_end[startpos:]]
+        return res
+
+    def get_reservations_starting_on_or_after(self, start):
+        startitem = KeyValueWrapper(start, None)
+        startpos = bisect.bisect_left(self.reservations_by_start, startitem)
+        res = [x.value for x in self.reservations_by_start[startpos:]]
+        return res
+
+    def get_reservations_ending_on_or_after(self, end):
+        startitem = KeyValueWrapper(end, None)
+        startpos = bisect.bisect_left(self.reservations_by_end, startitem)
+        res = [x.value for x in self.reservations_by_end[startpos:]]
+        return res
+
+    def get_reservations_ending_between(self, start, end):
+        startitem = KeyValueWrapper(start, None)
+        enditem = KeyValueWrapper(end, None)
+        startpos = bisect.bisect_left(self.reservations_by_end, startitem)
+        endpos = bisect.bisect_right(self.reservations_by_end, enditem)
+        res = [x.value for x in self.reservations_by_end[startpos:endpos]]
+        return res
+    
+    def get_reservations_starting_at(self, time):
+        return self.get_reservations_starting_between(time, time)
+
+    def get_reservations_ending_at(self, time):
+        return self.get_reservations_ending_between(time, time) 
+
+    def get_reservations_after(self, time):
+        bystart = set(self.get_reservations_starting_after(time))
+        byend = set(self.get_reservations_ending_after(time))
+        return list(bystart | byend)
+    
+    def get_reservations_on_or_after(self, time):
+        bystart = set(self.get_reservations_starting_on_or_after(time))
+        byend = set(self.get_reservations_ending_on_or_after(time))
+        return list(bystart | byend)    
+
+    def get_changepoints_after(self, after, until=None, nodes=None):
+        changepoints = set()
+        res = self.get_reservations_after(after)
+        for rr in res:
+            if nodes == None or (nodes != None and len(set(rr.resources_in_pnode.keys()) & set(nodes)) > 0):
+                if rr.start > after:
+                    changepoints.add(rr.start)
+                if rr.end > after:
+                    changepoints.add(rr.end)
+        changepoints = list(changepoints)
+        if until != None:
+            changepoints =  [c for c in changepoints if c < until]
+        changepoints.sort()
+        return changepoints
+    
+    def add_reservation(self, rr):
+        startitem = KeyValueWrapper(rr.start, rr)
+        enditem = KeyValueWrapper(rr.end, rr)
+        bisect.insort(self.reservations_by_start, startitem)
+        bisect.insort(self.reservations_by_end, enditem)
+        self.__dirty()
+
+    # If the slot table keys (start/end times) have not been modified,
+    # just remove and reinsert.
+    def update_reservation(self, rr):
+        # TODO: Might be more efficient to resort lists
+        self.remove_reservation(rr)
+        self.add_reservation(rr)
+        self.__dirty()
+
+    # If the slot table keys (start and/or end time) have been modified,
+    # provide the old keys (so we can remove the reservation using them)
+    # along with the updated reservation.
+    def update_reservation_with_key_change(self, rr, old_start, old_end):
+        # TODO: Might be more efficient to resort lists
+        self.remove_reservation(rr, old_start, old_end)
+        self.add_reservation(rr)
+        self.__dirty()
+        
+    def remove_reservation(self, rr, start=None, end=None):
+        if start == None:
+            start = rr.start
+        if end == None:
+            end = rr.end
+        posstart = self.__get_reservation_index(self.reservations_by_start, rr, start)
+        posend = self.__get_reservation_index(self.reservations_by_end, rr, end)
+        self.reservations_by_start.pop(posstart)
+        self.reservations_by_end.pop(posend)
+        self.__dirty()
+        
+    def get_availability(self, time, min_capacity=None):
+        if not self.availabilitycache.has_key(time):
+            # Cache miss
+            self.__get_availability_cache_miss(time)
+            
+        nodes = self.availabilitycache[time]
+
+        # Keep only those nodes with enough resources
+        if min_capacity != None:
+            newnodes = {}
+            for n, node in nodes.items():
+                if min_capacity.fits_in(node.capacity):
+                    newnodes[n] = node
+            nodes = newnodes
+
+        return nodes
+
+    def get_next_reservations_in_nodes(self, time, nodes, rr_type=None, immediately_next = False):
+        nodes = set(nodes)
+        rrs_in_nodes = []
+        earliest_end_time = {}
+        rrs = self.get_reservations_starting_after(time)
+        if rr_type != None:
+            rrs = [rr for rr in rrs if isinstance(rr, rr_type)]
+            
+        # Filter the RRs by nodes
+        for rr in rrs:
+            rr_nodes = set(rr.resources_in_pnode.keys())
+            if len(nodes & rr_nodes) > 0:
+                rrs_in_nodes.append(rr)
+                end = rr.end
+                for n in rr_nodes:
+                    if not earliest_end_time.has_key(n):
+                        earliest_end_time[n] = end
+                    else:
+                        if end < earliest_end_time[n]:
+                            earliest_end_time[n] = end
+                            
+        if immediately_next:
+            # We only want to include the ones that are immediately
+            # next. 
+            rr_nodes_excl = set()
+            for n in nodes:
+                if earliest_end_time.has_key(n):
+                    end = earliest_end_time[n]
+                    rrs = [rr for rr in rrs_in_nodes if n in rr.resources_in_pnode.keys() and rr.start < end]
+                    rr_nodes_excl.update(rrs)
+            rrs_in_nodes = list(rr_nodes_excl)
+        
+        return rrs_in_nodes
+    
+    def get_next_changepoint(self, time):
+        item = KeyValueWrapper(time, None)
+        
+        startpos = bisect.bisect_right(self.reservations_by_start, item)
+        if startpos == len(self.reservations_by_start):
+            time1 = None
+        else:
+            time1 = self.reservations_by_start[startpos].value.start
+        
+        endpos = bisect.bisect_right(self.reservations_by_end, item)
+        if endpos == len(self.reservations_by_end):
+            time2 = None
+        else:
+            time2 = self.reservations_by_end[endpos].value.end
+        
+        if time1==None and time2==None:
+            return None
+        elif time1==None:
+            return time2
+        elif time2==None:
+            return time1
+        else:
+            return min(time1, time2)
+        
+    def get_availability_window(self, start):           
+        if self.awcache == None or start < self.awcache_time or (start >= self.awcache_time and not self.awcache.changepoints.has_key(start)):
+            self.__get_aw_cache_miss(start)
+        return self.awcache
+
+    def sanity_check(self):
+        # Get changepoints
+        changepoints = set()
+        for rr in [x.value for x in self.reservations_by_start]:
+            changepoints.add(rr.start)
+            changepoints.add(rr.end)
+        changepoints = list(changepoints)
+        changepoints.sort()
+        
+        offending_node = None
+        offending_cp = None
+        offending_capacity = None
+        
+        for cp in changepoints:
+            avail = self.get_availability(cp)
+            for node in avail:
+                for resource in avail[node].capacity._res:
+                    if resource < 0:
+                        return False, node, cp, avail[node].capacity
+                
+        return True, None, None, None
+
+    # ONLY for simulation
+    def get_next_premature_end(self, after):
+        from haizea.core.scheduler.vm_scheduler import VMResourceReservation
+        # Inefficient, but ok since this query seldom happens
+        res = [i.value for i in self.reservations_by_end if isinstance(i.value, VMResourceReservation) and i.value.prematureend > after]
+        if len(res) > 0:
+            prematureends = [r.prematureend for r in res]
+            prematureends.sort()
+            return prematureends[0]
+        else:
+            return None
+    
+    # ONLY for simulation
+    def get_prematurely_ending_res(self, t):
+        from haizea.core.scheduler.vm_scheduler import VMResourceReservation
+        return [i.value for i in self.reservations_by_end if isinstance(i.value, VMResourceReservation) and i.value.prematureend == t]
+
+
+    def __get_reservation_index(self, rlist, rr, key):
+        item = KeyValueWrapper(key, None)
+        pos = bisect.bisect_left(rlist, item)
+        found = False
+        while not found:
+            if rlist[pos].value == rr:
+                found = True
+            else:
+                pos += 1
+        return pos
+        
+        
+    def __get_availability_cache_miss(self, time):
+        allnodes = set(self.nodes.keys())
+        nodes = {} 
+        reservations = self.get_reservations_at(time)
+
+        # Find how many resources are available on each node
+        for r in reservations:
+            for node in r.resources_in_pnode:
+                if not nodes.has_key(node):
+                    n = self.nodes[node]
+                    nodes[node] = Node(n.capacity)
+                nodes[node].capacity.decr(r.resources_in_pnode[node])
+
+        # For the remaining nodes, use a reference to the original node, not a copy
+        missing = allnodes - set(nodes.keys())
+        for node in missing:
+            nodes[node] = self.nodes[node]                    
+            
+        self.availabilitycache[time] = nodes
+
+    def __get_aw_cache_miss(self, time):
+        self.awcache = AvailabilityWindow(self, time)
+        self.awcache_time = time
+        
+    def __dirty(self):
+        # You're a dirty, dirty slot table and you should be
+        # ashamed of having outdated caches!
+        self.availabilitycache = {}
+        self.awcache_time = None
+        self.awcache = None
+
+
+class ChangepointAvail(object):
+    def __init__(self):
+        self.nodes = {}
+        self.leases = set()
+        
+    def add_node(self, node, capacity):
+        self.nodes[node] = ChangepointNodeAvail(capacity)
+
+class ChangepointNodeAvail(object):
+    def __init__(self, capacity):
+        self.capacity = capacity     
+        self.available = ResourceTuple.copy(capacity)
+        self.leases = set()
+        self.available_if_preempting = {}
+        self.next_cp = None
+        self.next_nodeavail = None
+
+    def decr(self, capacity):
+        self.available.decr(capacity)
+
+    def add_lease(self, lease, capacity):
+        if not lease in self.leases:
+            self.leases.add(lease)
+            self.available_if_preempting[lease] = ResourceTuple.copy(capacity)
+        else:
+            self.available_if_preempting[lease].incr(capacity)
+        
+    def get_avail_withpreemption(self, leases):
+        avail = ResourceTuple.copy(self.capacity)
+        for l in self.available_if_preempting:
+            if not l in leases:
+                avail.decr(self.available_if_preempting[l])
+        return avail
+        
+class AvailEntry(object):
+    def __init__(self, available, until):
+        self.available = available
+        self.until = until
+    
+class AvailabilityInNode(object):
+    def __init__(self, avail_list):
+        self.avail_list = avail_list
+        
+    def fits(self, capacity, until):
+        for avail in self.avail_list:
+            if avail.until == None or avail.until >= until:
+                return capacity.fits_in(avail.available)
+
+    def latest_fit(self, capacity):
+        prev = None
+        for avail in self.avail_list:
+            if not capacity.fits_in(avail.available):
+                return prev
+            else:
+                prev = avail.until
+
+    def get_avail_at_end(self):
+        return self.avail_list[-1]
+
+class AvailabilityWindow(object):
+    """An availability window
+    
+    A particularly important operation with the slot table is determining the
+    "availability window" of resources starting at a given time. In a nutshell, 
+    an availability window provides a convenient abstraction over the slot table, 
+    with methods to answer questions like "If I want to start a lease at time T, 
+    are there enough resources available to start the lease?" "Will those resources 
+    be available until time T+t?" "If not, what's the longest period of time those 
+    resources will be available?"
+
+    """
+    def __init__(self, slottable, time):
+        self.slottable = slottable
+        self.logger = logging.getLogger("SLOTTABLE.WIN")
+        self.time = time
+        self.leases = set()
+
+        self.cp_list = [self.time] + self.slottable.get_changepoints_after(time)
+
+        # Create initial changepoint hash table
+        self.changepoints = dict([(cp,ChangepointAvail()) for cp in self.cp_list])
+ 
+        for cp in self.changepoints.values():
+            for node_id, node in self.slottable.nodes.items():
+                cp.add_node(node_id, node.capacity)
+        
+        rrs = self.slottable.get_reservations_after(time)
+        rrs.sort(key=attrgetter("start"))
+        pos = 0
+        # Fill in rest of changepoint hash table
+        
+        for rr in rrs:
+            # Ignore nil-duration reservations
+            if rr.start == rr.end:
+                continue
+            
+            while rr.start >= self.time and self.cp_list[pos] != rr.start:
+                pos += 1
+            lease = rr.lease
+
+            self.leases.add(lease)
+            
+            if rr.start >= self.time:
+                start_cp = self.changepoints[rr.start]
+            else:
+                start_cp = self.changepoints[self.time]
+
+            start_cp.leases.add(lease)
+            for node in rr.resources_in_pnode:
+                start_cp.nodes[node].decr(rr.resources_in_pnode[node])
+                start_cp.nodes[node].add_lease(lease, rr.resources_in_pnode[node])
+
+            pos2 = pos + 1
+
+            while self.cp_list[pos2] < rr.end:
+                cp = self.changepoints[self.cp_list[pos2]]
+                cp.leases.add(lease)
+                for node in rr.resources_in_pnode:
+                    cp.nodes[node].decr(rr.resources_in_pnode[node])
+                    cp.nodes[node].add_lease(lease, rr.resources_in_pnode[node])
+                    
+                pos2 += 1
+        
+        prev_nodeavail = {}
+        for node_id, node in self.changepoints[self.time].nodes.items():
+            prev_nodeavail[node_id] = [node]
+        
+        # Link node entries
+        for cp in self.cp_list[1:]:
+            for node_id, node in self.changepoints[cp].nodes.items():
+                prev_nodes = prev_nodeavail[node_id]
+                if prev_nodes[-1].available == node.available and prev_nodes[-1].leases == node.leases:
+                    prev_nodes.append(node)
+                else:
+                    for prev_node in prev_nodes:
+                        prev_node.next_cp = cp
+                        prev_node.next_nodeavail = node
+                    prev_nodeavail[node_id] = [node]
+                    
+
+    def get_availability_at_node(self, time, node, preempted_leases = []):
+        avails = []
+        node = self.changepoints[time].nodes[node]
+        prev_avail = None
+        prev_node = None
+        while node != None:
+            if len(preempted_leases) == 0:
+                available = ResourceTuple.copy(node.available)
+            else:
+                available = node.get_avail_withpreemption(preempted_leases)
+
+            if prev_avail != None and available.any_less(prev_avail.available):
+                available.min(prev_avail.available)
+                availentry = AvailEntry(available, None)
+                avails.append(availentry)
+                prev_avail.until = prev_node.next_cp
+                prev_avail = availentry
+            elif prev_avail == None:
+                availentry = AvailEntry(available, None)
+                avails.append(availentry)
+                prev_avail = availentry
+            
+            prev_node = node
+            node = node.next_nodeavail
+            
+        return AvailabilityInNode(avails)
+    
+    def get_nodes_at(self, time):
+        return self.changepoints[time].nodes.keys()
+
+    def get_leases_at(self, node, time):
+        return self.changepoints[time].nodes[node].leases
+    
+    def get_availability_at(self, node, time):
+        return self.changepoints[time].nodes[node].available
+    
+    def get_capacity_interval(self, node, time):
+        next_cp = self.changepoints[time].nodes[node].next_cp
+        if next_cp == None:
+            return None
+        else:
+            return next_cp - time
+        
+    def get_leases_until(self, until):
+        leases = set()
+        for cp in self.cp_list:
+            if until <= cp:
+                break
+            leases.update(self.changepoints[cp].leases)
+        return list(leases)
+

Added: trunk/src/haizea/core/scheduler/vm_scheduler.py
===================================================================
--- trunk/src/haizea/core/scheduler/vm_scheduler.py	                        (rev 0)
+++ trunk/src/haizea/core/scheduler/vm_scheduler.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,1491 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides the main classes for Haizea's VM Scheduler. All the
+scheduling code that decides when and where a lease is scheduled is contained
+in the VMScheduler class (except for the code that specifically decides
+what physical machines each virtual machine is mapped to, which is factored out
+into the "mapper" module). This module also provides the classes for the
+reservations that will be placed in the slot table and correspond to VMs. 
+"""
+
+import haizea.common.constants as constants
+from haizea.common.utils import round_datetime_delta, round_datetime, estimate_transfer_time, pretty_nodemap, get_config, get_clock, get_policy
+from haizea.core.leases import Lease, Capacity
+from haizea.core.scheduler.slottable import ResourceReservation, ResourceTuple
+from haizea.core.scheduler import ReservationEventHandler, RescheduleLeaseException, NormalEndLeaseException, EnactmentError, NotSchedulableException, InconsistentScheduleError, InconsistentLeaseStateError, MigrationResourceReservation
+from operator import attrgetter, itemgetter
+from mx.DateTime import TimeDelta
+
+import logging
+
+
+class VMScheduler(object):
+    """The Haizea VM Scheduler
+    
+    This class is responsible for taking a lease and scheduling VMs to satisfy
+    the requirements of that lease.
+    """
+    
+    def __init__(self, slottable, resourcepool, mapper):
+        """Constructor
+        
+        The constructor does little more than create the VM scheduler's
+        attributes. However, it does expect (in the arguments) a fully-constructed 
+        SlotTable, ResourcePool, and Mapper (these are constructed in the 
+        Manager's constructor). 
+        
+        Arguments:
+        slottable -- Slot table
+        resourcepool -- Resource pool where enactment commands will be sent to
+        mapper -- Mapper
+        """        
+        self.slottable = slottable
+        self.resourcepool = resourcepool
+        self.mapper = mapper
+        self.logger = logging.getLogger("VMSCHED")
+        
+        # Register the handlers for the types of reservations used by
+        # the VM scheduler
+        self.handlers = {}
+        self.handlers[VMResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = VMScheduler._handle_start_vm,
+                                on_end   = VMScheduler._handle_end_vm)
+
+        self.handlers[ShutdownResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = VMScheduler._handle_start_shutdown,
+                                on_end   = VMScheduler._handle_end_shutdown)
+
+        self.handlers[SuspensionResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = VMScheduler._handle_start_suspend,
+                                on_end   = VMScheduler._handle_end_suspend)
+
+        self.handlers[ResumptionResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = VMScheduler._handle_start_resume,
+                                on_end   = VMScheduler._handle_end_resume)
+
+        self.handlers[MemImageMigrationResourceReservation] = ReservationEventHandler(
+                                sched    = self,
+                                on_start = VMScheduler._handle_start_migrate,
+                                on_end   = VMScheduler._handle_end_migrate)
+        
+        # When using backfilling, set the number of leases that can be
+        # scheduled in the future.
+        backfilling = get_config().get("backfilling")
+        if backfilling == constants.BACKFILLING_OFF:
+            self.max_in_future = 0
+        elif backfilling == constants.BACKFILLING_AGGRESSIVE:
+            self.max_in_future = 1
+        elif backfilling == constants.BACKFILLING_CONSERVATIVE:
+            self.max_in_future = -1 # Unlimited
+        elif backfilling == constants.BACKFILLING_INTERMEDIATE:
+            self.max_in_future = get_config().get("backfilling-reservations")
+        self.future_leases = set()
+
+
+    def schedule(self, lease, nexttime, earliest):
+        """ The scheduling function
+        
+        This particular function doesn't do much except call __schedule_asap
+        and __schedule_exact (which do all the work).
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        """        
+        if lease.get_type() == Lease.BEST_EFFORT:
+            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = self.can_schedule_in_future())
+        elif lease.get_type() == Lease.ADVANCE_RESERVATION:
+            return self.__schedule_exact(lease, nexttime, earliest)
+        elif lease.get_type() == Lease.IMMEDIATE:
+            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = False)
+
+
+    def estimate_migration_time(self, lease):
+        """ Estimates the time required to migrate a lease's VMs
+
+        This function conservatively estimates that all the VMs are going to
+        be migrated to other nodes. Since the transfers from different nodes can
+        take place simultaneously, the bottleneck is the transfer from whatever
+        node has the most memory to transfer.
+        
+        Note that this method only estimates the time to migrate the memory
+        state files for the VMs. Migrating the software environment (which may
+        or may not be a disk image) is the responsibility of the preparation
+        scheduler, which has its own set of migration scheduling methods.
+
+        Arguments:
+        lease -- Lease that might be migrated
+        """                
+        migration = get_config().get("migration")
+        if migration == constants.MIGRATE_YES:
+            vmrr = lease.get_last_vmrr()
+            mem_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
+            for (vnode,pnode) in vmrr.nodes.items():
+                mem = vmrr.resources_in_pnode[pnode].get_by_type(constants.RES_MEM)
+                mem_in_pnode[pnode] += mem
+            max_mem_to_transfer = max(mem_in_pnode.values())
+            bandwidth = self.resourcepool.info.get_migration_bandwidth()
+            return estimate_transfer_time(max_mem_to_transfer, bandwidth)
+        elif migration == constants.MIGRATE_YES_NOTRANSFER:
+            return TimeDelta(seconds=0)        
+
+    def schedule_migration(self, lease, vmrr, nexttime):
+        """ Schedules migrations for a lease
+
+        Arguments:
+        lease -- Lease being migrated
+        vmrr -- The VM reservation before which the migration will take place
+        nexttime -- The next time at which the scheduler can allocate resources.
+        """
+        
+        # Determine what migrations have to be done. We do this by looking at
+        # the mapping in the previous VM RR and in the new VM RR
+        last_vmrr = lease.get_last_vmrr()
+        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes])
+        
+        # Determine if we actually have to migrate
+        mustmigrate = False
+        for vnode in vnode_migrations:
+            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
+                mustmigrate = True
+                break
+            
+        if not mustmigrate:
+            return []
+
+        # If Haizea is configured to migrate without doing any transfers,
+        # then we just return a nil-duration migration RR
+        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
+            start = nexttime
+            end = nexttime
+            res = {}
+            migr_rr = MemImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            return [migr_rr]
+
+        # Figure out what migrations can be done simultaneously
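+        # (e.g., moving vnode 1 from pnode A to B and vnode 2 from B to C
+        # cannot happen at the same time, since both transfers involve pnode B,
+        # so they are placed in two consecutive rounds)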
+        migrations = []
+        while len(vnode_migrations) > 0:
+            pnodes = set()
+            migration = {}
+            for vnode in vnode_migrations:
+                origin = vnode_migrations[vnode][0]
+                dest = vnode_migrations[vnode][1]
+                if not origin in pnodes and not dest in pnodes:
+                    migration[vnode] = vnode_migrations[vnode]
+                    pnodes.add(origin)
+                    pnodes.add(dest)
+            for vnode in migration:
+                del vnode_migrations[vnode]
+            migrations.append(migration)
+        
+        # Create migration RRs
+        start = max(last_vmrr.post_rrs[-1].end, nexttime)
+        bandwidth = self.resourcepool.info.get_migration_bandwidth()
+        migr_rrs = []
+        for m in migrations:
+            vnodes_to_migrate = m.keys()
+            max_mem_to_migrate = max([lease.requested_resources[vnode].get_quantity(constants.RES_MEM) for vnode in vnodes_to_migrate])
+            migr_time = estimate_transfer_time(max_mem_to_migrate, bandwidth)
+            end = start + migr_time
+            res = {}
+            for (origin,dest) in m.values():
+                resorigin = Capacity([constants.RES_NETOUT])
+                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
+                resdest = Capacity([constants.RES_NETIN])
+                resdest.set_quantity(constants.RES_NETIN, bandwidth)
+                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
+                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
+            migr_rr = MemImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
+            migr_rr.state = ResourceReservation.STATE_SCHEDULED
+            migr_rrs.append(migr_rr)
+            start = end
+            
+        return migr_rrs
+
+    def cancel_vm(self, vmrr):
+        """ Cancels a VM resource reservation
+
+        Arguments:
+        vmrr -- VM RR to be cancelled
+        """         
+        
+        # If this VM RR is part of a lease that was scheduled in the future,
+        # remove that lease from the set of future leases.
+        if vmrr.lease in self.future_leases:
+            self.future_leases.remove(vmrr.lease)
+
+        # If there are any pre-RRs that are scheduled, remove them
+        for rr in vmrr.pre_rrs:
+            if rr.state == ResourceReservation.STATE_SCHEDULED:
+                self.slottable.remove_reservation(rr)
+
+        # If there are any post RRs, remove them
+        for rr in vmrr.post_rrs:
+            self.slottable.remove_reservation(rr)
+        
+        # Remove the reservation itself
+        self.slottable.remove_reservation(vmrr)
+
+
+    def can_suspend_at(self, lease, t):
+        """ Determines if it is possible to suspend a lease before a given time
+
+        Arguments:
+        lease -- Lease that would be suspended
+        t -- Time by which the lease must be suspended
+        """                     
+        # TODO: Make more general, should determine vmrr based on current time
+        # This won't currently break, though, since the calling function 
+        # operates on the last VM RR.
+        vmrr = lease.get_last_vmrr()
+        time_until_suspend = t - vmrr.start
+        min_duration = self.__compute_scheduling_threshold(lease)
+        can_suspend = time_until_suspend >= min_duration        
+        return can_suspend
+    
+    
+    def preempt_vm(self, vmrr, t):
+        """ Preempts a VM reservation at a given time
+
+        This method assumes that the lease is, in fact, preemptable,
+        that the VMs are running at the given time, and that there is 
+        enough time to suspend the VMs before the given time (all these
+        checks are done in the lease scheduler).
+        
+        Arguments:
+        vmrr -- VM RR to be preempted
+        t -- Time by which the VM must be preempted
+        """             
+        
+        # Save original start and end time of the vmrr
+        old_start = vmrr.start
+        old_end = vmrr.end
+        
+        # Schedule the VM suspension
+        self.__schedule_suspension(vmrr, t)
+        
+        # Update the VMRR in the slot table
+        self.slottable.update_reservation_with_key_change(vmrr, old_start, old_end)
+        
+        # Add the suspension RRs to the VM's post-RRs
+        for susprr in vmrr.post_rrs:
+            self.slottable.add_reservation(susprr)
+            
+            
+    def get_future_reschedulable_leases(self):
+        """ Returns a list of future leases that are reschedulable.
+
+        Currently, this list is just the best-effort leases scheduled
+        in the future as determined by the backfilling algorithm.
+        Advance reservation leases, by their nature, cannot be 
+        rescheduled to find a "better" starting time.
+        """             
+        return list(self.future_leases)
+    
+
+    def can_schedule_in_future(self):
+        """ Returns True if the backfilling algorithm would allow a lease
+        to be scheduled in the future.
+
+        """             
+        if self.max_in_future == -1: # Unlimited
+            return True
+        else:
+            return len(self.future_leases) < self.max_in_future
+
+        
+    def get_utilization(self, time):
+        """ Computes resource utilization (currently just CPU-based)
+
+        Arguments:
+        time -- Time at which to determine utilization
+        """         
+        total = self.slottable.get_total_capacity(restype = constants.RES_CPU)
+        util = {}
+        reservations = self.slottable.get_reservations_at(time)
+        for r in reservations:
+            for node in r.resources_in_pnode:
+                if isinstance(r, VMResourceReservation):
+                    use = r.resources_in_pnode[node].get_by_type(constants.RES_CPU)
+                    util[type(r)] = use + util.setdefault(type(r),0.0)
+                elif isinstance(r, SuspensionResourceReservation) or isinstance(r, ResumptionResourceReservation) or isinstance(r, ShutdownResourceReservation):
+                    use = r.vmrr.resources_in_pnode[node].get_by_type(constants.RES_CPU)
+                    util[type(r)] = use + util.setdefault(type(r),0.0)
+        util[None] = total - sum(util.values())
+        for k in util:
+            util[k] /= total
+            
+        return util              
+        
+
+    def __schedule_exact(self, lease, nexttime, earliest):
+        """ Schedules VMs that must start at an exact time
+        
+        This type of lease is "easy" to schedule because we know the exact
+        start time, which means that's the only starting time we have to
+        check. So, this method does little more than call the mapper.
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        """             
+        
+        # Determine the start and end time
+        start = lease.start.requested
+        end = start + lease.duration.requested
+        
+        # Convert Capacity objects in lease object into ResourceTuples that
+        # we can hand over to the mapper.
+        requested_resources = dict([(k,self.slottable.create_resource_tuple_from_capacity(v)) for k,v in lease.requested_resources.items()])
+
+        # Let the mapper do its magic
+        mapping, actualend, preemptions = self.mapper.map(lease, 
+                                                          requested_resources,
+                                                          start, 
+                                                          end, 
+                                                          strictend = True)
+        
+        # If no mapping was found, tell the lease scheduler about it
+        if mapping == None:
+            raise NotSchedulableException, "Not enough resources in specified interval"
+        
+        # Create VM resource reservations
+        res = {}
+        
+        for (vnode,pnode) in mapping.items():
+            vnode_res = requested_resources[vnode]
+            if res.has_key(pnode):
+                res[pnode].incr(vnode_res)
+            else:
+                res[pnode] = ResourceTuple.copy(vnode_res)
+        
+        vmrr = VMResourceReservation(lease, start, end, mapping, res)
+        vmrr.state = ResourceReservation.STATE_SCHEDULED
+
+        # Schedule shutdown for the VM
+        self.__schedule_shutdown(vmrr)
+        
+        return vmrr, preemptions
+
+
+    def __schedule_asap(self, lease, nexttime, earliest, allow_in_future = None):
+        """ Schedules VMs as soon as possible
+        
+        This method is a bit more complex than __schedule_exact because
+        we need to figure out what "as soon as possible" actually is.
+        This involves attempting several mappings, at different points
+        in time, before we can schedule the lease.
+        
+        This method will always check, at least, if the lease can be scheduled
+        at the earliest possible moment at which the lease could be prepared
+        (e.g., if the lease can't start until 1 hour in the future because that's
+        the earliest possible time at which the disk images it requires can
+        be transferred, then that's when the scheduler will check). Note, however,
+        that this "earliest possible moment" is determined by the preparation
+        scheduler.
+        
+        Additionally, if the lease can't be scheduled at the earliest
+        possible moment, it can also check if the lease can be scheduled
+        in the future. This partially implements a backfilling algorithm
+        (the maximum number of future leases is stored in the max_in_future
+        attribute of VMScheduler), the other part being implemented in the
+        __process_queue method of LeaseScheduler.
+        
+        Note that, if the method is allowed to schedule in the future,
+        and assuming that the lease doesn't request more resources than
+        the site itself provides, this method will always schedule the VMs
+        successfully (since there's always an empty spot somewhere in the future).
+        
+        
+        Arguments:
+        lease -- Lease to schedule
+        nexttime -- The next time at which the scheduler can allocate resources.
+        earliest -- The earliest possible starting times on each physical node
+        allow_in_future -- Boolean indicating whether the scheduler is
+        allowed to schedule the VMs in the future.
+        """                
+        
+
+
+        #
+        # STEP 1: PROLEGOMENA
+        #
+        
+        lease_id = lease.id
+        remaining_duration = lease.duration.get_remaining_duration()
+        shutdown_time = self.__estimate_shutdown_time(lease)
+        
+        # We might be scheduling a suspended lease. If so, we will
+        # also have to schedule its resumption. Right now, just 
+        # figure out if this is such a lease.
+        mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))
+
+        # This is the minimum duration that we must be able to schedule.
+        # See __compute_scheduling_threshold for more details.
+        min_duration = self.__compute_scheduling_threshold(lease)
+        
+
+        #
+        # STEP 2: FIND THE CHANGEPOINTS
+        #
+
+        # Find the changepoints, and the available nodes at each changepoint
+        # We need to do this because the preparation scheduler may have
+        # determined that some nodes might require more time to prepare
+        # than others (e.g., if using disk image caching, some nodes
+        # might have the required disk image predeployed, while others
+        # may require transferring the image to that node).
+        # 
+        # The end result of this step is a list (changepoints) where each entry
+        # is a (t, nodes) pair, where "t" is the time of the changepoint
+        # and "nodes" is the set of nodes that are available at that time.
+        
+        if not mustresume:
+            # If this is not a suspended lease, then the changepoints
+            # are determined based on the "earliest" parameter.
+            cps = [(node, e.time) for node, e in earliest.items()]
+            cps.sort(key=itemgetter(1))
+            curcp = None
+            changepoints = []
+            nodes = []
+            for node, time in cps:
+                nodes.append(node)
+                if time != curcp:
+                    changepoints.append([time, set(nodes)])
+                    curcp = time
+                else:
+                    changepoints[-1][1] = set(nodes)
+        else:
+            # If the lease is suspended, we take into account that, if
+            # migration is disabled, we can only schedule the lease
+            # on the nodes it is currently scheduled on.
+            if get_config().get("migration") == constants.MIGRATE_NO:
+                vmrr = lease.get_last_vmrr()
+                onlynodes = set(vmrr.nodes.values())
+            else:
+                onlynodes = None               
+            changepoints = list(set([x.time for x in earliest.values()]))
+            changepoints.sort()
+            changepoints = [(x, onlynodes) for x in changepoints]
+
+
+        # If we can schedule VMs in the future,
+        # we also consider future changepoints
+        if allow_in_future:
+            res = self.slottable.get_reservations_ending_after(changepoints[-1][0])
+            # We really only care about changepoints where VMs end (which is
+            # when resources become available)
+            futurecp = [r.get_final_end() for r in res if isinstance(r, VMResourceReservation)]
+            # Corner case: Sometimes we're right in the middle of a ShutdownReservation, so it won't be
+            # included in futurecp.
+            futurecp += [r.end for r in res if isinstance(r, ShutdownResourceReservation) and not r.vmrr in res]
+            if not mustresume:
+                futurecp = [(p,None) for p in futurecp]
+            else:
+                futurecp = [(p,onlynodes) for p in futurecp]                
+        else:
+            futurecp = []
+            
+
+        #
+        # STEP 3: FIND A MAPPING
+        #
+        
+        # In this step we find a starting time and a mapping for the VMs,
+        # which involves going through the changepoints in order and seeing
+        # if we can find a mapping.
+        # Most of the work is done in the __find_fit_at_points method.
+        
+        # If resuming, we also have to allocate enough time for the resumption
+        if mustresume:
+            duration = remaining_duration + self.__estimate_resume_time(lease)
+        else:
+            duration = remaining_duration
+
+        duration += shutdown_time
+
+        in_future = False
+
+        # Convert Capacity objects in lease object into ResourceTuples that
+        # we can hand over to the mapper.
+        requested_resources = dict([(k,self.slottable.create_resource_tuple_from_capacity(v)) for k,v in lease.requested_resources.items()])
+
+        # First, try to find a mapping assuming we can't schedule in the future
+        start, end, mapping, preemptions = self.__find_fit_at_points(lease,
+                                                                     requested_resources,
+                                                                     changepoints, 
+                                                                     duration, 
+                                                                     min_duration)
+        
+        if start == None and not allow_in_future:
+                # We did not find a suitable starting time. This can happen
+                # if we're unable to schedule in the future
+                raise NotSchedulableException, "Could not find enough resources for this request"
+
+        # If we haven't been able to fit the lease, check if we can
+        # reserve it in the future
+        if start == None and allow_in_future:
+            start, end, mapping, preemptions = self.__find_fit_at_points(lease,
+                                                                         requested_resources,
+                                                                         futurecp, 
+                                                                         duration, 
+                                                                         min_duration
+                                                                         )
+            # TODO: The following will also raise an exception if a lease
+            # makes a request that could *never* be satisfied with the
+            # current resources.
+            if start == None:
+                raise InconsistentScheduleError, "Could not find a mapping in the future (this should not happen)"
+
+            in_future = True
+
+        #
+        # STEP 4: CREATE RESERVATIONS
+        #
+        
+        # At this point, the lease is feasible. We just need to create
+        # the reservations for the VMs and, possibly, for the VM resumption,
+        # suspension, and shutdown.    
+        
+        # VM resource reservation
+        res = {}
+        
+        for (vnode,pnode) in mapping.items():
+            vnode_res = requested_resources[vnode]
+            if res.has_key(pnode):
+                res[pnode].incr(vnode_res)
+            else:
+                res[pnode] = ResourceTuple.copy(vnode_res)
+
+        vmrr = VMResourceReservation(lease, start, end, mapping, res)
+        vmrr.state = ResourceReservation.STATE_SCHEDULED
+
+        # VM resumption resource reservation
+        if mustresume:
+            self.__schedule_resumption(vmrr, start)
+
+        # If the mapper couldn't find a mapping for the full duration
+        # of the lease, then we need to schedule a suspension.
+        mustsuspend = (vmrr.end - vmrr.start) < remaining_duration
+        if mustsuspend:
+            self.__schedule_suspension(vmrr, end)
+        else:
+            # Compensate for any overestimation
+            if (vmrr.end - vmrr.start) > remaining_duration + shutdown_time:
+                vmrr.end = vmrr.start + remaining_duration + shutdown_time
+            self.__schedule_shutdown(vmrr)
+        
+        if in_future:
+            self.future_leases.add(lease)
+
+        susp_str = res_str = ""
+        if mustresume:
+            res_str = " (resuming)"
+        if mustsuspend:
+            susp_str = " (suspending)"
+        self.logger.info("Lease #%i has been scheduled on nodes %s from %s%s to %s%s" % (lease.id, mapping.values(), start, res_str, end, susp_str))
+
+        return vmrr, preemptions
+
+
+    def __find_fit_at_points(self, lease, requested_resources, changepoints, duration, min_duration):
+        """ Tries to map a lease in a given list of points in time
+        
+        This method goes through a given list of points in time and tries
+        to find the earliest time at which that lease can be allocated
+        resources.
+        
+        Arguments:
+        lease -- Lease to schedule
+        requested_resources -- A dictionary of lease node -> ResourceTuple.
+        changepoints -- The list of changepoints
+        duration -- The amount of time requested
+        min_duration -- The minimum amount of time that should be allocated
+        
+        Returns:
+        start -- The time at which resources have been found for the lease
+        actualend -- The time at which the resources won't be available. Note
+        that this is not necessarily (start + duration) since the mapper
+        might be unable to find enough resources for the full requested duration.
+        mapping -- A mapping of lease nodes to physical nodes
+        preemptions -- A list of leases that would have to be preempted for
+        this mapping to be feasible.
+        (if no mapping is found, all these values are set to None)
+        """                 
+        found = False
+        
+        for time, onlynodes in changepoints:
+            start = time
+            end = start + duration
+            self.logger.debug("Attempting to map from %s to %s" % (start, end))
+            
+            # If suspension is disabled, we will only accept mappings that go
+            # from "start" strictly until "end".
+            susptype = get_config().get("suspension")
+            if susptype == constants.SUSPENSION_NONE or (lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL):
+                strictend = True
+            else:
+                strictend = False
+
+            # Let the mapper work its magic
+            mapping, actualend, preemptions = self.mapper.map(lease, 
+                                                              requested_resources,
+                                                              start, 
+                                                              end, 
+                                                              strictend = strictend,
+                                                              onlynodes = onlynodes)
+            
+            # If we found a mapping, we still have to check whether it
+            # satisfies the minimum duration.
+            if mapping != None:
+                if actualend < end:
+                    actualduration = actualend - start
+                    if actualduration >= min_duration:
+                        self.logger.debug("This lease can be scheduled from %s to %s (will require suspension)" % (start, actualend))
+                        found = True
+                        break
+                    else:
+                        self.logger.debug("This starting time does not allow for the requested minimum duration (%s < %s)" % (actualduration, min_duration))
+                else:
+                    self.logger.debug("This lease can be scheduled from %s to %s (full duration)" % (start, end))
+                    found = True
+                    break
+        
+        if found:
+            return start, actualend, mapping, preemptions
+        else:
+            return None, None, None, None
+    
+    
+    def __compute_susprem_times(self, vmrr, time, direction, exclusion, rate, override = None):
+        """ Computes the times at which suspend/resume operations would have to start
+        
+        When suspending or resuming a VM, the VM's memory is dumped to a
+        file on disk. To correctly estimate the time required to suspend
+        a lease with multiple VMs, Haizea makes sure that no two 
+        suspensions/resumptions happen at the same time (e.g., if eight
+        memory files were being saved at the same time to disk, the disk's
+        performance would be reduced in a way that is not as easy to estimate
+        as if only one file were being saved at a time). Based on a number
+        of parameters, this method estimates the times at which the 
+        suspend/resume commands would have to be sent to guarantee this
+        exclusion.
+                    
+        Arguments:
+        vmrr -- The VM reservation that will be suspended/resumed
+        time -- The time at which the suspend should end or the resume should start.
+        direction -- DIRECTION_BACKWARD: start at "time" and compute the times going
+        backward (for suspensions). DIRECTION_FORWARD: start at "time" and compute
+        the times going forward (for resumptions).
+        exclusion -- SUSPRES_EXCLUSION_GLOBAL (memory is saved to global filesystem)
+        or SUSPRES_EXCLUSION_LOCAL (saved to local filesystem)
+        rate -- The rate at which an individual VM is suspended/resumed
+        override -- If specified, then instead of computing the time to
+        suspend/resume each VM based on its memory and the "rate" parameter,
+        use this override value.
+        
+        """         
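+        # Illustrative example (hypothetical values): suspending two 1024 MB VMs
+        # at a rate of 32 MB/s with global exclusion and DIRECTION_BACKWARD yields
+        # two consecutive 32-second intervals ending at "time" (i.e., the two
+        # suspensions are serialized), plus the per-operation enactment overhead.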
+        times = [] # (start, end, {pnode -> vnodes})
+        enactment_overhead = get_config().get("enactment-overhead") 
+
+        if exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
+            # Global exclusion (which represents, e.g., reading/writing the memory image files
+            # from a global file system) meaning no two suspensions/resumptions can happen at 
+            # the same time in the entire resource pool.
+            
+            t = time
+            t_prev = None
+                
+            for (vnode,pnode) in vmrr.nodes.items():
+                if override == None:
+                    mem = vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+                    op_time = self.__compute_suspend_resume_time(mem, rate)
+                else:
+                    op_time = override
+
+                op_time += enactment_overhead
+                    
+                t_prev = t
+                
+                if direction == constants.DIRECTION_FORWARD:
+                    t += op_time
+                    times.append((t_prev, t, {pnode:[vnode]}))
+                elif direction == constants.DIRECTION_BACKWARD:
+                    t -= op_time
+                    times.append((t, t_prev, {pnode:[vnode]}))
+
+        elif exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
+            # Local exclusion (which represents, e.g., reading/writing the memory image files
+            # from a local file system) means no two suspensions/resumptions can happen at the
+            # same time on the same physical node.
+            pervnode_times = [] # (start, end, vnode)
+            vnodes_in_pnode = {}
+            for (vnode,pnode) in vmrr.nodes.items():
+                vnodes_in_pnode.setdefault(pnode, []).append(vnode)
+            for pnode in vnodes_in_pnode:
+                t = time
+                t_prev = None
+                for vnode in vnodes_in_pnode[pnode]:
+                    if override == None:
+                        mem = vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+                        op_time = self.__compute_suspend_resume_time(mem, rate)
+                    else:
+                        op_time = override                    
+                    
+                    t_prev = t
+                    
+                    if direction == constants.DIRECTION_FORWARD:
+                        t += op_time
+                        pervnode_times.append((t_prev, t, vnode))
+                    elif direction == constants.DIRECTION_BACKWARD:
+                        t -= op_time
+                        pervnode_times.append((t, t_prev, vnode))
+            
+            # Consolidate suspend/resume operations happening at the same time
+            uniq_times = set([(start, end) for (start, end, vnode) in pervnode_times])
+            for (start, end) in uniq_times:
+                vnodes = [x[2] for x in pervnode_times if x[0] == start and x[1] == end]
+                node_mappings = {}
+                for vnode in vnodes:
+                    pnode = vmrr.nodes[vnode]
+                    node_mappings.setdefault(pnode, []).append(vnode)
+                times.append([start,end,node_mappings])
+        
+            # Add the enactment overhead
+            for t in times:
+                num_vnodes = sum([len(vnodes) for vnodes in t[2].values()])
+                overhead = TimeDelta(seconds = num_vnodes * enactment_overhead)
+                if direction == constants.DIRECTION_FORWARD:
+                    t[1] += overhead
+                elif direction == constants.DIRECTION_BACKWARD:
+                    t[0] -= overhead
+                    
+            # Fix overlaps
+            if direction == constants.DIRECTION_FORWARD:
+                times.sort(key=itemgetter(0))
+            elif direction == constants.DIRECTION_BACKWARD:
+                times.sort(key=itemgetter(1))
+                times.reverse()
+                
+            prev_start = None
+            prev_end = None
+            for t in times:
+                if prev_start != None:
+                    start = t[0]
+                    end = t[1]
+                    if direction == constants.DIRECTION_FORWARD:
+                        if start < prev_end:
+                            diff = prev_end - start
+                            t[0] += diff
+                            t[1] += diff
+                    elif direction == constants.DIRECTION_BACKWARD:
+                        if end > prev_start:
+                            diff = end - prev_start
+                            t[0] -= diff
+                            t[1] -= diff
+                prev_start = t[0]
+                prev_end = t[1]
+        
+        return times
+    
+    
+    def __schedule_shutdown(self, vmrr):
+        """ Schedules the shutdown of a VM reservation
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be shutdown
+        
+        """                 
+        config = get_config()
+        shutdown_time = self.__estimate_shutdown_time(vmrr.lease)
+
+        start = vmrr.end - shutdown_time
+        end = vmrr.end
+        
+        shutdown_rr = ShutdownResourceReservation(vmrr.lease, start, end, vmrr.resources_in_pnode, vmrr.nodes, vmrr)
+        shutdown_rr.state = ResourceReservation.STATE_SCHEDULED
+                
+        vmrr.update_end(start)
+        
+        # If there are any post RRs, remove them
+        for rr in vmrr.post_rrs:
+            self.slottable.remove_reservation(rr)
+        vmrr.post_rrs = []
+
+        vmrr.post_rrs.append(shutdown_rr)
+
+
+    def __schedule_suspension(self, vmrr, suspend_by):
+        """ Schedules the suspension of a VM reservation
+                         
+        Most of the work is done in __compute_susprem_times. See that
+        method's documentation for more details.
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be suspended
+        suspend_by -- The time by which the VMs should be suspended.
+        
+        """            
+        config = get_config()
+        susp_exclusion = config.get("suspendresume-exclusion")
+        override = get_config().get("override-suspend-time")
+        rate = config.get("suspend-rate") 
+
+        if suspend_by < vmrr.start or suspend_by > vmrr.end:
+            raise InconsistentScheduleError, "Tried to schedule a suspension by %s, which is outside the VMRR's duration (%s-%s)" % (suspend_by, vmrr.start, vmrr.end)
+
+        # Find the suspension times
+        times = self.__compute_susprem_times(vmrr, suspend_by, constants.DIRECTION_BACKWARD, susp_exclusion, rate, override)
+        
+        # Create the suspension resource reservations
+        suspend_rrs = []
+        for (start, end, node_mappings) in times:
+            suspres = {}
+            all_vnodes = []
+            for (pnode,vnodes) in node_mappings.items():
+                num_vnodes = len(vnodes)
+                r = Capacity([constants.RES_MEM,constants.RES_DISK])
+                mem = 0
+                for vnode in vnodes:
+                    mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+                r.set_quantity(constants.RES_MEM, mem * num_vnodes)
+                r.set_quantity(constants.RES_DISK, mem * num_vnodes)
+                suspres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)          
+                all_vnodes += vnodes     
+                             
+            susprr = SuspensionResourceReservation(vmrr.lease, start, end, suspres, all_vnodes, vmrr)
+            susprr.state = ResourceReservation.STATE_SCHEDULED
+            suspend_rrs.append(susprr)
+                
+        suspend_rrs.sort(key=attrgetter("start"))
+            
+        susp_start = suspend_rrs[0].start
+        if susp_start < vmrr.start:
+            raise InconsistentScheduleError, "Determined suspension should start at %s, before the VMRR's start (%s) -- Suspend time not being properly estimated?" % (susp_start, vmrr.start)
+        
+        vmrr.update_end(susp_start)
+        
+        # If there are any post RRs, remove them
+        for rr in vmrr.post_rrs:
+            self.slottable.remove_reservation(rr)
+        vmrr.post_rrs = []
+
+        # Add the suspension RRs to the VM RR
+        for susprr in suspend_rrs:
+            vmrr.post_rrs.append(susprr)       
+            
+            
+    def __schedule_resumption(self, vmrr, resume_at):
+        """ Schedules the resumption of a VM reservation
+                         
+        Most of the work is done in __compute_susprem_times. See that
+        method's documentation for more details.
+                            
+        Arguments:
+        vmrr -- The VM reservation that will be resumed
+        resume_at -- The time at which the resumption should start
+        
+        """                 
+        config = get_config()
+        resm_exclusion = config.get("suspendresume-exclusion")        
+        override = get_config().get("override-resume-time")
+        rate = config.get("resume-rate") 
+
+        if resume_at < vmrr.start or resume_at > vmrr.end:
+            raise InconsistentScheduleError, "Tried to schedule a resumption at %s, which is outside the VMRR's duration (%s-%s)" % (resume_at, vmrr.start, vmrr.end)
+
+        # Find the resumption times
+        times = self.__compute_susprem_times(vmrr, resume_at, constants.DIRECTION_FORWARD, resm_exclusion, rate, override)
+        
+        # Create the resumption resource reservations
+        resume_rrs = []
+        for (start, end, node_mappings) in times:
+            resmres = {}
+            all_vnodes = []
+            for (pnode,vnodes) in node_mappings.items():
+                num_vnodes = len(vnodes)
+                r = Capacity([constants.RES_MEM,constants.RES_DISK])
+                mem = 0
+                for vnode in vnodes:
+                    mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+                r.set_quantity(constants.RES_MEM, mem * num_vnodes)
+                r.set_quantity(constants.RES_DISK, mem * num_vnodes)
+                resmres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)
+                all_vnodes += vnodes
+            resmrr = ResumptionResourceReservation(vmrr.lease, start, end, resmres, all_vnodes, vmrr)
+            resmrr.state = ResourceReservation.STATE_SCHEDULED
+            resume_rrs.append(resmrr)
+                
+        resume_rrs.sort(key=attrgetter("start"))
+            
+        resm_end = resume_rrs[-1].end
+        if resm_end > vmrr.end:
+            raise InconsistentScheduleError, "Determined resumption would end at %s, after the VMRR's end (%s) -- Resume time not being properly estimated?" % (resm_end, vmrr.end)
+        
+        vmrr.update_start(resm_end)
+        
+        # Add the resumption RRs to the VM RR
+        for resmrr in resume_rrs:
+            vmrr.pre_rrs.append(resmrr)        
+           
+           
+    def __compute_suspend_resume_time(self, mem, rate):
+        """ Compute the time to suspend/resume a single VM
+                            
+        Arguments:
+        mem -- Amount of memory used by the VM
+        rate -- The rate at which an individual VM is suspended/resumed
+        
+        """            
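+        # For example, a VM using 1024 MB of memory suspended at a rate of
+        # 32 MB/s takes 1024 / 32 = 32 seconds (rounded below by
+        # round_datetime_delta).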
+        time = float(mem) / rate
+        time = round_datetime_delta(TimeDelta(seconds = time))
+        return time
+    
+    
+    def __estimate_suspend_time(self, lease):
+        """ Estimate the time to suspend an entire lease
+                            
+        Most of the work is done in __estimate_suspend_resume_time. See
+        that method's documentation for more details.
+        
+        Arguments:
+        lease -- Lease that is going to be suspended
+        
+        """               
+        rate = get_config().get("suspend-rate")
+        override = get_config().get("override-suspend-time")
+        if override != None:
+            return override
+        else:
+            return self.__estimate_suspend_resume_time(lease, rate)
+
+
+    def __estimate_resume_time(self, lease):
+        """ Estimate the time to resume an entire lease
+                            
+        Most of the work is done in __estimate_suspend_resume_time. See
+        that method's documentation for more details.
+        
+        Arguments:
+        lease -- Lease that is going to be resumed
+        
+        """           
+        rate = get_config().get("resume-rate") 
+        override = get_config().get("override-resume-time")
+        if override != None:
+            return override
+        else:
+            return self.__estimate_suspend_resume_time(lease, rate)    
+    
+    
+    def __estimate_suspend_resume_time(self, lease, rate):
+        """ Estimate the time to suspend/resume an entire lease
+                            
+        Note that, unlike __compute_suspend_resume_time, this estimates
+        the time to suspend/resume an entire lease (which may involve
+        suspending several VMs)
+        
+        Arguments:
+        lease -- Lease that is going to be suspended/resumed
+        rate -- The rate at which an individual VM is suspended/resumed
+        
+        """              
+        susp_exclusion = get_config().get("suspendresume-exclusion")        
+        enactment_overhead = get_config().get("enactment-overhead") 
+        mem = 0
+        for vnode in lease.requested_resources:
+            mem += lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
+        if susp_exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
+            return lease.numnodes * (self.__compute_suspend_resume_time(mem, rate) + enactment_overhead)
+        elif susp_exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
+            # Overestimating: this uses the same formula as the global case
+            # (fully serialized suspend/resume), even though local exclusion
+            # allows operations on different physical nodes to overlap.
+            return lease.numnodes * (self.__compute_suspend_resume_time(mem, rate) + enactment_overhead)
+
+
+    def __estimate_shutdown_time(self, lease):
+        """ Estimate the time to shutdown an entire lease
+                            
+        Arguments:
+        lease -- Lease that is going to be shutdown
+        
+        """            
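+        # For example (hypothetical values): a 4-node lease with a 20-second
+        # shutdown time and 1 second of per-node enactment overhead is
+        # estimated at 20 + (1 * 4) = 24 seconds.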
+        enactment_overhead = get_config().get("enactment-overhead").seconds
+        return get_config().get("shutdown-time") + (enactment_overhead * lease.numnodes)
+
+
+    def __compute_scheduling_threshold(self, lease):
+        """ Compute the scheduling threshold (the 'minimum duration') of a lease
+        
+        To avoid thrashing, Haizea will not schedule a lease unless all overheads
+        can be correctly scheduled (which includes image transfers, suspensions, etc.).
+        However, this can still result in situations where a lease is prepared,
+        and then immediately suspended because of a blocking lease in the future.
+        The scheduling threshold is used to specify that a lease must
+        not be scheduled unless it is guaranteed to run for a minimum amount of
+        time (the rationale behind this is that you ideally don't want leases
+        to be scheduled if they're not going to be active for at least as much time
+        as was spent in overheads).
+        
+        An important part of computing this value is the "scheduling threshold factor".
+        The default value is 1, meaning that the lease will be active for at least
+        as much time T as was spent on overheads (e.g., if preparing the lease requires
+        60 seconds, and we know that it will have to be suspended, requiring 30 seconds,
+        Haizea won't schedule the lease unless it can run for at least 90 seconds).
+        In other words, a scheduling threshold factor of F requires a minimum duration of
+        F*T. A value of 0 could lead to thrashing, since Haizea could end up with
+        situations where a lease starts and immediately gets suspended.         
+        
+        Arguments:
+        lease -- Lease for which we want to find the scheduling threshold
+        """
+        # TODO: Take into account other things like boot overhead, migration overhead, etc.
+        config = get_config()
+        threshold = config.get("force-scheduling-threshold")
+        if threshold != None:
+            # If there is a hard-coded threshold, use that
+            return threshold
+        else:
+            factor = config.get("scheduling-threshold-factor")
+            
+            # First, figure out the "safe duration" (the minimum duration
+            # so that we at least allocate enough time for all the
+            # overheads).
+            susp_overhead = self.__estimate_suspend_time(lease)
+            safe_duration = susp_overhead
+            
+            if lease.get_state() == Lease.STATE_SUSPENDED_QUEUED:
+                resm_overhead = self.__estimate_resume_time(lease)
+                safe_duration += resm_overhead
+            
+            # TODO: Incorporate other overheads into the minimum duration
+            min_duration = safe_duration
+            
+            # At the very least, we want to allocate enough time for the
+            # safe duration (otherwise, we'll end up with incorrect schedules,
+            # where a lease is scheduled to suspend, but isn't even allocated
+            # enough time to suspend). 
+            # The factor is assumed to be non-negative: a factor of 0
+            # means we only allocate enough time for potential suspend/resume
+            # operations, while a factor of 1 means the lease will get as much
+            # running time as is spent on the runtime overheads involved in
+            # setting it up.
+            threshold = safe_duration + (min_duration * factor)
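+            # For example (hypothetical values): a suspend overhead of 30 seconds
+            # for a lease that also has to be resumed (60 seconds) gives a safe
+            # duration of 90 seconds; with a factor of 1, the threshold is
+            # 90 + (90 * 1) = 180 seconds.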
+            return threshold
+
+
+    #-------------------------------------------------------------------#
+    #                                                                   #
+    #                  SLOT TABLE EVENT HANDLERS                        #
+    #                                                                   #
+    #-------------------------------------------------------------------#
+
+    def _handle_start_vm(self, l, rr):
+        """ Handles the start of a VMResourceReservation       
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        rr -- The VMResourceReservation
+        """        
+        self.logger.debug("LEASE-%i Start of handleStartVM" % l.id)
+        l.print_contents()
+        lease_state = l.get_state()
+        if lease_state == Lease.STATE_READY:
+            l.set_state(Lease.STATE_ACTIVE)
+            rr.state = ResourceReservation.STATE_ACTIVE
+            now_time = get_clock().get_time()
+            l.start.actual = now_time
+            
+            try:
+                self.resourcepool.start_vms(l, rr)
+            except EnactmentError, exc:
+                self.logger.error("Enactment error when starting VMs.")
+                # Right now, this is a non-recoverable error, so we just
+                # propagate it upwards to the lease scheduler
+                # In the future, it may be possible to react to these
+                # kind of errors.
+                raise
+                
+        elif lease_state == Lease.STATE_RESUMED_READY:
+            l.set_state(Lease.STATE_ACTIVE)
+            rr.state = ResourceReservation.STATE_ACTIVE
+            # No enactment to do here, since all the suspend/resume actions are
+            # handled during the suspend/resume RRs
+        else:
+            raise InconsistentLeaseStateError(l, doing = "starting a VM")
+        
+        # If this was a future reservation (as determined by backfilling),
+        # remove that status, since the future is now.
+        if rr.lease in self.future_leases:
+            self.future_leases.remove(l)
+        
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleStartVM" % l.id)
+        self.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
+
+
+    def _handle_end_vm(self, l, rr):
+        """ Handles the end of a VMResourceReservation       
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        rr -- The VMResourceReservation
+        """        
+        self.logger.debug("LEASE-%i Start of handleEndVM" % l.id)
+        self.logger.vdebug("LEASE-%i Before:" % l.id)
+        l.print_contents()
+        now_time = round_datetime(get_clock().get_time())
+        diff = now_time - rr.start
+        l.duration.accumulate_duration(diff)
+        rr.state = ResourceReservation.STATE_DONE
+                
+        self.logger.vdebug("LEASE-%i After:" % l.id)
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndVM" % l.id)
+        self.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
+
+
+    def _handle_unscheduled_end_vm(self, l, vmrr):
+        """ Handles the unexpected end of a VMResourceReservation
+        
+        Arguments:
+        l -- Lease the VMResourceReservation belongs to
+        vmrr -- The VMResourceReservation
+        """        
+        
+        self.logger.info("LEASE-%i The VM has ended prematurely." % l.id)
+        for rr in vmrr.post_rrs:
+            self.slottable.remove_reservation(rr)
+        vmrr.post_rrs = []
+        vmrr.end = get_clock().get_time()
+        self._handle_end_vm(l, vmrr)
+
+
+    def _handle_start_suspend(self, l, rr):
+        """ Handles the start of a SuspensionResourceReservation       
+        
+        Arguments:
+        l -- Lease the SuspensionResourceReservation belongs to
+        rr -- The SuspensionResourceReservation
+        
+        """
+        self.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_ACTIVE
+        
+        try:
+            self.resourcepool.suspend_vms(l, rr)
+        except EnactmentError, exc:
+            self.logger.error("Enactment error when suspending VMs.")
+            # Right now, this is a non-recoverable error, so we just
+            # propagate it upwards to the lease scheduler
+            # In the future, it may be possible to react to these
+            # kind of errors.
+            raise            
+
+        if rr.is_first():
+            l.set_state(Lease.STATE_SUSPENDING)
+            l.print_contents()
+            self.logger.info("Suspending lease %i..." % (l.id))
+        self.logger.debug("LEASE-%i End of handleStartSuspend" % l.id)
+
+
+    def _handle_end_suspend(self, l, rr):
+        """ Handles the end of a SuspensionResourceReservation       
+        
+        Arguments:
+        l -- Lease the SuspensionResourceReservation belongs to
+        rr -- The SuspensionResourceReservation
+        """               
+        self.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id)
+        l.print_contents()
+        # TODO: React to incomplete suspend
+        self.resourcepool.verify_suspend(l, rr)
+        rr.state = ResourceReservation.STATE_DONE
+        if rr.is_last():
+            l.set_state(Lease.STATE_SUSPENDED_PENDING)
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndSuspend" % l.id)
+        self.logger.info("Lease %i suspended." % (l.id))
+        
+        if l.get_state() == Lease.STATE_SUSPENDED_PENDING:
+            raise RescheduleLeaseException
+
+
+    def _handle_start_resume(self, l, rr):
+        """ Handles the start of a ResumptionResourceReservation       
+        
+        Arguments:
+        l -- Lease the ResumptionResourceReservation belongs to
+        rr -- The ResumptionResourceReservation
+        
+        """             
+        self.logger.debug("LEASE-%i Start of handleStartResume" % l.id)
+        l.print_contents()
+        
+        try:
+            self.resourcepool.resume_vms(l, rr)
+        except EnactmentError, exc:
+            self.logger.error("Enactment error when resuming VMs.")
+            # Right now, this is a non-recoverable error, so we just
+            # propagate it upwards to the lease scheduler
+            # In the future, it may be possible to react to these
+            # kind of errors.
+            raise
+                    
+        rr.state = ResourceReservation.STATE_ACTIVE
+        if rr.is_first():
+            l.set_state(Lease.STATE_RESUMING)
+            l.print_contents()
+            self.logger.info("Resuming lease %i..." % (l.id))
+        self.logger.debug("LEASE-%i End of handleStartResume" % l.id)
+
+
+    def _handle_end_resume(self, l, rr):
+        """ Handles the end of a ResumptionResourceReservation       
+        
+        Arguments:
+        l -- Lease the ResumptionResourceReservation belongs to
+        rr -- The ResumptionResourceReservation
+        
+        """        
+        self.logger.debug("LEASE-%i Start of handleEndResume" % l.id)
+        l.print_contents()
+        # TODO: React to incomplete resume
+        self.resourcepool.verify_resume(l, rr)
+        rr.state = ResourceReservation.STATE_DONE
+        if rr.is_last():
+            l.set_state(Lease.STATE_RESUMED_READY)
+            self.logger.info("Resumed lease %i" % (l.id))
+        for vnode, pnode in rr.vmrr.nodes.items():
+            self.resourcepool.remove_ramfile(pnode, l.id, vnode)
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
+
+
+    def _handle_start_shutdown(self, l, rr):
+        """ Handles the start of a ShutdownResourceReservation       
+        
+        Arguments:
+        l -- Lease the ShutdownResourceReservation belongs to
+        rr -- The ShutdownResourceReservation
+        """        
+        
+        self.logger.debug("LEASE-%i Start of handleStartShutdown" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_ACTIVE
+        try:
+            self.resourcepool.stop_vms(l, rr)
+        except EnactmentError, exc:
+            self.logger.error("Enactment error when shutting down VMs.")
+            # Right now, this is a non-recoverable error, so we just
+            # propagate it upwards to the lease scheduler
+            # In the future, it may be possible to react to these
+            # kind of errors.
+            raise
+        
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleStartShutdown" % l.id)
+
+
+    def _handle_end_shutdown(self, l, rr):
+        """ Handles the end of a ShutdownResourceReservation
+        
+        Arguments:
+        l -- Lease the ShutdownResourceReservation belongs to
+        rr -- The ShutdownResourceReservation
+        
+        """
+        self.logger.debug("LEASE-%i Start of handleEndShutdown" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_DONE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndShutdown" % l.id)
+        self.logger.info("Lease %i's VMs have shut down." % (l.id))
+        raise NormalEndLeaseException
+    
+
+    def _handle_start_migrate(self, l, rr):
+        """ Handles the start of a MemImageMigrationResourceReservation       
+        
+        Arguments:
+        l -- Lease the MemImageMigrationResourceReservation belongs to
+        rr -- The MemImageMigrationResourceReservation
+        
+        """             
+        self.logger.debug("LEASE-%i Start of handleStartMigrate" % l.id)
+        l.print_contents()
+        rr.state = ResourceReservation.STATE_ACTIVE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleStartMigrate" % l.id)
+        self.logger.info("Migrating lease %i..." % (l.id))
+
+
+    def _handle_end_migrate(self, l, rr):
+        """ Handles the end of a MemImageMigrationResourceReservation       
+        
+        Arguments:
+        l -- Lease the MemImageMigrationResourceReservation belongs to
+        rr -- The MemImageMigrationResourceReservation
+        
+        """                
+        self.logger.debug("LEASE-%i Start of handleEndMigrate" % l.id)
+        l.print_contents()
+
+        for vnode in rr.transfers:
+            origin = rr.transfers[vnode][0]
+            dest = rr.transfers[vnode][1]
+            
+            # Update RAM files
+            self.resourcepool.remove_ramfile(origin, l.id, vnode)
+            self.resourcepool.add_ramfile(dest, l.id, vnode, l.requested_resources[vnode].get_quantity(constants.RES_MEM))
+        
+        rr.state = ResourceReservation.STATE_DONE
+        l.print_contents()
+        self.logger.debug("LEASE-%i End of handleEndMigrate" % l.id)
+        self.logger.info("Migrated lease %i..." % (l.id))
+
+
+
+class VMResourceReservation(ResourceReservation):
+    def __init__(self, lease, start, end, nodes, res):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.nodes = nodes # { vnode -> pnode }
+        self.pre_rrs = []
+        self.post_rrs = []
+
+        # ONLY for simulation
+        self.__update_prematureend()
+
+    def update_start(self, time):
+        self.start = time
+        # ONLY for simulation
+        self.__update_prematureend()
+
+    def update_end(self, time):
+        self.end = time
+        # ONLY for simulation
+        self.__update_prematureend()
+        
+    # ONLY for simulation
+    def __update_prematureend(self):
+        if self.lease.duration.known != None:
+            remdur = self.lease.duration.get_remaining_known_duration()
+            rrdur = self.end - self.start
+            if remdur < rrdur:
+                self.prematureend = self.start + remdur
+                # Kludgy, but this corner case actually does happen
+                # (because of preemptions, it may turn out that
+                # the premature end time coincides with the
+                # starting time of the VMRR)
+                if self.prematureend == self.start:
+                    self.prematureend += 1 
+            else:
+                self.prematureend = None
+        else:
+            self.prematureend = None 
+
+    def get_final_end(self):
+        if len(self.post_rrs) == 0:
+            return self.end
+        else:
+            return self.post_rrs[-1].end
+
+    def is_suspending(self):
+        return len(self.post_rrs) > 0 and isinstance(self.post_rrs[0], SuspensionResourceReservation)
+
+    def is_shutting_down(self):
+        return len(self.post_rrs) > 0 and isinstance(self.post_rrs[0], ShutdownResourceReservation)
+
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        for resmrr in self.pre_rrs:
+            resmrr.print_contents(loglevel)
+            self.logger.log(loglevel, "--")
+        self.logger.log(loglevel, "Type           : VM")
+        self.logger.log(loglevel, "Nodes          : %s" % pretty_nodemap(self.nodes))
+        if self.prematureend != None:
+            self.logger.log(loglevel, "Premature end  : %s" % self.prematureend)
+        ResourceReservation.print_contents(self, loglevel)
+        for susprr in self.post_rrs:
+            self.logger.log(loglevel, "--")
+            susprr.print_contents(loglevel)
+
+        
+class SuspensionResourceReservation(ResourceReservation):
+    def __init__(self, lease, start, end, res, vnodes, vmrr):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.vmrr = vmrr
+        self.vnodes = vnodes
+
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Type           : SUSPEND")
+        self.logger.log(loglevel, "Vnodes         : %s" % self.vnodes)
+        ResourceReservation.print_contents(self, loglevel)
+        
+    def is_first(self):
+        return (self == self.vmrr.post_rrs[0])
+
+    def is_last(self):
+        return (self == self.vmrr.post_rrs[-1])   
+        
+        
+class ResumptionResourceReservation(ResourceReservation):
+    def __init__(self, lease, start, end, res, vnodes, vmrr):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.vmrr = vmrr
+        self.vnodes = vnodes
+
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Type           : RESUME")
+        self.logger.log(loglevel, "Vnodes         : %s" % self.vnodes)
+        ResourceReservation.print_contents(self, loglevel)
+
+    def is_first(self):
+        resm_rrs = [r for r in self.vmrr.pre_rrs if isinstance(r, ResumptionResourceReservation)]
+        return (self == resm_rrs[0])
+
+    def is_last(self):
+        resm_rrs = [r for r in self.vmrr.pre_rrs if isinstance(r, ResumptionResourceReservation)]
+        return (self == resm_rrs[-1])
+    
+    
+class ShutdownResourceReservation(ResourceReservation):
+    def __init__(self, lease, start, end, res, vnodes, vmrr):
+        ResourceReservation.__init__(self, lease, start, end, res)
+        self.vmrr = vmrr
+        self.vnodes = vnodes
+
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Type           : SHUTDOWN")
+        ResourceReservation.print_contents(self, loglevel)
+
+
+class MemImageMigrationResourceReservation(MigrationResourceReservation):
+    def __init__(self, lease, start, end, res, vmrr, transfers):
+        MigrationResourceReservation.__init__(self, lease, start, end, res, vmrr, transfers)
+  
+    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
+        self.logger.log(loglevel, "Type           : MEM IMAGE MIGRATION")
+        self.logger.log(loglevel, "Transfers      : %s" % self.transfers)
+        ResourceReservation.print_contents(self, loglevel)   
\ No newline at end of file

Added: trunk/src/haizea/policies/__init__.py
===================================================================
--- trunk/src/haizea/policies/__init__.py	                        (rev 0)
+++ trunk/src/haizea/policies/__init__.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,32 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module includes sub-modules with pluggable policies for Haizea.
+"""
+
+# The following dictionaries provide a shorthand notation to refer to
+# the policy classes (this shorthand is used in the configuration file,
+# so the fully-qualified class name doesn't have to be written)
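+#
+# For example, a configuration file can then simply use the short names
+# (illustrative snippet):
+#
+#   [scheduling]
+#   policy-admission: accept-all
+#   policy-preemption: ar-preempts-everything
+#   policy-host-selection: greedy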
+admission_class_mappings = {"accept-all": "haizea.policies.admission.AcceptAllPolicy",
+                            "no-ARs": "haizea.policies.admission.NoARsPolicy"}
+
+preemption_class_mappings = {"no-preemption": "haizea.policies.preemption.NoPreemptionPolicy",
+                             "ar-preempts-everything": "haizea.policies.preemption.ARPreemptsEverythingPolicy"}
+
+host_class_mappings = {"no-policy": "haizea.policies.host_selection.NoPolicy",
+                       "greedy": "haizea.policies.host_selection.GreedyPolicy"}
\ No newline at end of file

Added: trunk/src/haizea/policies/admission.py
===================================================================
--- trunk/src/haizea/policies/admission.py	                        (rev 0)
+++ trunk/src/haizea/policies/admission.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,72 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides pluggable lease admission policies. See the documentation
+for haizea.core.scheduler.policy.LeaseAdmissionPolicy for more details on
+lease admission policies.
+"""
+
+
+from haizea.core.scheduler.policy import LeaseAdmissionPolicy
+from haizea.core.leases import Lease
+
+class AcceptAllPolicy(LeaseAdmissionPolicy):
+    """A simple admission policy: all lease requests are accepted.
+    """
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """  
+        LeaseAdmissionPolicy.__init__(self, slottable)
+        
+    def accept_lease(self, lease):
+        """Lease admission function
+        
+        See class documentation for details on what policy is implemented here.
+        Returns True if the lease can be accepted, False if it should be rejected.
+        
+        Argument
+        lease -- Lease request
+        """           
+        return True  
+    
+class NoARsPolicy(LeaseAdmissionPolicy):
+    """A simple admission policy: all lease requests, except AR requests,
+    are accepted.
+    """
+    
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """  
+        LeaseAdmissionPolicy.__init__(self, slottable)
+        
+    def accept_lease(self, lease):
+        """Lease admission function
+        
+        See class documentation for details on what policy is implemented here.
+        Returns True if the lease can be accepted, False if it should be rejected.
+        
+        Argument
+        lease -- Lease request
+        """        
+        return lease.get_type() != Lease.ADVANCE_RESERVATION
\ No newline at end of file

Added: trunk/src/haizea/policies/host_selection.py
===================================================================
--- trunk/src/haizea/policies/host_selection.py	                        (rev 0)
+++ trunk/src/haizea/policies/host_selection.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,111 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides pluggable host selection policies. See the documentation
+for haizea.core.scheduler.policy.HostSelectionPolicy for more details on
+host selection policies.
+"""
+
+from haizea.core.scheduler.policy import HostSelectionPolicy
+
+class NoPolicy(HostSelectionPolicy):
+    """A simple host selection policy: all hosts have the same score
+    """
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """  
+        HostSelectionPolicy.__init__(self, slottable)
+    
+    
+    def get_host_score(self, node, time, lease):
+        """Computes the score of a host
+        
+        See class documentation for details on what policy is implemented here.
+        See documentation of HostSelectionPolicy.get_host_score for more details
+        on this method.
+        
+        Arguments:
+        node -- Physical node (the integer identifier used in the slot table)
+        time -- Time at which the lease might be scheduled
+        lease -- Lease that is being scheduled.
+        """             
+        return 1 
+    
+    
+
+class GreedyPolicy(HostSelectionPolicy):
+    """A greedy host selection policy.
+    
+    This policy scores hosts such that hosts with fewer leases already
+    scheduled on them, with the highest capacity, and with fewest leases
+    scheduled in the future are scored highest.
+    
+    """
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """  
+        HostSelectionPolicy.__init__(self, slottable)
+    
+    def get_host_score(self, node, time, lease):
+        """Computes the score of a host
+        
+        See class documentation for details on what policy is implemented here.
+        See documentation of HostSelectionPolicy.get_host_score for more details
+        on this method.
+        
+        Arguments:
+        node -- Physical node (the integer identifier used in the slot table)
+        time -- Time at which the lease might be scheduled
+        lease -- Lease that is being scheduled.
+        """                     
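+        # Illustrative example (hypothetical values): a node with one lease
+        # already scheduled at "time" scores (4 - 1) / 4 = 0.75 on the first
+        # criterion; if it can also hold the lease for its full requested
+        # duration, the total score is 0.5*0.75 + 0.25*1.0 + 0.25*1.0 = 0.875.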
+        aw = self.slottable.get_availability_window(time)
+
+        leases_in_node_horizon = 4
+        
+        # 1st: We prefer nodes with fewer leases to preempt
+        leases_in_node = len(aw.get_leases_at(node, time))
+        if leases_in_node > leases_in_node_horizon:
+            leases_in_node = leases_in_node_horizon
+        
+        # Nodes with fewer leases already scheduled in them get 
+        # higher scores
+        leases_in_node = (leases_in_node_horizon - leases_in_node) / float(leases_in_node_horizon)
+        leases_in_node_score = leases_in_node
+
+
+        # 2nd: we prefer nodes with the highest capacity
+        avail = aw.get_availability_at(node, time)
+        # TODO: normalize into a score
+        high_capacity_score = 1.0
+        
+        # 3rd: we prefer nodes where the current capacity
+        # doesn't change for the longest time.
+        duration = aw.get_capacity_interval(node, time)
+        if duration == None or duration>=lease.duration.requested:
+            duration_score = 1.0
+        else:
+            duration_score = duration.seconds / float(lease.duration.requested.seconds)
+
+        return 0.5 * leases_in_node_score + 0.25 * high_capacity_score + 0.25 * duration_score
+      
\ No newline at end of file

Added: trunk/src/haizea/policies/preemption.py
===================================================================
--- trunk/src/haizea/policies/preemption.py	                        (rev 0)
+++ trunk/src/haizea/policies/preemption.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,82 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+"""This module provides pluggable lease preemption policies. See the documentation
+for haizea.core.scheduler.policy.PreemptabilityPolicy for more details on
+lease preemption policies.
+"""
+
+from haizea.core.leases import Lease
+from haizea.core.scheduler.policy import PreemptabilityPolicy
+
+
+class NoPreemptionPolicy(PreemptabilityPolicy):
+    """Simple preemption policy: preemption is never allowed.
+    """
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """        
+        PreemptabilityPolicy.__init__(self, slottable)
+    
+    def get_lease_preemptability_score(self, preemptor, preemptee, time):
+        """Computes the lease preemptability score
+        
+        See class documentation for details on what policy is implemented here.
+        See documentation of PreemptabilityPolicy.get_lease_preemptability_score
+        for more details on this function.
+        
+        Arguments:
+        preemptor -- Preemptor lease
+        preemptee -- Preemptee lease
+        time -- Time at which preemption would take place
+        """                    
+        return -1
+
+class ARPreemptsEverythingPolicy(PreemptabilityPolicy):
+    """A simple preemption policy where AR leases can always preempt
+    every other type of lease. Given two possible leases to preempt,
+    the "youngest" one is preferred (i.e., the one that was most recently
+    submitted).
+    """    
+    def __init__(self, slottable):
+        """Constructor
+        
+        Argument
+        slottable -- A fully constructed SlotTable
+        """        
+        PreemptabilityPolicy.__init__(self, slottable)
+    
+    def get_lease_preemptability_score(self, preemptor, preemptee, time):
+        """Computes the lease preemptability score
+        
+        See class documentation for details on what policy is implemented here.
+        See documentation of PreemptabilityPolicy.get_lease_preemptability_score
+        for more details on this function.
+        
+        Arguments:
+        preemptor -- Preemptor lease
+        preemptee -- Preemptee lease
+        time -- Time at which preemption would take place
+        """        
+        if preemptor.get_type() == Lease.ADVANCE_RESERVATION and preemptee.get_type() == Lease.BEST_EFFORT:
+            return self._get_aging_factor(preemptee, time)
+        else:
+            return -1
\ No newline at end of file

Modified: trunk/tests/base_config_simulator.conf
===================================================================
--- trunk/tests/base_config_simulator.conf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/base_config_simulator.conf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,19 +1,22 @@
 [general]
-loglevel: VDEBUG
+loglevel: STATUS
 mode: simulated
 lease-preparation: unmanaged
 datafile: /var/tmp/haizea/results.dat
-lease-failure-handling: exit
+lease-failure-handling: exit-raise
 
 [simulation]
 clock: simulated
 starttime: 2006-11-25 13:00:00
-nodes: 2
-resources: CPU,2;Mem,2048;Net (in),100;Net (out),100;Disk,20000
+resources: in-tracefile
 imagetransfer-bandwidth: 100
 #status-message-interval: 15
 
 [scheduling]
+mapper: greedy
+policy-admission: accept-all
+policy-preemption: ar-preempts-everything
+policy-host-selection: greedy
 shutdown-time: 0
 suspendresume-exclusion: local
 wakeup-interval: 3
@@ -24,8 +27,7 @@
 suspension: all
 suspend-rate: 32
 resume-rate: 32
-migration: True
-what-to-migrate: mem+disk
+migration: yes
 
 
 [deploy-imagetransfer]

Modified: trunk/tests/common.py
===================================================================
--- trunk/tests/common.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/common.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -3,8 +3,10 @@
 import threading
 import shutil
 
-from haizea.resourcemanager.configfile import HaizeaConfig
-from haizea.resourcemanager.rm import ResourceManager
+from haizea.core.configfile import HaizeaConfig
+from haizea.core.scheduler.slottable import ResourceReservation, SlotTable
+from haizea.core.leases import Lease, Timestamp, Duration
+from haizea.core.manager import Manager
 
 class BaseTest(object):
     def __init__(self):
@@ -26,48 +28,48 @@
 
     def test_preemption(self):
         self.set_tracefile("preemption.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_preemption_prematureend(self):
         self.set_tracefile("preemption_prematureend.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_preemption_prematureend2(self):
         self.set_tracefile("preemption_prematureend2.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_reservation(self):
         self.set_tracefile("reservation.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_reservation_prematureend(self):
         self.set_tracefile("reservation_prematureend.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_migrate(self):
         self.set_tracefile("migrate.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_reuse1(self):
         self.set_tracefile("reuse1.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_reuse2(self):
         self.set_tracefile("reuse2.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
     def test_wait(self):
         self.set_tracefile("wait.lwf")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         
         
 class BaseOpenNebulaTest(BaseTest):
@@ -76,8 +78,8 @@
 
     def do_test(self, db):
         shutil.copyfile(db, "one.db")
-        rm = ResourceManager(HaizeaConfig(self.config))
-        rm.start()
+        haizea = Manager(HaizeaConfig(self.config))
+        haizea.start()
         os.remove("one.db")
     
 
@@ -86,10 +88,41 @@
         self.haizea_thread = None
 
     def start(self):
-        self.rm = ResourceManager(HaizeaConfig(self.config))
-        self.haizea_thread = threading.Thread(target=self.rm.start)
+        self.haizea = Manager(HaizeaConfig(self.config))
+        self.haizea_thread = threading.Thread(target=self.haizea.start)
         self.haizea_thread.start()
         
     def stop(self):
-        self.rm.stop()
-        self.haizea_thread.join()
\ No newline at end of file
+        self.haizea.stop()
+        self.haizea_thread.join()
+        
+        
+def create_ar_lease(lease_id, submit_time, start, end, preemptible, requested_resources):
+    start = Timestamp(start)
+    duration = Duration(end - start.requested)
+    lease = Lease.create_new(submit_time = submit_time, 
+                  requested_resources = requested_resources, 
+                  start = start, 
+                  duration = duration,
+                  deadline = None, 
+                  preemptible = preemptible, 
+                  software = None)
+    
+    lease.id = lease_id
+    
+    return lease
+
+def create_reservation_from_lease(lease, mapping, slottable):
+    start = lease.start.requested
+    end = start + lease.duration.requested
+    res = dict([(mapping[vnode],r) for vnode,r in lease.requested_resources.items()])
+    rr = ResourceReservation(lease, start, end, res)
+    slottable.add_reservation(rr)
+
+def create_tmp_slottable(slottable):
+    tmp_slottable = SlotTable(slottable.resource_types)
+    tmp_slottable.nodes = slottable.nodes
+    tmp_slottable.reservations_by_start = slottable.reservations_by_start[:]
+    tmp_slottable.reservations_by_end = slottable.reservations_by_end[:]
+
+    return tmp_slottable
\ No newline at end of file
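
For orientation, a minimal self-contained sketch (not taken from the tree) of how the new
module-level helpers at the end of tests/common.py fit together. The capacity values and
timestamps are placeholders in the style of tests/sample_slottables.py, and the
sanity_check() call follows the pattern used in tests/test_mapper.py below:

    from mx import DateTime
    import haizea.common.constants as constants
    from haizea.core.leases import Capacity
    from haizea.core.scheduler.slottable import SlotTable, ResourceTuple
    from common import create_ar_lease, create_reservation_from_lease, create_tmp_slottable

    # One-node slot table with a full-node capacity (100% CPU, 1024 MB).
    slottable = SlotTable([(constants.RES_CPU, ResourceTuple.SINGLE_INSTANCE),
                           (constants.RES_MEM, ResourceTuple.SINGLE_INSTANCE)])
    full = Capacity([constants.RES_CPU, constants.RES_MEM])
    full.set_quantity(constants.RES_CPU, 100)
    full.set_quantity(constants.RES_MEM, 1024)
    FULL_NODE = slottable.create_resource_tuple_from_capacity(full)
    slottable.add_node(1, FULL_NODE)

    # An AR lease on virtual node 1, mapped onto physical node 1.
    lease = create_ar_lease(lease_id = 1,
                            submit_time = DateTime.DateTime(2006,11,25,12,00),
                            start = DateTime.DateTime(2006,11,25,13,00),
                            end   = DateTime.DateTime(2006,11,25,14,00),
                            preemptible = False,
                            requested_resources = {1: FULL_NODE})
    create_reservation_from_lease(lease, {1:1}, slottable)

    # Scratch copy of the slot table for what-if checks, then verify consistency.
    tmp = create_tmp_slottable(slottable)
    passed, node, time, capacity = tmp.sanity_check()
    assert passed
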

Modified: trunk/tests/migrate.lwf
===================================================================
--- trunk/tests/migrate.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/migrate.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,9 +1,72 @@
-# Name: MIGRATE
-# Description: Slightly convoluted combination of requests with
-#   the ultimate purpose of forcing one of the leases (the third
-#   one) to cold-migrate from nodes P3, P4 to nodes P1, P2
-#   (which become available earlier than expected due to the
-#   early end of the first AR lease)
-0    1800 3600 900  2 1 1024 0 foobar1.img 1024
-0    90   3600 3600 2 1 1024 0 foobar2.img 1024
-0    -1   3600 3600 2 1 1024 0 foobar3.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="migrate.lwf">
+  <description>
+    Slightly convoluted combination of requests with
+    the ultimate purpose of forcing one of the leases (the third
+    one) to cold-migrate from nodes P3, P4 to nodes P1, P2
+    (which become available earlier than expected due to the
+    early end of the first AR lease)
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:01:30.00"/>
+        </start>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>  
+    <lease-request arrival="00:00:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:30:00.00"/>
+        </start>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="3" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar3.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>
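
This and the following trace files are rewritten from the old single-line format into the
new XML lease-workload format (presumably loaded by the new haizea.core.leases module).
As a quick, illustrative way to inspect a converted file by hand with the standard
library only (not how Haizea itself reads it):

    import xml.etree.ElementTree as ET

    tree = ET.parse("tests/migrate.lwf")
    for req in tree.findall("lease-requests/lease-request"):
        lease = req.find("lease")
        exact = lease.find("start/exact")
        start = exact.get("time") if exact is not None else "best-effort"
        print "lease", lease.get("id"), \
              "arrival", req.get("arrival"), \
              "start", start, \
              "duration", lease.find("duration").get("time")
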

Modified: trunk/tests/preemption.lwf
===================================================================
--- trunk/tests/preemption.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/preemption.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,6 +1,52 @@
-# Name: PREEMPT
-# Description: A simple trace where an AR lease 
-# preempts a best-effort lease that is already
-# running. 
-0   -1   3600 3600 1 1 1024 0 foobar1.img 1024
-900 1800 1800 1800 4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption">
+  <description>
+	A simple trace where an AR lease preempts a 
+	best-effort lease that is already running. 
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:30:00.00"/>
+        </start>
+        <duration time="00:30:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/preemption_prematureend.lwf
===================================================================
--- trunk/tests/preemption_prematureend.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/preemption_prematureend.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,5 +1,52 @@
-# Name: PREEMPT-PREMATUREEND
-# Description: Same as PREEMPT, but with 
-# premature end time for the best-effort lease.
-0   -1   3600 2700 1 1 1024 0 foobar1.img 1024
-900 1800 900  900  4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption_prematureend">
+  <description>
+	Same as preemption.lwf, but with a premature end time for the best-effort lease.
+  </description>
+
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+    
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:45:00.00"/>
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:30:00.00"/>
+        </start>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/preemption_prematureend2.lwf
===================================================================
--- trunk/tests/preemption_prematureend2.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/preemption_prematureend2.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,5 +1,53 @@
-# Name: PREEMPT-PREMATUREEND2
-# Description: Same as PREEMPT, but with 
-# premature end time for both leases.
-0   -1   3600 2700 1 1 1024 0 foobar1.img 1024
-900 1800 900  600  4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="preemption_prematureend2">
+  <description>
+	Same as preemption.lwf, but with premature end times for both leases.
+  </description>
+
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:45:00.00"/>
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <realduration time="00:10:00.00"/>
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:30:00.00"/>
+        </start>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/reservation.lwf
===================================================================
--- trunk/tests/reservation.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/reservation.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,7 +1,52 @@
-# Name: RESERVATION
-# Description: Two best-effort leases. The first one is a long
-# single-node lease, while the second one is a short wide lease
-# using up all the nodes. With backfilling, the scheduler
-# should make a future reservation for the second lease.
-0   -1   3600 3600 1 1 1024 0 foobar1.img 1024
-900 -1   900  900  4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reservation">
+  <description>
+    Two best-effort leases. The first one is a long
+    single-node lease, while the second one is a short wide lease
+    using up all the nodes. With backfilling, the scheduler
+    should make a future reservation for the second lease.
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/reservation_prematureend.lwf
===================================================================
--- trunk/tests/reservation_prematureend.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/reservation_prematureend.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,8 +1,54 @@
-# Name: RESERVATION-PREMATUREEND
-# Description: Same as RESERVATION. However, the first lease
-# ends prematurely, freeing up resources earlier than
-# expected. The scheduler should reschedule the second lease
-# (for which a reservation was made in the future)
-# since it can now start earlier
-0   -1   3600 1800 1 1 1024 0 foobar1.img 1024
-900 -1   900  900  4 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reservation_prematureend">
+  <description>
+    Same as RESERVATION. However, the first lease
+    ends prematurely, freeing up resources earlier than
+    expected. The scheduler should reschedule the second lease
+    (for which a reservation was made in the future)
+    since it can now start earlier
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:30:00.00"/>
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/reuse1.lwf
===================================================================
--- trunk/tests/reuse1.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/reuse1.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,7 +1,54 @@
-# Name: REUSE-1
-# Description: Two lease requests, both requiring the same
-#   disk image. The second (best-effort) should be able to avoid 
-#   doing an image transfer by reusing the cached image from the 
-#   first (AR) request.
-0     900  900  900 1 1 1024 0 foobar.img 1024
-2700   -1  900  900 1 1 1024 0 foobar.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reuse1">
+  <description>
+	Two lease requests, both requiring the same
+    disk image. The second (best-effort) should be able to avoid 
+    doing an image transfer by reusing the cached image from the 
+    first (AR) request.
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:15:00.00"/>
+        </start>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/tests/reuse2.lwf
===================================================================
--- trunk/tests/reuse2.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/reuse2.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,10 +1,73 @@
-# Name: REUSE-2
-# Description: The first two lease requests are AR leases happening
-#   at the same time, but with different images. The third one is
-#   a best-effort one, using the image from the second AR request
-#   (which should be scheduled on nodes P3, P4). The scheduler should
-#   prefer scheduling the best-effort lease on P3, P4 since it
-#   avoids an image transfer.
-0     900  900  900 2 1 1024 0 foobar1.img 1024
-0     900  900  900 2 1 1024 0 foobar2.img 1024
-2700   -1  900  900 1 1 1024 0 foobar2.img 1024
\ No newline at end of file
+<?xml version="1.0"?>
+<lease-workload name="reuse2">
+  <description>
+	The first two lease requests are AR leases happening
+    at the same time, but with different images. The third one is
+    a best-effort one, using the image from the second AR request
+    (which should be scheduled on nodes P3, P4). The scheduler should
+    prefer scheduling the best-effort lease on P3, P4 since it
+    avoids an image transfer.
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:15:00.00"/>
+        </start>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="00:15:00.00"/>
+        </start>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="3" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Added: trunk/tests/sample_slottables.py
===================================================================
--- trunk/tests/sample_slottables.py	                        (rev 0)
+++ trunk/tests/sample_slottables.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,189 @@
+from haizea.core.leases import Lease, Capacity
+from haizea.core.scheduler.resourcepool import ResourcePoolNode
+from haizea.core.scheduler.slottable import ResourceTuple, SlotTable, ResourceReservation, AvailabilityWindow
+from mx import DateTime
+import haizea.common.constants as constants
+from common import create_ar_lease,  create_reservation_from_lease
+
+T1200 = DateTime.DateTime(2006,11,25,12,00)
+T1255 = DateTime.DateTime(2006,11,25,12,55)
+T1300 = DateTime.DateTime(2006,11,25,13,00)
+T1305 = DateTime.DateTime(2006,11,25,13,05)
+T1315 = DateTime.DateTime(2006,11,25,13,15)
+T1325 = DateTime.DateTime(2006,11,25,13,25)
+T1330 = DateTime.DateTime(2006,11,25,13,30)
+T1335 = DateTime.DateTime(2006,11,25,13,35)
+T1345 = DateTime.DateTime(2006,11,25,13,45)
+T1350 = DateTime.DateTime(2006,11,25,13,50)
+T1355 = DateTime.DateTime(2006,11,25,13,55)
+T1400 = DateTime.DateTime(2006,11,25,14,00)
+T1415 = DateTime.DateTime(2006,11,25,14,15)
+T1420 = DateTime.DateTime(2006,11,25,14,20)
+
+resource_types_with_max_instances = [(constants.RES_CPU,1),(constants.RES_MEM,1)]
+
+def create_capacities(slottable):
+    FULL_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
+    FULL_NODE.set_quantity(constants.RES_CPU, 100)
+    FULL_NODE.set_quantity(constants.RES_MEM, 1024)
+    FULL_NODE = slottable.create_resource_tuple_from_capacity(FULL_NODE)
+    
+    HALF_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
+    HALF_NODE.set_quantity(constants.RES_CPU, 50)
+    HALF_NODE.set_quantity(constants.RES_MEM, 512)
+    HALF_NODE = slottable.create_resource_tuple_from_capacity(HALF_NODE)
+
+    QRTR_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
+    QRTR_NODE.set_quantity(constants.RES_CPU, 25)
+    QRTR_NODE.set_quantity(constants.RES_MEM, 256)
+    QRTR_NODE = slottable.create_resource_tuple_from_capacity(QRTR_NODE)
+
+    EMPT_NODE = slottable.create_empty_resource_tuple()
+    
+    return FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE
+        
+def sample_slottable_1():
+    slottable = SlotTable([(constants.RES_CPU,ResourceTuple.SINGLE_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)])
+    FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(slottable)
+    
+    slottable.add_node(1, FULL_NODE)
+    slottable.add_node(2, FULL_NODE)  
+    slottable.add_node(3, FULL_NODE)  
+    slottable.add_node(4, FULL_NODE)  
+
+    lease1 = Lease.create_new(None,{},None,None,None,1,None)
+    lease1.id = 1
+    res1 = {2: HALF_NODE}
+    rr1_1 = ResourceReservation(lease1, T1315, T1325, res1)
+    rr1_2 = ResourceReservation(lease1, T1325, T1330, res1)
+    slottable.add_reservation(rr1_1)
+    slottable.add_reservation(rr1_2)
+
+    lease2 = Lease.create_new(None,{},None,None,None,2,None)
+    lease2.id = 2
+    res2 = {2: FULL_NODE, 3: FULL_NODE}
+    rr2 = ResourceReservation(lease2, T1330, T1345, res2)
+    slottable.add_reservation(rr2)
+
+    lease3 = Lease.create_new(None,{},None,None,None,1,None)
+    lease3.id = 3
+    res3 = {4: FULL_NODE}
+    rr3_1 = ResourceReservation(lease3, T1330, T1355, res3)
+    rr3_2 = ResourceReservation(lease3, T1355, T1400, res3)
+    slottable.add_reservation(rr3_1)
+    slottable.add_reservation(rr3_2)
+
+    lease4 = Lease.create_new(None,{},None,None,None,1,None)
+    lease4.id = 4
+    res4 = {2: QRTR_NODE, 3: HALF_NODE}
+    rr4 = ResourceReservation(lease4, T1350, T1415, res4)
+    slottable.add_reservation(rr4)
+
+    lease5 = Lease.create_new(None,{},None,None,None,1,None)
+    lease5.id = 5
+    res5 = {2: QRTR_NODE}
+    rr5 = ResourceReservation(lease5, T1350, T1415, res5)
+    slottable.add_reservation(rr5)
+    
+    lease6 = Lease.create_new(None,{},None,None,None,1,None)
+    lease6.id = 6
+    res6 = {1: FULL_NODE}
+    rr6 = ResourceReservation(lease6, T1255, T1305, res6)
+    slottable.add_reservation(rr6)      
+    
+    return slottable, [lease1,lease2,lease3,lease4,lease5,lease6]
+        
+def sample_slottable_2():
+    slottable = SlotTable([(constants.RES_CPU,ResourceTuple.SINGLE_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)])
+    FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(slottable)
+    
+    slottable.add_node(1, FULL_NODE)
+    slottable.add_node(2, FULL_NODE)  
+    slottable.add_node(3, FULL_NODE)  
+    slottable.add_node(4, FULL_NODE)  
+
+    lease1 = create_ar_lease(lease_id = 1,
+                             submit_time = T1200,
+                             start = T1330,
+                             end   = T1345,
+                             preemptible = False,
+                             requested_resources = {1: FULL_NODE, 2: FULL_NODE})
+    create_reservation_from_lease(lease1, {1:1,2:2}, slottable)
+    
+    lease2 = create_ar_lease(lease_id = 2,
+                             submit_time = T1200,
+                             start = T1315,
+                             end   = T1330,
+                             preemptible = False,
+                             requested_resources = {1: HALF_NODE})
+    create_reservation_from_lease(lease2, {1:1}, slottable)
+
+    return slottable, [lease1,lease2]
+
+def sample_slottable_3():
+    slottable = SlotTable([(constants.RES_CPU,ResourceTuple.SINGLE_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)])
+    FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(slottable)
+    
+    slottable.add_node(1, FULL_NODE)
+    slottable.add_node(2, FULL_NODE)  
+    slottable.add_node(3, FULL_NODE)  
+    slottable.add_node(4, FULL_NODE)  
+
+    lease1 = create_ar_lease(lease_id = 1,
+                             submit_time = T1200,
+                             start = T1345,
+                             end   = T1415,
+                             preemptible = False,
+                             requested_resources = {1: FULL_NODE})
+    create_reservation_from_lease(lease1, {1:1}, slottable)
+
+    lease2 = create_ar_lease(lease_id = 2,
+                             submit_time = T1200,
+                             start = T1330,
+                             end   = T1415,
+                             preemptible = False,
+                             requested_resources = {1: HALF_NODE})
+    create_reservation_from_lease(lease2, {1:2}, slottable)
+
+    lease3 = create_ar_lease(lease_id = 3,
+                             submit_time = T1200,
+                             start = T1400,
+                             end   = T1415,
+                             preemptible = False,
+                             requested_resources = {1: HALF_NODE})
+    create_reservation_from_lease(lease3, {1:2}, slottable)
+
+    return slottable, [lease1, lease2, lease3]
+
+def sample_slottable_4():
+    slottable = SlotTable([(constants.RES_CPU,ResourceTuple.SINGLE_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)])
+    FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(slottable)
+    
+    slottable.add_node(1, FULL_NODE)
+
+    lease1 = create_ar_lease(lease_id = 1,
+                             submit_time = T1200,
+                             start = T1315,
+                             end   = T1420,
+                             preemptible = False,
+                             requested_resources = {1: HALF_NODE})
+    create_reservation_from_lease(lease1, {1:1}, slottable)
+
+    lease2 = create_ar_lease(lease_id = 2,
+                             submit_time = T1200,
+                             start = T1330,
+                             end   = T1415,
+                             preemptible = False,
+                             requested_resources = {1: QRTR_NODE})
+    create_reservation_from_lease(lease2, {1:1}, slottable)
+
+    #lease3 = create_ar_lease(lease_id = 3,
+    #                         submit_time = T1200,
+    #                         start = T1345,
+    #                         end   = T1400,
+    #                         preemptible = False,
+    #                         requested_resources = {1: QRTR_NODE})
+    #create_reservation_from_lease(lease1, {1:1}, slottable)
+
+
+    return slottable, [lease1, lease2, None]
\ No newline at end of file

Added: trunk/tests/test_mapper.py
===================================================================
--- trunk/tests/test_mapper.py	                        (rev 0)
+++ trunk/tests/test_mapper.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,241 @@
+from haizea.core.leases import Lease
+from haizea.core.scheduler.resourcepool import ResourcePoolNode
+from haizea.core.scheduler.policy import PolicyManager
+from haizea.core.scheduler.slottable import ResourceTuple, SlotTable, ResourceReservation, AvailabilityWindow
+from haizea.core.scheduler.mapper import GreedyMapper
+from haizea.policies.host_selection import GreedyPolicy
+from mx import DateTime
+import haizea.common.constants as constants
+from sample_slottables import *
+from common import create_tmp_slottable
+
+class SimplePolicy(PolicyManager):
+    def __init__(self, slottable, preemption):
+        PolicyManager.__init__(self, None, None, None)
+        self.preemption = preemption
+        self.host_selection = GreedyPolicy(slottable)
+    
+    def get_lease_preemptability_score(self, preemptor, preemptee, time):
+        if self.preemption:
+            return 1
+        else:
+            return -1
+        
+    def accept_lease(self, lease):
+        return True  
+    
+
+class TestMapper(object):
+    def __init__(self):
+        pass
+   
+    def mapping_assertions(self, start, end, requested_resources, strictend, mustmap, 
+                           maxend = None, can_preempt = []):
+        lease = create_ar_lease(lease_id = 100,
+                                submit_time = T1200,
+                                start = start,
+                                end = end,
+                                preemptible = False,
+                                requested_resources = requested_resources)
+        
+        mapping, actualend, preemptions = self.mapper.map(lease, requested_resources,
+                                                     start, end, 
+                                                     strictend = strictend)
+        
+        if mustmap:
+            assert(mapping != None and actualend != None and preemptions != None)
+            if strictend:
+                assert(end == actualend)
+            else:
+                assert(actualend <= maxend)
+            assert(set(preemptions).issubset(set(can_preempt)))
+
+        else:
+            assert(mapping == None and actualend == None and preemptions == None)
+            return
+        
+        # Sanity check slottable
+        tmp_slottable = create_tmp_slottable(self.slottable)
+        
+        # Add reservation
+        res = dict([(mapping[vnode],r) for vnode,r in requested_resources.items()])
+        rr = ResourceReservation(lease, start, actualend, res)
+        tmp_slottable.add_reservation(rr)
+        
+        if len(preemptions) > 0:
+            passed, node, time, capacity = tmp_slottable.sanity_check()
+            assert(not passed)
+            
+            # Remove preempted reservations
+            remove = set()
+            for rr in [x.value for x in tmp_slottable.reservations_by_start]:
+                if rr.lease in preemptions:
+                    remove.add(rr)
+
+            for rr in remove:
+                tmp_slottable.remove_reservation(rr)
+
+        passed, node, time, capacity = tmp_slottable.sanity_check()
+        assert(passed)
+
+   
+    def test_mapping_nopreemption_strictend(self):
+        self.slottable, leases = sample_slottable_2()
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        policy = SimplePolicy(self.slottable, preemption = False)
+        self.mapper = GreedyMapper(self.slottable, policy)
+        
+        self.mapping_assertions(start = T1300, 
+                                end = T1345,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1300, 
+                                end = T1330,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: HALF_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1300, 
+                                end = T1315,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1345,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1345,
+                                requested_resources = {1: HALF_NODE, 2: HALF_NODE, 3: HALF_NODE, 4: HALF_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1300, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE},
+                                strictend = True,
+                                mustmap = True)
+
+        self.mapping_assertions(start = T1300, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = False)
+
+        self.mapping_assertions(start = T1300, 
+                                end = T1330,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = False)
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1345,
+                                requested_resources = {1: HALF_NODE, 2: HALF_NODE, 3: HALF_NODE, 4: HALF_NODE, 5: HALF_NODE},
+                                strictend = True,
+                                mustmap = False)
+
+    def test_mapping_nopreemption_nostrictend(self):
+        self.slottable, leases = sample_slottable_3()
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        policy = SimplePolicy(self.slottable, preemption = False)
+        self.mapper = GreedyMapper(self.slottable, policy)
+        
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: HALF_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1400)
+        
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1330)
+
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: HALF_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1345)
+        
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: HALF_NODE, 4: HALF_NODE, 5: HALF_NODE, 6: HALF_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1330)
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: HALF_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1345)
+
+        self.mapping_assertions(start = T1345, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: HALF_NODE},
+                                strictend = False,
+                                mustmap = True,
+                                maxend = T1400)
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = False,
+                                mustmap = False)
+        
+        self.mapping_assertions(start = T1400, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: HALF_NODE},
+                                strictend = False,
+                                mustmap = False)
+        
+    def test_mapping_preemption_strictend(self):
+        self.slottable, leases = sample_slottable_3()
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        policy = SimplePolicy(self.slottable, preemption = True)        
+        self.mapper = GreedyMapper(self.slottable, policy)
+
+        self.mapping_assertions(start = T1315, 
+                                end = T1345,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = True,
+                                can_preempt = [leases[1]])
+
+        self.mapping_assertions(start = T1330, 
+                                end = T1345,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = True,
+                                can_preempt = [leases[1]])
+
+        self.mapping_assertions(start = T1345, 
+                                end = T1400,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE},
+                                strictend = True,
+                                mustmap = True,
+                                can_preempt = [leases[0],leases[1]])
+        
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: FULL_NODE, 4: FULL_NODE},
+                                strictend = True,
+                                mustmap = True,
+                                can_preempt = leases)
+                
+        self.mapping_assertions(start = T1315, 
+                                end = T1415,
+                                requested_resources = {1: FULL_NODE, 2: FULL_NODE, 3: HALF_NODE},
+                                strictend = True,
+                                mustmap = True,
+                                can_preempt = leases)

Deleted: trunk/tests/test_opennebula.py
===================================================================
--- trunk/tests/test_opennebula.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/test_opennebula.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,11 +0,0 @@
-from common import BaseOpenNebulaTest
-
-class TestOpenNebula(BaseOpenNebulaTest):
-    def __init__(self):
-        self.config = self.load_configfile("base_config_opennebula.conf")
-   
-    def test_twoleases(self):
-        self.do_test("one-twoleases.db")          
-        
-    def test_threeleases(self):
-        self.do_test("one-threeleases.db")
\ No newline at end of file

Added: trunk/tests/test_slottable.py
===================================================================
--- trunk/tests/test_slottable.py	                        (rev 0)
+++ trunk/tests/test_slottable.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,677 @@
+from haizea.core.leases import Lease, Capacity
+from haizea.core.scheduler.resourcepool import ResourcePoolNode
+from haizea.core.scheduler.slottable import ResourceTuple, SlotTable, ResourceReservation, AvailabilityWindow
+from mx import DateTime
+from sample_slottables import *
+import haizea.common.constants as constants
+
+class TestSlotTable(object):
+    def __init__(self):
+        self.slottable = None
+   
+    def test_resource_tuple(self):
+        
+        multiinst = [(constants.RES_CPU,ResourceTuple.MULTI_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)]
+        
+        self.slottable = SlotTable(multiinst)
+                
+        c1_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
+        c1_100.set_quantity(constants.RES_CPU, 100)
+        c1_100.set_quantity(constants.RES_MEM, 1024)
+        c1_100 = self.slottable.create_resource_tuple_from_capacity(c1_100)
+
+        c2_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
+        c2_100.set_ninstances(constants.RES_CPU, 2)
+        c2_100.set_quantity_instance(constants.RES_CPU, 1, 100)
+        c2_100.set_quantity_instance(constants.RES_CPU, 2, 100)
+        c2_100.set_quantity(constants.RES_MEM, 1024)
+        c2_100 = self.slottable.create_resource_tuple_from_capacity(c2_100)
+
+        c1_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
+        c1_50.set_quantity(constants.RES_CPU, 50)
+        c1_50.set_quantity(constants.RES_MEM, 1024)
+        c1_50 = self.slottable.create_resource_tuple_from_capacity(c1_50)
+
+        c2_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
+        c2_50.set_ninstances(constants.RES_CPU, 2)
+        c2_50.set_quantity_instance(constants.RES_CPU, 1, 50)
+        c2_50.set_quantity_instance(constants.RES_CPU, 2, 50)
+        c2_50.set_quantity(constants.RES_MEM, 1024)
+        c2_50 = self.slottable.create_resource_tuple_from_capacity(c2_50)
+
+        assert c1_100.fits_in(c2_100)
+        assert not c1_100.fits_in(c1_50)
+        assert not c1_100.fits_in(c2_50)
+
+        assert not c2_100.fits_in(c1_100)
+        assert not c2_100.fits_in(c1_50)
+        assert not c2_100.fits_in(c2_50)
+
+        assert c1_50.fits_in(c1_100)
+        assert c1_50.fits_in(c2_100)
+        assert c1_50.fits_in(c2_50)
+
+        assert c2_50.fits_in(c1_100)
+        assert c2_50.fits_in(c2_100)
+        assert not c2_50.fits_in(c1_50)
+        
+        empty = self.slottable.create_empty_resource_tuple()
+        empty.incr(c2_100)
+        assert empty._res[0] == 1024
+        assert empty.multiinst[1] == [100,100]
+ 
+        empty = self.slottable.create_empty_resource_tuple()
+        empty.incr(c1_100)
+        assert empty._res[0] == 1024
+        assert empty.multiinst[1] == [100]
+        empty.incr(c1_100)
+        assert empty._res[0] == 2048
+        assert empty.multiinst[1] == [100,100]
+
+        empty = self.slottable.create_empty_resource_tuple()
+        empty.incr(c1_100)
+        assert empty._res[0] == 1024
+        assert empty.multiinst[1] == [100]
+        empty.incr(c1_50)
+        assert empty._res[0] == 2048
+        assert empty.multiinst[1] == [100,50]
+   
+        c1_100a = ResourceTuple.copy(c1_100)
+        c1_100a.decr(c1_50)
+        assert c1_100a._res[0] == 0
+        assert c1_100a.multiinst[1] == [50]
+
+        c2_100a = ResourceTuple.copy(c2_100)
+        c2_100a._res[0] = 2048
+        c2_100a.decr(c1_50)
+        assert c2_100a._res[0] == 1024
+        assert c2_100a.multiinst[1] == [50,100]
+        c2_100a.decr(c1_50)
+        assert c2_100a._res[0] == 0
+        assert c2_100a.multiinst[1] == [0,100]
+
+        c2_100a = ResourceTuple.copy(c2_100)
+        c2_100a._res[0] = 2048
+        c2_100a.decr(c2_50)
+        assert c2_100a._res[0] == 1024
+        assert c2_100a.multiinst[1] == [0,100]
+        c2_100a.decr(c2_50)
+        assert c2_100a._res[0] == 0
+        assert c2_100a.multiinst[1] == [0,0]
+   
+    def test_slottable(self):
+        def assert_capacity(node, percent):
+            assert node.capacity.get_by_type(constants.RES_CPU) == percent * 100
+            assert node.capacity.get_by_type(constants.RES_MEM) == percent * 1024
+            
+        def reservations_1_assertions():
+            assert not self.slottable.is_empty()
+            nodes = self.slottable.get_availability(T1300)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            nodes = self.slottable.get_availability(T1330)
+            assert_capacity(nodes[1], 1.0)
+            assert_capacity(nodes[2], 1.0)
+            
+        def reservations_2_assertions():
+            nodes = self.slottable.get_availability(T1300)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1300)
+            assert len(rrs) == 1
+            assert rrs[0] == rr1
+    
+            nodes = self.slottable.get_availability(T1330)
+            assert_capacity(nodes[1], 0.75)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1330)
+            assert len(rrs) == 1
+            assert rrs[0] == rr2
+    
+            nodes = self.slottable.get_availability(T1400)
+            assert_capacity(nodes[1], 1.0)
+            assert_capacity(nodes[2], 1.0)
+            rrs = self.slottable.get_reservations_at(T1400)
+            assert len(rrs) == 0
+            
+        def reservations_3_assertions():
+            nodes = self.slottable.get_availability(T1300)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1300)
+            assert len(rrs) == 1
+            assert rrs[0] == rr1
+    
+            nodes = self.slottable.get_availability(T1315)
+            assert_capacity(nodes[1], 0.25)
+            assert_capacity(nodes[2], 0.25)
+            rrs = self.slottable.get_reservations_at(T1315)
+            assert len(rrs) == 2
+            assert rr1 in rrs and rr3 in rrs
+
+            nodes = self.slottable.get_availability(T1330)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.25)
+            rrs = self.slottable.get_reservations_at(T1330)
+            assert len(rrs) == 2
+            assert rr2 in rrs and rr3 in rrs
+
+            nodes = self.slottable.get_availability(T1345)
+            assert_capacity(nodes[1], 0.75)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1345)
+            assert len(rrs) == 1
+            assert rrs[0] == rr2
+    
+            nodes = self.slottable.get_availability(T1400)
+            assert_capacity(nodes[1], 1.0)
+            assert_capacity(nodes[2], 1.0)
+            rrs = self.slottable.get_reservations_at(T1400)
+            assert len(rrs) == 0
+
+        def reservations_4_assertions():
+            nodes = self.slottable.get_availability(T1300)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1300)
+            assert len(rrs) == 1
+            assert rrs[0] == rr1
+    
+            nodes = self.slottable.get_availability(T1315)
+            assert_capacity(nodes[1], 0.25)
+            assert_capacity(nodes[2], 0.25)
+            rrs = self.slottable.get_reservations_at(T1315)
+            assert len(rrs) == 2
+            assert rr1 in rrs and rr3 in rrs
+
+            nodes = self.slottable.get_availability(T1330)
+            assert_capacity(nodes[1], 0)
+            assert_capacity(nodes[2], 0)
+            rrs = self.slottable.get_reservations_at(T1330)
+            assert len(rrs) == 3
+            assert rr4 in rrs and rr2 in rrs and rr3 in rrs
+
+            nodes = self.slottable.get_availability(T1345)
+            assert_capacity(nodes[1], 0.25)
+            assert_capacity(nodes[2], 0.25)
+            rrs = self.slottable.get_reservations_at(T1345)
+            assert len(rrs) == 2
+            assert rr2 in rrs and rr4 in rrs
+    
+            nodes = self.slottable.get_availability(T1400)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.75)
+            rrs = self.slottable.get_reservations_at(T1400)
+            assert len(rrs) == 1
+            assert rrs[0] == rr4
+    
+            nodes = self.slottable.get_availability(T1415)
+            assert_capacity(nodes[1], 1.0)
+            assert_capacity(nodes[2], 1.0)
+            rrs = self.slottable.get_reservations_at(T1415)
+            assert len(rrs) == 0
+        
+        def reservations_5_assertions():
+            nodes = self.slottable.get_availability(T1300)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1300)
+            assert len(rrs) == 1
+            assert rrs[0] == rr1
+    
+            nodes = self.slottable.get_availability(T1315)
+            assert_capacity(nodes[1], 0.25)
+            assert_capacity(nodes[2], 0.25)
+            rrs = self.slottable.get_reservations_at(T1315)
+            assert len(rrs) == 2
+            assert set(rrs) == set([rr1,rr3])
+
+            nodes = self.slottable.get_availability(T1330)
+            assert_capacity(nodes[1], 0)
+            assert_capacity(nodes[2], 0)
+            rrs = self.slottable.get_reservations_at(T1330)
+            assert len(rrs) == 3
+            assert set(rrs) == set([rr2,rr3,rr4])
+
+            nodes = self.slottable.get_availability(T1345)
+            assert_capacity(nodes[1], 0.25)
+            assert_capacity(nodes[2], 0)
+            rrs = self.slottable.get_reservations_at(T1345)
+            assert len(rrs) == 3
+            assert set(rrs) == set([rr2,rr4,rr5])
+    
+            nodes = self.slottable.get_availability(T1400)
+            assert_capacity(nodes[1], 0.5)
+            assert_capacity(nodes[2], 0.5)
+            rrs = self.slottable.get_reservations_at(T1400)
+            assert len(rrs) == 2
+            assert set(rrs) == set([rr4,rr5])
+    
+            nodes = self.slottable.get_availability(T1415)
+            assert_capacity(nodes[1], 1.0)
+            assert_capacity(nodes[2], 1.0)
+            rrs = self.slottable.get_reservations_at(T1415)
+            assert len(rrs) == 0
+            
+            rrs = self.slottable.get_reservations_starting_between(T1300, T1315)
+            assert set(rrs) == set([rr1,rr3])
+            rrs = self.slottable.get_reservations_starting_between(T1300, T1330)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4])
+            rrs = self.slottable.get_reservations_starting_between(T1300, T1345)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_between(T1315, T1330)
+            assert set(rrs) == set([rr2,rr3,rr4])
+            rrs = self.slottable.get_reservations_starting_between(T1315, T1345)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_between(T1330, T1345)
+            assert set(rrs) == set([rr2,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_between(T1400, T1415)
+            assert len(rrs) == 0
+            rrs = self.slottable.get_reservations_starting_between(T1305, T1335)
+            assert set(rrs) == set([rr3,rr2,rr4])
+
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1305)
+            assert len(rrs) == 0
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1315)
+            assert len(rrs) == 0
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1330)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1335)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1345)
+            assert set(rrs) == set([rr1,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1400)
+            assert set(rrs) == set([rr1,rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1300, T1415)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1315)
+            assert len(rrs) == 0
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1330)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1335)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1345)
+            assert set(rrs) == set([rr1,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1400)
+            assert set(rrs) == set([rr1,rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1305, T1415)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1315, T1330)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1315, T1335)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1315, T1345)
+            assert set(rrs) == set([rr1,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1315, T1400)
+            assert set(rrs) == set([rr1,rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1315, T1415)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1330, T1335)
+            assert set(rrs) == set([rr1])
+            rrs = self.slottable.get_reservations_ending_between(T1330, T1345)
+            assert set(rrs) == set([rr1,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1330, T1400)
+            assert set(rrs) == set([rr1,rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1330, T1415)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1335, T1345)
+            assert set(rrs) == set([rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1335, T1400)
+            assert set(rrs) == set([rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1335, T1415)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1345, T1400)
+            assert set(rrs) == set([rr2,rr3])
+            rrs = self.slottable.get_reservations_ending_between(T1345, T1415)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_between(T1400, T1415)
+            assert set(rrs) == set([rr2,rr4,rr5])
+            
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1300)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1305)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1315)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1330)
+            assert set(rrs) == set([rr2,rr4,rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1335)
+            assert set(rrs) == set([rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1345)
+            assert set(rrs) == set([rr5])
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1400)
+            assert len(rrs) == 0
+            rrs = self.slottable.get_reservations_starting_on_or_after(T1415)
+            assert len(rrs) == 0
+            
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1300)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1305)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1315)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1330)
+            assert set(rrs) == set([rr1,rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1335)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1345)
+            assert set(rrs) == set([rr2,rr3,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1400)
+            assert set(rrs) == set([rr2,rr4,rr5])
+            rrs = self.slottable.get_reservations_ending_on_or_after(T1415)
+            assert set(rrs) == set([rr4,rr5])
+            
+            assert self.slottable.get_next_changepoint(T1255) == T1300
+            assert self.slottable.get_next_changepoint(T1300) == T1315
+            assert self.slottable.get_next_changepoint(T1315) == T1330
+            assert self.slottable.get_next_changepoint(T1330) == T1345
+            assert self.slottable.get_next_changepoint(T1335) == T1345
+            assert self.slottable.get_next_changepoint(T1345) == T1400
+            assert self.slottable.get_next_changepoint(T1400) == T1415
+            assert self.slottable.get_next_changepoint(T1415) == None
+            assert self.slottable.get_next_changepoint(T1420) == None
+        
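+        # Build a two-node slot table, add reservations rr1..rr5 one at a time
+        # (re-checking the expected query results after each addition), then
+        # remove them in reverse order and verify the same invariants again.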
+        self.slottable = SlotTable([(constants.RES_CPU,ResourceTuple.SINGLE_INSTANCE),(constants.RES_MEM,ResourceTuple.SINGLE_INSTANCE)])
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        
+        self.slottable.add_node(1, FULL_NODE)
+        self.slottable.add_node(2, FULL_NODE)  
+        
+        assert self.slottable.get_total_capacity(constants.RES_CPU) == 200
+        assert self.slottable.get_total_capacity(constants.RES_MEM) == 2048
+        assert self.slottable.is_empty()
+    
+        res1 = {1: HALF_NODE, 2: HALF_NODE}
+        rr1 = ResourceReservation(None, T1300, T1330, res1)
+        self.slottable.add_reservation(rr1)
+        reservations_1_assertions()
+
+        res2 = {1: QRTR_NODE, 2: HALF_NODE}
+        rr2 = ResourceReservation(None, T1330, T1400, res2)
+        self.slottable.add_reservation(rr2)
+        reservations_2_assertions()
+
+        res3 = {1: QRTR_NODE, 2: QRTR_NODE}
+        rr3 = ResourceReservation(None, T1315, T1345, res3)
+        self.slottable.add_reservation(rr3)
+        reservations_3_assertions()
+
+        res4 = {1: HALF_NODE, 2: QRTR_NODE}
+        rr4 = ResourceReservation(None, T1330, T1415, res4)
+        self.slottable.add_reservation(rr4)
+        reservations_4_assertions()
+
+        res5 = {2: QRTR_NODE}
+        rr5 = ResourceReservation(None, T1345, T1415, res5)
+        self.slottable.add_reservation(rr5)
+        reservations_5_assertions()
+
+        self.slottable.remove_reservation(rr5)
+        reservations_4_assertions()
+        self.slottable.remove_reservation(rr4)
+        reservations_3_assertions()
+        self.slottable.remove_reservation(rr3)
+        reservations_2_assertions()
+        self.slottable.remove_reservation(rr2)
+        reservations_1_assertions()
+        self.slottable.remove_reservation(rr1)
+        
+        assert self.slottable.is_empty()
+        
+    def test_availabilitywindow(self):
+        def avail_node_assertions(time, avail, node_id, leases, next_cp):
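+            # Helper: verify that, at the given changepoint, the node reports the
+            # expected available capacity, the expected leases scheduled on it
+            # (and how much capacity preempting each would yield), and the
+            # expected next changepoint.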
+            node = aw.changepoints[time].nodes[node_id]
+            nleases = len(leases)
+            assert(node.available == avail)
+            
+            assert(len(node.leases)==nleases)
+            for l in leases:
+                assert(l in node.leases)
+            assert(len(node.available_if_preempting) == nleases)
+            for l in leases:
+                assert(node.available_if_preempting[l] == leases[l])
+            assert(node.next_cp == next_cp)
+            if next_cp != None:
+                assert(node.next_nodeavail == aw.changepoints[next_cp].nodes[node_id])
+        
+        self.slottable, leases = sample_slottable_1()        
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        
+        lease1,lease2,lease3,lease4,lease5,lease6 = leases
+        
+        aw = self.slottable.get_availability_window(T1300)
+        
+        # TODO: Factor out data into a data structure so we can do more
+        # elaborate assertions
+        
+        # 13:00
+        avail_node_assertions(time = T1300, avail = EMPT_NODE, node_id = 1, 
+                              leases = {lease6:FULL_NODE}, next_cp = T1305)
+        avail_node_assertions(time = T1300, avail = FULL_NODE, node_id = 2, 
+                              leases = {}, next_cp = T1315)
+        avail_node_assertions(time = T1300, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = T1330)
+        avail_node_assertions(time = T1300, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = T1330)
+
+
+        # 13:05
+        avail_node_assertions(time = T1305, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1305, avail = FULL_NODE, node_id = 2, 
+                              leases = {}, next_cp = T1315)
+        avail_node_assertions(time = T1305, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = T1330)
+        avail_node_assertions(time = T1305, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = T1330)
+
+        # 13:15
+        avail_node_assertions(time = T1315, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1315, avail = HALF_NODE, node_id = 2, 
+                              leases = {lease1:HALF_NODE}, next_cp = T1330)
+        avail_node_assertions(time = T1315, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = T1330)
+        avail_node_assertions(time = T1315, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = T1330)
+
+        # 13:25
+        avail_node_assertions(time = T1325, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1325, avail = HALF_NODE, node_id = 2, 
+                              leases = {lease1:HALF_NODE}, next_cp = T1330)
+        avail_node_assertions(time = T1325, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = T1330)
+        avail_node_assertions(time = T1325, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = T1330)
+
+        # 13:30
+        avail_node_assertions(time = T1330, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1330, avail = EMPT_NODE, node_id = 2, 
+                              leases = {lease2:FULL_NODE}, next_cp = T1345)
+        avail_node_assertions(time = T1330, avail = EMPT_NODE, node_id = 3, 
+                              leases = {lease2:FULL_NODE}, next_cp = T1345)
+        avail_node_assertions(time = T1330, avail = EMPT_NODE, node_id = 4, 
+                              leases = {lease3:FULL_NODE}, next_cp = T1400)
+
+        # 13:45
+        avail_node_assertions(time = T1345, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1345, avail = FULL_NODE, node_id = 2, 
+                              leases = {}, next_cp = T1350)
+        avail_node_assertions(time = T1345, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = T1350)
+        avail_node_assertions(time = T1345, avail = EMPT_NODE, node_id = 4, 
+                              leases = {lease3:FULL_NODE}, next_cp = T1400)
+
+        # 13:50
+        avail_node_assertions(time = T1350, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1350, avail = HALF_NODE, node_id = 2, 
+                              leases = {lease4:QRTR_NODE,lease5:QRTR_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1350, avail = HALF_NODE, node_id = 3, 
+                              leases = {lease4:HALF_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1350, avail = EMPT_NODE, node_id = 4, 
+                              leases = {lease3:FULL_NODE}, next_cp = T1400)
+
+        # 13:55
+        avail_node_assertions(time = T1355, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1355, avail = HALF_NODE, node_id = 2, 
+                              leases = {lease4:QRTR_NODE,lease5:QRTR_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1355, avail = HALF_NODE, node_id = 3, 
+                              leases = {lease4:HALF_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1355, avail = EMPT_NODE, node_id = 4, 
+                              leases = {lease3:FULL_NODE}, next_cp = T1400)
+
+        # 14:00
+        avail_node_assertions(time = T1400, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1400, avail = HALF_NODE, node_id = 2, 
+                              leases = {lease4:QRTR_NODE,lease5:QRTR_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1400, avail = HALF_NODE, node_id = 3, 
+                              leases = {lease4:HALF_NODE}, next_cp = T1415)
+        avail_node_assertions(time = T1400, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = None)
+
+        # 14:15
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 2, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = None)
+        
+        avail = aw.get_availability_at_node(T1300, 1)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == EMPT_NODE)
+        assert(avail.avail_list[0].until     == None)
+
+        avail = aw.get_availability_at_node(T1300, 2)
+        assert(len(avail.avail_list)==3)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1315)
+        assert(avail.avail_list[1].available == HALF_NODE)
+        assert(avail.avail_list[1].until     == T1330)
+        assert(avail.avail_list[2].available == EMPT_NODE)
+        assert(avail.avail_list[2].until     == None)
+
+        avail = aw.get_availability_at_node(T1300, 3)
+        assert(len(avail.avail_list)==2)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1330)
+        assert(avail.avail_list[1].available == EMPT_NODE)
+        assert(avail.avail_list[1].until     == None)
+
+        avail = aw.get_availability_at_node(T1300, 4)
+        assert(len(avail.avail_list)==2)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1330)
+        assert(avail.avail_list[1].available == EMPT_NODE)
+        assert(avail.avail_list[1].until     == None)
+
+
+        avail = aw.get_availability_at_node(T1330, 1)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None)
+
+        avail = aw.get_availability_at_node(T1330, 2)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == EMPT_NODE)
+        assert(avail.avail_list[0].until     == None)
+
+        avail = aw.get_availability_at_node(T1330, 3)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == EMPT_NODE)
+        assert(avail.avail_list[0].until     == None)
+
+        avail = aw.get_availability_at_node(T1330, 4)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == EMPT_NODE)
+        assert(avail.avail_list[0].until     == None)
+        
+        
+        avail = aw.get_availability_at_node(T1345, 1)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None)
+
+        avail = aw.get_availability_at_node(T1345, 2)
+        assert(len(avail.avail_list)==2)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1350)
+        assert(avail.avail_list[1].available == HALF_NODE)
+        assert(avail.avail_list[1].until     == None)
+
+        avail = aw.get_availability_at_node(T1345, 3)
+        assert(len(avail.avail_list)==2)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1350)
+        assert(avail.avail_list[1].available == HALF_NODE)
+        assert(avail.avail_list[1].until     == None)
+
+        avail = aw.get_availability_at_node(T1345, 4)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == EMPT_NODE)
+        assert(avail.avail_list[0].until     == None)        
+
+        self.slottable.awcache = None
+        aw = self.slottable.get_availability_window(T1415)
+        # 14:15
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 2, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 3, 
+                              leases = {}, next_cp = None)
+        avail_node_assertions(time = T1415, avail = FULL_NODE, node_id = 4, 
+                              leases = {}, next_cp = None)
+
+        avail = aw.get_availability_at_node(T1415, 1)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None)
+        
+        avail = aw.get_availability_at_node(T1415, 2)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None)
+        
+        avail = aw.get_availability_at_node(T1415, 3)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None)
+        
+        avail = aw.get_availability_at_node(T1415, 4)
+        assert(len(avail.avail_list)==1)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == None) 
+        
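+        # Repeat the availability-window checks against a different sample
+        # slot table, where node 1's capacity decreases in steps over time.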
+        self.slottable, leases = sample_slottable_4()        
+        FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE = create_capacities(self.slottable)
+        
+        lease1,lease2,lease3 = leases
+        aw = self.slottable.get_availability_window(T1300)
+        
+        # 13:00, 13:15, 13:30
+        avail_node_assertions(time = T1300, avail = FULL_NODE, node_id = 1, 
+                              leases = {}, next_cp = T1315)
+        avail_node_assertions(time = T1315, avail = HALF_NODE, node_id = 1, 
+                              leases = {lease1:HALF_NODE}, next_cp = T1330)
+        avail_node_assertions(time = T1330, avail = QRTR_NODE, node_id = 1, 
+                              leases = {lease1:HALF_NODE,lease2:QRTR_NODE}, next_cp = T1415)
+        
+        avail = aw.get_availability_at_node(T1300, 1)
+        assert(len(avail.avail_list)==3)
+        assert(avail.avail_list[0].available == FULL_NODE)
+        assert(avail.avail_list[0].until     == T1315)
+        assert(avail.avail_list[1].available == HALF_NODE)
+        assert(avail.avail_list[1].until     == T1330)
+        assert(avail.avail_list[2].available == QRTR_NODE)
+        assert(avail.avail_list[2].until     == None)
+        
+                
\ No newline at end of file

Modified: trunk/tests/test_xmlrpc.py
===================================================================
--- trunk/tests/test_xmlrpc.py	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/test_xmlrpc.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,5 +1,6 @@
 from common import BaseXMLRPCTest
 from mx.DateTime import TimeDelta
+from haizea.common.utils import reset_lease_id_counter
 import haizea.cli.rpc_commands as rpc
 import time
 
@@ -7,6 +8,7 @@
     def __init__(self):
         self.config = self.load_configfile("base_config_simulator.conf")
         self.config.set("simulation", "clock", "real")
+        self.config.set("simulation", "resources", "4  CPU:100 Memory:1024")
         self.config.set("scheduling", "backfilling", "off")
         self.config.set("scheduling", "non-schedulable-interval", "2")
    
@@ -21,6 +23,7 @@
         self.stop()
         
     def test_ar(self):
+        reset_lease_id_counter()
         self.start()
         cmd = rpc.haizea_request_lease(["-D", "-t", "+00:01:00", "-d", "00:10:00", "-n", "1", "--non-preemptible", 
                                         "-c", "1", "-m", "512", "-i", "foobar.img", "-z", "600"])
@@ -35,6 +38,7 @@
         self.stop()
     
     def test_be(self):
+        reset_lease_id_counter()        
         self.start()
         cmd = rpc.haizea_request_lease(["-D", "-t", "best_effort", "-d", "00:10:00", "-n", "4", "--non-preemptible", 
                                         "-c", "1", "-m", "512", "-i", "foobar.img", "-z", "600"])
@@ -46,18 +50,19 @@
         cmd = rpc.haizea_list_leases(["-D"])
         cmd.run()
         # Cancel the queued request
-        cmd = rpc.haizea_cancel_lease(["-D", "-l", "3"])
+        cmd = rpc.haizea_cancel_lease(["-D", "-l", "2"])
         cmd.run()
         cmd = rpc.haizea_list_leases(["-D"])
         cmd.run()
         # Cancel the running request
-        cmd = rpc.haizea_cancel_lease(["-D", "-l", "2"])
+        cmd = rpc.haizea_cancel_lease(["-D", "-l", "1"])
         cmd.run()
         cmd = rpc.haizea_list_leases(["-D"])
         cmd.run()
         self.stop()
     
     def test_im(self):
+        reset_lease_id_counter()  
         self.start()
         cmd = rpc.haizea_request_lease(["-D", "-t", "now", "-d", "00:10:00", "-n", "1", "--non-preemptible", 
                                         "-c", "1", "-m", "512", "-i", "foobar.img", "-z", "600"])
@@ -65,7 +70,7 @@
         time.sleep(5)
         cmd = rpc.haizea_list_leases(["-D"])
         cmd.run()
-        cmd = rpc.haizea_cancel_lease(["-D", "-l", "4"])
+        cmd = rpc.haizea_cancel_lease(["-D", "-l", "1"])
         cmd.run()
         cmd = rpc.haizea_list_leases(["-D"])
         cmd.run()

Modified: trunk/tests/wait.lwf
===================================================================
--- trunk/tests/wait.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/tests/wait.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,11 +1,140 @@
-# Name: WAIT
-# Description: All best-effort requests, all of which (except the first)
-# will have to wait in the queue before starting.
-#0    -1   900  900  4 1 1024 0 foobar1.img 1024
-#0    -1   900  900  4 1 1024 0 foobar2.img 1024
-#0    -1   900  900  4 1 1024 0 foobar3.img 1024
-#0    -1   900  900  4 1 1024 0 foobar4.img 1024
-#0    -1   900  900  4 1 1024 0 foobar5.img 1024
-#0    -1   900  900  4 1 1024 0 foobar6.img 1024
-#0    -1   900  900  4 1 1024 0 foobar7.img 1024
-#0    -1   900  900  4 1 1024 0 foobar8.img 1024
+<?xml version="1.0"?>
+<lease-workload name="wait">
+  <description>
+    All best-effort requests, all of which (except the first)
+    will have to wait in the queue before starting.
+  </description>
+  
+  <site>
+    <resource-types names="CPU Memory"/>
+    <nodes>
+      <node-set numnodes="4">
+        <res type="CPU" amount="100"/>
+        <res type="Memory" amount="1024"/>
+      </node-set>
+    </nodes>
+  </site>
+  
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar1.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar2.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="3" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar3.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="4" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar4.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="5" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar5.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="6" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar6.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="7" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar7.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="8" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="foobar8.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/traces/multi/inj1.lwf
===================================================================
--- trunk/traces/multi/inj1.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/traces/multi/inj1.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,13 +1,227 @@
-3600 4500 450 450 2 1 1024 0 foobar.img 1024
-7200 8100 450 450 2 1 1024 0 foobar.img 1024
-10800 11700 450 450 1 1 1024 0 foobar.img 1024
-14400 15300 450 450 2 1 1024 0 foobar.img 1024
-18000 18900 450 450 1 1 1024 0 foobar.img 1024
-21600 22500 450 450 2 1 1024 0 foobar.img 1024
-25200 26100 450 450 2 1 1024 0 foobar.img 1024
-28800 29700 450 450 2 1 1024 0 foobar.img 1024
-32400 33300 450 450 1 1 1024 0 foobar.img 1024
-36000 36900 450 450 1 1 1024 0 foobar.img 1024
-39600 40500 450 450 1 1 1024 0 foobar.img 1024
-43200 44100 450 450 1 1 1024 0 foobar.img 1024
-46800 47700 450 450 1 1 1024 0 foobar.img 1024
+<?xml version="1.0"?>
+<lease-workload name="inj1.lwf.old">
+  <description/>
+  <lease-requests>
+    <lease-request arrival="01:00:00.00">
+      <lease id="1" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="01:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="02:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="3" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="03:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="4" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="04:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="5" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="05:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="6" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="06:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="7" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="07:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="8" preemptible="false">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="08:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="09:00:00.00">
+      <lease id="9" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="09:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="10:00:00.00">
+      <lease id="10" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="10:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="11:00:00.00">
+      <lease id="11" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="11:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="12:00:00.00">
+      <lease id="12" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="12:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="13:00:00.00">
+      <lease id="13" preemptible="false">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="13:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/traces/multi/inj2.lwf
===================================================================
--- trunk/traces/multi/inj2.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/traces/multi/inj2.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,13 +1,227 @@
-3600 4500 450 450 4 1 1024 0 foobar.img 1024
-7200 8100 450 450 4 1 1024 0 foobar.img 1024
-10800 11700 450 450 4 1 1024 0 foobar.img 1024
-14400 15300 450 450 4 1 1024 0 foobar.img 1024
-18000 18900 450 450 4 1 1024 0 foobar.img 1024
-21600 22500 450 450 4 1 1024 0 foobar.img 1024
-25200 26100 450 450 4 1 1024 0 foobar.img 1024
-28800 29700 450 450 4 1 1024 0 foobar.img 1024
-32400 33300 450 450 4 1 1024 0 foobar.img 1024
-36000 36900 450 450 4 1 1024 0 foobar.img 1024
-39600 40500 450 450 4 1 1024 0 foobar.img 1024
-43200 44100 450 450 4 1 1024 0 foobar.img 1024
-46800 47700 450 450 4 1 1024 0 foobar.img 1024
+<?xml version="1.0"?>
+<lease-workload name="inj2.lwf.old">
+  <description/>
+  <lease-requests>
+    <lease-request arrival="01:00:00.00">
+      <lease id="1" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="01:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="2" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="02:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="3" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="03:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="4" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="04:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="5" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="05:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="6" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="06:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="7" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="07:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="8" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="08:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="09:00:00.00">
+      <lease id="9" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="09:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="10:00:00.00">
+      <lease id="10" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="10:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="11:00:00.00">
+      <lease id="11" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="11:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="12:00:00.00">
+      <lease id="12" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="12:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="13:00:00.00">
+      <lease id="13" preemptible="false">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start>
+          <exact time="13:15:00.00"/>
+        </start>
+        <duration time="00:07:30.00"/>
+        <software>
+          <disk-image id="foobar.img" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Modified: trunk/traces/multi/withoutprematureend.lwf
===================================================================
--- trunk/traces/multi/withoutprematureend.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/traces/multi/withoutprematureend.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,139 +1,2091 @@
-0 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-0 -1 1800 1800 1 1 1024 0 'foobar.img' 1024
-0 -1 900 900 1 1 1024 0 'foobar.img' 1024
-0 -1 900 900 2 1 1024 0 'foobar.img' 1024
-900 -1 900 900 3 1 1024 0 'foobar.img' 1024
-900 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-900 -1 900 900 1 1 1024 0 'foobar.img' 1024
-900 -1 900 900 1 1 1024 0 'foobar.img' 1024
-1800 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-1800 -1 900 900 2 1 1024 0 'foobar.img' 1024
-1800 -1 900 900 3 1 1024 0 'foobar.img' 1024
-1800 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-2700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-2700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-2700 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-2700 -1 900 900 2 1 1024 0 'foobar.img' 1024
-3600 -1 900 900 3 1 1024 0 'foobar.img' 1024
-3600 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-3600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-3600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-4500 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-4500 -1 900 900 2 1 1024 0 'foobar.img' 1024
-4500 -1 900 900 3 1 1024 0 'foobar.img' 1024
-4500 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-5400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-5400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-5400 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-5400 -1 900 900 2 1 1024 0 'foobar.img' 1024
-6300 -1 900 900 3 1 1024 0 'foobar.img' 1024
-6300 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-6300 -1 900 900 1 1 1024 0 'foobar.img' 1024
-6300 -1 900 900 1 1 1024 0 'foobar.img' 1024
-7200 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-7200 -1 900 900 2 1 1024 0 'foobar.img' 1024
-7200 -1 900 900 3 1 1024 0 'foobar.img' 1024
-7200 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-8100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-8100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-8100 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-8100 -1 900 900 2 1 1024 0 'foobar.img' 1024
-9000 -1 900 900 3 1 1024 0 'foobar.img' 1024
-9000 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-9000 -1 900 900 1 1 1024 0 'foobar.img' 1024
-9000 -1 900 900 1 1 1024 0 'foobar.img' 1024
-9900 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-9900 -1 900 900 2 1 1024 0 'foobar.img' 1024
-9900 -1 900 900 3 1 1024 0 'foobar.img' 1024
-9900 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-10800 -1 900 900 1 1 1024 0 'foobar.img' 1024
-10800 -1 900 900 1 1 1024 0 'foobar.img' 1024
-10800 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-10800 -1 900 900 2 1 1024 0 'foobar.img' 1024
-11700 -1 900 900 3 1 1024 0 'foobar.img' 1024
-11700 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-11700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-11700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-12600 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-12600 -1 900 900 2 1 1024 0 'foobar.img' 1024
-12600 -1 900 900 3 1 1024 0 'foobar.img' 1024
-12600 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-13500 -1 900 900 1 1 1024 0 'foobar.img' 1024
-13500 -1 900 900 1 1 1024 0 'foobar.img' 1024
-13500 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-13500 -1 900 900 2 1 1024 0 'foobar.img' 1024
-14400 -1 900 900 3 1 1024 0 'foobar.img' 1024
-14400 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-14400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-14400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-15300 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-15300 -1 900 900 2 1 1024 0 'foobar.img' 1024
-15300 -1 900 900 3 1 1024 0 'foobar.img' 1024
-15300 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-16200 -1 900 900 1 1 1024 0 'foobar.img' 1024
-16200 -1 900 900 1 1 1024 0 'foobar.img' 1024
-16200 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-16200 -1 900 900 2 1 1024 0 'foobar.img' 1024
-17100 -1 900 900 3 1 1024 0 'foobar.img' 1024
-17100 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-17100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-17100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-18000 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-18000 -1 900 900 2 1 1024 0 'foobar.img' 1024
-18000 -1 900 900 3 1 1024 0 'foobar.img' 1024
-18000 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-18900 -1 900 900 1 1 1024 0 'foobar.img' 1024
-18900 -1 900 900 1 1 1024 0 'foobar.img' 1024
-18900 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-18900 -1 900 900 2 1 1024 0 'foobar.img' 1024
-19800 -1 900 900 3 1 1024 0 'foobar.img' 1024
-19800 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-19800 -1 900 900 1 1 1024 0 'foobar.img' 1024
-19800 -1 900 900 1 1 1024 0 'foobar.img' 1024
-20700 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-20700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-20700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-20700 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-21600 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 900 1 1 1024 0 'foobar.img' 1024
-22500 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 900 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 900 1 1 1024 0 'foobar.img' 1024
-23400 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-23400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-23400 -1 900 900 1 1 1024 0 'foobar.img' 1024
-23400 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-24300 -1 900 900 4 1 1024 0 'foobar.img' 1024
-24300 -1 900 900 4 1 1024 0 'foobar.img' 1024
-24300 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-24300 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 900 1 1 1024 0 'foobar.img' 1024
-25200 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 900 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 900 1 1 1024 0 'foobar.img' 1024
-26100 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-26100 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-26100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-26100 -1 900 900 1 1 1024 0 'foobar.img' 1024
-27000 -1 900 900 1 1 1024 0 'foobar.img' 1024
-27000 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-27000 -1 900 900 1 1 1024 0 'foobar.img' 1024
-27000 -1 900 900 1 1 1024 0 'foobar.img' 1024
-27900 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-27900 -1 900 900 1 1 1024 0 'foobar.img' 1024
-27900 -1 900 900 4 1 1024 0 'foobar.img' 1024
-27900 -1 3600 3600 4 1 1024 0 'foobar.img' 1024
-28800 -1 900 900 4 1 1024 0 'foobar.img' 1024
-28800 -1 900 900 1 1 1024 0 'foobar.img' 1024
-28800 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-28800 -1 900 900 2 1 1024 0 'foobar.img' 1024
-29700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-29700 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-29700 -1 900 900 1 1 1024 0 'foobar.img' 1024
-29700 -1 900 900 4 1 1024 0 'foobar.img' 1024
-30600 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
-30600 -1 900 900 1 1 1024 0 'foobar.img' 1024
-30600 -1 3600 3600 1 1 1024 0 'foobar.img' 1024
+<?xml version="1.0"?>
+<lease-workload name="withoutprematureend.lwf.old">
+  <description/>
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:30:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="3" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <lease id="4" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="5" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="6" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="7" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <lease id="8" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <lease id="9" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <lease id="10" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <lease id="11" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <lease id="12" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="13" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="14" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="15" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <lease id="16" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <lease id="17" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <lease id="18" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <lease id="19" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <lease id="20" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <lease id="21" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <lease id="22" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <lease id="23" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <lease id="24" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <lease id="25" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <lease id="26" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <lease id="27" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <lease id="28" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <lease id="29" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <lease id="30" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <lease id="31" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <lease id="32" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="33" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="34" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="35" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <lease id="36" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <lease id="37" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <lease id="38" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <lease id="39" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <lease id="40" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <lease id="41" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <lease id="42" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <lease id="43" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <lease id="44" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <lease id="45" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <lease id="46" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <lease id="47" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <lease id="48" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="49" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="50" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="51" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <lease id="52" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <lease id="53" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <lease id="54" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <lease id="55" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <lease id="56" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <lease id="57" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <lease id="58" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <lease id="59" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <lease id="60" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <lease id="61" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <lease id="62" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <lease id="63" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <lease id="64" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="65" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="66" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="67" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <lease id="68" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <lease id="69" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <lease id="70" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <lease id="71" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <lease id="72" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <lease id="73" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <lease id="74" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <lease id="75" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <lease id="76" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <lease id="77" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <lease id="78" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <lease id="79" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <lease id="80" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="81" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="82" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="83" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <lease id="84" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <lease id="85" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <lease id="86" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <lease id="87" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <lease id="88" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <lease id="89" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <lease id="90" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <lease id="91" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <lease id="92" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <lease id="93" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <lease id="94" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <lease id="95" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <lease id="96" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="97" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="98" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="99" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <lease id="100" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <lease id="101" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <lease id="102" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <lease id="103" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <lease id="104" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <lease id="105" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <lease id="106" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <lease id="107" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <lease id="108" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <lease id="109" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <lease id="110" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <lease id="111" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <lease id="112" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="113" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="114" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="115" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <lease id="116" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <lease id="117" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <lease id="118" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <lease id="119" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <lease id="120" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <lease id="121" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <lease id="122" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <lease id="123" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <lease id="124" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <lease id="125" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <lease id="126" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <lease id="127" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <lease id="128" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="129" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="130" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="131" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <lease id="132" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <lease id="133" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <lease id="134" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <lease id="135" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <lease id="136" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <lease id="137" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <lease id="138" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <lease id="139" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

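For reference only (not part of this commit): a minimal sketch of how the lease-request entries in the XML trace above could be inspected with Python's standard xml.etree.ElementTree. The file path below is a placeholder for any of the XML .lwf traces in this changeset, and the snippet is an illustration of the format rather than Haizea's own loading code.

    # Illustrative sketch, not committed code: walk an XML .lwf trace and print
    # arrival time, lease id, node count and duration for each lease request.
    import xml.etree.ElementTree as ET

    WORKLOAD_FILE = "path/to/workload.lwf"  # placeholder path (assumption)

    tree = ET.parse(WORKLOAD_FILE)
    root = tree.getroot()  # <lease-workload>
    for req in root.findall("lease-requests/lease-request"):
        lease = req.find("lease")
        node_set = lease.find("nodes/node-set")
        duration = lease.find("duration").get("time")
        print(req.get("arrival"), lease.get("id"),
              node_set.get("numnodes"), duration)
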
Modified: trunk/traces/multi/withprematureend.lwf
===================================================================
--- trunk/traces/multi/withprematureend.lwf	2009-08-05 10:09:26 UTC (rev 631)
+++ trunk/traces/multi/withprematureend.lwf	2009-08-05 11:12:04 UTC (rev 632)
@@ -1,139 +1,2230 @@
-0 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-0 -1 1800 600 1 1 1024 0 'foobar.img' 1024
-0 -1 900 300 1 1 1024 0 'foobar.img' 1024
-0 -1 900 300 2 1 1024 0 'foobar.img' 1024
-900 -1 900 300 3 1 1024 0 'foobar.img' 1024
-900 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-900 -1 900 300 1 1 1024 0 'foobar.img' 1024
-900 -1 900 300 1 1 1024 0 'foobar.img' 1024
-1800 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-1800 -1 900 300 2 1 1024 0 'foobar.img' 1024
-1800 -1 900 300 3 1 1024 0 'foobar.img' 1024
-1800 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-2700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-2700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-2700 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-2700 -1 900 300 2 1 1024 0 'foobar.img' 1024
-3600 -1 900 300 3 1 1024 0 'foobar.img' 1024
-3600 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-3600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-3600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-4500 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-4500 -1 900 300 2 1 1024 0 'foobar.img' 1024
-4500 -1 900 300 3 1 1024 0 'foobar.img' 1024
-4500 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-5400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-5400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-5400 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-5400 -1 900 300 2 1 1024 0 'foobar.img' 1024
-6300 -1 900 300 3 1 1024 0 'foobar.img' 1024
-6300 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-6300 -1 900 300 1 1 1024 0 'foobar.img' 1024
-6300 -1 900 300 1 1 1024 0 'foobar.img' 1024
-7200 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-7200 -1 900 300 2 1 1024 0 'foobar.img' 1024
-7200 -1 900 300 3 1 1024 0 'foobar.img' 1024
-7200 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-8100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-8100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-8100 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-8100 -1 900 300 2 1 1024 0 'foobar.img' 1024
-9000 -1 900 300 3 1 1024 0 'foobar.img' 1024
-9000 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-9000 -1 900 300 1 1 1024 0 'foobar.img' 1024
-9000 -1 900 300 1 1 1024 0 'foobar.img' 1024
-9900 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-9900 -1 900 300 2 1 1024 0 'foobar.img' 1024
-9900 -1 900 300 3 1 1024 0 'foobar.img' 1024
-9900 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-10800 -1 900 300 1 1 1024 0 'foobar.img' 1024
-10800 -1 900 300 1 1 1024 0 'foobar.img' 1024
-10800 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-10800 -1 900 300 2 1 1024 0 'foobar.img' 1024
-11700 -1 900 300 3 1 1024 0 'foobar.img' 1024
-11700 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-11700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-11700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-12600 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-12600 -1 900 300 2 1 1024 0 'foobar.img' 1024
-12600 -1 900 300 3 1 1024 0 'foobar.img' 1024
-12600 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-13500 -1 900 300 1 1 1024 0 'foobar.img' 1024
-13500 -1 900 300 1 1 1024 0 'foobar.img' 1024
-13500 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-13500 -1 900 300 2 1 1024 0 'foobar.img' 1024
-14400 -1 900 300 3 1 1024 0 'foobar.img' 1024
-14400 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-14400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-14400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-15300 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-15300 -1 900 300 2 1 1024 0 'foobar.img' 1024
-15300 -1 900 300 3 1 1024 0 'foobar.img' 1024
-15300 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-16200 -1 900 300 1 1 1024 0 'foobar.img' 1024
-16200 -1 900 300 1 1 1024 0 'foobar.img' 1024
-16200 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-16200 -1 900 300 2 1 1024 0 'foobar.img' 1024
-17100 -1 900 300 3 1 1024 0 'foobar.img' 1024
-17100 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-17100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-17100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-18000 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-18000 -1 900 300 2 1 1024 0 'foobar.img' 1024
-18000 -1 900 300 3 1 1024 0 'foobar.img' 1024
-18000 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-18900 -1 900 300 1 1 1024 0 'foobar.img' 1024
-18900 -1 900 300 1 1 1024 0 'foobar.img' 1024
-18900 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-18900 -1 900 300 2 1 1024 0 'foobar.img' 1024
-19800 -1 900 300 3 1 1024 0 'foobar.img' 1024
-19800 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-19800 -1 900 300 1 1 1024 0 'foobar.img' 1024
-19800 -1 900 300 1 1 1024 0 'foobar.img' 1024
-20700 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-20700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-20700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-20700 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-21600 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-21600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 300 1 1 1024 0 'foobar.img' 1024
-22500 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 300 1 1 1024 0 'foobar.img' 1024
-22500 -1 900 300 1 1 1024 0 'foobar.img' 1024
-23400 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-23400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-23400 -1 900 300 1 1 1024 0 'foobar.img' 1024
-23400 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-24300 -1 900 300 4 1 1024 0 'foobar.img' 1024
-24300 -1 900 300 4 1 1024 0 'foobar.img' 1024
-24300 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-24300 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 300 1 1 1024 0 'foobar.img' 1024
-25200 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 300 1 1 1024 0 'foobar.img' 1024
-25200 -1 900 300 1 1 1024 0 'foobar.img' 1024
-26100 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-26100 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-26100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-26100 -1 900 300 1 1 1024 0 'foobar.img' 1024
-27000 -1 900 300 1 1 1024 0 'foobar.img' 1024
-27000 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-27000 -1 900 300 1 1 1024 0 'foobar.img' 1024
-27000 -1 900 300 1 1 1024 0 'foobar.img' 1024
-27900 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-27900 -1 900 300 1 1 1024 0 'foobar.img' 1024
-27900 -1 900 300 4 1 1024 0 'foobar.img' 1024
-27900 -1 3600 900 4 1 1024 0 'foobar.img' 1024
-28800 -1 900 300 4 1 1024 0 'foobar.img' 1024
-28800 -1 900 300 1 1 1024 0 'foobar.img' 1024
-28800 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-28800 -1 900 300 2 1 1024 0 'foobar.img' 1024
-29700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-29700 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-29700 -1 900 300 1 1 1024 0 'foobar.img' 1024
-29700 -1 900 300 4 1 1024 0 'foobar.img' 1024
-30600 -1 3600 900 1 1 1024 0 'foobar.img' 1024
-30600 -1 900 300 1 1 1024 0 'foobar.img' 1024
-30600 -1 3600 900 1 1 1024 0 'foobar.img' 1024
+<?xml version="1.0"?>
+<lease-workload name="withprematureend.lwf.old">
+  <description/>
+  <lease-requests>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="1" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:10:00.00"/>
+      <lease id="2" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:30:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="3" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="4" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="5" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="6" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="7" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="8" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="9" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="10" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="11" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="12" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="13" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="14" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="15" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="00:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="16" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="17" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="18" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="19" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="20" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="21" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="22" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="23" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="24" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="25" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="26" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="27" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="28" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="29" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="30" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="31" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="01:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="32" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="33" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="34" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="35" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="36" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="37" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="38" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="39" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="40" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="41" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="42" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="43" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="44" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="45" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="46" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="47" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="02:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="48" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="49" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="50" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="51" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="52" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="53" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="54" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="55" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="56" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="57" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="58" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="59" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="60" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="61" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="62" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="63" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="03:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="64" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="65" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="66" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="67" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="68" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="69" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="70" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="71" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="72" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="73" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="74" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="75" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="76" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="77" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="78" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="79" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="04:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="80" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="81" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="82" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="83" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="84" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="85" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="86" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="87" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="88" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="89" preemptible="true">
+        <nodes>
+          <node-set numnodes="3">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="90" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="91" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="92" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="93" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="94" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="95" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="05:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="96" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="97" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="98" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="99" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="100" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="101" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="102" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="103" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="104" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="105" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="106" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="107" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="108" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="109" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="110" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="111" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="06:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="112" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="113" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="114" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="115" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="116" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="117" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="118" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="119" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="120" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="121" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="122" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="123" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="124" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="125" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="126" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="127" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="07:45:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="128" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="129" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="130" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="131" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:00:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="132" preemptible="true">
+        <nodes>
+          <node-set numnodes="2">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="133" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="134" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="135" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:15:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="136" preemptible="true">
+        <nodes>
+          <node-set numnodes="4">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="137" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <realduration time="00:05:00.00"/>
+      <lease id="138" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="00:15:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+    <lease-request arrival="08:30:00.00">
+      <realduration time="00:15:00.00"/>
+      <lease id="139" preemptible="true">
+        <nodes>
+          <node-set numnodes="1">
+            <res amount="100" type="CPU"/>
+            <res amount="1024" type="Memory"/>
+          </node-set>
+        </nodes>
+        <start/>
+        <duration time="01:00:00.00"/>
+        <software>
+          <disk-image id="'foobar.img'" size="1024"/>
+        </software>
+      </lease>
+    </lease-request>
+  </lease-requests>
+</lease-workload>

Added: trunk/traces/undocumented/generators.py
===================================================================
--- trunk/traces/undocumented/generators.py	                        (rev 0)
+++ trunk/traces/undocumented/generators.py	2009-08-05 11:12:04 UTC (rev 632)
@@ -0,0 +1,101 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.config import TraceConfig, ImageConfig
+from haizea.traces.formats import LWF, LWFEntry
+
+def generateTrace(config, file, guaranteeAvg = False):
+    tracedur = config.getTraceDuration()
+    
+    print config.intervaldist.getAvg()
+    
+    avgnumreq = tracedur / config.intervaldist.getAvg()
+    idealaccumdur = avgnumreq * config.durationdist.getAvg() * config.numnodesdist.getAvg()
+
+    print avgnumreq
+    print config.durationdist.getAvg()
+    print config.numnodesdist.getAvg()
+    print idealaccumdur
+
+    good = False
+    deadlineavg = config.deadlinedist.get()
+
+    while not good:
+        entries = []
+        time = - deadlineavg
+        accumdur = 0
+        while time + deadlineavg + config.durationdist.getAvg() < tracedur:
+            entry = LWFEntry()
+            entry.reqTime = time
+            entry.startTime = time + config.deadlinedist.get()
+            entry.duration = config.durationdist.get()
+            entry.realDuration = entry.duration
+            entry.numNodes = config.numnodesdist.get()
+            entry.CPU = 1
+            entry.mem = 1024
+            entry.disk = 0
+            entry.vmImage = "NONE.img"
+            entry.vmImageSize = 600
+            accumdur += entry.duration * entry.numNodes
+            entries.append(entry)
+
+            interval = config.intervaldist.get()          
+            time += interval
+            
+        if not guaranteeAvg:
+            good = True
+        else:
+            dev = abs((accumdur / idealaccumdur) - 1)
+            if dev < 0.01:
+                print "Deviation is satisfactory: %.3f" % dev
+                good = True
+            else:
+                print "Deviation is too big: %.3f. Generating again." % dev
+
+    for e in entries:
+        if e.reqTime < 0:
+            e.reqTime = 0
+
+    lwf = LWF(entries)
+    lwf.toFile(file)
+        
+        
+def generateImages(config, file):
+    f = open(file, "w")
+    
+    # Write image sizes
+    for i in config.images:
+        print >>f, "%s %i" % (i, config.sizedist.get())
+    
+    print >>f, "#"
+    
+    l = config.getFileLength()
+    for i in xrange(l):
+        print >>f, config.imagedist.get()
+
+    f.close()
+
+
+if __name__ == "__main__":
+    configfile="../configfiles/images.conf"
+    imagefile="../traces/examples/generated.images"
+
+
+    config = ImageConfig.fromFile(configfile)
+    
+    generateImages(config, imagefile)   
\ No newline at end of file

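A minimal usage sketch for the generateTrace function added above, mirroring the generateImages invocation in the file's __main__ block. This assumes TraceConfig exposes a fromFile constructor analogous to ImageConfig.fromFile; the module import and the file paths below are hypothetical and only illustrate how the function would be driven.

# Hypothetical driver for generateTrace (sketch only, not part of the commit)
from generators import generateTrace
from haizea.common.config import TraceConfig

configfile = "../configfiles/trace.conf"          # hypothetical trace config
tracefile  = "../traces/examples/generated.lwf"   # hypothetical output path

# Assumed constructor, by analogy with ImageConfig.fromFile used above
config = TraceConfig.fromFile(configfile)

# guaranteeAvg=True makes generateTrace regenerate the trace until the
# accumulated duration is within 1% of the ideal average (see the dev check)
generateTrace(config, tracefile, guaranteeAvg=True)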

