[haizea-commit] r465 - in trunk/src/haizea: cli common resourcemanager resourcemanager/deployment resourcemanager/enact/opennebula resourcemanager/enact/simulated resourcemanager/frontends traces

haizea-commit at mailman.cs.uchicago.edu
Wed Aug 6 12:24:46 CDT 2008


Author: borja
Date: 2008-08-06 12:24:45 -0500 (Wed, 06 Aug 2008)
New Revision: 465

Added:
   trunk/src/haizea/resourcemanager/configfile.py
Modified:
   trunk/src/haizea/cli/commands.py
   trunk/src/haizea/common/config.py
   trunk/src/haizea/common/constants.py
   trunk/src/haizea/resourcemanager/datastruct.py
   trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
   trunk/src/haizea/resourcemanager/enact/opennebula/info.py
   trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
   trunk/src/haizea/resourcemanager/enact/simulated/info.py
   trunk/src/haizea/resourcemanager/frontends/opennebula.py
   trunk/src/haizea/resourcemanager/frontends/tracefile.py
   trunk/src/haizea/resourcemanager/log.py
   trunk/src/haizea/resourcemanager/resourcepool.py
   trunk/src/haizea/resourcemanager/rm.py
   trunk/src/haizea/resourcemanager/scheduler.py
   trunk/src/haizea/resourcemanager/slottable.py
   trunk/src/haizea/traces/readers.py
Log:
Overhauled configuration file management. Lots of minor changes in most modules on account of this.
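
With the new scheme, the entry points load the configuration through
HaizeaConfig.from_file() and report problems via ConfigException, as the
commands.py hunk below shows. A minimal sketch of the resulting flow (the
path, the daemon flag, and the pidfile value here are placeholders, not the
real defaults):

    import sys
    from haizea.resourcemanager.configfile import HaizeaConfig
    from haizea.resourcemanager.rm import ResourceManager
    from haizea.common.config import ConfigException

    try:
        # "haizea.conf" is a placeholder path; the CLI falls back to
        # DEFAULT_CONFIG_LOCATIONS when no --conf option is given.
        config = HaizeaConfig.from_file("haizea.conf")
    except ConfigException, msg:
        print >> sys.stderr, "Error in configuration file:"
        print >> sys.stderr, msg
        sys.exit(1)

    rm = ResourceManager(config, False, None)  # daemon flag and pidfile simplified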

Modified: trunk/src/haizea/cli/commands.py
===================================================================
--- trunk/src/haizea/cli/commands.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/cli/commands.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -19,7 +19,8 @@
 from haizea.resourcemanager.rm import ResourceManager
 from haizea.traces.generators import generateTrace, generateImages
 from haizea.common.utils import gen_traceinj_name, unpickle
-from haizea.common.config import RMConfig, RMMultiConfig, TraceConfig, ImageConfig
+from haizea.resourcemanager.configfile import HaizeaConfig, HaizeaMultiConfig
+from haizea.common.config import ConfigException
 from haizea.cli.optionparser import OptionParser, Option
 import sys
 import os
@@ -67,21 +68,26 @@
                 sys.stderr.write(msg)
                 sys.exit(1)
  
-        configfile=opt.conf
-        if configfile == None:
-            # Look for config file in default locations
-            for loc in DEFAULT_CONFIG_LOCATIONS:
-                if os.path.exists(loc):
-                    config = RMConfig.fromFile(loc)
-                    break
+        try:
+            configfile=opt.conf
+            if configfile == None:
+                # Look for config file in default locations
+                for loc in DEFAULT_CONFIG_LOCATIONS:
+                    if os.path.exists(loc):
+                        config = HaizeaConfig.from_file(loc)
+                        break
+                else:
+                    print >> sys.stdout, "No configuration file specified, and none found at default locations."
+                    print >> sys.stdout, "Make sure a config file exists at:\n  -> %s" % "\n  -> ".join(DEFAULT_CONFIG_LOCATIONS)
+                    print >> sys.stdout, "Or specify a configuration file with the --conf option."
+                    exit(1)
             else:
-                print >> sys.stdout, "No configuration file specified, and none found at default locations."
-                print >> sys.stdout, "Make sure a config file exists at:\n  -> %s" % "\n  -> ".join(DEFAULT_CONFIG_LOCATIONS)
-                print >> sys.stdout, "Or specify a configuration file with the --conf option."
-                exit(1)
-        else:
-            config = RMConfig.fromFile(configfile)
-        
+                config = HaizeaConfig.from_file(configfile)
+        except ConfigException, msg:
+            print >> sys.stderr, "Error in configuration file:"
+            print >> sys.stderr, msg
+            exit(1)
+            
         daemon = not opt.foreground
     
         rm = ResourceManager(config, daemon, pidfile)
@@ -118,11 +124,11 @@
     opt, args = p.parse_args(argv)
     
     configfile=opt.conf
-    multiconfig = RMMultiConfig.fromFile(configfile)
+    multiconfig = HaizeaMultiConfig.from_file(configfile)
     
     dir = opt.dir
     
-    configs = multiconfig.getConfigs()
+    configs = multiconfig.get_configs()
     
     etcdir = os.path.abspath(dir)    
     if not os.path.exists(etcdir):
@@ -148,7 +154,7 @@
     opt, args = p.parse_args(argv)
     
     configfile=opt.conf
-    multiconfig = RMMultiConfig.fromFile(configfile)
+    multiconfig = HaizeaMultiConfig.from_file(configfile)
             
     try:
         from mako.template import Template
@@ -157,7 +163,7 @@
         print "You can download them at http://www.makotemplates.org/"
         exit(1)
 
-    configs = multiconfig.getConfigsToRun()
+    configs = multiconfig.get_configs_to_run()
     
     etcdir = os.path.abspath(opt.confdir)    
     if not os.path.exists(etcdir):

Modified: trunk/src/haizea/common/config.py
===================================================================
--- trunk/src/haizea/common/config.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/common/config.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -19,493 +19,135 @@
 import ConfigParser
 from mx.DateTime import ISO
 from mx.DateTime import TimeDelta
-import haizea.common.constants as constants
-import haizea.common.stats as stats
-import os.path
-from haizea.common.utils import genDataDirName
-
-class Config(object):
-    def __init__(self, config):
-        self.config = config
+import textwrap
         
-    @classmethod
-    def fromFile(cls, configfile):
-        file = open (configfile, "r")
-        c = ConfigParser.ConfigParser()
-        c.readfp(file)
-        return cls(c)
-       
-    def createDiscreteDistributionFromSection(self, section):
-        distType = self.config.get(section, constants.DISTRIBUTION_OPT)
-        probs = None
-        if self.config.has_option(section, constants.MIN_OPT) and self.config.has_option(section, constants.MAX_OPT):
-            min = self.config.getint(section, constants.MIN_OPT)
-            max = self.config.getint(section, constants.MAX_OPT)
-            values = range(min, max+1)
-        elif self.config.has_option(section, constants.ITEMS_OPT):
-            pass
-        elif self.config.has_option(section, constants.ITEMSPROBS_OPT):
-            pass
-#        elif self.config.has_option(section, constants.ITEMSFILE_OPT):
-#            filename = config.get(section, constants.ITEMSFILE_OPT)
-#            file = open (filename, "r")
-#            values = []
-#            for line in file:
-#                value = line.strip().split(";")[0]
-#                values.append(value)
-#        elif self.config.has_option(section, constants.ITEMSPROBSFILE_OPT):
-#            itemsprobsOpt = self.config.get(section, constants.ITEMSPROBSFILE_OPT).split(",")
-#            itemsFile = open(itemsprobsOpt[0], "r")
-#            probsField = int(itemsprobsOpt[1])
-#            values = []
-#            probs = []
-#            for line in itemsFile:
-#                fields = line.split(";")
-#                itemname = fields[0]
-#                itemprob = float(fields[probsField])/100
-#                values.append(itemname)
-#                probs.append(itemprob)
-        dist = None
-        if distType == constants.DIST_UNIFORM:
-            dist = stats.DiscreteUniformDistribution(values)
-        elif distType == constants.DIST_EXPLICIT:
-            if probs == None:
-                raise Exception, "No probabilities specified"
-            dist = stats.DiscreteDistribution(values, probs) 
-            
-        return dist
-        
-    def createContinuousDistributionFromSection(self, section):
-        distType = self.config.get(section, constants.DISTRIBUTION_OPT)
-        min = self.config.getfloat(section, constants.MIN_OPT)
-        max = self.config.get(section, constants.MAX_OPT)
-        if max == "unbounded":
-            max = float("inf")
-        if distType == "uniform":
-            dist = stats.ContinuousUniformDistribution(min, max)
-        elif distType == "normal":
-            mu = self.config.getfloat(section, constants.MEAN_OPT)
-            sigma = self.config.getfloat(section, constants.STDEV_OPT)
-            dist = stats.ContinuousNormalDistribution(min, max, mu, sigma)
-        elif distType == "pareto":
-            pass 
-        
-        return dist
-        
+OPTTYPE_INT = 0
+OPTTYPE_FLOAT = 1
+OPTTYPE_STRING = 2
+OPTTYPE_BOOLEAN = 3
+OPTTYPE_DATETIME = 4
+OPTTYPE_TIMEDELTA = 5
 
-        
-class RMConfig(Config):
-    def __init__(self, config):
-        Config.__init__(self, config)
-        
-    #
-    # GENERAL OPTIONS
-    #
+class ConfigException(Exception):
+    """A simple exception class used for configuration exceptions"""
+    pass
 
-    def getLogLevel(self):
-        return self.config.get(constants.GENERAL_SEC, constants.LOGLEVEL_OPT)
-    
-    def getProfile(self):
-        return self.config.get(constants.GENERAL_SEC, constants.PROFILE_OPT)
-
-    def getMode(self):
-        return self.config.get(constants.GENERAL_SEC, constants.MODE_OPT)
-
-    def get_lease_deployment_type(self):
-        if not self.config.has_option(constants.GENERAL_SEC, constants.LEASE_DEPLOYMENT_OPT):
-            return constants.DEPLOYMENT_UNMANAGED
-        else:
-            return self.config.get(constants.GENERAL_SEC, constants.LEASE_DEPLOYMENT_OPT)
-
-    def getDataFile(self):
-        return self.config.get(constants.GENERAL_SEC, constants.DATAFILE_OPT)
-
-    #
-    # SIMULATION OPTIONS
-    #
+class Section(object):
+    def __init__(self, name, required, required_if=None, doc=None):
+        self.name = name
+        self.required = required
+        self.required_if = required_if
+        self.doc = doc
+        self.options = {}
         
-    def getInitialTime(self):
-        timeopt = self.config.get(constants.SIMULATION_SEC, constants.STARTTIME_OPT)
-        return ISO.ParseDateTime(timeopt)
+    def get_doc(self):
+        return textwrap.dedent(self.doc).strip()
 
-    def getClock(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.CLOCK_OPT):
-            return constants.CLOCK_SIMULATED
-        else:
-            return self.config.get(constants.SIMULATION_SEC, constants.CLOCK_OPT)
-    
-    def getNumPhysicalNodes(self):
-        return self.config.getint(constants.SIMULATION_SEC, constants.NODES_OPT)
-    
-    def getResourcesPerPhysNode(self):
-        return self.config.get(constants.SIMULATION_SEC, constants.RESOURCES_OPT).split(";")
-    
-    def getBandwidth(self):
-        return self.config.getint(constants.SIMULATION_SEC, constants.BANDWIDTH_OPT)
 
-    def getSuspendResumeRate(self):
-        return self.config.getint(constants.SIMULATION_SEC, constants.SUSPENDRATE_OPT)
-    
-    def stopWhen(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.STOPWHEN_OPT):
-            return None
-        else:
-            return self.config.get(constants.SIMULATION_SEC, constants.STOPWHEN_OPT)
-
-    def getForceTransferTime(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.FORCETRANSFERT_OPT):
-            return None
-        else:
-            return TimeDelta(seconds=self.config.getint(constants.SIMULATION_SEC, constants.FORCETRANSFERT_OPT))
-
-    def getRuntimeOverhead(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.RUNOVERHEAD_OPT):
-            return None
-        else:
-            return self.config.getint(constants.SIMULATION_SEC, constants.RUNOVERHEAD_OPT)
-
-    def getBootOverhead(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.BOOTOVERHEAD_OPT):
-            time = 0
-        else:
-            time = self.config.getint(constants.SIMULATION_SEC, constants.BOOTOVERHEAD_OPT)
-        return TimeDelta(seconds=time)
-
-    def overheadOnlyBestEffort(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.RUNOVERHEADBE_OPT):
-            return False
-        else:
-            return self.config.getboolean(constants.SIMULATION_SEC, constants.RUNOVERHEADBE_OPT)
-
-    def getStatusMessageInterval(self):
-        if not self.config.has_option(constants.SIMULATION_SEC, constants.STATUS_INTERVAL_OPT):
-            return None
-        else:
-            return self.config.getint(constants.SIMULATION_SEC, constants.STATUS_INTERVAL_OPT)
-
-    #
-    # OPENNEBULA OPTIONS
-    #
-    def getONEDB(self):
-        return self.config.get(constants.OPENNEBULA_SEC, constants.DB_OPT)
-
-    def getONEvm(self):
-        return self.config.get(constants.OPENNEBULA_SEC, constants.ONEVM_OPT)
-
-    def getONESuspendResumeRate(self):
-        if not self.config.has_option(constants.OPENNEBULA_SEC, constants.ESTIMATESUSPENDRATE_OPT):
-            return 32
-        else:
-            return self.config.getint(constants.OPENNEBULA_SEC, constants.ESTIMATESUSPENDRATE_OPT)
-
-    def get_non_schedulable_interval(self):
-        if not self.config.has_option(constants.OPENNEBULA_SEC, constants.NONSCHEDULABLE_OPT):
-            return 10
-        else:
-            return self.config.getint(constants.OPENNEBULA_SEC, constants.NONSCHEDULABLE_OPT)
-
-    #
-    # SCHEDULING OPTIONS
-    #
-
-    def get_wakeup_interval(self):
-        if not self.config.has_option(constants.SCHEDULING_SEC, constants.WAKEUPINTERVAL_OPT):
-            return 60
-        else:
-            return self.config.getint(constants.SCHEDULING_SEC, constants.WAKEUPINTERVAL_OPT)
-
-    def getSuspensionType(self):
-        return self.config.get(constants.SCHEDULING_SEC, constants.SUSPENSION_OPT)
-
-    def isMigrationAllowed(self):
-        return self.config.getboolean(constants.SCHEDULING_SEC, constants.MIGRATION_OPT)
-
-    def getMustMigrate(self):
-        return self.config.get(constants.SCHEDULING_SEC, constants.MIGRATE_OPT)
-
-    def getMaxReservations(self):
-        if self.getBackfillingType() == constants.BACKFILLING_OFF:
-            return 0
-        elif self.getBackfillingType() == constants.BACKFILLING_AGGRESSIVE:
-            return 1
-        elif self.getBackfillingType() == constants.BACKFILLING_CONSERVATIVE:
-            return 1000000
-        elif self.getBackfillingType() == constants.BACKFILLING_INTERMEDIATE:
-            r = self.config.getint(constants.SCHEDULING_SEC, constants.RESERVATIONS_OPT)
-            return r
-
-    def getSuspendThreshold(self):
-        if not self.config.has_option(constants.SCHEDULING_SEC, constants.SUSPENDTHRESHOLD_OPT):
-            return None
-        else:
-            return TimeDelta(seconds=self.config.getint(constants.SCHEDULING_SEC, constants.SUSPENDTHRESHOLD_OPT))
-
-    def getSuspendThresholdFactor(self):
-        if not self.config.has_option(constants.SCHEDULING_SEC, constants.SUSPENDTHRESHOLDFACTOR_OPT):
-            return 0
-        else:
-            return self.config.getfloat(constants.SCHEDULING_SEC, constants.SUSPENDTHRESHOLDFACTOR_OPT)
-
-    def isBackfilling(self):
-        if self.getBackfillingType() == constants.BACKFILLING_OFF:
-            return False
-        else:
-            return True
+class Option(object):
+    def __init__(self, name, getter, type, required, required_if=None, default=None, valid=None, doc=None):
+        self.name = name
+        self.getter = getter
+        self.type = type
+        self.required = required
+        self.required_if = required_if
+        self.default = default
+        self.valid = valid
+        self.doc = doc
         
-    def getBackfillingType(self):
-        return self.config.get(constants.SCHEDULING_SEC, constants.BACKFILLING_OPT)
+    def get_doc(self):
+        return textwrap.dedent(self.doc).strip()
 
-
-    #
-    # DEPLOYMENT (IMAGETRANSFER) OPTIONS
-    #
-
-    def get_transfer_mechanism(self):
-        return self.config.get(constants.DEPLOY_IMAGETRANSFER_SEC, constants.TRANSFER_MECHANISM_OPT)
-
-    def getReuseAlg(self):
-        if not self.config.has_option(constants.DEPLOY_IMAGETRANSFER_SEC, constants.REUSE_OPT):
-            return constants.REUSE_NONE
-        else:
-            return self.config.get(constants.DEPLOY_IMAGETRANSFER_SEC, constants.REUSE_OPT)
+class Config(object):
+    def __init__(self, config, sections):
+        self.config = config
+        self.sections = sections
+        self.__options = {}
         
-    def getMaxCacheSize(self):
-        if not self.config.has_option(constants.DEPLOY_IMAGETRANSFER_SEC, constants.CACHESIZE_OPT):
-            return constants.CACHESIZE_UNLIMITED
-        else:
-            return self.config.getint(constants.DEPLOY_IMAGETRANSFER_SEC, constants.CACHESIZE_OPT)        
+        self.__load_all()
         
-    def isAvoidingRedundantTransfers(self):
-        if not self.config.has_option(constants.SCHEDULING_SEC, constants.AVOIDREDUNDANT_OPT):
-            return False
-        else:
-            return self.config.getboolean(constants.DEPLOY_IMAGETRANSFER_SEC, constants.AVOIDREDUNDANT_OPT)
-
-    def getNodeSelectionPolicy(self):
-        if not self.config.has_option(constants.DEPLOY_IMAGETRANSFER_SEC, constants.NODESELECTION_OPT):
-            return constants.NODESELECTION_AVOIDPREEMPT
-        else:
-            return self.config.get(constants.DEPLOY_IMAGETRANSFER_SEC, constants.NODESELECTION_OPT)
-
-
-    #
-    # TRACEFILE OPTIONS
-    #
-    def getTracefile(self):
-        return self.config.get(constants.TRACEFILE_SEC, constants.TRACEFILE_OPT)
-
-    def getInjectfile(self):
-        if not self.config.has_option(constants.TRACEFILE_SEC, constants.INJFILE_OPT):
-            return None
-        else:
-            injfile = self.config.get(constants.TRACEFILE_SEC, constants.INJFILE_OPT)
-            if injfile == "None":
-                return None
-            else:
-                return injfile
-
-    def getImagefile(self):
-        if not self.config.has_option(constants.TRACEFILE_SEC, constants.IMGFILE_OPT):
-            return None
-        else:
-            imgfile = self.config.get(constants.TRACEFILE_SEC, constants.IMGFILE_OPT)
-            if imgfile == "None":
-                return None
-            else:
-                return imgfile
-
-class RMMultiConfig(Config):
-    def __init__(self, config):
-        Config.__init__(self, config)
+    def __load_all(self):
+        required_sections = [s for s in self.sections if s.required]
+        conditional_sections = [s for s in self.sections if not s.required and s.required_if != None]
+        optional_sections = [s for s in self.sections if not s.required and s.required_if == None]
         
-    def getProfiles(self):
-        sections = set([s.split(":")[0] for s in self.config.sections()])
-        # Remove multi and common sections
-        sections.difference_update([constants.COMMON_SEC, constants.MULTI_SEC])
-        return list(sections)
-    
-    def getProfilesSubset(self, sec):
-        profiles = self.config.get(sec, constants.PROFILES_OPT)
-        if profiles == "ALL":
-            profiles = self.getProfiles()
-        else:
-            profiles = profiles.split()
-        return profiles
-
-    def getTracesSubset(self, sec):
-        traces = self.config.get(sec, constants.TRACES_OPT)
-        if traces == "ALL":
-            traces = [os.path.basename(t) for t in self.getTracefiles()]
-        else:
-            traces = traces.split()
+        sections = required_sections + conditional_sections + optional_sections
+        
+        for sec in sections:
+            has_section = self.config.has_section(sec.name)
             
-        return traces
-
-    def getInjSubset(self, sec):
-        injs = self.config.get(sec, constants.INJS_OPT)
-        if injs == "ALL":
-            injs = [os.path.basename(t) for t in self.getInjectfiles() if t!=None]
-            injs.append(None)
-        elif injs == "NONE":
-            injs = [None]
-        else:
-            injs = injs.split()
-        return injs
-
-    def getTracefiles(self):
-        dir = self.config.get(constants.MULTI_SEC, constants.TRACEDIR_OPT)
-        traces = self.config.get(constants.MULTI_SEC, constants.TRACEFILES_OPT).split()
-        return [dir + "/" + t for t in traces]
-
-    def getInjectfiles(self):
-        dir = self.config.get(constants.MULTI_SEC, constants.INJDIR_OPT)
-        inj = self.config.get(constants.MULTI_SEC, constants.INJFILES_OPT).split()
-        inj = [dir + "/" + i for i in inj]
-        inj.append(None)
-        return inj
-    
-    def getConfigs(self):
-        profiles = self.getProfiles()
-        tracefiles = self.getTracefiles()
-        injectfiles = self.getInjectfiles()
-
-        configs = []
-        for profile in profiles:
-            for tracefile in tracefiles:
-                for injectfile in injectfiles:
-                    profileconfig = ConfigParser.ConfigParser()
-                    commonsections = [s for s in self.config.sections() if s.startswith("common:")]
-                    profilesections = [s for s in self.config.sections() if s.startswith(profile +":")]
-                    sections = commonsections + profilesections
-                    for s in sections:
-                        s_noprefix = s.split(":")[1]
-                        items = self.config.items(s)
-                        if not profileconfig.has_section(s_noprefix):
-                            profileconfig.add_section(s_noprefix)
-                        for item in items:
-                            profileconfig.set(s_noprefix, item[0], item[1])
-                            
-                    # The tracefile section may have not been created
-                    if not profileconfig.has_section(constants.TRACEFILE_SEC):
-                        profileconfig.add_section(constants.TRACEFILE_SEC)
-
-                    # Add tracefile option
-                    profileconfig.set(constants.TRACEFILE_SEC, constants.TRACEFILE_OPT, tracefile)
+            # If the section is required, check if it exists
+            if sec.required and not has_section:
+                raise ConfigException, "Required section [%s] not found" % sec.name
+            
+            # If the section is conditionally required, check that
+            # it meets the conditions
+            if sec.required_if != None:
+                for req in sec.required_if:
+                    (condsec,condopt) = req[0]
+                    condvalue = req[1]
                     
-                    # Add injected file option
-                    if injectfile == None:
-                        inj = "None"
-                    else:
-                        inj = injectfile
-                    profileconfig.set(constants.TRACEFILE_SEC, constants.INJFILE_OPT, inj)
-
-                    # Add datadir option
-                    datadirname = genDataDirName(profile, tracefile, injectfile)
-                    basedatadir = self.config.get(constants.MULTI_SEC, constants.BASEDATADIR_OPT)
-                    # TODO: Change this so there will be a single directory with all the
-                    # data files, instead of multiple directories
-                    datafile = basedatadir + "/" + datadirname + "/haizea.dat"
-                    profileconfig.set(constants.GENERAL_SEC, constants.DATAFILE_OPT, datadir)
+                    if self.config.has_option(condsec,condopt) and self.config.get(condsec,condopt) == condvalue:
+                        if not has_section:
+                            raise ConfigException, "Section '%s' is required when %s.%s==%s" % (sec.name, condsec, condopt, condvalue)
                     
-                    # Set profile option (only used internally)
-                    profileconfig.set(constants.GENERAL_SEC, constants.PROFILE_OPT, profile)
-                    
-                    c = RMConfig(profileconfig)
-                    configs.append(c)
-        
-        return configs
+            # Load options
+            if has_section:
+                for opt in sec.options:
+                    self.__load_option(sec, opt)
 
-            
-    def getConfigsToRun(self):
-        configs = self.getConfigs()
-        
-        # TODO: Come up with a new way to filter what gets run or not
-        #profiles = self.getProfilesSubset(constants.RUN_SEC)
-        #traces = self.getTracesSubset(constants.RUN_SEC)
-        #injs = self.getInjSubset(constants.RUN_SEC)
-        
-#        confs = []
-#        for c in configs:
-#            p = c.getProfile()
-#            t = os.path.basename(c.getTracefile())
-#            i = c.getInjectfile()
-#            if i != None: 
-#                i = os.path.basename(i)
-#
-#            if p in profiles and t in traces and i in injs:
-#                confs.append(c)
-#
-#        return confs
-        return configs
-
     
-
+    def __load_option(self, sec, opt):
+        # Load a single option
+        secname = sec.name
+        optname = opt.name
         
-class TraceConfig(Config):
-    def __init__(self, c):
-        Config.__init__(self, c)
-        self.numnodesdist = self.createDiscreteDistributionFromSection(constants.NUMNODES_SEC)
-        self.deadlinedist = self.createDiscreteDistributionFromSection(constants.DEADLINE_SEC)
-        self.durationdist = self.createDiscreteDistributionFromSection(constants.DURATION_SEC)
-        self.imagesdist = self.createDiscreteDistributionFromSection(constants.IMAGES_SEC)
-        if self.isGenerateBasedOnWorkload():
-            # Find interval between requests
-            tracedur = self.getTraceDuration()
-            percent = self.getPercent()
-            nodes = self.getNumNodes()
-            accumduration = tracedur * nodes * percent
-            numreqs = accumduration / (self.numnodesdist.getAvg() * self.durationdist.getAvg())
-            intervalavg = int(tracedur / numreqs)
-            min = intervalavg - 3600 # Make this configurable
-            max = intervalavg + 3600 # Make this configurable
-            values = range(min, max+1)
-            self.intervaldist = stats.DiscreteUniformDistribution(values)
+        has_option = self.config.has_option(secname, optname)
+        
+        if not has_option:
+            if opt.required:
+                raise ConfigException, "Required option '%s.%s' not found" % (secname, optname)
+            if opt.required_if != None:
+                for req in opt.required_if:
+                    (condsec,condopt) = req[0]
+                    condvalue = req[1]
+                    
+                    if self.config.has_option(condsec,condopt) and self.config.get(condsec,condopt) == condvalue:
+                        raise ConfigException, "Option '%s.%s' is required when %s.%s==%s" % (secname, optname, condsec, condopt, condvalue)
+            
+            value = opt.default
         else:
-            self.intervaldist = self.createDiscreteDistributionFromSection(constants.INTERVAL_SEC)
+            if opt.type == OPTTYPE_INT:
+                value = self.config.getint(secname, optname)
+            elif opt.type == OPTTYPE_FLOAT:
+                value = self.config.getfloat(secname, optname)
+            elif opt.type == OPTTYPE_STRING:
+                value = self.config.get(secname, optname)
+            elif opt.type == OPTTYPE_BOOLEAN:
+                value = self.config.getboolean(secname, optname)
+            elif opt.type == OPTTYPE_DATETIME:
+                value = self.config.get(secname, optname)
+                value = ISO.ParseDateTime(value)
+            elif opt.type == OPTTYPE_TIMEDELTA:
+                value = self.config.getint(secname, optname)
+                value = TimeDelta(seconds=value)
+                
+            if opt.valid != None:
+                if not value in opt.valid:
+                    raise ConfigException, "Invalid value specified for '%s.%s'. Valid values are %s" % (secname, optname, opt.valid)
+                  
+        self.__options[opt.getter] = value
         
-    def getTraceDuration(self):
-        return self.config.getint(constants.GENERAL_SEC, constants.DURATION_OPT)
+    def get(self, opt):
+        return self.__options[opt]
         
-    def getPercent(self):
-        percent = self.config.getint(constants.WORKLOAD_SEC, constants.PERCENT_OPT)
-        percent = percent / 100.0
-        return percent
-    
-    def getNumNodes(self):
-        return self.config.getint(constants.WORKLOAD_SEC, constants.NUMNODES_OPT)
+    @classmethod
+    def from_file(cls, configfile):
+        file = open (configfile, "r")
+        c = ConfigParser.ConfigParser()
+        c.readfp(file)
+        cfg = cls(c)
+        return cfg
 
-    def getDuration(self):
-        return self.durationdist.get()
-    
-    def isGenerateBasedOnWorkload(self):
-        return self.config.has_section(constants.WORKLOAD_SEC)
-
-
-    
-class ImageConfig(Config):
-    def __init__(self, c):
-        Config.__init__(self, c)
-        self.sizedist = self.createDiscreteDistributionFromSection(constants.SIZE_SEC)
-        numimages = self.config.getint(constants.GENERAL_SEC, constants.IMAGES_OPT)
-        self.images = ["image_" + str(i+1) for i in range(numimages)]
         
-        distribution = self.config.get(constants.GENERAL_SEC, constants.DISTRIBUTION_OPT)
-        if distribution == "uniform":
-            self.imagedist = stats.DiscreteUniformDistribution(self.images) 
-        else:
-            probs = []
-            explicitprobs = distribution.split()
-            for p in explicitprobs:
-                numitems, prob = p.split(",")
-                itemprob = float(prob)/100
-                for i in range(int(numitems)):
-                    probs.append(itemprob)
-            self.imagedist = stats.DiscreteDistribution(self.images, probs)
-            print probs
-    
-    def getFileLength(self):
-        return self.config.getint(constants.GENERAL_SEC, constants.LENGTH_OPT)
-        
-
-        
         
\ No newline at end of file
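
The rewritten haizea.common.config module replaces the old per-option getter
methods with a declarative description of the file: a Config subclass builds
Section objects, attaches Option objects to them, and the base class checks
required and conditional sections/options, applies defaults, and validates
values in a single pass. A minimal sketch of the pattern (the "example"
section and "count" option are made up for illustration; HaizeaConfig in
configfile.py below is the real definition):

    from haizea.common.config import Config, Section, Option, OPTTYPE_INT

    class ExampleConfig(Config):
        def __init__(self, config):
            example = Section("example", required=True)
            example.options = \
            [
             Option(name     = "count",
                    getter   = "count",
                    type     = OPTTYPE_INT,
                    required = False,
                    default  = 42,
                    doc      = """
                    Hypothetical integer option, only here to
                    illustrate the declarative style.
                    """)
            ]
            Config.__init__(self, config, [example])

    # cfg = ExampleConfig.from_file("example.conf")
    # cfg.get("count")   # parsed value, or the default if the option is absent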

Modified: trunk/src/haizea/common/constants.py
===================================================================
--- trunk/src/haizea/common/constants.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/common/constants.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -82,83 +82,8 @@
     elif s == RES_STATE_DONE:
         return "Done"
 
-# Configfile sections and options
-PROFILES_SEC="profiles"
-NAMES_OPT="names"
 
-TRACES_SEC="traces"
-TRACEDIR_OPT="tracedir"
-TRACEFILES_OPT="tracefiles"
 
-INJECTIONS_SEC="leaseinjections"
-INJDIR_OPT="injectiondir"
-INJFILES_OPT="injectionfiles"
-
-RUN_SEC="run"
-PROFILES_OPT="profiles"
-TRACES_OPT="traces"
-INJS_OPT="injections"
-
-GENERAL_SEC="general"
-LOGLEVEL_OPT="loglevel"
-MODE_OPT="mode"
-LEASE_DEPLOYMENT_OPT="lease-deployment"
-PROFILE_OPT="profile"
-SUSPENSION_OPT="suspension"
-MIGRATION_OPT="migration"
-MIGRATE_OPT="what-to-migrate"
-TRANSFER_OPT="diskimage-transfer"
-BACKFILLING_OPT="backfilling"
-RESERVATIONS_OPT="backfilling-reservations"
-TRACEFILE_OPT="tracefile"
-INJFILE_OPT="injectionfile"
-IMGFILE_OPT="imagefile"
-REUSE_OPT="reuse"
-CACHESIZE_OPT="diskimage-cache-size"
-NODESELECTION_OPT="nodeselection"
-DATADIR_OPT="datadir"
-
-REPORTING_SEC="reporting"
-CSS_OPT="css"
-REPORTDIR_OPT="reportdir"
-TABLE_OPT="table"
-SLIDESHOW_OPT="slideshow"
-CLIPSTART_OPT="clip-start"
-CLIPEND_OPT="clip-end"
-
-SIMULATION_SEC="simulation"
-STARTTIME_OPT="starttime"
-TEMPLATEDB_OPT="templatedb"
-TARGETDB_OPT="targetdb"
-NODES_OPT="nodes"
-BANDWIDTH_OPT="imagetransfer-bandwidth"
-SUSPENDRATE_OPT="suspendresume-rate"
-SUSPENDTHRESHOLD_OPT="suspend-threshold"
-SUSPENDTHRESHOLDFACTOR_OPT="suspend-threshold-factor"
-RESOURCES_OPT="resources"
-STOPWHEN_OPT="stop-when"
-RUNOVERHEAD_OPT="runtime-overhead"
-BOOTOVERHEAD_OPT="bootshutdown-overhead"
-RUNOVERHEADBE_OPT="runtime-overhead-onlybesteffort"
-FORCETRANSFERT_OPT="force-transfer-time"
-REUSE_OPT="diskimage-reuse"
-AVOIDREDUNDANT_OPT="avoid-redundant-transfers"
-STATUS_INTERVAL_OPT="status-message-interval"
-CLOCK_OPT="clock"
-
-OPENNEBULA_SEC = "opennebula"
-DB_OPT = "db"
-ONEVM_OPT = "onevm"
-ESTIMATESUSPENDRATE_OPT = "suspendresume-rate-estimate"
-WAKEUPINTERVAL_OPT = "wakeup-interval"
-NONSCHEDULABLE_OPT = "non-schedulable-interval"
-
-DEPLOY_IMAGETRANSFER_SEC = "deploy-imagetransfer"
-TRANSFER_MECHANISM_OPT = "transfer-mechanism"
-
-SCHEDULING_SEC="scheduling"
-TRACEFILE_SEC="tracefile"
-
 COMMON_SEC="common"
 MULTI_SEC="multi"
 BASEDATADIR_OPT="basedatadir"
@@ -177,20 +102,21 @@
 
 MIGRATE_NONE="nothing"
 MIGRATE_MEM="mem"
-MIGRATE_MEMVM="mem+vm"
+MIGRATE_MEMDISK="mem+disk"
 
-TRANSFER_NONE="none"
 TRANSFER_UNICAST="unicast"
 TRANSFER_MULTICAST="multicast"
 
-STOPWHEN_BESUBMITTED="best-effort-submitted"
-STOPWHEN_BEDONE="best-effort-done"
+STOPWHEN_ALLDONE = "all-leases-done"
+STOPWHEN_BESUBMITTED="besteffort-submitted"
+STOPWHEN_BEDONE="besteffort-done"
 
 REUSE_NONE="none"
 REUSE_IMAGECACHES="image-caches"
 
-NODESELECTION_AVOIDPREEMPT="avoid-preemption"
-NODESELECTION_PREFERREUSE="prefer-imagereuse"
+RUNTIMEOVERHEAD_NONE="none"
+RUNTIMEOVERHEAD_ALL="all"
+RUNTIMEOVERHEAD_BE="besteffort"
 
 DEPLOYMENT_UNMANAGED = "unmanaged"
 DEPLOYMENT_PREDEPLOY = "predeployed-images"
@@ -199,27 +125,6 @@
 CLOCK_SIMULATED = "simulated"
 CLOCK_REAL = "real"
 
-# Graph configfile sections and options
-TITLE_OPT="title"
-DATAFILE_OPT="datafile"
-TITLEX_OPT="title-x"
-TITLEY_OPT="title-y"
-GRAPHTYPE_OPT="graphtype"
-PROFILE_OPT="profile"
-TRACE_OPT="trace"
-INJ_OPT="injection"
-
-GRAPH_LINE_VALUE="line-value"
-GRAPH_LINE_AVG="line-average"
-GRAPH_STEP_VALUE="step-value"
-GRAPH_POINT_VALUE="point-value"
-GRAPH_POINTLINE_VALUEAVG="point-value+line-avg"
-GRAPH_CUMULATIVE="cumulative"
-GRAPH_NUMNODE_LENGTH_CORRELATION_SIZE="numnode-length-correlation-insize"
-GRAPH_NUMNODE_LENGTH_CORRELATION_Y="numnode-length-correlation-iny"
-GRAPH_NUMNODE_REQLENGTH_CORRELATION_SIZE="numnode-reqlength-correlation-insize"
-GRAPH_NUMNODE_REQLENGTH_CORRELATION_Y="numnode-reqlength-correlation-iny"
-
 # Component names
 RM="RM"
 SCHED="SCHED"

Added: trunk/src/haizea/resourcemanager/configfile.py
===================================================================
--- trunk/src/haizea/resourcemanager/configfile.py	                        (rev 0)
+++ trunk/src/haizea/resourcemanager/configfile.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -0,0 +1,675 @@
+# -------------------------------------------------------------------------- #
+# Copyright 2006-2008, University of Chicago                                 #
+# Copyright 2008, Distributed Systems Architecture Group, Universidad        #
+# Complutense de Madrid (dsa-research.org)                                   #
+#                                                                            #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
+# not use this file except in compliance with the License. You may obtain    #
+# a copy of the License at                                                   #
+#                                                                            #
+# http://www.apache.org/licenses/LICENSE-2.0                                 #
+#                                                                            #
+# Unless required by applicable law or agreed to in writing, software        #
+# distributed under the License is distributed on an "AS IS" BASIS,          #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+# See the License for the specific language governing permissions and        #
+# limitations under the License.                                             #
+# -------------------------------------------------------------------------- #
+
+from haizea.common.config import Section, Option, Config, OPTTYPE_INT, OPTTYPE_FLOAT, OPTTYPE_STRING, OPTTYPE_BOOLEAN, OPTTYPE_DATETIME, OPTTYPE_TIMEDELTA 
+import haizea.common.constants as constants
+import haizea.common.stats as stats
+import os.path
+from mx.DateTime import TimeDelta
+
+class HaizeaConfig(Config):
+    def __init__(self, config):
+        sections = []
+        
+        # ============================= #
+        #                               #
+        #        GENERAL OPTIONS        #
+        #                               #
+        # ============================= #
+
+        general = Section("general", required=True)
+        general.options = \
+        [
+         Option(name        = "loglevel",
+                getter      = "loglevel",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = "INFO",
+                valid       = ["STATUS","INFO","DEBUG","EXTREMEDEBUG"],
+                doc         = """
+                Controls the level (and amount) of 
+                log messages. Valid values are:
+                
+                 - STATUS: Only print status messages
+                 - INFO: Slightly more verbose than STATUS
+                 - DEBUG: Prints information useful for debugging the scheduler.
+                 - EXTREMEDEBUG: Prints very verbose information
+                   on the scheduler's internal data structures. Use only
+                   for short runs.        
+                """),
+         
+         Option(name        = "mode",
+                getter      = "mode",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                valid       = ["simulated","opennebula"],
+                doc         = """
+                Sets the mode the scheduler will run in.
+                Currently the only valid values are "simulated" and
+                "opennebula". The "simulated" mode expects lease
+                requests to be provided through a trace file, and
+                all enactment is simulated. The "opennebula" mode
+                interacts with the OpenNebula virtual infrastructure
+                manager (http://www.opennebula.org/) to obtain lease
+                requests and to do enactment on physical resources.
+                See sample_opennebula.conf for a description of
+                OpenNebula-specific options.                
+                """),
+
+         Option(name        = "lease-preparation",
+                getter      = "lease-preparation",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = constants.DEPLOYMENT_UNMANAGED,
+                valid       = [constants.DEPLOYMENT_UNMANAGED,
+                               constants.DEPLOYMENT_PREDEPLOY,
+                               constants.DEPLOYMENT_TRANSFER],
+                doc         = """
+                Sets how the scheduler will handle the
+                preparation overhead of leases. Valid values are:
+                
+                 - unmanaged: The scheduler can assume that there
+                   is no deployment overhead, or that some
+                   other entity is taking care of it (e.g., one
+                   of the enactment backends)
+                 - predeployed-images: The scheduler can assume that
+                   all required disk images are predeployed on the
+                   physical nodes. This is different from "unmanaged"
+                   because the scheduler may still have to handle
+                   making local copies of the predeployed images before
+                   a lease can start.
+                 - imagetransfer: A disk image has to be transferred
+                   from a repository node before the lease can start.
+                """),
+
+         Option(name        = "datafile",
+                getter      = "datafile",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = None,
+                doc         = """
+                This is the file where statistics on
+                the scheduler's run will be saved (lease waiting times,
+                utilization data, etc.). If omitted, no data will be saved.
+                """),
+
+         Option(name        = "profile",
+                getter      = "profile",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                doc         = """
+                This option is used internally by Haizea when using
+                multiconfiguration files. See the multiconfiguration
+                documentation for more details.        
+                """)
+        ]
+
+        sections.append(general)
+
+        # ============================= #
+        #                               #
+        #      SCHEDULING OPTIONS       #
+        #                               #
+        # ============================= #
+
+        scheduling = Section("scheduling", required=True)
+        scheduling.options = \
+        [
+         Option(name        = "wakeup-interval",
+                getter      = "wakeup-interval",
+                type        = OPTTYPE_TIMEDELTA,
+                required    = False,
+                default     = TimeDelta(seconds=60),
+                doc         = """
+                Interval at which Haizea will wake up
+                to manage resources and process pending requests.
+                This option is not used when using a simulated clock,
+                since the clock will skip directly to the time where an
+                event is happening.
+                """),
+
+         Option(name        = "backfilling",
+                getter      = "backfilling",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = None,
+                valid       = [constants.BACKFILLING_OFF,
+                               constants.BACKFILLING_AGGRESSIVE,
+                               constants.BACKFILLING_CONSERVATIVE,
+                               constants.BACKFILLING_INTERMEDIATE],
+                doc         = """
+                Backfilling algorithm to use. Valid values are:
+                
+                 - off: don't do backfilling
+                 - aggressive: at most 1 reservation in the future
+                 - conservative: unlimited reservations in the future
+                 - intermediate: N reservations in the future (N is specified
+                   in the backfilling-reservations option)
+                """),
+
+         Option(name        = "backfilling-reservations",
+                getter      = "backfilling-reservations",
+                type        = OPTTYPE_INT,
+                required    = False,
+                required_if = [(("scheduling","backfilling"),constants.BACKFILLING_INTERMEDIATE)],
+                doc         = """
+                Number of future reservations to allow when
+                using the "intermediate" backfilling option.
+                """),
+
+         Option(name        = "suspension",
+                getter      = "suspension",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                valid       = [constants.SUSPENSION_NONE,
+                               constants.SUSPENSION_SERIAL,
+                               constants.SUSPENSION_ALL],
+                doc         = """
+                Specifies what can be suspended. Valid values are:
+                
+                 - none: suspension is never allowed
+                 - serial-only: only 1-node leases can be suspended
+                 - all: any lease can be suspended                
+                """),
+
+         Option(name        = "suspend-threshold-factor",
+                getter      = "suspend-threshold-factor",
+                type        = OPTTYPE_INT,
+                required    = False,
+                default     = 0,
+                doc         = """
+                Documentation                
+                """),
+
+         Option(name        = "force-suspend-threshold",
+                getter      = "force-suspend-threshold",
+                type        = OPTTYPE_TIMEDELTA,
+                required    = False,
+                doc         = """
+                Documentation                
+                """),
+
+         Option(name        = "migration",
+                getter      = "migration",
+                type        = OPTTYPE_BOOLEAN,
+                required    = True,
+                doc         = """
+                Specifies whether leases can be migrated from one
+                physical node to another. Valid values are "True" or "False"                
+                """),
+
+         Option(name        = "what-to-migrate",
+                getter      = "what-to-migrate",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                required_if = [(("scheduling","migration"),True)],
+                default     = constants.MIGRATE_NONE,
+                valid       = [constants.MIGRATE_NONE,
+                               constants.MIGRATE_MEM,
+                               constants.MIGRATE_MEMDISK],
+                doc         = """
+                Specifies what data has to be moved around when
+                migrating a lease. Valid values are:
+                
+                 - nothing: migration can be performed without transferring any
+                   files.
+                 - mem: only the memory must be transferred
+                 - mem+disk: both the memory and the VM disk image must be
+                   transferred                
+                """)
+
+        ]
+        sections.append(scheduling)
+        
+        # ============================= #
+        #                               #
+        #      SIMULATION OPTIONS       #
+        #                               #
+        # ============================= #
+        
+        simulation = Section("simulation", required=False,
+                             required_if = [(("general","mode"),"simulated")] )
+        simulation.options = \
+        [
+         Option(name        = "clock",
+                getter      = "clock",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = constants.CLOCK_REAL,
+                valid       = [constants.CLOCK_REAL,
+                               constants.CLOCK_SIMULATED],
+                doc         = """
+                Type of clock to use in simulation:
+                
+                 - "simulated": A simulated clock that fastforwards through
+                    time. Can only use the tracefile request
+                    frontend
+                 - "real": A real clock is used, but simulated resources and
+                   enactment actions are used. Can only use the RPC
+                   request frontend.                
+                """),
+
+         Option(name        = "starttime",
+                getter      = "starttime",
+                type        = OPTTYPE_DATETIME,
+                required    = False,
+                required_if = [(("simulation","clock"),constants.CLOCK_SIMULATED)],
+                doc         = """
+                Time at which simulated clock will start.                
+                """),
+
+         Option(name        = "nodes",
+                getter      = "simul.nodes",
+                type        = OPTTYPE_INT,
+                required    = True,
+                doc         = """
+                Number of nodes in the simulated cluster                
+                """) ,               
+
+         Option(name        = "resources",
+                getter      = "simul.resources",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                doc         = """
+                Resources in each node. Five types of resources
+                are recognized right now:
+                
+                 - CPU: Number of processors per node
+                 - Mem: Memory (in MB)
+                 - Net (in): Inbound network bandwidth (in Mbps) 
+                 - Net (out): Outbound network bandwidth (in Mbps) 
+                 - Disk: Disk space in MB (not counting space for disk cache)
+                """),
+
+         Option(name        = "imagetransfer-bandwidth",
+                getter      = "imagetransfer-bandwidth",
+                type        = OPTTYPE_INT,
+                required    = True,
+                doc         = """
+                Bandwidth (in Mbps) available for image transfers.
+                This would correspond to the outbound network bandwidth of the
+                node where the images are stored.                
+                """),
+
+         Option(name        = "suspendresume-rate",
+                getter      = "simul.suspendresume-rate",
+                type        = OPTTYPE_FLOAT,
+                required    = True,
+                doc         = """
+                Rate at which VMs are assumed to suspend (in MB of
+                memory per second)                
+                """),
+
+         Option(name        = "stop-when",
+                getter      = "stop-when",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = constants.STOPWHEN_ALLDONE,
+                valid       = [constants.STOPWHEN_ALLDONE,
+                               constants.STOPWHEN_BESUBMITTED,
+                               constants.STOPWHEN_BEDONE],
+                doc         = """
+                When using the simulated clock, this specifies when the
+                simulation must end. Valid options are:
+                
+                 - all-leases-done: All requested leases have been completed
+                   and there are no queued/pending requests.
+                 - besteffort-submitted: When all best-effort leases have been
+                   submitted.
+                 - besteffort-done: When all best-effort leases have been
+                   completed.                
+                """),
+
+         Option(name        = "status-message-interval",
+                getter      = "status-message-interval",
+                type        = OPTTYPE_INT,
+                required    = False,
+                default     = None,
+                doc         = """
+                If specified, the simulated clock will print a status
+                message with some basic statistics at this interval. This is
+                useful for keeping track of long simulations. The interval is
+                specified in minutes.
+                """)
+
+        ]
+        sections.append(simulation)
+        
+
+        # ============================= #
+        #                               #
+        #      DEPLOYMENT OPTIONS       #
+        #     (w/ image transfers)      #
+        #                               #
+        # ============================= #
+
+        imgtransfer = Section("deploy-imagetransfer", required=False,
+                             required_if = [(("general","lease-deployment"),"imagetransfer")])
+        imgtransfer.options = \
+        [
+         Option(name        = "transfer-mechanism",
+                getter      = "transfer-mechanism",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                valid       = [constants.TRANSFER_UNICAST,
+                               constants.TRANSFER_MULTICAST],
+                doc         = """
+                Specifies how disk images are transferred. Valid values are:
+                 - unicast: A disk image can be transferred to just one node at a time
+                   (NOTE: Not currently supported)
+                 - multicast: A disk image can be multicast to multiple nodes at 
+                   the same time.                
+                """),
+
+         Option(name        = "avoid-redundant-transfers",
+                getter      = "avoid-redundant-transfers",
+                type        = OPTTYPE_BOOLEAN,
+                required    = False,
+                default     = True,
+                doc         = """
+                Specifies whether the scheduler should take steps to
+                detect and avoid redundant transfers (e.g., if two leases are
+                scheduled on the same node, and they both require the same disk
+                image, don't transfer the image twice; allow one to "piggyback"
+                on the other). There is generally no reason to set this option
+                to False.
+                """),
+
+         Option(name        = "force-imagetransfer-time",
+                getter      = "force-imagetransfer-time",
+                type        = OPTTYPE_TIMEDELTA,
+                required    = False,
+                doc         = """
+                Documentation                
+                """),
+                
+         Option(name        = "diskimage-reuse",
+                getter      = "diskimage-reuse",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                required_if = None,
+                default     = constants.REUSE_NONE,
+                valid       = [constants.REUSE_NONE,
+                               constants.REUSE_IMAGECACHES],
+                doc         = """
+                Specifies whether disk image caches should be created
+                on the nodes, so the scheduler can reduce the number of transfers
+                by reusing images. Valid values are:
+                
+                 - none: No image reuse
+                 - image-caches: Use image caching algorithm described in Haizea
+                   publications
+                """),
+
+         Option(name        = "diskimage-cache-size",
+                getter      = "diskimage-cache-size",
+                type        = OPTTYPE_INT,
+                required    = False,
+                required_if = [(("deploy-imagetransfer","diskimage-reuse"),True)],
+                doc         = """
+                Specifies the size (in MB) of the disk image cache on
+                each physical node.                
+                """)
+        ]
+        sections.append(imgtransfer)
+
+        # ============================= #
+        #                               #
+        #      TRACEFILE OPTIONS        #
+        #                               #
+        # ============================= #
+
+        tracefile = Section("tracefile", required=False)
+        tracefile.options = \
+        [
+         Option(name        = "tracefile",
+                getter      = "tracefile",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                doc         = """
+                Path to tracefile to use.                
+                """),
+
+         Option(name        = "imagefile",
+                getter      = "imagefile",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                doc         = """
+                Path to list of images to append to lease requests.
+                If omitted, the images in the tracefile are used.                
+                """),
+
+         Option(name        = "injectionfile",
+                getter      = "injectionfile",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                doc         = """
+                Path to file with leases to "inject" into the tracefile.                
+                """),      
+
+         Option(name        = "add-overhead",
+                getter      = "add-overhead",
+                type        = OPTTYPE_STRING,
+                required    = False,
+                default     = constants.RUNTIMEOVERHEAD_NONE,
+                valid       = [constants.RUNTIMEOVERHEAD_NONE,
+                               constants.RUNTIMEOVERHEAD_ALL,
+                               constants.RUNTIMEOVERHEAD_BE],
+                doc         = """
+                Documentation                
+                """),   
+
+         Option(name        = "bootshutdown-overhead",
+                getter      = "bootshutdown-overhead",
+                type        = OPTTYPE_TIMEDELTA,
+                required    = False,
+                default     = TimeDelta(seconds=0),
+                doc         = """
+                Specifies how many seconds will be allotted to
+                booting and shutting down the lease.
+                """),      
+
+         Option(name        = "runtime-slowdown-overhead",
+                getter      = "runtime-slowdown-overhead",
+                type        = OPTTYPE_FLOAT,
+                required    = False,
+                default     = 0,
+                doc         = """
+                Adds a runtime overhead (in %) to the lease duration.                
+                """)
+                      
+        ]
+        sections.append(tracefile)
+        
+        # ============================= #
+        #                               #
+        #      OPENNEBULA OPTIONS       #
+        #                               #
+        # ============================= #
+
+        opennebula = Section("opennebula", required=False,
+                             required_if = [(("general","mode"),"opennebula")])
+        opennebula.options = \
+        [
+         Option(name        = "db",
+                getter      = "one.db",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                doc         = """
+                Location of OpenNebula database.                
+                """),
+
+         Option(name        = "onevm",
+                getter      = "onevm",
+                type        = OPTTYPE_STRING,
+                required    = True,
+                doc         = """
+                Location of OpenNebula "onevm" command.                
+                """),
+
+         Option(name        = "suspendresume-rate-estimate",
+                getter      = "one.suspendresume-rate-estimate",
+                type        = OPTTYPE_FLOAT,
+                required    = False,
+                default     = 32,
+                doc         = """
+                Rate at which VMs are estimated to suspend and resume
+                (in MB of memory per second).
+                """),
+
+         Option(name        = "non-schedulable-interval",
+                getter      = "non-schedulable-interval",
+                type        = OPTTYPE_TIMEDELTA,
+                required    = False,
+                default     = TimeDelta(seconds=10),
+                doc         = """
+                The minimum amount of time that must pass between
+                when a request is scheduled and when it can actually start.
+                The default should be good for most configurations, but
+                may need to be increased if you're dealing with exceptionally
+                high loads.                
+                """)
+        ]
+        sections.append(opennebula)
+
+        Config.__init__(self, config, sections)
+        
+
+
+class HaizeaMultiConfig(Config):
+    def __init__(self, config):
+        Config.__init__(self, config)
+        
+    def getProfiles(self):
+        sections = set([s.split(":")[0] for s in self.config.sections()])
+        # Remove multi and common sections
+        sections.difference_update([constants.COMMON_SEC, constants.MULTI_SEC])
+        return list(sections)
+    
+    def getProfilesSubset(self, sec):
+        profiles = self.config.get(sec, constants.PROFILES_OPT)
+        if profiles == "ALL":
+            profiles = self.getProfiles()
+        else:
+            profiles = profiles.split()
+        return profiles
+
+    def getTracesSubset(self, sec):
+        traces = self.config.get(sec, constants.TRACES_OPT)
+        if traces == "ALL":
+            traces = [os.path.basename(t) for t in self.getTracefiles()]
+        else:
+            traces = traces.split()
+            
+        return traces
+
+    def getInjSubset(self, sec):
+        injs = self.config.get(sec, constants.INJS_OPT)
+        if injs == "ALL":
+            injs = [os.path.basename(t) for t in self.getInjectfiles() if t!=None]
+            injs.append(None)
+        elif injs == "NONE":
+            injs = [None]
+        else:
+            injs = injs.split()
+        return injs
+
+    def getTracefiles(self):
+        dir = self.config.get(constants.MULTI_SEC, constants.TRACEDIR_OPT)
+        traces = self.config.get(constants.MULTI_SEC, constants.TRACEFILES_OPT).split()
+        return [dir + "/" + t for t in traces]
+
+    def getInjectfiles(self):
+        dir = self.config.get(constants.MULTI_SEC, constants.INJDIR_OPT)
+        inj = self.config.get(constants.MULTI_SEC, constants.INJFILES_OPT).split()
+        inj = [dir + "/" + i for i in inj]
+        inj.append(None)
+        return inj
+    
+    def getConfigs(self):
+        profiles = self.getProfiles()
+        tracefiles = self.getTracefiles()
+        injectfiles = self.getInjectfiles()
+
+        configs = []
+        for profile in profiles:
+            for tracefile in tracefiles:
+                for injectfile in injectfiles:
+                    profileconfig = ConfigParser.ConfigParser()
+                    commonsections = [s for s in self.config.sections() if s.startswith("common:")]
+                    profilesections = [s for s in self.config.sections() if s.startswith(profile +":")]
+                    sections = commonsections + profilesections
+                    for s in sections:
+                        s_noprefix = s.split(":")[1]
+                        items = self.config.items(s)
+                        if not profileconfig.has_section(s_noprefix):
+                            profileconfig.add_section(s_noprefix)
+                        for item in items:
+                            profileconfig.set(s_noprefix, item[0], item[1])
+                            
+                    # The tracefile section may not have been created yet
+                    if not profileconfig.has_section(constants.TRACEFILE_SEC):
+                        profileconfig.add_section(constants.TRACEFILE_SEC)
+
+                    # Add tracefile option
+                    profileconfig.set(constants.TRACEFILE_SEC, constants.TRACEFILE_OPT, tracefile)
+                    
+                    # Add injected file option
+                    if injectfile == None:
+                        inj = "None"
+                    else:
+                        inj = injectfile
+                    profileconfig.set(constants.TRACEFILE_SEC, constants.INJFILE_OPT, inj)
+
+                    # Add datadir option
+                    datadirname = genDataDirName(profile, tracefile, injectfile)
+                    basedatadir = self.config.get(constants.MULTI_SEC, constants.BASEDATADIR_OPT)
+                    # TODO: Change this so there will be a single directory with all the
+                    # data files, instead of multiple directories
+                    datafile = basedatadir + "/" + datadirname + "/haizea.dat"
+                    profileconfig.set(constants.GENERAL_SEC, constants.DATAFILE_OPT, datafile)
+                    
+                    # Set profile option (only used internally)
+                    profileconfig.set(constants.GENERAL_SEC, constants.PROFILE_OPT, profile)
+                    
+                    c = HaizeaConfig(profileconfig)
+                    configs.append(c)
+        
+        return configs
+
+            
+    def getConfigsToRun(self):
+        configs = self.getConfigs()
+        
+        # TODO: Come up with a new way to filter what gets run or not
+        #profiles = self.getProfilesSubset(constants.RUN_SEC)
+        #traces = self.getTracesSubset(constants.RUN_SEC)
+        #injs = self.getInjSubset(constants.RUN_SEC)
+        
+#        confs = []
+#        for c in configs:
+#            p = c.getProfile()
+#            t = os.path.basename(c.getTracefile())
+#            i = c.getInjectfile()
+#            if i != None: 
+#                i = os.path.basename(i)
+#
+#            if p in profiles and t in traces and i in injs:
+#                confs.append(c)
+#
+#        return confs
+        return configs
\ No newline at end of file
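
As a point of reference, the options declared above are read back through a
single generic accessor rather than one getter method per option. The
following sketch is illustrative only (it is not part of this changeset);
it assumes the HaizeaConfig.from_file()/get() API and the sample config
path used elsewhere in this revision:

    import sys
    from haizea.resourcemanager.configfile import HaizeaConfig
    from haizea.common.config import ConfigException
    from haizea.common import constants

    try:
        # Load and validate the configuration file
        config = HaizeaConfig.from_file("../../../etc/sample.conf")
    except ConfigException, msg:
        print >> sys.stderr, "Error in configuration file: %s" % msg
        sys.exit(1)

    # Options are addressed by their "getter" name, independently of section
    reusealg = config.get("diskimage-reuse")
    if reusealg == constants.REUSE_IMAGECACHES:
        # diskimage-cache-size is only meaningful when caching is enabled
        # (see its required_if declaration above)
        print "Cache size: %i MB" % config.get("diskimage-cache-size")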

Modified: trunk/src/haizea/resourcemanager/datastruct.py
===================================================================
--- trunk/src/haizea/resourcemanager/datastruct.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/datastruct.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -39,7 +39,7 @@
   * Duration: A wrapper around requested/accumulated/actual durations
 """
 
-from haizea.common.constants import state_str, rstate_str, DS, RES_STATE_SCHEDULED, RES_STATE_ACTIVE, RES_MEM, MIGRATE_NONE, MIGRATE_MEM, MIGRATE_MEMVM, TRANSFER_NONE
+from haizea.common.constants import state_str, rstate_str, DS, RES_STATE_SCHEDULED, RES_STATE_ACTIVE, RES_MEM, MIGRATE_NONE, MIGRATE_MEM, MIGRATE_MEMDISK
 from haizea.common.utils import roundDateTimeDelta, get_lease_id, pretty_nodemap, estimate_transfer_time, xmlrpc_marshall_singlevalue
 
 from operator import attrgetter
@@ -175,26 +175,26 @@
     
     # TODO: Factor out into deployment modules
     def estimate_image_transfer_time(self, bandwidth):
-        forceTransferTime = self.scheduler.rm.config.getForceTransferTime()
+        forceTransferTime = self.scheduler.rm.config.get("force-imagetransfer-time")
         if forceTransferTime != None:
             return forceTransferTime
         else:      
             return estimate_transfer_time(self.diskimage_size, bandwidth)
         
     def estimate_migration_time(self, bandwidth):
-        whattomigrate = self.scheduler.rm.config.getMustMigrate()
+        whattomigrate = self.scheduler.rm.config.get("what-to-migrate")
         if whattomigrate == MIGRATE_NONE:
             return TimeDelta(seconds=0)
         else:
             if whattomigrate == MIGRATE_MEM:
                 mbtotransfer = self.requested_resources.get_by_type(RES_MEM)
-            elif whattomigrate == MIGRATE_MEMVM:
+            elif whattomigrate == MIGRATE_MEMDISK:
                 mbtotransfer = self.diskimage_size + self.requested_resources.get_by_type(RES_MEM)
             return estimate_transfer_time(mbtotransfer, bandwidth)
         
     # TODO: This whole function has to be rethought
     def get_suspend_threshold(self, initial, suspendrate, migrating=False, bandwidth=None):
-        threshold = self.scheduler.rm.config.getSuspendThreshold()
+        threshold = self.scheduler.rm.config.get("force-suspend-threshold")
         if threshold != None:
             # If there is a hard-coded threshold, use that
             return threshold
@@ -216,8 +216,8 @@
                     threshold = self.estimate_suspend_resume_time(suspendrate) * 2
             else:
                 #threshold = self.scheduler.rm.config.getBootOverhead() + deploytime + self.estimateSuspendResumeTime(suspendrate)
-                threshold = self.scheduler.rm.config.getBootOverhead() + self.estimate_suspend_resume_time(suspendrate)
-            factor = self.scheduler.rm.config.getSuspendThresholdFactor() + 1
+                threshold = self.scheduler.rm.config.get("bootshutdown-overhead") + self.estimate_suspend_resume_time(suspendrate)
+            factor = self.scheduler.rm.config.get("suspend-threshold-factor") + 1
             return roundDateTimeDelta(threshold * factor)
         
     def xmlrpc_marshall(self):
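
The what-to-migrate change above (MIGRATE_MEMDISK replacing MIGRATE_MEMVM)
determines how much data estimate_migration_time() accounts for. A rough,
illustrative sketch of the arithmetic, assuming estimate_transfer_time() is
approximately size divided by bandwidth (the exact rounding lives in
haizea.common.utils), with made-up numbers:

    # Hypothetical lease: 1024 MB of memory, 4096 MB disk image, 100 MB/s link
    mem_mb       = 1024.0
    diskimage_mb = 4096.0
    bandwidth    = 100.0   # MB/s

    # what-to-migrate = MIGRATE_MEM: only memory is transferred
    mem_only_secs = mem_mb / bandwidth                     # ~10 s

    # what-to-migrate = MIGRATE_MEMDISK: memory plus the disk image
    mem_disk_secs = (mem_mb + diskimage_mb) / bandwidth    # ~51 s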

Modified: trunk/src/haizea/resourcemanager/deployment/imagetransfer.py
===================================================================
--- trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/deployment/imagetransfer.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -46,9 +46,9 @@
         
     def schedule_for_ar(self, lease, vmrr, nexttime):
         config = self.scheduler.rm.config
-        mechanism = config.get_transfer_mechanism()
-        reusealg = config.getReuseAlg()
-        avoidredundant = config.isAvoidingRedundantTransfers()
+        mechanism = config.get("transfer-mechanism")
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
         
         lease.state = constants.LEASE_STATE_SCHEDULED
         
@@ -104,9 +104,9 @@
 
     def schedule_for_besteffort(self, lease, vmrr, nexttime):
         config = self.scheduler.rm.config
-        mechanism = config.get_transfer_mechanism()
-        reusealg = config.getReuseAlg()
-        avoidredundant = config.isAvoidingRedundantTransfers()
+        mechanism = config.get("transfer-mechanism")
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
         earliest = self.find_earliest_starting_times(lease, nexttime)
         lease.state = constants.LEASE_STATE_SCHEDULED
         transferRRs = []
@@ -151,9 +151,9 @@
     def find_earliest_starting_times(self, lease_req, nexttime):
         nodIDs = [n.nod_id for n in self.resourcepool.getNodes()]  
         config = self.scheduler.rm.config
-        mechanism = config.get_transfer_mechanism()       
-        reusealg = config.getReuseAlg()
-        avoidredundant = config.isAvoidingRedundantTransfers()
+        mechanism = config.get("transfer-mechanism")
+        reusealg = config.get("diskimage-reuse")
+        avoidredundant = config.get("avoid-redundant-transfers")
         
         # Figure out starting time assuming we have to transfer the image
         nextfifo = self.getNextFIFOTransferTime(nexttime)
@@ -302,7 +302,7 @@
         bandwidth = self.resourcepool.imagenode_bandwidth
         imgTransferTime=req.estimate_image_transfer_time(bandwidth)
         config = self.scheduler.rm.config
-        mechanism = config.get_transfer_mechanism()  
+        mechanism = config.get("transfer-mechanism")
         startTime = self.getNextFIFOTransferTime(nexttime)
         
         newtransfers = []

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -30,10 +30,10 @@
         ResourcePoolInfoBase.__init__(self, resourcepool)
         config = self.resourcepool.rm.config
         self.logger = self.resourcepool.rm.logger
-        self.suspendresumerate = config.getONESuspendResumeRate()
+        self.suspendresumerate = config.get("one.suspendresume-rate-estimate")
 
         # Get information about nodes from DB
-        conn = sqlite.connect(config.getONEDB())
+        conn = sqlite.connect(config.get("one.db"))
         conn.row_factory = sqlite.Row
         
         self.nodes = []

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -24,9 +24,9 @@
 class VMEnactment(VMEnactmentBase):
     def __init__(self, resourcepool):
         VMEnactmentBase.__init__(self, resourcepool)
-        self.onevm = self.resourcepool.rm.config.getONEvm()
+        self.onevm = self.resourcepool.rm.config.get("onevm")
         
-        self.conn = sqlite.connect(self.resourcepool.rm.config.getONEDB())
+        self.conn = sqlite.connect(self.resourcepool.rm.config.get("one.db"))
         self.conn.row_factory = sqlite.Row
 
         

Modified: trunk/src/haizea/resourcemanager/enact/simulated/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/enact/simulated/info.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -25,12 +25,12 @@
     def __init__(self, resourcepool):
         ResourcePoolInfoBase.__init__(self, resourcepool)
         config = self.resourcepool.rm.config
-        self.suspendresumerate = config.getSuspendResumeRate()
+        self.suspendresumerate = config.get("simul.suspendresume-rate")
                 
-        numnodes = config.getNumPhysicalNodes()
-        self.bandwidth = config.getBandwidth()        
+        numnodes = config.get("simul.nodes")
+        self.bandwidth = config.get("imagetransfer-bandwidth")
 
-        capacity = self.parseResourcesString(config.getResourcesPerPhysNode())
+        capacity = self.parse_resources_string(config.get("simul.resources"))
         
         self.nodes = [Node(self.resourcepool, i+1, "simul-%i" % (i+1), capacity) for i in range(numnodes)]
         for n in self.nodes:
@@ -59,7 +59,8 @@
                 (constants.RES_NETIN, constants.RESTYPE_INT, "Net (in)"),
                 (constants.RES_NETOUT, constants.RESTYPE_INT, "Net (out)")]
         
-    def parseResourcesString(self, resources):
+    def parse_resources_string(self, resources):
+        resources = resources.split(";")
         desc2type = dict([(x[2], x[0]) for x in self.getResourceTypes()])
         capacity=ds.ResourceTuple.create_empty()
         for r in resources:

Modified: trunk/src/haizea/resourcemanager/frontends/opennebula.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -47,7 +47,7 @@
         self.logger = self.rm.logger
         config = self.rm.config
 
-        self.conn = sqlite.connect(config.getONEDB())
+        self.conn = sqlite.connect(config.get("one.db"))
         self.conn.row_factory = sqlite.Row
         
     def getAccumulatedRequests(self):

Modified: trunk/src/haizea/resourcemanager/frontends/tracefile.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/tracefile.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/frontends/tracefile.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -29,9 +29,9 @@
         
         config = rm.config
 
-        tracefile = config.getTracefile()
-        injectfile = config.getInjectfile()
-        imagefile = config.getImagefile()
+        tracefile = config.get("tracefile")
+        injectfile = config.get("injectionfile")
+        imagefile = config.get("imagefile")
         
         # Read trace file
         # Requests is a list of lease requests
@@ -58,20 +58,17 @@
                 r.resreq.setByType(constants.RES_DISK, imagesizes[i] + r.resreq.getByType(constants.RES_MEM))
         
         # Add runtime overhead, if necessary
-        overhead = config.getRuntimeOverhead()
-        if overhead != None:
+        add_overhead = config.get("add-overhead")
+        
+        if add_overhead != constants.RUNTIMEOVERHEAD_NONE:
+            slowdown_overhead = config.get("runtime-slowdown-overhead")
+            boot_overhead = config.get("bootshutdown-overhead")
             for r in self.requests:
-                if isinstance(r,BestEffortLease):
-                    r.addRuntimeOverhead(overhead)
-                elif isinstance(r,ARLease):
-                    if not config.overheadOnlyBestEffort():
-                        r.addRuntimeOverhead(overhead)
+                if add_overhead == constants.RUNTIMEOVERHEAD_ALL or (add_overhead == constants.RUNTIMEOVERHEAD_BE and isinstance(r,BestEffortLease)):
+                    if slowdown_overhead != 0:
+                        r.add_runtime_overhead(slowdown_overhead)
+                    r.add_boot_overhead(boot_overhead)
 
-        # Add boot + shutdown overhead
-        overhead = config.getBootOverhead()
-        for r in self.requests:
-            r.add_boot_overhead(overhead)
-
         # Make the scheduler reachable from the lease request
         for r in self.requests:
             r.set_scheduler(rm.scheduler)
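
The rewritten overhead loop above applies a percentage slowdown plus a fixed
boot/shutdown allowance, either to every lease or only to best-effort leases,
depending on the add-overhead option. A small sketch of the resulting
arithmetic, illustrative only and assuming add_runtime_overhead(p) scales the
requested duration by (1 + p/100), which is not shown in this diff; TimeDelta
is the mx.DateTime type Haizea relies on:

    from mx.DateTime import TimeDelta

    requested         = TimeDelta(hours=1)      # duration requested in the trace
    slowdown_overhead = 10                      # runtime-slowdown-overhead, in %
    boot_overhead     = TimeDelta(seconds=20)   # bootshutdown-overhead

    with_slowdown = requested * (1 + slowdown_overhead / 100.0)
    total = with_slowdown + boot_overhead       # 1:06:20 for these values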

Modified: trunk/src/haizea/resourcemanager/log.py
===================================================================
--- trunk/src/haizea/resourcemanager/log.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/log.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -41,7 +41,7 @@
                     "EXTREMEDEBUG": 5,
                     "NOTSET": 0}
         
-        level = self.rm.config.getLogLevel()
+        level = self.rm.config.get("loglevel")
         self.logger.setLevel(self.loglevel[level])
         self.extremedebug = (level == "EXTREMEDEBUG")
 

Modified: trunk/src/haizea/resourcemanager/resourcepool.py
===================================================================
--- trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/resourcepool.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -39,14 +39,14 @@
         
         self.imagenode_bandwidth = self.info.get_bandwidth()
         
-        self.reusealg = self.rm.config.getReuseAlg()
+        self.reusealg = self.rm.config.get("diskimage-reuse")
         if self.reusealg == constants.REUSE_IMAGECACHES:
-            self.maxcachesize = self.rm.config.getMaxCacheSize()
+            self.maxcachesize = self.rm.config.get("diskimage-cache-size")
         else:
             self.maxcachesize = None
             
     def loadEnactmentModules(self):
-        mode = self.rm.config.getMode()
+        mode = self.rm.config.get("mode")
         try:
             exec "import %s.%s as enact" % (constants.ENACT_PACKAGE, mode)
             self.info = enact.info(self) #IGNORE:E0602
@@ -78,7 +78,7 @@
             taintedImage = None
             
             # TODO: Factor this out
-            lease_deployment_type = self.rm.config.get_lease_deployment_type()
+            lease_deployment_type = self.rm.config.get("lease-preparation")
             if lease_deployment_type == constants.DEPLOYMENT_UNMANAGED:
                 # If we assume predeployment, we mark that there is a new
                 # tainted image, but there is no need to go to the enactment
@@ -277,7 +277,7 @@
         
     def checkImage(self, pnode, lease_id, vnode, imagefile):
         node = self.getNode(pnode)
-        if self.rm.config.getTransferType() == constants.TRANSFER_NONE:
+        if self.rm.config.get("lease-preparation") == constants.DEPLOYMENT_UNMANAGED:
             self.rm.logger.debug("Adding tainted image for L%iV%i in node %i" % (lease_id, vnode, pnode), constants.ENACT)
         elif self.reusealg == constants.REUSE_NONE:
             if not node.hasTaintedImage(lease_id, vnode, imagefile):

Modified: trunk/src/haizea/resourcemanager/rm.py
===================================================================
--- trunk/src/haizea/resourcemanager/rm.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/rm.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -76,8 +76,8 @@
         
         # Create the RM components
         
-        mode = config.getMode()
-        clock = config.getClock()
+        mode = config.get("mode")
+        clock = config.get("clock")
 
         if mode == "simulated" and clock == constants.CLOCK_SIMULATED:
             # Simulations always run in the foreground
@@ -85,7 +85,7 @@
             # Logger
             self.logger = Logger(self)
             # The clock
-            starttime = config.getInitialTime()
+            starttime = config.get("starttime")
             self.clock = SimulatedClock(self, starttime)
             self.rpc_server = None
         elif mode == "opennebula" or (mode == "simulated" and clock == constants.CLOCK_REAL):
@@ -98,8 +98,8 @@
                 self.logger = Logger(self)
 
             # The clock
-            wakeup_interval = config.get_wakeup_interval()
-            non_sched = config.get_non_schedulable_interval()
+            wakeup_interval = config.get("wakeup-interval")
+            non_sched = config.get("non-schedulable-interval")
             self.clock = RealClock(self, wakeup_interval, non_sched)
             
             # RPC server
@@ -122,7 +122,7 @@
             self.frontends = [OpenNebulaFrontend(self)]
             
         # Statistics collection 
-        self.stats = stats.StatsCollection(self, self.config.getDataFile())
+        self.stats = stats.StatsCollection(self, self.config.get("datafile"))
 
     def daemonize(self):
         """Daemonizes the Haizea process.
@@ -385,7 +385,7 @@
         self.starttime = starttime
         self.time = starttime
         self.logger = self.rm.logger
-        self.statusinterval = self.rm.config.getStatusMessageInterval()
+        self.statusinterval = self.rm.config.get("status-message-interval")
        
     def get_time(self):
         """See docstring in base Clock class."""
@@ -515,7 +515,7 @@
         
         # We can also be done if we've specified that we want to stop when
         # the best-effort requests are all done or when they've all been submitted.
-        stopwhen = self.rm.config.stopWhen()
+        stopwhen = self.rm.config.get("stop-when")
         scheduledbesteffort = self.rm.scheduler.scheduledleases.get_leases(type = BestEffortLease)
         pendingbesteffort = [r for r in tracefrontend.requests if isinstance(r, BestEffortLease)]
         if stopwhen == constants.STOPWHEN_BEDONE:
@@ -673,8 +673,14 @@
         sys.exit()
 
 if __name__ == "__main__":
-    from haizea.common.config import RMConfig
+    from haizea.resourcemanager.configfile import HaizeaConfig
+    from haizea.common.config import ConfigException
     CONFIGFILE = "../../../etc/sample.conf"
-    CONFIG = RMConfig.fromFile(CONFIGFILE)
+    try:
+        CONFIG = HaizeaConfig.from_file(CONFIGFILE)
+    except ConfigException, msg:
+        print >> sys.stderr, "Error in configuration file:"
+        print >> sys.stderr, msg
+        exit(1)        
     RM = ResourceManager(CONFIG)
     RM.start()
\ No newline at end of file

Modified: trunk/src/haizea/resourcemanager/scheduler.py
===================================================================
--- trunk/src/haizea/resourcemanager/scheduler.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/scheduler.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -96,7 +96,7 @@
                               on_start = Scheduler._handle_start_resume,
                               on_end   = Scheduler._handle_end_resume)
             
-        deploy_type = self.rm.config.get_lease_deployment_type()
+        deploy_type = self.rm.config.get("lease-preparation")
         if deploy_type == constants.DEPLOYMENT_UNMANAGED:
             self.deployment = UnmanagedDeployment(self)
         elif deploy_type == constants.DEPLOYMENT_PREDEPLOY:
@@ -104,7 +104,16 @@
         elif deploy_type == constants.DEPLOYMENT_TRANSFER:
             self.deployment = ImageTransferDeployment(self)
 
-        self.maxres = self.rm.config.getMaxReservations()
+        backfilling = self.rm.config.get("backfilling")
+        if backfilling == constants.BACKFILLING_OFF:
+            self.maxres = 0
+        elif backfilling == constants.BACKFILLING_AGGRESSIVE:
+            self.maxres = 1
+        elif backfilling == constants.BACKFILLING_CONSERVATIVE:
+            self.maxres = 1000000 # Arbitrarily large
+        elif backfilling == constants.BACKFILLING_INTERMEDIATE:
+            self.maxres = self.rm.config.get("backfilling-reservations")
+
         self.numbesteffortres = 0
     
     def schedule(self, nexttime):        
@@ -224,35 +233,27 @@
         self.rm.logger.debug("  Duration: %s" % lease_req.duration, constants.SCHED)
         self.rm.logger.debug("  ResReq  : %s" % lease_req.requested_resources, constants.SCHED)
         
-        if self.rm.config.getNodeSelectionPolicy() == constants.NODESELECTION_AVOIDPREEMPT:
-            avoidpreempt = True
-        else:
-            avoidpreempt = False
-
         accepted = False
         try:
-            self.__schedule_ar_lease(lease_req, avoidpreempt=avoidpreempt, nexttime=nexttime)
+            self.__schedule_ar_lease(lease_req, avoidpreempt=True, nexttime=nexttime)
             self.scheduledleases.add(lease_req)
             self.rm.stats.incr_counter(constants.COUNTER_ARACCEPTED, lease_req.id)
             accepted = True
         except SchedException, msg:
-            # If our first try avoided preemption, try again
+            # Our first try avoided preemption, try again
             # without avoiding preemption.
             # TODO: Roll this into the exact slot fitting algorithm
-            if avoidpreempt:
-                try:
-                    self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
-                    self.rm.logger.debug("LEASE-%i Trying again without avoiding preemption" % lease_req.id, constants.SCHED)
-                    self.__schedule_ar_lease(lease_req, nexttime, avoidpreempt=False)
-                    self.scheduledleases.add(lease_req)
-                    self.rm.stats.incr_counter(constants.COUNTER_ARACCEPTED, lease_req.id)
-                    accepted = True
-                except SchedException, msg:
-                    self.rm.stats.incr_counter(constants.COUNTER_ARREJECTED, lease_req.id)
-                    self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
-            else:
+            try:
+                self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
+                self.rm.logger.debug("LEASE-%i Trying again without avoiding preemption" % lease_req.id, constants.SCHED)
+                self.__schedule_ar_lease(lease_req, nexttime, avoidpreempt=False)
+                self.scheduledleases.add(lease_req)
+                self.rm.stats.incr_counter(constants.COUNTER_ARACCEPTED, lease_req.id)
+                accepted = True
+            except SchedException, msg:
                 self.rm.stats.incr_counter(constants.COUNTER_ARREJECTED, lease_req.id)
                 self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
+
         if accepted:
             self.rm.logger.info("AR lease request #%i has been accepted." % lease_req.id, constants.SCHED)
         else:
@@ -307,7 +308,7 @@
                     newqueue.enqueue(lease_req)
                     self.rm.logger.debug("LEASE-%i Scheduling exception: %s" % (lease_req.id, msg), constants.SCHED)
                     self.rm.logger.info("Lease %i could not be scheduled at this time." % lease_req.id, constants.SCHED)
-                    if not self.rm.config.isBackfilling():
+                    if not self.is_backfilling():
                         done = True
         
         for lease in self.queue:
@@ -327,13 +328,13 @@
             # (only intra-node transfer)
             earliest = dict([(node+1, [nexttime, constants.REQTRANSFER_NO, None]) for node in range(req.numnodes)])
             
-        susptype = self.rm.config.getSuspensionType()
+        susptype = self.rm.config.get("suspension")
         if susptype == constants.SUSPENSION_NONE or (susptype == constants.SUSPENSION_SERIAL and req.numnodes == 1):
             cansuspend = False
         else:
             cansuspend = True
 
-        canmigrate = self.rm.config.isMigrationAllowed()
+        canmigrate = self.rm.config.get("migration")
         try:
             mustresume = (req.state == constants.LEASE_STATE_SUSPENDED)
             canreserve = self.canReserveBestEffort()
@@ -436,11 +437,11 @@
             self.queue.enqueue_in_order(req)
             self.rm.stats.incr_counter(constants.COUNTER_QUEUESIZE, req.id)
         else:
-            susptype = self.rm.config.getSuspensionType()
+            susptype = self.rm.config.get("suspension")
             timebeforesuspend = time - vmrr.start
             # TODO: Determine if it is in fact the initial VMRR or not. Right now
             # we conservatively overestimate
-            canmigrate = self.rm.config.isMigrationAllowed()
+            canmigrate = self.rm.config.get("migration")
             suspendthreshold = req.get_suspend_threshold(initial=False, suspendrate=suspendresumerate, migrating=canmigrate)
             # We can't suspend if we're under the suspend threshold
             suspendable = timebeforesuspend >= suspendthreshold
@@ -568,7 +569,7 @@
         rr.end = self.rm.clock.get_time()
         self._handle_end_vm(l, rr, enact=enact)
         nexttime = self.rm.clock.get_next_schedulable_time()
-        if self.rm.config.isBackfilling():
+        if self.is_backfilling():
             # We need to reevaluate the schedule to see if there are any future
             # reservations that we can slide back.
             self.reevaluate_schedule(l, rr.nodes.values(), nexttime, [])
@@ -647,3 +648,6 @@
     def updateNodeTransferState(self, nodes, state, lease_id):
         for n in nodes:
             self.rm.resourcepool.getNode(n).transfer_doing = state
+            
+    def is_backfilling(self):
+        return self.maxres > 0
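
The max-reservations knob is now derived from a symbolic backfilling option
instead of being set directly. An equivalent, illustrative reformulation of
the mapping introduced above (not part of the diff):

    from haizea.common import constants

    def max_reservations(config):
        backfilling = config.get("backfilling")
        if backfilling == constants.BACKFILLING_OFF:
            return 0          # no future reservations: backfilling disabled
        elif backfilling == constants.BACKFILLING_AGGRESSIVE:
            return 1          # only the head of the queue gets a reservation
        elif backfilling == constants.BACKFILLING_CONSERVATIVE:
            return 1000000    # arbitrarily large, effectively unbounded
        elif backfilling == constants.BACKFILLING_INTERMEDIATE:
            return config.get("backfilling-reservations")

    # is_backfilling() then simply checks whether maxres > 0.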

Modified: trunk/src/haizea/resourcemanager/slottable.py
===================================================================
--- trunk/src/haizea/resourcemanager/slottable.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/resourcemanager/slottable.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -819,7 +819,9 @@
         
         nodes = canfit.keys()
         
-        reusealg = self.rm.config.getReuseAlg()
+        # TODO: The deployment module should just provide a list of nodes
+        # it prefers
+        reusealg = self.rm.config.get("diskimage-reuse")
         nodeswithimg=[]
         if reusealg==constants.REUSE_IMAGECACHES:
             nodeswithimg = self.rm.resourcepool.getNodesWithImgInPool(diskImageID, start)

Modified: trunk/src/haizea/traces/readers.py
===================================================================
--- trunk/src/haizea/traces/readers.py	2008-08-05 15:34:20 UTC (rev 464)
+++ trunk/src/haizea/traces/readers.py	2008-08-06 17:24:45 UTC (rev 465)
@@ -24,7 +24,7 @@
 def SWF(tracefile, config):
     file = open (tracefile, "r")
     requests = []
-    inittime = config.getInitialTime()
+    inittime = config.get("starttime")
     for line in file:
         if line[0]!=';':
             req = None



More information about the Haizea-commit mailing list