[haizea-commit] r430 - in trunk/src/haizea: resourcemanager resourcemanager/enact/opennebula resourcemanager/frontends traces

haizea-commit at mailman.cs.uchicago.edu
Thu Jul 10 11:58:55 CDT 2008


Author: borja
Date: 2008-07-10 11:58:55 -0500 (Thu, 10 Jul 2008)
New Revision: 430

Modified:
   trunk/src/haizea/resourcemanager/enact/opennebula/info.py
   trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
   trunk/src/haizea/resourcemanager/frontends/opennebula.py
   trunk/src/haizea/resourcemanager/resourcepool.py
   trunk/src/haizea/traces/readers.py
Log:
Tested (and fixed) the OpenNebula code: renamed the remaining camelCase attributes and accessors to snake_case (enactment_info, get_by_type/set_by_type, lease_haizea_id), renamed the HAIZEA_PREEMPTABLE option to HAIZEA_PREEMPTIBLE, and passed the new preemptible flag through to the lease constructors in the OpenNebula frontend and the trace readers.
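
For reference, a minimal sketch (not part of this commit) of the renamed snake_case calling convention the rest of this diff switches to; the ResourceTuple below is a hypothetical stub so the snippet runs without Haizea installed, only the call pattern is taken from the diff:

    # Hypothetical stand-in mirroring the renamed accessors; the real
    # ResourceTuple ships with Haizea.
    RES_CPU, RES_MEM, RES_DISK = 0, 1, 2

    class ResourceTuple(object):
        def __init__(self):
            self._values = {}

        @classmethod
        def create_empty(cls):
            return cls()

        def set_by_type(self, restype, value):
            self._values[restype] = value

        def get_by_type(self, restype):
            return self._values.get(restype)

    resreq = ResourceTuple.create_empty()
    resreq.set_by_type(RES_CPU, 1)      # one CPU per VM
    resreq.set_by_type(RES_MEM, 1024)   # MB
    resreq.set_by_type(RES_DISK, 600)   # MB (the arbitrary default used in readers.py)
    print resreq.get_by_type(RES_MEM)   # -> 1024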

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/info.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-07-10 15:36:37 UTC (rev 429)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/info.py	2008-07-10 16:58:55 UTC (rev 430)
@@ -56,7 +56,7 @@
                     capacity.set_by_type(oneattr2haizea[name], int(attr["value"]))
             capacity.set_by_type(constants.RES_CPU, capacity.get_by_type(constants.RES_CPU) / 100.0)
             node = Node(self.resourcepool, nod_id, hostname, capacity)
-            node.enactmentInfo = int(enactID)
+            node.enactment_info = int(enactID)
             self.nodes.append(node)
             
         self.logger.info("Fetched %i nodes from ONE db" % len(self.nodes), constants.ONE)
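
As a worked example of the CPU conversion kept in the hunk above (the value fetched from the ONE host record is divided by 100.0 before being stored as node capacity), assuming a host whose ONE record reports CPU=400:

    one_cpu_field = 400                 # CPU value as read from the ONE host record (assumed)
    haizea_cpu_capacity = one_cpu_field / 100.0
    print haizea_cpu_capacity           # -> 4.0, i.e. four full CPUs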

Modified: trunk/src/haizea/resourcemanager/enact/opennebula/vm.py
===================================================================
--- trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-07-10 15:36:37 UTC (rev 429)
+++ trunk/src/haizea/resourcemanager/enact/opennebula/vm.py	2008-07-10 16:58:55 UTC (rev 430)
@@ -39,14 +39,15 @@
     def start(self, action):
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             hostID = action.vnodes[vnode].pnode
             image = action.vnodes[vnode].diskimage
-            cpu = action.vnodes[vnode].resources.getByType(constants.RES_CPU)
-            memory = action.vnodes[vnode].resources.getByType(constants.RES_MEM)
+            cpu = action.vnodes[vnode].resources.get_by_type(constants.RES_CPU)
+            memory = action.vnodes[vnode].resources.get_by_type(constants.RES_MEM)
             
             self.logger.debug("Received request to start VM for L%iV%i on host %i, image=%s, cpu=%i, mem=%i"
-                         % (action.leaseHaizeaID, vnode, hostID, image, cpu, memory), constants.ONE)
+                         % (action.lease_haizea_id, vnode, hostID, image, cpu, memory), constants.ONE)
+
             cmd = "%s deploy %i %i" % (self.onevm, vmid, hostID)
             status = self.runCommand(cmd)
             if status == 0:
@@ -57,7 +58,7 @@
     def stop(self, action):
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             cmd = "%s shutdown %i" % (self.onevm, vmid)
             status = self.runCommand(cmd)
             if status == 0:
@@ -68,7 +69,7 @@
     def suspend(self, action):
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             cmd = "%s suspend %i" % (self.onevm, vmid)
             status = self.runCommand(cmd)
             if status == 0:
@@ -79,7 +80,7 @@
     def resume(self, action):
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             cmd = "%s resume %i" % (self.onevm, vmid)
             status = self.runCommand(cmd)
             if status == 0:
@@ -92,13 +93,13 @@
         result = 0
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             cur = self.conn.cursor()
             cur.execute("select state from vmpool where oid = %i" % vmid)
             onevm = cur.fetchone()        
             state = onevm["state"]
             if state == 5:
-                self.logger.debug("Suspend of L%iV%i correct." % (action.leaseHaizeaID, vnode), constants.ONE)
+                self.logger.debug("Suspend of L%iV%i correct." % (action.lease_haizea_id, vnode), constants.ONE)
             else:
                 self.logger.warning("ONE did not complete suspend  of L%i%V%i on time. State is %i" % (action.leaseHaizeaID, vnode, state), constants.ONE)
                 result = 1
@@ -109,13 +110,13 @@
         result = 0
         for vnode in action.vnodes:
             # Unpack action
-            vmid = action.vnodes[vnode].enactmentInfo
+            vmid = action.vnodes[vnode].enactment_info
             cur = self.conn.cursor()
             cur.execute("select state from vmpool where oid = %i" % vmid)
             onevm = cur.fetchone()        
             state = onevm["state"]
             if state == 3:
-                self.logger.debug("Suspend of L%iV%i correct." % (action.leaseHaizeaID, vnode), constants.ONE)
+                self.logger.debug("Suspend of L%iV%i correct." % (action.lease_haizea_id, vnode), constants.ONE)
             else:
                 self.logger.warning("ONE did not complete resume of L%i%V%i on time. State is %i" % (action.leaseHaizeaID, vnode, state), constants.ONE)
                 result = 1
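
All of the enactment actions above follow the same pattern: build an onevm command line, run it, and treat a zero exit status as success. A standalone sketch of that pattern using subprocess (the real code goes through self.runCommand, whose implementation is not part of this diff; the onevm path below is an assumption):

    import subprocess

    ONEVM = "onevm"   # assumed to be on the PATH; the real class keeps the path in self.onevm

    def deploy_vm(vmid, host_id):
        # Mirrors: cmd = "%s deploy %i %i" % (self.onevm, vmid, hostID)
        cmd = "%s deploy %i %i" % (ONEVM, vmid, host_id)
        status = subprocess.call(cmd, shell=True)
        if status == 0:
            print "'%s' succeeded" % cmd
        else:
            print "'%s' failed with status %i" % (cmd, status)
        return status

    # deploy_vm(42, 3) would run: onevm deploy 42 3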

Modified: trunk/src/haizea/resourcemanager/frontends/opennebula.py
===================================================================
--- trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-07-10 15:36:37 UTC (rev 429)
+++ trunk/src/haizea/resourcemanager/frontends/opennebula.py	2008-07-10 16:58:55 UTC (rev 430)
@@ -30,9 +30,9 @@
 HAIZEA_START_BESTEFFORT = "best_effort"
 HAIZEA_DURATION = "duration"
 HAIZEA_DURATION_UNLIMITED = "unlimited"
-HAIZEA_PREEMPTABLE = "preemptable"
-HAIZEA_PREEMPTABLE_YES = "yes"
-HAIZEA_PREEMPTABLE_NO = "no"
+HAIZEA_PREEMPTIBLE = "preemptible"
+HAIZEA_PREEMPTIBLE_YES = "yes"
+HAIZEA_PREEMPTIBLE_NO = "no"
 
 ONE_CPU="CPU"
 ONE_MEMORY="MEMORY"
@@ -96,25 +96,28 @@
             duration = DateTimeDelta(36500)
         else:
             duration = ISO.ParseTimeDelta(duration)
+            
+        preemptible = haizea_param[HAIZEA_PREEMPTIBLE]
+        preemptible = (preemptible == HAIZEA_PREEMPTIBLE_YES)
 
-        return tSubmit, vmimage, vmimagesize, numnodes, resreq, duration
+        return tSubmit, vmimage, vmimagesize, numnodes, resreq, duration, preemptible
     
     def create_besteffort_lease(self, req, attrs, haizea_param):
-        tSubmit, vmimage, vmimagesize, numnodes, resreq, duration = self.get_common_attrs(req, attrs, haizea_param)
+        tSubmit, vmimage, vmimagesize, numnodes, resreq, duration, preemptible = self.get_common_attrs(req, attrs, haizea_param)
  
-        leasereq = BestEffortLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq)
+        leasereq = BestEffortLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
         leasereq.state = constants.LEASE_STATE_PENDING
         # Enactment info should be changed to the "array id" when groups
         # are implemented in OpenNebula
-        leasereq.enactmentInfo = int(req["oid"])
+        leasereq.enactment_info = int(req["oid"])
         # Only one node for now
-        leasereq.vnodeEnactmentInfo = {}
-        leasereq.vnodeEnactmentInfo[1] = int(req["oid"])
+        leasereq.vnode_enactment_info = {}
+        leasereq.vnode_enactment_info[1] = int(req["oid"])
         leasereq.set_scheduler(self.rm.scheduler)
         return leasereq
     
     def create_ar_lease(self, req, attrs, haizea_param):
-        tSubmit, vmimage, vmimagesize, numnodes, resreq, duration = self.get_common_attrs(req, attrs, haizea_param)
+        tSubmit, vmimage, vmimagesize, numnodes, resreq, duration, preemptible = self.get_common_attrs(req, attrs, haizea_param)
 
         start = haizea_param[HAIZEA_START]
         if start[0] == "+":
@@ -124,14 +127,14 @@
             start = roundDateTime(self.rm.clock.get_time() + ISO.ParseTime(start[1:]))
         else:
             start = ISO.ParseDateTime(start)
-        leasereq = ARLease(tSubmit, start, duration, vmimage, vmimagesize, numnodes, resreq)
+        leasereq = ARLease(tSubmit, start, duration, vmimage, vmimagesize, numnodes, resreq, preemptible)
         leasereq.state = constants.LEASE_STATE_PENDING
         # Enactment info should be changed to the "array id" when groups
         # are implemented in OpenNebula
        leasereq.enactment_info = int(req["oid"])
         # Only one node for now
-        leasereq.vnodeEnactmentInfo = {}
-        leasereq.vnodeEnactmentInfo[1] = int(req["oid"])
+        leasereq.vnode_enactment_info = {}
+        leasereq.vnode_enactment_info[1] = int(req["oid"])
         leasereq.set_scheduler(self.rm.scheduler)
         return leasereq
         
\ No newline at end of file
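
The frontend now parses the renamed HAIZEA_PREEMPTIBLE attribute into a boolean before handing it to the lease constructors. A standalone sketch of just that step (the constants mirror the hunk above; the dict literals are made up for illustration):

    HAIZEA_PREEMPTIBLE = "preemptible"
    HAIZEA_PREEMPTIBLE_YES = "yes"
    HAIZEA_PREEMPTIBLE_NO = "no"

    def parse_preemptible(haizea_param):
        # Mirrors: preemptible = (preemptible == HAIZEA_PREEMPTIBLE_YES)
        return haizea_param[HAIZEA_PREEMPTIBLE] == HAIZEA_PREEMPTIBLE_YES

    print parse_preemptible({HAIZEA_PREEMPTIBLE: HAIZEA_PREEMPTIBLE_YES})   # -> True
    print parse_preemptible({HAIZEA_PREEMPTIBLE: HAIZEA_PREEMPTIBLE_NO})    # -> False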

Modified: trunk/src/haizea/resourcemanager/resourcepool.py
===================================================================
--- trunk/src/haizea/resourcemanager/resourcepool.py	2008-07-10 15:36:37 UTC (rev 429)
+++ trunk/src/haizea/resourcemanager/resourcepool.py	2008-07-10 16:58:55 UTC (rev 430)
@@ -110,9 +110,9 @@
                         taintedImage = self.addTaintedImageToNode(pnode, lease.diskimage_id, lease.diskimage_size, lease.id, vnode)
                         # ENACTMENT
                         # self.storage.createCopyFromCache(pnode, lease.diskImageSize)
-            startAction.vnodes[vnode].pnode = node.enactmentInfo
+            startAction.vnodes[vnode].pnode = node.enactment_info
             startAction.vnodes[vnode].diskimage = taintedImage.filename
-            startAction.vnodes[vnode].res = rr.resources_in_pnode[pnode]
+            startAction.vnodes[vnode].resources = rr.resources_in_pnode[pnode]
 
         self.vm.start(startAction)
         
@@ -341,7 +341,7 @@
         self.workingspacesize = 0
         self.capacity = capacity
         # enactment-specific information
-        self.enactmentInfo = None
+        self.enactment_info = None
         # Kludgy way of keeping track of utilization
         self.transfer_doing = constants.DOING_IDLE
         self.vm_doing = constants.DOING_IDLE

Modified: trunk/src/haizea/traces/readers.py
===================================================================
--- trunk/src/haizea/traces/readers.py	2008-07-10 15:36:37 UTC (rev 429)
+++ trunk/src/haizea/traces/readers.py	2008-07-10 16:58:55 UTC (rev 430)
@@ -40,10 +40,10 @@
                 vmimage = "NOIMAGE"
                 vmimagesize = 600 # Arbitrary
                 numnodes = int(fields[7]) # 7: reqNProcs
-                resreq = ResourceTuple.createEmpty()
-                resreq.setByType(constants.RES_CPU, 1) # One CPU per VM, should be configurable
-                resreq.setByType(constants.RES_MEM, 1024) # Should be configurable
-                resreq.setByType(constants.RES_DISK, vmimagesize + 0) # Should be configurable
+                resreq = ResourceTuple.create_empty()
+                resreq.set_by_type(constants.RES_CPU, 1) # One CPU per VM, should be configurable
+                resreq.set_by_type(constants.RES_MEM, 1024) # Should be configurable
+                resreq.set_by_type(constants.RES_DISK, vmimagesize + 0) # Should be configurable
                 maxdur = TimeDelta(seconds=reqtime)
                 if runtime < 0 and status==5:
                     # This is a job that got cancelled while waiting in the queue
@@ -56,7 +56,8 @@
                     maxqueuetime = None
                 if realdur > maxdur:
                     realdur = maxdur
-                req = BestEffortLease(None, tSubmit, maxdur, vmimage, vmimagesize, numnodes, resreq, realdur, maxqueuetime, timeOnDedicated=realdur)
+                preemptible = True
+                req = BestEffortLease(tSubmit, maxdur, vmimage, vmimagesize, numnodes, resreq, preemptible, realdur)
                 req.state = constants.LEASE_STATE_PENDING
                 requests.append(req)
     return requests
@@ -90,14 +91,16 @@
         vmimage = entry.vmImage
         vmimagesize = entry.vmImageSize
         numnodes = entry.numNodes
-        resreq = ResourceTuple.createEmpty()
-        resreq.setByType(constants.RES_CPU, entry.CPU)
-        resreq.setByType(constants.RES_MEM, entry.mem)
-        resreq.setByType(constants.RES_DISK, vmimagesize + entry.disk)
+        resreq = ResourceTuple.create_empty()
+        resreq.set_by_type(constants.RES_CPU, entry.CPU)
+        resreq.set_by_type(constants.RES_MEM, entry.mem)
+        resreq.set_by_type(constants.RES_DISK, vmimagesize + entry.disk)
         if tStart == None:
-            req = BestEffortLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq, realduration)
+            preemptible = True
+            req = BestEffortLease(tSubmit, duration, vmimage, vmimagesize, numnodes, resreq, preemptible, realduration)
         else:
-            req = ARLease(tSubmit, tStart, duration, vmimage, vmimagesize, numnodes, resreq, realduration)
+            preemptible = False
+            req = ARLease(tSubmit, tStart, duration, vmimage, vmimagesize, numnodes, resreq, preemptible, realduration)
         req.state = constants.LEASE_STATE_PENDING
         requests.append(req)
     return requests
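
The trace readers now pass the preemptible flag positionally to the lease constructors: best-effort requests are created preemptible, AR requests are not. A sketch of the argument order as used in this file; the stub classes and parameter names below are hypothetical, only the call shape comes from the diff:

    class BestEffortLease(object):
        def __init__(self, submit_time, duration, diskimage, diskimage_size,
                     numnodes, resreq, preemptible, realdur):
            self.preemptible = preemptible

    class ARLease(object):
        def __init__(self, submit_time, start, duration, diskimage, diskimage_size,
                     numnodes, resreq, preemptible, realdur):
            self.preemptible = preemptible

    be = BestEffortLease(0, 3600, "NOIMAGE", 600, 1, None, True, 3600)
    ar = ARLease(0, 100, 3600, "foo.img", 600, 1, None, False, 3600)
    print be.preemptible, ar.preemptible   # -> True False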


