From 780ac2a9c6eb09ced59107c13c840e4938db9859 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 31 Jan 2013 10:32:05 -0800 Subject: [PATCH 001/486] Introduce POM dependency on VSphere 5.1 SDK, not done yet, WIP. --- deps/install-non-oss.sh | 7 + plugins/hypervisors/vmware/pom.xml | 12 - pom.xml | 2 +- vmware-base/pom.xml | 12 - .../vmware/mo/VirtualMachineMO.java | 830 +++++++++--------- .../vmware/util/VmwareGuestOsMapper.java | 276 +++--- .../hypervisor/vmware/util/VmwareHelper.java | 271 +++--- 7 files changed, 705 insertions(+), 705 deletions(-) diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh index 28eb03e1562..3476ea9922f 100755 --- a/deps/install-non-oss.sh +++ b/deps/install-non-oss.sh @@ -29,3 +29,10 @@ mvn install:install-file -Dfile=manageontap.jar -DgroupId=com.cloud.com.netapp mvn install:install-file -Dfile=vim25.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=4.1 -Dpackaging=jar mvn install:install-file -Dfile=apputils.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-apputils -Dversion=4.1 -Dpackaging=jar mvn install:install-file -Dfile=vim.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim -Dversion=4.1 -Dpackaging=jar + +# +# From https://my.vmware.com/group/vmware/get-download?downloadGroup=VSP510-WEBSDK-510 +# Version: 5.1, Release-date: 2012-09-10, Build: 774886 +mvn install:install-file -Dfile=vim25_51.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=5.1 -Dpackaging=jar + + diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml index d990e89b388..ad27ab7300a 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -38,18 +38,6 @@ ${cs.vmware.api.version} compile - - com.cloud.com.vmware - vmware-vim - ${cs.vmware.api.version} - compile - - - com.cloud.com.vmware - vmware-apputils - ${cs.vmware.api.version} - compile - org.apache.axis axis diff --git a/pom.xml b/pom.xml index 35d6520ce6b..c5081f2e969 100644 
--- a/pom.xml +++ b/pom.xml @@ -81,7 +81,7 @@ 2.4 1.2 1.0-20081010.060147 - 4.1 + 5.1 1.9.5 1.3.21.1 2.6 diff --git a/vmware-base/pom.xml b/vmware-base/pom.xml index bd536fb574a..09509981f2d 100644 --- a/vmware-base/pom.xml +++ b/vmware-base/pom.xml @@ -43,18 +43,6 @@ ${cs.vmware.api.version} compile - - com.cloud.com.vmware - vmware-vim - ${cs.vmware.api.version} - compile - - - com.cloud.com.vmware - vmware-apputils - ${cs.vmware.api.version} - compile - org.apache.axis axis diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index cd54127fcc2..0dc41a1f597 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -103,22 +103,22 @@ public class VirtualMachineMO extends BaseMO { public VirtualMachineMO(VmwareContext context, ManagedObjectReference morVm) { super(context, morVm); } - + public VirtualMachineMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public Pair getOwnerDatacenter() throws Exception { return DatacenterMO.getOwnerDatacenter(getContext(), getMor()); } - + public Pair getOwnerDatastore(String dsFullPath) throws Exception { String dsName = DatastoreFile.getDatastoreNameFromPath(dsFullPath); - + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); pSpec.setPathSet(new String[] { "name" }); - + TraversalSpec vmDatastoreTraversal = new TraversalSpec(); vmDatastoreTraversal.setType("VirtualMachine"); vmDatastoreTraversal.setPath("datastore"); @@ -132,11 +132,11 @@ public class VirtualMachineMO extends BaseMO { PropertyFilterSpec pfSpec = new PropertyFilterSpec(); pfSpec.setPropSet(new PropertySpec[] { pSpec }); pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - + ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), + 
_context.getServiceContent().getPropertyCollector(), new PropertyFilterSpec[] { pfSpec }); - + if(ocs != null) { for(ObjectContent oc : ocs) { DynamicProperty prop = oc.getPropSet(0); @@ -145,23 +145,23 @@ public class VirtualMachineMO extends BaseMO { } } } - + return null; } - + public HostMO getRunningHost() throws Exception { VirtualMachineRuntimeInfo runtimeInfo = getRuntimeInfo(); return new HostMO(_context, runtimeInfo.getHost()); } - + public String getVmName() throws Exception { return (String)getContext().getServiceUtil().getDynamicProperty(_mor, "name"); } - + public GuestInfo getVmGuestInfo() throws Exception { return (GuestInfo)getContext().getServiceUtil().getDynamicProperty(_mor, "guest"); } - + public boolean isVMwareToolsRunning() throws Exception { GuestInfo guestInfo = getVmGuestInfo(); if(guestInfo != null) { @@ -170,13 +170,13 @@ public class VirtualMachineMO extends BaseMO { } return false; } - + public boolean powerOn() throws Exception { if(getPowerState() == VirtualMachinePowerState.poweredOn) return true; - + ManagedObjectReference morTask = _context.getService().powerOnVM_Task(_mor, null); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -184,64 +184,64 @@ public class VirtualMachineMO extends BaseMO { } else { s_logger.error("VMware powerOnVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public boolean powerOff() throws Exception { if(getPowerState() == VirtualMachinePowerState.poweredOff) return true; - + return powerOffNoCheck(); } - + public boolean safePowerOff(int shutdownWaitMs) throws Exception { - + if(getPowerState() == VirtualMachinePowerState.poweredOff) return true; - + if(isVMwareToolsRunning()) { try { String vmName = this.getName(); - + s_logger.info("Try gracefully shut down VM " + vmName); shutdown(); - + long startTick = System.currentTimeMillis(); while(getPowerState() != 
VirtualMachinePowerState.poweredOff && System.currentTimeMillis() - startTick < shutdownWaitMs) { - try { + try { Thread.sleep(1000); } catch(InterruptedException e) { } } - + if(getPowerState() != VirtualMachinePowerState.poweredOff) { s_logger.info("can not gracefully shutdown VM within " + (shutdownWaitMs/1000) + " seconds, we will perform force power off on VM " + vmName); return powerOffNoCheck(); } - + return true; } catch(Exception e) { - s_logger.warn("Failed to do guest-os graceful shutdown due to " + VmwareHelper.getExceptionMessage(e)); + s_logger.warn("Failed to do guest-os graceful shutdown due to " + VmwareHelper.getExceptionMessage(e)); } } - + return powerOffNoCheck(); } - + private boolean powerOffNoCheck() throws Exception { ManagedObjectReference morTask = _context.getService().powerOffVM_Task(_mor); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); - + // It seems that even if a power-off task is returned done, VM state may still not be marked, // wait up to 5 seconds to make sure to avoid race conditioning for immediate following on operations // that relies on a powered-off VM long startTick = System.currentTimeMillis(); while(getPowerState() != VirtualMachinePowerState.poweredOff && System.currentTimeMillis() - startTick < 5000) { - try { + try { Thread.sleep(1000); } catch(InterruptedException e) { } @@ -249,21 +249,21 @@ public class VirtualMachineMO extends BaseMO { return true; } else { if(getPowerState() == VirtualMachinePowerState.poweredOff) { - // to help deal with possible race-condition + // to help deal with possible race-condition s_logger.info("Current power-off task failed. 
However, VM has been switched to the state we are expecting for"); return true; } - + s_logger.error("VMware powerOffVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public VirtualMachinePowerState getPowerState() throws Exception { - + VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; - + // This is really ugly, there is a case that when windows guest VM is doing sysprep, the temporary // rebooting process may let us pick up a "poweredOff" state during VMsync process, this can trigger // a series actions. Unfortunately, from VMware API we can not distinguish power state into such details. @@ -282,13 +282,13 @@ public class VirtualMachineMO extends BaseMO { break; } } - + return powerState; } - + public boolean reset() throws Exception { ManagedObjectReference morTask = _context.getService().resetVM_Task(_mor); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -298,28 +298,28 @@ public class VirtualMachineMO extends BaseMO { } return false; } - + public void shutdown() throws Exception { _context.getService().shutdownGuest(_mor); } - + public void rebootGuest() throws Exception { _context.getService().rebootGuest(_mor); } - + public void markAsTemplate() throws Exception { _context.getService().markAsTemplate(_mor); } - + public boolean isTemplate() throws Exception { VirtualMachineConfigInfo configInfo = this.getConfigInfo(); return configInfo.isTemplate(); } - + public boolean migrate(ManagedObjectReference morRp, ManagedObjectReference morTargetHost) throws Exception { ManagedObjectReference morTask = _context.getService().migrateVM_Task(_mor, morRp, morTargetHost, VirtualMachineMovePriority.defaultPriority, null); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -327,17 +327,17 @@ public class 
VirtualMachineMO extends BaseMO { } else { s_logger.error("VMware migrateVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public boolean relocate(ManagedObjectReference morTargetHost) throws Exception { VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); relocateSpec.setHost(morTargetHost); - - ManagedObjectReference morTask = _context.getService().relocateVM_Task(_mor, + + ManagedObjectReference morTask = _context.getService().relocateVM_Task(_mor, relocateSpec, null); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -345,20 +345,20 @@ public class VirtualMachineMO extends BaseMO { } else { s_logger.error("VMware relocateVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public VirtualMachineSnapshotInfo getSnapshotInfo() throws Exception { return (VirtualMachineSnapshotInfo)_context.getServiceUtil().getDynamicProperty(_mor, "snapshot"); } - - public boolean createSnapshot(String snapshotName, String snapshotDescription, + + public boolean createSnapshot(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception { - - ManagedObjectReference morTask = _context.getService().createSnapshot_Task(_mor, snapshotName, + + ManagedObjectReference morTask = _context.getService().createSnapshot_Task(_mor, snapshotName, snapshotDescription, dumpMemory, quiesce); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -371,28 +371,28 @@ public class VirtualMachineMO extends BaseMO { if(morSnapshot != null) { break; } - + try { Thread.sleep(1000); } catch(InterruptedException e) {} } - + if(morSnapshot == null) s_logger.error("We've been waiting for over 10 seconds for snapshot MOR to be appearing in vCenter after CreateSnapshot task 
is done, but it is still not there?!"); - + return true; } else { s_logger.error("VMware createSnapshot_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public boolean removeSnapshot(String snapshotName, boolean removeChildren) throws Exception { ManagedObjectReference morSnapshot = getSnapshotMor(snapshotName); if(morSnapshot == null) { s_logger.warn("Unable to find snapshot: " + snapshotName); return false; } - + ManagedObjectReference morTask = _context.getService().removeSnapshot_Task(morSnapshot, removeChildren); String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { @@ -401,13 +401,13 @@ public class VirtualMachineMO extends BaseMO { } else { s_logger.error("VMware removeSnapshot_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public boolean removeAllSnapshots() throws Exception { VirtualMachineSnapshotInfo snapshotInfo = getSnapshotInfo(); - + if(snapshotInfo != null && snapshotInfo.getRootSnapshotList() != null) { VirtualMachineSnapshotTree[] tree = snapshotInfo.getRootSnapshotList(); for(VirtualMachineSnapshotTree treeNode : tree) { @@ -421,88 +421,88 @@ public class VirtualMachineMO extends BaseMO { } } } - + return true; } - - public String getSnapshotDiskFileDatastorePath(VirtualMachineFileInfo vmFileInfo, + + public String getSnapshotDiskFileDatastorePath(VirtualMachineFileInfo vmFileInfo, List> datastoreMounts, String snapshotDiskFile) throws Exception { - + // if file path start with "/", need to search all datastore mounts on the host in order - // to form fully qualified datastore path + // to form fully qualified datastore path if(snapshotDiskFile.startsWith("/")) { for(Pair mount: datastoreMounts) { if(snapshotDiskFile.startsWith(mount.second())) { DatastoreMO dsMo = new DatastoreMO(_context, mount.first()); - + String dsFullPath = String.format("[%s] %s", dsMo.getName(), 
snapshotDiskFile.substring(mount.second().length() + 1)); s_logger.info("Convert snapshot disk file name to datastore path. " + snapshotDiskFile + "->" + dsFullPath); return dsFullPath; } } - + s_logger.info("Convert snapshot disk file name to datastore path. " + snapshotDiskFile + "->" + snapshotDiskFile); return snapshotDiskFile; } else { - - // snapshot directory string from VirtualMachineFileInfo ends with / + + // snapshot directory string from VirtualMachineFileInfo ends with / String dsFullPath = vmFileInfo.getSnapshotDirectory() + snapshotDiskFile; s_logger.info("Convert snapshot disk file name to datastore path. " + snapshotDiskFile + "->" + dsFullPath); return dsFullPath; } } - + public SnapshotDescriptor getSnapshotDescriptor() throws Exception { Pair dcPair = getOwnerDatacenter(); - + String dsPath = getSnapshotDescriptorDatastorePath(); assert(dsPath != null); String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), dsPath); byte[] content = getContext().getResourceContent(url); - + if(content == null || content.length < 1) { s_logger.warn("Snapshot descriptor file (vsd) does not exist anymore?"); } - + SnapshotDescriptor descriptor = new SnapshotDescriptor(); descriptor.parse(content); return descriptor; } - + public String getSnapshotDescriptorDatastorePath() throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("VirtualMachine"); pSpec.setPathSet(new String[] { "name", "config.files" }); - + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.FALSE); - + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); pfSpec.setPropSet(new PropertySpec[] { pSpec }); pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - + ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), + _context.getServiceContent().getPropertyCollector(), new PropertyFilterSpec[] { pfSpec }); assert(ocs != null); String vmName = null; VirtualMachineFileInfo fileInfo = 
null; - + assert(ocs.length == 1); for(ObjectContent oc : ocs) { DynamicProperty[] props = oc.getPropSet(); if(props != null) { assert(props.length == 2); - + for(DynamicProperty prop : props) { if(prop.getName().equals("name")) { vmName = prop.getVal().toString(); } else { - fileInfo = (VirtualMachineFileInfo)prop.getVal(); + fileInfo = (VirtualMachineFileInfo)prop.getVal(); } } } @@ -514,29 +514,29 @@ public class VirtualMachineMO extends BaseMO { DatastoreFile vmxFile = new DatastoreFile(fileInfo.getVmPathName()); return vmxFile.getCompanionPath(vmName + ".vmsd"); } - + public ManagedObjectReference getSnapshotMor(String snapshotName) throws Exception { VirtualMachineSnapshotInfo info = getSnapshotInfo(); if(info != null) { - VirtualMachineSnapshotTree[] snapTree = info.getRootSnapshotList(); - return VmwareHelper.findSnapshotInTree(snapTree, snapshotName); + List snapTree = info.getRootSnapshotList(); + return VmwareHelper.findSnapshotInTree(snapTree, snapshotName); } return null; } - - public boolean createFullClone(String cloneName, ManagedObjectReference morFolder, ManagedObjectReference morResourcePool, + + public boolean createFullClone(String cloneName, ManagedObjectReference morFolder, ManagedObjectReference morResourcePool, ManagedObjectReference morDs) throws Exception { - + VirtualMachineCloneSpec cloneSpec = new VirtualMachineCloneSpec(); VirtualMachineRelocateSpec relocSpec = new VirtualMachineRelocateSpec(); cloneSpec.setLocation(relocSpec); cloneSpec.setPowerOn(false); cloneSpec.setTemplate(false); - + relocSpec.setDatastore(morDs); relocSpec.setPool(morResourcePool); - ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); - + ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -544,19 +544,19 @@ 
public class VirtualMachineMO extends BaseMO { } else { s_logger.error("VMware cloneVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - - public boolean createLinkedClone(String cloneName, ManagedObjectReference morBaseSnapshot, - ManagedObjectReference morFolder, ManagedObjectReference morResourcePool, + + public boolean createLinkedClone(String cloneName, ManagedObjectReference morBaseSnapshot, + ManagedObjectReference morFolder, ManagedObjectReference morResourcePool, ManagedObjectReference morDs) throws Exception { - + assert(morBaseSnapshot != null); assert(morFolder != null); assert(morResourcePool != null); assert(morDs != null); - + VirtualDisk[] independentDisks = getAllIndependentDiskDevice(); VirtualMachineRelocateSpec rSpec = new VirtualMachineRelocateSpec(); if(independentDisks.length > 0) { @@ -567,22 +567,22 @@ public class VirtualMachineMO extends BaseMO { diskLocator[i].setDiskId(independentDisks[i].getKey()); diskLocator[i].setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._moveAllDiskBackingsAndDisallowSharing); } - + rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._createNewChildDiskBacking); rSpec.setDisk(diskLocator); } else { rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._createNewChildDiskBacking); } rSpec.setPool(morResourcePool); - + VirtualMachineCloneSpec cloneSpec = new VirtualMachineCloneSpec(); cloneSpec.setPowerOn(false); cloneSpec.setTemplate(false); cloneSpec.setLocation(rSpec); cloneSpec.setSnapshot(morBaseSnapshot); - - ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); - + + ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -590,30 +590,30 @@ public class VirtualMachineMO extends BaseMO { } else { 
s_logger.error("VMware cloneVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public VirtualMachineRuntimeInfo getRuntimeInfo() throws Exception { return (VirtualMachineRuntimeInfo)_context.getServiceUtil().getDynamicProperty( _mor, "runtime"); } - + public VirtualMachineConfigInfo getConfigInfo() throws Exception { return (VirtualMachineConfigInfo)_context.getServiceUtil().getDynamicProperty( _mor, "config"); } - + public VirtualMachineConfigSummary getConfigSummary() throws Exception { return (VirtualMachineConfigSummary)_context.getServiceUtil().getDynamicProperty( _mor, "summary.config"); } - + public VirtualMachineFileInfo getFileInfo() throws Exception { return (VirtualMachineFileInfo)_context.getServiceUtil().getDynamicProperty( _mor, "config.files"); } - + public ManagedObjectReference getParentMor() throws Exception { return (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty( _mor, "parent"); @@ -623,7 +623,7 @@ public class VirtualMachineMO extends BaseMO { PropertySpec pSpec = new PropertySpec(); pSpec.setType("Network"); pSpec.setPathSet(new String[] {"name"}); - + TraversalSpec vm2NetworkTraversal = new TraversalSpec(); vm2NetworkTraversal.setType("VirtualMachine"); vm2NetworkTraversal.setPath("network"); @@ -637,11 +637,11 @@ public class VirtualMachineMO extends BaseMO { PropertyFilterSpec pfSpec = new PropertyFilterSpec(); pfSpec.setPropSet(new PropertySpec[] { pSpec }); pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - + ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), + _context.getServiceContent().getPropertyCollector(), new PropertyFilterSpec[] { pfSpec }); - + List networks = new ArrayList(); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { @@ -655,16 +655,16 @@ public class VirtualMachineMO extends BaseMO { List networks = new ArrayList(); int gcTagKey = getCustomFieldKey("Network", 
CustomFieldConstants.CLOUD_GC); - + if(gcTagKey == 0) { gcTagKey = getCustomFieldKey("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP); s_logger.debug("The custom key for dvPortGroup is : " + gcTagKey); } - + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Network"); pSpec.setPathSet(new String[] {"name", "vm", String.format("value[%d]", gcTagKey)}); - + TraversalSpec vm2NetworkTraversal = new TraversalSpec(); vm2NetworkTraversal.setType("VirtualMachine"); vm2NetworkTraversal.setPath("network"); @@ -678,11 +678,11 @@ public class VirtualMachineMO extends BaseMO { PropertyFilterSpec pfSpec = new PropertyFilterSpec(); pfSpec.setPropSet(new PropertySpec[] { pSpec }); pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - + ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), + _context.getServiceContent().getPropertyCollector(), new PropertyFilterSpec[] { pfSpec }); - + if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { ArrayOfManagedObjectReference morVms = null; @@ -700,29 +700,29 @@ public class VirtualMachineMO extends BaseMO { gcTagValue = val.getValue(); } } - - NetworkDetails details = new NetworkDetails(name, oc.getObj(), - (morVms != null ? morVms.getManagedObjectReference() : null), + + NetworkDetails details = new NetworkDetails(name, oc.getObj(), + (morVms != null ? 
morVms.getManagedObjectReference() : null), gcTagValue); - + networks.add(details); } s_logger.debug("Retrieved " + networks.size() + " networks with key : " + gcTagKey); } - + return networks; } - + /** - * Retrieve path info to access VM files via vSphere web interface - * @return [0] vm-name, [1] data-center-name, [2] datastore-name + * Retrieve path info to access VM files via vSphere web interface + * @return [0] vm-name, [1] data-center-name, [2] datastore-name * @throws Exception */ public String[] getHttpAccessPathInfo() throws Exception { String[] pathInfo = new String[3]; - + Pair dcInfo = getOwnerDatacenter(); - + VirtualMachineFileInfo fileInfo = getFileInfo(); String vmxFilePath = fileInfo.getVmPathName(); String vmxPathTokens[] = vmxFilePath.split("\\[|\\]|/"); @@ -732,14 +732,14 @@ public class VirtualMachineMO extends BaseMO { pathInfo[3] = vmxPathTokens[0].trim(); // vSphere datastore name return pathInfo; } - + public String getVmxHttpAccessUrl() throws Exception { Pair dcInfo = getOwnerDatacenter(); - + VirtualMachineFileInfo fileInfo = getFileInfo(); String vmxFilePath = fileInfo.getVmPathName(); String vmxPathTokens[] = vmxFilePath.split("\\[|\\]|/"); - + StringBuffer sb = new StringBuffer("https://" + _context.getServerAddress() + "/folder/"); sb.append(URLEncoder.encode(vmxPathTokens[2].trim())); sb.append("/"); @@ -748,16 +748,16 @@ public class VirtualMachineMO extends BaseMO { sb.append(URLEncoder.encode(dcInfo.second())); sb.append("&dsName="); sb.append(URLEncoder.encode(vmxPathTokens[1].trim())); - + return sb.toString(); } - + public boolean setVncConfigInfo(boolean enableVnc, String vncPassword, int vncPort, String keyboard) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); OptionValue[] vncOptions = VmwareHelper.composeVncOptions(null, enableVnc, vncPassword, vncPort, keyboard); vmConfigSpec.setExtraConfig(vncOptions); ManagedObjectReference morTask = 
_context.getService().reconfigVM_Task(_mor, vmConfigSpec); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -767,10 +767,10 @@ public class VirtualMachineMO extends BaseMO { } return false; } - + public boolean configureVm(VirtualMachineConfigSpec vmConfigSpec) throws Exception { ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, vmConfigSpec); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -780,12 +780,12 @@ public class VirtualMachineMO extends BaseMO { } return false; } - - public boolean configureVm(Ternary[] devices) throws Exception { - + assert(devices != null); - + VirtualMachineConfigSpec configSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[devices.length]; int i = 0; @@ -799,7 +799,7 @@ public class VirtualMachineMO extends BaseMO { configSpec.setDeviceChange(deviceConfigSpecArray); ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, configSpec); - + String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); @@ -809,14 +809,14 @@ public class VirtualMachineMO extends BaseMO { } return false; } - + public Pair getVncPort(String hostNetworkName) throws Exception { HostMO hostMo = getRunningHost(); VmwareHypervisorHostNetworkSummary summary = hostMo.getHyperHostNetworkSummary(hostNetworkName); VirtualMachineConfigInfo configInfo = getConfigInfo(); OptionValue[] values = configInfo.getExtraConfig(); - + if(values != null) { for(OptionValue option : values) { if(option.getKey().equals("RemoteDisplay.vnc.port")) { @@ -829,32 +829,32 @@ public class VirtualMachineMO extends BaseMO { } return new Pair(summary.getHostIp(), 0); } - + // vmdkDatastorePath: [datastore name] 
vmdkFilePath public void createDisk(String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, int controllerKey) throws Exception { createDisk(vmdkDatastorePath, VirtualDiskType.thin, VirtualDiskMode.persistent, null, sizeInMb, morDs, controllerKey); } - + // vmdkDatastorePath: [datastore name] vmdkFilePath public void createDisk(String vmdkDatastorePath, VirtualDiskType diskType, VirtualDiskMode diskMode, String rdmDeviceName, int sizeInMb, ManagedObjectReference morDs, int controllerKey) throws Exception { - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + vmdkDatastorePath - + ", sizeInMb: " + sizeInMb + ", diskType: " + diskType + ", diskMode: " + diskMode + ", rdmDeviceName: " + rdmDeviceName + + ", sizeInMb: " + sizeInMb + ", diskType: " + diskType + ", diskMode: " + diskMode + ", rdmDeviceName: " + rdmDeviceName + ", datastore: " + morDs.get_value() + ", controllerKey: " + controllerKey); - + assert(vmdkDatastorePath != null); assert(morDs != null); - + if(controllerKey < 0) { controllerKey = getIDEDeviceControllerKey(); } - + VirtualDisk newDisk = new VirtualDisk(); if(diskType == VirtualDiskType.thin || diskType == VirtualDiskType.preallocated || diskType == VirtualDiskType.eagerZeroedThick) { - + VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); backingInfo.setDiskMode(diskMode.persistent.toString()); if(diskType == VirtualDiskType.thin) { @@ -862,19 +862,19 @@ public class VirtualMachineMO extends BaseMO { } else { backingInfo.setThinProvisioned(false); } - + if(diskType == VirtualDiskType.eagerZeroedThick) { backingInfo.setEagerlyScrub(true); } else { backingInfo.setEagerlyScrub(false); } - + backingInfo.setDatastore(morDs); backingInfo.setFileName(vmdkDatastorePath); newDisk.setBacking(backingInfo); } else if(diskType == VirtualDiskType.rdm || diskType == VirtualDiskType.rdmp) { - 
VirtualDiskRawDiskMappingVer1BackingInfo backingInfo = - new VirtualDiskRawDiskMappingVer1BackingInfo(); + VirtualDiskRawDiskMappingVer1BackingInfo backingInfo = + new VirtualDiskRawDiskMappingVer1BackingInfo(); if(diskType == VirtualDiskType.rdm) { backingInfo.setCompatibilityMode("virtualMode"); } else { @@ -884,118 +884,118 @@ public class VirtualMachineMO extends BaseMO { if(diskType == VirtualDiskType.rdm) { backingInfo.setDiskMode(diskMode.persistent.toString()); } - + backingInfo.setDatastore(morDs); backingInfo.setFileName(vmdkDatastorePath); newDisk.setBacking(backingInfo); } - + int deviceNumber = getNextDeviceNumber(controllerKey); - + newDisk.setControllerKey(controllerKey); newDisk.setKey(-deviceNumber); newDisk.setUnitNumber(deviceNumber); newDisk.setCapacityInKB(sizeInMb*1024); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(newDisk); deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.create); deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createDisk() done(failed)"); throw new Exception("Unable to create disk " + vmdkDatastorePath + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createDisk() done(successfully)"); } - + public void 
attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference morDs) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + new Gson().toJson(vmdkDatastorePathChain) + ", datastore: " + morDs.get_value()); - - VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, getScsiDeviceControllerKey(), + + VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, getScsiDeviceControllerKey(), vmdkDatastorePathChain, morDs, -1, 1); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(newDisk); deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(failed)"); + s_logger.trace("vCenter API trace - attachDisk() done(failed)"); throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); + s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); } - + public void attachDisk(Pair[] vmdkDatastorePathChain, int controllerKey) throws Exception { - + if(s_logger.isTraceEnabled()) 
- s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + new Gson().toJson(vmdkDatastorePathChain)); - - VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, controllerKey, + + VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, controllerKey, vmdkDatastorePathChain, -1, 1); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(newDisk); deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(failed)"); + s_logger.trace("vCenter API trace - attachDisk() done(failed)"); throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); + s_logger.trace("vCenter API trace - attachDisk() done(successfully)"); } - + // vmdkDatastorePath: [datastore name] vmdkFilePath public List> detachDisk(String vmdkDatastorePath, boolean deleteBackingFile) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachDisk(). 
target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - detachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + vmdkDatastorePath + ", deleteBacking: " + deleteBackingFile); - + // Note: if VM has been taken snapshot, original backing file will be renamed, therefore, when we try to find the matching // VirtualDisk, we only perform prefix matching Pair deviceInfo = getDiskDevice(vmdkDatastorePath, false); @@ -1004,29 +1004,29 @@ public class VirtualMachineMO extends BaseMO { s_logger.trace("vCenter API trace - detachDisk() done (failed)"); throw new Exception("No such disk device: " + vmdkDatastorePath); } - + List> chain = getDiskDatastorePathChain(deviceInfo.first(), true); - - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(deviceInfo.first()); if(deleteBackingFile) { deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.destroy); } deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.remove); - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachDisk() done (failed)"); - + throw new Exception("Failed to detach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } _context.waitForTaskProgressDone(morTask); @@ -1038,65 +1038,65 @@ public class VirtualMachineMO extends BaseMO { } catch(Exception e) { s_logger.info("Unable to retrieve snapshot descriptor, will skip updating 
snapshot reference"); } - + if(snapshotDescriptor != null) { for(Pair pair: chain) { DatastoreFile dsFile = new DatastoreFile(pair.first()); snapshotDescriptor.removeDiskReferenceFromSnapshot(dsFile.getFileName()); } - + Pair dcPair = getOwnerDatacenter(); String dsPath = getSnapshotDescriptorDatastorePath(); assert(dsPath != null); String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), dsPath); getContext().uploadResourceContent(url, snapshotDescriptor.getVmsdContent()); } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachDisk() done (successfully)"); return chain; } - + public void detachAllDisks() throws Exception { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachAllDisk(). target MOR: " + _mor.get_value()); - + VirtualDisk[] disks = getAllDiskDevice(); if(disks.length > 0) { - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[disks.length]; - + for(int i = 0; i < disks.length; i++) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(disks[i]); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.remove); } reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachAllDisk() done(failed)"); throw new Exception("Failed to detach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + _context.waitForTaskProgressDone(morTask); } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachAllDisk() done(successfully)"); } - + // isoDatastorePath: [datastore name] isoFilePath public void 
attachIso(String isoDatastorePath, ManagedObjectReference morDs, boolean connect, boolean connectAtBoot) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.get_value() + ", isoDatastorePath: " + s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.get_value() + ", isoDatastorePath: " + isoDatastorePath + ", datastore: " + morDs.get_value() + ", connect: " + connect + ", connectAtBoot: " + connectAtBoot); - + assert(isoDatastorePath != null); assert(morDs != null); @@ -1106,54 +1106,54 @@ public class VirtualMachineMO extends BaseMO { newCdRom = true; cdRom = new VirtualCdrom(); cdRom.setControllerKey(getIDEDeviceControllerKey()); - + int deviceNumber = getNextIDEDeviceNumber(); - cdRom.setUnitNumber(deviceNumber); + cdRom.setUnitNumber(deviceNumber); cdRom.setKey(-deviceNumber); } - + VirtualDeviceConnectInfo cInfo = new VirtualDeviceConnectInfo(); cInfo.setConnected(connect); cInfo.setStartConnected(connectAtBoot); cdRom.setConnectable(cInfo); - + VirtualCdromIsoBackingInfo backingInfo = new VirtualCdromIsoBackingInfo(); backingInfo.setFileName(isoDatastorePath); backingInfo.setDatastore(morDs); cdRom.setBacking(backingInfo); - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(cdRom); if(newCdRom) { deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); } else { deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.edit); } - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = 
_context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(failed)"); throw new Exception("Failed to attach ISO due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(successfully)"); } - + public void detachIso(String isoDatastorePath) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.get_value() + ", isoDatastorePath: " + s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.get_value() + ", isoDatastorePath: " + isoDatastorePath); VirtualDevice device = getIsoDevice(); @@ -1162,48 +1162,48 @@ public class VirtualMachineMO extends BaseMO { s_logger.trace("vCenter API trace - detachIso() done(failed)"); throw new Exception("Unable to find a CDROM device"); } - + VirtualCdromRemotePassthroughBackingInfo backingInfo = new VirtualCdromRemotePassthroughBackingInfo(); backingInfo.setDeviceName(""); device.setBacking(backingInfo); - - VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); + + VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); - + deviceConfigSpec.setDevice(device); deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.edit); - + deviceConfigSpecArray[0] = deviceConfigSpec; reConfigSpec.setDeviceChange(deviceConfigSpecArray); - + ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); String result = _context.getServiceUtil().waitForTask(morTask); - + if(!result.equals("sucess")) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(failed)"); throw new 
Exception("Failed to detachIso due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(successfully)"); } - + public Pair getVmdkFileInfo(String vmdkDatastorePath) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getVmdkFileInfo(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - getVmdkFileInfo(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + vmdkDatastorePath); - + Pair dcPair = getOwnerDatacenter(); - + String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), vmdkDatastorePath); byte[] content = getContext().getResourceContent(url); VmdkFileDescriptor descriptor = new VmdkFileDescriptor(); descriptor.parse(content); - + Pair result = new Pair(descriptor, content); if(s_logger.isTraceEnabled()) { s_logger.trace("vCenter API trace - getVmdkFileInfo() done"); @@ -1211,32 +1211,32 @@ public class VirtualMachineMO extends BaseMO { } return result; } - + public void exportVm(String exportDir, String exportName, boolean packToOva, boolean leaveOvaFileOnly) throws Exception { ManagedObjectReference morOvf = _context.getServiceContent().getOvfManager(); - + VirtualMachineRuntimeInfo runtimeInfo = getRuntimeInfo(); HostMO hostMo = new HostMO(_context, runtimeInfo.getHost()); String hostName = hostMo.getHostName(); String vmName = getVmName(); - + DatacenterMO dcMo = new DatacenterMO(_context, hostMo.getHyperHostDatacenter()); - + if(runtimeInfo.getPowerState() != VirtualMachinePowerState.poweredOff) { String msg = "Unable to export VM because it is not at powerdOff state. 
vmName: " + vmName + ", host: " + hostName; s_logger.error(msg); throw new Exception(msg); } - + ManagedObjectReference morLease = _context.getService().exportVm(getMor()); if(morLease == null) { s_logger.error("exportVm() failed"); throw new Exception("exportVm() failed"); } - + HttpNfcLeaseMO leaseMo = new HttpNfcLeaseMO(_context, morLease); HttpNfcLeaseState state = leaseMo.waitState(new HttpNfcLeaseState[] { HttpNfcLeaseState.ready, HttpNfcLeaseState.error }); - + try { if(state == HttpNfcLeaseState.ready) { final HttpNfcLeaseMO.ProgressReporter progressReporter = leaseMo.createProgressReporter(); @@ -1247,24 +1247,24 @@ public class VirtualMachineMO extends BaseMO { HttpNfcLeaseInfo leaseInfo = leaseMo.getLeaseInfo(); final long totalBytes = leaseInfo.getTotalDiskCapacityInKB() * 1024; long totalBytesDownloaded = 0; - + HttpNfcLeaseDeviceUrl[] deviceUrls = leaseInfo.getDeviceUrl(); if(deviceUrls != null) { - OvfFile[] ovfFiles = new OvfFile[deviceUrls.length]; - for (int i = 0; i < deviceUrls.length; i++) { - String deviceId = deviceUrls[i].getKey(); - String deviceUrlStr = deviceUrls[i].getUrl(); + OvfFile[] ovfFiles = new OvfFile[deviceUrls.length]; + for (int i = 0; i < deviceUrls.length; i++) { + String deviceId = deviceUrls[i].getKey(); + String deviceUrlStr = deviceUrls[i].getUrl(); String orgDiskFileName = deviceUrlStr.substring(deviceUrlStr.lastIndexOf("/") + 1); String diskFileName = String.format("%s-disk%d%s", exportName, i, VmwareHelper.getFileExtension(orgDiskFileName, ".vmdk")); String diskUrlStr = deviceUrlStr.replace("*", hostName); diskUrlStr = HypervisorHostHelper.resolveHostNameInUrl(dcMo, diskUrlStr); String diskLocalPath = exportDir + File.separator + diskFileName; fileNames.add(diskLocalPath); - + if(s_logger.isInfoEnabled()) { s_logger.info("Download VMDK file for export. 
url: " + deviceUrlStr); } - long lengthOfDiskFile = _context.downloadVmdkFile(diskUrlStr, diskLocalPath, totalBytesDownloaded, + long lengthOfDiskFile = _context.downloadVmdkFile(diskUrlStr, diskLocalPath, totalBytesDownloaded, new ActionDelegate () { @Override public void action(Long param) { @@ -1275,33 +1275,33 @@ public class VirtualMachineMO extends BaseMO { } }); totalBytesDownloaded += lengthOfDiskFile; - + OvfFile ovfFile = new OvfFile(); - ovfFile.setPath(diskFileName); - ovfFile.setDeviceId(deviceId); - ovfFile.setSize(lengthOfDiskFile); - ovfFiles[i] = ovfFile; + ovfFile.setPath(diskFileName); + ovfFile.setDeviceId(deviceId); + ovfFile.setSize(lengthOfDiskFile); + ovfFiles[i] = ovfFile; } - + // write OVF descriptor file - OvfCreateDescriptorParams ovfDescParams = new OvfCreateDescriptorParams(); - ovfDescParams.setOvfFiles(ovfFiles); - OvfCreateDescriptorResult ovfCreateDescriptorResult = _context.getService().createDescriptor(morOvf, getMor(), ovfDescParams); + OvfCreateDescriptorParams ovfDescParams = new OvfCreateDescriptorParams(); + ovfDescParams.setOvfFiles(ovfFiles); + OvfCreateDescriptorResult ovfCreateDescriptorResult = _context.getService().createDescriptor(morOvf, getMor(), ovfDescParams); String ovfPath = exportDir + File.separator + exportName + ".ovf"; fileNames.add(ovfPath); - + FileWriter out = new FileWriter(ovfPath); - out.write(ovfCreateDescriptorResult.getOvfDescriptor()); + out.write(ovfCreateDescriptorResult.getOvfDescriptor()); out.close(); - + // tar files into OVA if(packToOva) { // Important! 
we need to sync file system before we can safely use tar to work around a linux kernal bug(or feature) s_logger.info("Sync file system before we package OVA..."); - + Script commandSync = new Script(true, "sync", 0, s_logger); commandSync.execute(); - + Script command = new Script(false, "tar", 0, s_logger); command.setWorkDir(exportDir); command.add("-cf", exportName + ".ova"); @@ -1309,10 +1309,10 @@ public class VirtualMachineMO extends BaseMO { for(String name: fileNames) { command.add((new File(name).getName())); } - + s_logger.info("Package OVA with commmand: " + command.toString()); command.execute(); - + // to be safe, physically test existence of the target OVA file if((new File(exportDir + File.separator + exportName + ".ova")).exists()) { success = true; @@ -1331,7 +1331,7 @@ public class VirtualMachineMO extends BaseMO { new File(name).delete(); } } - + if(!success) throw new Exception("Unable to finish the whole process to package as a OVA file"); } @@ -1341,7 +1341,7 @@ public class VirtualMachineMO extends BaseMO { leaseMo.completeLease(); } } - + // snapshot directory in format of: /vmfs/volumes// @Deprecated public void setSnapshotDirectory(String snapshotDir) throws Exception { @@ -1349,15 +1349,15 @@ public class VirtualMachineMO extends BaseMO { Pair dcInfo = getOwnerDatacenter(); String vmxUrl = _context.composeDatastoreBrowseUrl(dcInfo.second(), fileInfo.getVmPathName()); byte[] vmxContent = _context.getResourceContent(vmxUrl); - + BufferedReader in = null; BufferedWriter out = null; ByteArrayOutputStream bos = new ByteArrayOutputStream(); - + boolean replaced = false; try { in = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(vmxContent))); - out = new BufferedWriter(new OutputStreamWriter(bos)); + out = new BufferedWriter(new OutputStreamWriter(bos)); String line; while((line = in.readLine()) != null) { if(line.startsWith("workingDir")) { @@ -1369,7 +1369,7 @@ public class VirtualMachineMO extends BaseMO { out.newLine(); } 
} - + if(!replaced) { out.newLine(); out.write(String.format("workingDir=\"%s\"", snapshotDir)); @@ -1389,30 +1389,30 @@ public class VirtualMachineMO extends BaseMO { // its disk backing info anyway. // redoRegistration(); } - + // destName does not contain extension name - public void backupCurrentSnapshot(String deviceName, ManagedObjectReference morDestDs, + public void backupCurrentSnapshot(String deviceName, ManagedObjectReference morDestDs, String destDsDirectory, String destName, boolean includeBase) throws Exception { - + SnapshotDescriptor descriptor = getSnapshotDescriptor(); SnapshotInfo[] snapshotInfo = descriptor.getCurrentDiskChain(); if(snapshotInfo.length == 0) { String msg = "No snapshot found in this VM"; throw new Exception(msg); } - + HostMO hostMo = getRunningHost(); DatacenterMO dcMo = getOwnerDatacenter().first(); List> mounts = hostMo.getDatastoreMountsOnHost(); VirtualMachineFileInfo vmFileInfo = getFileInfo(); - - List> backupInfo = new ArrayList>(); - + + List> backupInfo = new ArrayList>(); + for(int i = 0; i < snapshotInfo.length; i++) { if(!includeBase && i == snapshotInfo.length - 1) { break; } - + SnapshotDescriptor.DiskInfo[] disks = snapshotInfo[i].getDisks(); if(disks != null) { String destBaseFileName; @@ -1420,10 +1420,10 @@ public class VirtualMachineMO extends BaseMO { String destParentFileName; for(SnapshotDescriptor.DiskInfo disk : disks) { if(deviceName == null || deviceName.equals(disk.getDeviceName())) { - String srcVmdkFullDsPath = getSnapshotDiskFileDatastorePath(vmFileInfo, + String srcVmdkFullDsPath = getSnapshotDiskFileDatastorePath(vmFileInfo, mounts, disk.getDiskFileName()); Pair srcDsInfo = getOwnerDatastore(srcVmdkFullDsPath); - + Pair vmdkInfo = getVmdkFileInfo(srcVmdkFullDsPath); String srcVmdkBaseFilePath = DatastoreFile.getCompanionDatastorePath( srcVmdkFullDsPath, vmdkInfo.first().getBaseFileName()); @@ -1436,19 +1436,19 @@ public class VirtualMachineMO extends BaseMO { destBaseFileName = destName + 
(snapshotInfo.length - i - 1) + "-flat.vmdk"; destParentFileName = null; } - + s_logger.info("Copy VMDK base file " + srcVmdkBaseFilePath + " to " + destDsDirectory + "/" + destBaseFileName); - srcDsInfo.first().copyDatastoreFile(srcVmdkBaseFilePath, dcMo.getMor(), + srcDsInfo.first().copyDatastoreFile(srcVmdkBaseFilePath, dcMo.getMor(), morDestDs, destDsDirectory + "/" + destBaseFileName, dcMo.getMor(), true); - + byte[] newVmdkContent = VmdkFileDescriptor.changeVmdkContentBaseInfo( vmdkInfo.second(), destBaseFileName, destParentFileName); - String vmdkUploadUrl = getContext().composeDatastoreBrowseUrl(dcMo.getName(), + String vmdkUploadUrl = getContext().composeDatastoreBrowseUrl(dcMo.getName(), destDsDirectory + "/" + destFileName); - + s_logger.info("Upload VMDK content file to " + destDsDirectory + "/" + destFileName); getContext().uploadResourceContent(vmdkUploadUrl, newVmdkContent); - + backupInfo.add(new Ternary( destFileName, destBaseFileName, destParentFileName) ); @@ -1456,21 +1456,21 @@ public class VirtualMachineMO extends BaseMO { } } } - + byte[] vdiskInfo = VmwareHelper.composeDiskInfo(backupInfo, snapshotInfo.length, includeBase); - String vdiskUploadUrl = getContext().composeDatastoreBrowseUrl(dcMo.getName(), + String vdiskUploadUrl = getContext().composeDatastoreBrowseUrl(dcMo.getName(), destDsDirectory + "/" + destName + ".vdisk"); getContext().uploadResourceContent(vdiskUploadUrl, vdiskInfo); } - + public String[] getCurrentSnapshotDiskChainDatastorePaths(String diskDevice) throws Exception { HostMO hostMo = getRunningHost(); List> mounts = hostMo.getDatastoreMountsOnHost(); VirtualMachineFileInfo vmFileInfo = getFileInfo(); - + SnapshotDescriptor descriptor = getSnapshotDescriptor(); SnapshotInfo[] snapshotInfo = descriptor.getCurrentDiskChain(); - + List diskDsFullPaths = new ArrayList(); for(int i = 0; i < snapshotInfo.length; i++) { SnapshotDescriptor.DiskInfo[] disks = snapshotInfo[i].getDisks(); @@ -1478,7 +1478,7 @@ public class 
VirtualMachineMO extends BaseMO { for(SnapshotDescriptor.DiskInfo disk: disks) { String deviceNameInDisk = disk.getDeviceName(); if(diskDevice == null || diskDevice.equalsIgnoreCase(deviceNameInDisk)) { - String vmdkFullDsPath = getSnapshotDiskFileDatastorePath(vmFileInfo, + String vmdkFullDsPath = getSnapshotDiskFileDatastorePath(vmFileInfo, mounts, disk.getDiskFileName()); diskDsFullPaths.add(vmdkFullDsPath); } @@ -1487,37 +1487,37 @@ public class VirtualMachineMO extends BaseMO { } return diskDsFullPaths.toArray(new String[0]); } - - public void cloneFromCurrentSnapshot(String clonedVmName, int cpuSpeedMHz, int memoryMb, String diskDevice, + + public void cloneFromCurrentSnapshot(String clonedVmName, int cpuSpeedMHz, int memoryMb, String diskDevice, ManagedObjectReference morDs) throws Exception { assert(morDs != null); String[] disks = getCurrentSnapshotDiskChainDatastorePaths(diskDevice); cloneFromDiskChain(clonedVmName, cpuSpeedMHz, memoryMb, disks, morDs); } - - public void cloneFromDiskChain(String clonedVmName, int cpuSpeedMHz, int memoryMb, + + public void cloneFromDiskChain(String clonedVmName, int cpuSpeedMHz, int memoryMb, String[] disks, ManagedObjectReference morDs) throws Exception { assert(disks != null); assert(disks.length >= 1); - + HostMO hostMo = getRunningHost(); VirtualMachineConfigInfo vmConfigInfo = getConfigInfo(); - + if(!hostMo.createBlankVm(clonedVmName, 1, cpuSpeedMHz, 0, false, memoryMb, 0, vmConfigInfo.getGuestId(), morDs, false)) throw new Exception("Unable to create a blank VM"); - + VirtualMachineMO clonedVmMo = hostMo.findVmOnHyperHost(clonedVmName); if(clonedVmMo == null) throw new Exception("Unable to find just-created blank VM"); - + boolean bSuccess = false; try { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - + VirtualDevice device = 
VmwareHelper.prepareDiskDevice(clonedVmMo, -1, disks, morDs, -1, 1); - + deviceConfigSpecArray[0].setDevice(device); deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); vmConfigSpec.setDeviceChange(deviceConfigSpecArray); @@ -1530,52 +1530,52 @@ public class VirtualMachineMO extends BaseMO { } } } - + public void plugDevice(VirtualDevice device) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[0].setDevice(device); deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); - + vmConfigSpec.setDeviceChange(deviceConfigSpecArray); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to add devices"); } } - + public void tearDownDevice(VirtualDevice device) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[0].setDevice(device); deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.remove); - + vmConfigSpec.setDeviceChange(deviceConfigSpecArray); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to detach devices"); } } - + public void tearDownDevices(Class[] deviceClasses) throws Exception { VirtualDevice[] devices = getMatchedDevices(deviceClasses); if(devices.length > 0) { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[devices.length]; - + for(int i = 0; i < devices.length; i++) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(devices[i]); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.remove); } - + 
vmConfigSpec.setDeviceChange(deviceConfigSpecArray); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to detach devices"); } } } - + public void copyAllVmDiskFiles(DatastoreMO destDsMo, String destDsDir, boolean followDiskChain) throws Exception { VirtualDevice[] disks = getAllDiskDevice(); DatacenterMO dcMo = getOwnerDatacenter().first(); @@ -1584,25 +1584,25 @@ public class VirtualMachineMO extends BaseMO { List> vmdkFiles = this.getDiskDatastorePathChain((VirtualDisk)disk, followDiskChain); for(Pair fileItem : vmdkFiles) { DatastoreMO srcDsMo = new DatastoreMO(_context, fileItem.second()); - + DatastoreFile srcFile = new DatastoreFile(fileItem.first()); DatastoreFile destFile = new DatastoreFile(destDsMo.getName(), destDsDir, srcFile.getFileName()); - + Pair vmdkDescriptor = null; - + vmdkDescriptor = getVmdkFileInfo(fileItem.first()); - + s_logger.info("Copy VM disk file " + srcFile.getPath() + " to " + destFile.getPath()); - srcDsMo.copyDatastoreFile(fileItem.first(), dcMo.getMor(), destDsMo.getMor(), + srcDsMo.copyDatastoreFile(fileItem.first(), dcMo.getMor(), destDsMo.getMor(), destFile.getPath(), dcMo.getMor(), true); - + if(vmdkDescriptor != null) { String vmdkBaseFileName = vmdkDescriptor.first().getBaseFileName(); String baseFilePath = srcFile.getCompanionPath(vmdkBaseFileName); destFile = new DatastoreFile(destDsMo.getName(), destDsDir, vmdkBaseFileName); - + s_logger.info("Copy VM disk file " + baseFilePath + " to " + destFile.getPath()); - srcDsMo.copyDatastoreFile(baseFilePath, dcMo.getMor(), destDsMo.getMor(), + srcDsMo.copyDatastoreFile(baseFilePath, dcMo.getMor(), destDsMo.getMor(), destFile.getPath(), dcMo.getMor(), true); } } @@ -1620,40 +1620,40 @@ public class VirtualMachineMO extends BaseMO { List> vmdkFiles = this.getDiskDatastorePathChain((VirtualDisk)disk, followDiskChain); for(Pair fileItem : vmdkFiles) { DatastoreMO srcDsMo = new DatastoreMO(_context, fileItem.second()); - + DatastoreFile srcFile = new 
DatastoreFile(fileItem.first()); DatastoreFile destFile = new DatastoreFile(destDsMo.getName(), destDsDir, srcFile.getFileName()); - + Pair vmdkDescriptor = null; vmdkDescriptor = getVmdkFileInfo(fileItem.first()); - + s_logger.info("Move VM disk file " + srcFile.getPath() + " to " + destFile.getPath()); - srcDsMo.moveDatastoreFile(fileItem.first(), dcMo.getMor(), destDsMo.getMor(), + srcDsMo.moveDatastoreFile(fileItem.first(), dcMo.getMor(), destDsMo.getMor(), destFile.getPath(), dcMo.getMor(), true); - + if(vmdkDescriptor != null) { String vmdkBaseFileName = vmdkDescriptor.first().getBaseFileName(); String baseFilePath = srcFile.getCompanionPath(vmdkBaseFileName); destFile = new DatastoreFile(destDsMo.getName(), destDsDir, vmdkBaseFileName); - + s_logger.info("Move VM disk file " + baseFilePath + " to " + destFile.getPath()); - srcDsMo.moveDatastoreFile(baseFilePath, dcMo.getMor(), destDsMo.getMor(), + srcDsMo.moveDatastoreFile(baseFilePath, dcMo.getMor(), destDsMo.getMor(), destFile.getPath(), dcMo.getMor(), true); } } } } } - + public int getNextScsiDiskDeviceNumber() throws Exception { int scsiControllerKey = getScsiDeviceControllerKey(); return getNextDeviceNumber(scsiControllerKey); } - + public int getScsiDeviceControllerKey() throws Exception { VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualLsiLogicController) { @@ -1661,7 +1661,7 @@ public class VirtualMachineMO extends BaseMO { } } } - + assert(false); throw new Exception("SCSI Controller Not Found"); } @@ -1669,7 +1669,7 @@ public class VirtualMachineMO extends BaseMO { public int getScsiDeviceControllerKeyNoException() throws Exception { VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). 
getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualLsiLogicController) { @@ -1677,10 +1677,10 @@ public class VirtualMachineMO extends BaseMO { } } } - + return -1; } - + public void ensureScsiDeviceController() throws Exception { int scsiControllerKey = getScsiDeviceControllerKeyNoException(); if(scsiControllerKey < 0) { @@ -1701,54 +1701,54 @@ public class VirtualMachineMO extends BaseMO { } } } - + // return pair of VirtualDisk and disk device bus name(ide0:0, etc) public Pair getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception { VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil().getDynamicProperty(_mor, "config.hardware.device"); - + s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath); DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath); String srcBaseName = dsSrcFile.getFileBaseName(); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualDisk) { s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber()); - + VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking(); if(backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; do { s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName()); - + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); String backingBaseName = dsBackingFile.getFileBaseName(); if(matchExactly) { if(backingBaseName .equalsIgnoreCase(srcBaseName)) { String deviceNumbering = getDeviceBusName(devices, device); - + s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); return new Pair((VirtualDisk)device, 
deviceNumbering); - } + } } else { if(backingBaseName.contains(srcBaseName)) { String deviceNumbering = getDeviceBusName(devices, device); s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering); return new Pair((VirtualDisk)device, deviceNumbering); - } + } } - + diskBackingInfo = diskBackingInfo.getParent(); } while(diskBackingInfo != null); - } + } } } } - + return null; } - + @Deprecated public List> getDiskDatastorePathChain(VirtualDisk disk, boolean followChain) throws Exception { VirtualDeviceBackingInfo backingInfo = disk.getBacking(); @@ -1763,18 +1763,18 @@ public class VirtualMachineMO extends BaseMO { pathList.add(new Pair(diskBackingInfo.getFileName(), diskBackingInfo.getDatastore())); return pathList; } - + Pair dcPair = getOwnerDatacenter(); VirtualMachineFileInfo vmFilesInfo = getFileInfo(); DatastoreFile snapshotDirFile = new DatastoreFile(vmFilesInfo.getSnapshotDirectory()); DatastoreFile vmxDirFile = new DatastoreFile(vmFilesInfo.getVmPathName()); - + do { if(diskBackingInfo.getParent() != null) { pathList.add(new Pair(diskBackingInfo.getFileName(), diskBackingInfo.getDatastore())); diskBackingInfo = diskBackingInfo.getParent(); } else { - // try getting parent info from VMDK file itself + // try getting parent info from VMDK file itself byte[] content = null; try { String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), diskBackingInfo.getFileName()); @@ -1782,7 +1782,7 @@ public class VirtualMachineMO extends BaseMO { if(content == null || content.length == 0) { break; } - + pathList.add(new Pair(diskBackingInfo.getFileName(), diskBackingInfo.getDatastore())); } catch(Exception e) { // if snapshot directory has been changed to place other than default. 
VMware has a bug @@ -1790,23 +1790,23 @@ public class VirtualMachineMO extends BaseMO { // in snapshot directory one more time DatastoreFile currentFile = new DatastoreFile(diskBackingInfo.getFileName()); String vmdkFullDsPath = snapshotDirFile.getCompanionPath(currentFile.getFileName()); - + String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), vmdkFullDsPath); content = getContext().getResourceContent(url); if(content == null || content.length == 0) { break; } - + pathList.add(new Pair(vmdkFullDsPath, diskBackingInfo.getDatastore())); } - + VmdkFileDescriptor descriptor = new VmdkFileDescriptor(); descriptor.parse(content); if(descriptor.getParentFileName() != null && !descriptor.getParentFileName().isEmpty()) { // create a fake one VirtualDiskFlatVer2BackingInfo parentDiskBackingInfo = new VirtualDiskFlatVer2BackingInfo(); parentDiskBackingInfo.setDatastore(diskBackingInfo.getDatastore()); - + String parentFileName = descriptor.getParentFileName(); if(parentFileName.startsWith("/")) { int fileNameStartPos = parentFileName.lastIndexOf("/"); @@ -1815,16 +1815,16 @@ public class VirtualMachineMO extends BaseMO { } else { parentDiskBackingInfo.setFileName(snapshotDirFile.getCompanionPath(parentFileName)); } - diskBackingInfo = parentDiskBackingInfo; + diskBackingInfo = parentDiskBackingInfo; } else { break; } } } while(diskBackingInfo != null); - + return pathList; } - + private String getDeviceBusName(VirtualDevice[] allDevices, VirtualDevice theDevice) throws Exception { for(VirtualDevice device : allDevices) { if(device.getKey() == theDevice.getControllerKey().intValue()) { @@ -1839,7 +1839,7 @@ public class VirtualMachineMO extends BaseMO { } throw new Exception("Unable to find device controller"); } - + public VirtualDisk[] getAllDiskDevice() throws Exception { List deviceList = new ArrayList(); VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil().getDynamicProperty(_mor, "config.hardware.device"); @@ -1850,7 +1850,7 @@ public 
class VirtualMachineMO extends BaseMO { } } } - + return deviceList.toArray(new VirtualDisk[0]); } @@ -1871,20 +1871,20 @@ public class VirtualMachineMO extends BaseMO { } else if(disk.getBacking() instanceof VirtualDiskSparseVer2BackingInfo) { diskMode = ((VirtualDiskSparseVer2BackingInfo)disk.getBacking()).getDiskMode(); } - + if(diskMode.indexOf("independent") != -1) { independentDisks.add(disk); } } } - + return independentDisks.toArray(new VirtualDisk[0]); } - + public int tryGetIDEDeviceControllerKey() throws Exception { VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualIDEController) { @@ -1892,14 +1892,14 @@ public class VirtualMachineMO extends BaseMO { } } } - + return -1; } - + public int getIDEDeviceControllerKey() throws Exception { VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualIDEController) { @@ -1907,16 +1907,16 @@ public class VirtualMachineMO extends BaseMO { } } } - + assert(false); throw new Exception("IDE Controller Not Found"); } - + public int getNextIDEDeviceNumber() throws Exception { int controllerKey = getIDEDeviceControllerKey(); return getNextDeviceNumber(controllerKey); } - + public VirtualDevice getIsoDevice() throws Exception { VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); @@ -1929,11 +1929,11 @@ public class VirtualMachineMO extends BaseMO { } return null; } - + public int getPCIDeviceControllerKey() throws Exception { VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). 
getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualPCIController) { @@ -1941,20 +1941,20 @@ public class VirtualMachineMO extends BaseMO { } } } - + assert(false); throw new Exception("PCI Controller Not Found"); } - + public int getNextPCIDeviceNumber() throws Exception { int controllerKey = getPCIDeviceControllerKey(); return getNextDeviceNumber(controllerKey); } - + public int getNextDeviceNumber(int controllerKey) throws Exception { VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + int deviceNumber = -1; if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { @@ -1967,11 +1967,11 @@ public class VirtualMachineMO extends BaseMO { } return ++deviceNumber; } - + public VirtualDevice[] getNicDevices() throws Exception { VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + List nics = new ArrayList(); if(devices != null) { for(VirtualDevice device : devices) { @@ -1980,14 +1980,14 @@ public class VirtualMachineMO extends BaseMO { } } } - + return nics.toArray(new VirtualDevice[0]); } - + public Pair getNicDeviceIndex(String networkNamePrefix) throws Exception { VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). 
getDynamicProperty(_mor, "config.hardware.device"); - + List nics = new ArrayList(); if(devices != null) { for(VirtualDevice device : devices) { @@ -1996,7 +1996,7 @@ public class VirtualMachineMO extends BaseMO { } } } - + Collections.sort(nics, new Comparator() { @Override public int compare(VirtualDevice arg0, VirtualDevice arg1) { @@ -2009,7 +2009,7 @@ public class VirtualMachineMO extends BaseMO { return 0; } }); - + int index = 0; String attachedNetworkSummary; String dvPortGroupName; @@ -2042,12 +2042,12 @@ public class VirtualMachineMO extends BaseMO { public VirtualDevice[] getMatchedDevices(Class[] deviceClasses) throws Exception { assert(deviceClasses != null); - + List returnList = new ArrayList(); - + VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). getDynamicProperty(_mor, "config.hardware.device"); - + if(devices != null) { for(VirtualDevice device : devices) { for(Class clz : deviceClasses) { @@ -2058,40 +2058,40 @@ public class VirtualMachineMO extends BaseMO { } } } - + return returnList.toArray(new VirtualDevice[0]); } - + public void mountToolsInstaller() throws Exception { _context.getService().mountToolsInstaller(_mor); } - + public void unmountToolsInstaller() throws Exception { _context.getService().unmountToolsInstaller(_mor); } - + public void redoRegistration(ManagedObjectReference morHost) throws Exception { String vmName = getVmName(); VirtualMachineFileInfo vmFileInfo = getFileInfo(); boolean isTemplate = isTemplate(); - + HostMO hostMo; if(morHost != null) hostMo = new HostMO(getContext(), morHost); else hostMo = getRunningHost(); - + ManagedObjectReference morFolder = getParentMor(); ManagedObjectReference morPool = hostMo.getHyperHostOwnerResourcePool(); - + _context.getService().unregisterVM(_mor); - + ManagedObjectReference morTask = _context.getService().registerVM_Task( - morFolder, + morFolder, vmFileInfo.getVmPathName(), - vmName, false, + vmName, false, morPool, hostMo.getMor()); - + String result = 
_context.getServiceUtil().waitForTask(morTask); if (!result.equalsIgnoreCase("Sucess")) { throw new Exception("Unable to register template due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -2099,7 +2099,7 @@ public class VirtualMachineMO extends BaseMO { _context.waitForTaskProgressDone(morTask); if(isTemplate) { VirtualMachineMO vmNewRegistration = hostMo.findVmOnHyperHost(vmName); - assert(vmNewRegistration != null); + assert(vmNewRegistration != null); vmNewRegistration.markAsTemplate(); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareGuestOsMapper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareGuestOsMapper.java index 7d26983fcbf..14331481513 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareGuestOsMapper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareGuestOsMapper.java @@ -24,146 +24,150 @@ import com.vmware.vim25.VirtualMachineGuestOsIdentifier; public class VmwareGuestOsMapper { private static Map s_mapper = new HashMap(); static { - s_mapper.put("DOS", VirtualMachineGuestOsIdentifier.dosGuest); - s_mapper.put("OS/2", VirtualMachineGuestOsIdentifier.os2Guest); + s_mapper.put("DOS", VirtualMachineGuestOsIdentifier.DOS_GUEST); + s_mapper.put("OS/2", VirtualMachineGuestOsIdentifier.OS_2_GUEST); - s_mapper.put("Windows 3.1", VirtualMachineGuestOsIdentifier.win31Guest); - s_mapper.put("Windows 95", VirtualMachineGuestOsIdentifier.win95Guest); - s_mapper.put("Windows 98", VirtualMachineGuestOsIdentifier.win98Guest); - s_mapper.put("Windows NT 4", VirtualMachineGuestOsIdentifier.winNTGuest); - s_mapper.put("Windows XP (32-bit)", VirtualMachineGuestOsIdentifier.winXPProGuest); - s_mapper.put("Windows XP (64-bit)", VirtualMachineGuestOsIdentifier.winXPPro64Guest); - s_mapper.put("Windows XP SP2 (32-bit)", VirtualMachineGuestOsIdentifier.winXPProGuest); - s_mapper.put("Windows XP SP3 (32-bit)", VirtualMachineGuestOsIdentifier.winXPProGuest); - s_mapper.put("Windows Vista (32-bit)", 
VirtualMachineGuestOsIdentifier.winVistaGuest); - s_mapper.put("Windows Vista (64-bit)", VirtualMachineGuestOsIdentifier.winVista64Guest); - s_mapper.put("Windows 7 (32-bit)", VirtualMachineGuestOsIdentifier.windows7Guest); - s_mapper.put("Windows 7 (64-bit)", VirtualMachineGuestOsIdentifier.windows7_64Guest); + s_mapper.put("Windows 3.1", VirtualMachineGuestOsIdentifier.WIN_31_GUEST); + s_mapper.put("Windows 95", VirtualMachineGuestOsIdentifier.WIN_95_GUEST); + s_mapper.put("Windows 98", VirtualMachineGuestOsIdentifier.WIN_98_GUEST); + s_mapper.put("Windows NT 4", VirtualMachineGuestOsIdentifier.WIN_NT_GUEST); + s_mapper.put("Windows XP (32-bit)", VirtualMachineGuestOsIdentifier.WIN_XP_PRO_GUEST); + s_mapper.put("Windows XP (64-bit)", VirtualMachineGuestOsIdentifier.WIN_XP_PRO_64_GUEST); + s_mapper.put("Windows XP SP2 (32-bit)", VirtualMachineGuestOsIdentifier.WIN_XP_PRO_GUEST); + s_mapper.put("Windows XP SP3 (32-bit)", VirtualMachineGuestOsIdentifier.WIN_XP_PRO_GUEST); + s_mapper.put("Windows Vista (32-bit)", VirtualMachineGuestOsIdentifier.WIN_VISTA_GUEST); + s_mapper.put("Windows Vista (64-bit)", VirtualMachineGuestOsIdentifier.WIN_VISTA_64_GUEST); + s_mapper.put("Windows 7 (32-bit)", VirtualMachineGuestOsIdentifier.WINDOWS_7_GUEST); + s_mapper.put("Windows 7 (64-bit)", VirtualMachineGuestOsIdentifier.WINDOWS_7_64_GUEST); - s_mapper.put("Windows 2000 Professional", VirtualMachineGuestOsIdentifier.win2000ProGuest); - s_mapper.put("Windows 2000 Server", VirtualMachineGuestOsIdentifier.win2000ServGuest); - s_mapper.put("Windows 2000 Server SP4 (32-bit)", VirtualMachineGuestOsIdentifier.win2000ServGuest); - s_mapper.put("Windows 2000 Advanced Server", VirtualMachineGuestOsIdentifier.win2000AdvServGuest); - - s_mapper.put("Windows Server 2003 Enterprise Edition(32-bit)", VirtualMachineGuestOsIdentifier.winNetEnterpriseGuest); - s_mapper.put("Windows Server 2003 Enterprise Edition(64-bit)", VirtualMachineGuestOsIdentifier.winNetEnterprise64Guest); - 
s_mapper.put("Windows Server 2008 R2 (64-bit)", VirtualMachineGuestOsIdentifier.winLonghorn64Guest); - s_mapper.put("Windows Server 2003 DataCenter Edition(32-bit)", VirtualMachineGuestOsIdentifier.winNetDatacenterGuest); - s_mapper.put("Windows Server 2003 DataCenter Edition(64-bit)", VirtualMachineGuestOsIdentifier.winNetDatacenter64Guest); - s_mapper.put("Windows Server 2003 Standard Edition(32-bit)", VirtualMachineGuestOsIdentifier.winNetStandardGuest); - s_mapper.put("Windows Server 2003 Standard Edition(64-bit)", VirtualMachineGuestOsIdentifier.winNetStandard64Guest); - s_mapper.put("Windows Server 2003 Web Edition", VirtualMachineGuestOsIdentifier.winNetWebGuest); - s_mapper.put("Microsoft Small Bussiness Server 2003", VirtualMachineGuestOsIdentifier.winNetBusinessGuest); - - s_mapper.put("Windows Server 2008 (32-bit)", VirtualMachineGuestOsIdentifier.winLonghornGuest); - s_mapper.put("Windows Server 2008 (64-bit)", VirtualMachineGuestOsIdentifier.winLonghorn64Guest); - - s_mapper.put("Open Enterprise Server", VirtualMachineGuestOsIdentifier.oesGuest); - - s_mapper.put("Asianux 3(32-bit)", VirtualMachineGuestOsIdentifier.asianux3Guest); - s_mapper.put("Asianux 3(64-bit)", VirtualMachineGuestOsIdentifier.asianux3_64Guest); - - s_mapper.put("Debian GNU/Linux 5(64-bit)", VirtualMachineGuestOsIdentifier.debian5_64Guest); - s_mapper.put("Debian GNU/Linux 5.0 (32-bit)", VirtualMachineGuestOsIdentifier.debian5Guest); - s_mapper.put("Debian GNU/Linux 4(32-bit)", VirtualMachineGuestOsIdentifier.debian4Guest); - s_mapper.put("Debian GNU/Linux 4(64-bit)", VirtualMachineGuestOsIdentifier.debian4_64Guest); - - s_mapper.put("Novell Netware 6.x", VirtualMachineGuestOsIdentifier.netware6Guest); - s_mapper.put("Novell Netware 5.1", VirtualMachineGuestOsIdentifier.netware5Guest); - - s_mapper.put("Sun Solaris 10(32-bit)", VirtualMachineGuestOsIdentifier.solaris10Guest); - s_mapper.put("Sun Solaris 10(64-bit)", VirtualMachineGuestOsIdentifier.solaris10_64Guest); - 
s_mapper.put("Sun Solaris 9(Experimental)", VirtualMachineGuestOsIdentifier.solaris9Guest); - s_mapper.put("Sun Solaris 8(Experimental)", VirtualMachineGuestOsIdentifier.solaris8Guest); - - s_mapper.put("FreeBSD (32-bit)", VirtualMachineGuestOsIdentifier.freebsdGuest); - s_mapper.put("FreeBSD (64-bit)", VirtualMachineGuestOsIdentifier.freebsd64Guest); - - s_mapper.put("SCO OpenServer 5", VirtualMachineGuestOsIdentifier.otherGuest); - s_mapper.put("SCO UnixWare 7", VirtualMachineGuestOsIdentifier.unixWare7Guest); - - s_mapper.put("SUSE Linux Enterprise 8(32-bit)", VirtualMachineGuestOsIdentifier.suseGuest); - s_mapper.put("SUSE Linux Enterprise 8(64-bit)", VirtualMachineGuestOsIdentifier.suse64Guest); - s_mapper.put("SUSE Linux Enterprise 9(32-bit)", VirtualMachineGuestOsIdentifier.suseGuest); - s_mapper.put("SUSE Linux Enterprise 9(64-bit)", VirtualMachineGuestOsIdentifier.suse64Guest); - s_mapper.put("SUSE Linux Enterprise 10(32-bit)", VirtualMachineGuestOsIdentifier.suseGuest); - s_mapper.put("SUSE Linux Enterprise 10(64-bit)", VirtualMachineGuestOsIdentifier.suse64Guest); - s_mapper.put("SUSE Linux Enterprise 10(32-bit)", VirtualMachineGuestOsIdentifier.suseGuest); - s_mapper.put("Other SUSE Linux(32-bit)", VirtualMachineGuestOsIdentifier.suseGuest); - s_mapper.put("Other SUSE Linux(64-bit)", VirtualMachineGuestOsIdentifier.suse64Guest); - - s_mapper.put("CentOS 4.5 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 4.6 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 4.7 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 4.8 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.0 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.0 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.1 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.1 (64-bit)", 
VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.2 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.2 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.3 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.3 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.4 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.4 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.5 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.5 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 5.6 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 5.6 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("CentOS 6.0 (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("CentOS 6.0 (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - s_mapper.put("Other CentOS (32-bit)", VirtualMachineGuestOsIdentifier.centosGuest); - s_mapper.put("Other CentOS (64-bit)", VirtualMachineGuestOsIdentifier.centos64Guest); - - s_mapper.put("Red Hat Enterprise Linux 2", VirtualMachineGuestOsIdentifier.rhel2Guest); - s_mapper.put("Red Hat Enterprise Linux 3(32-bit)", VirtualMachineGuestOsIdentifier.rhel3Guest); - s_mapper.put("Red Hat Enterprise Linux 3(64-bit)", VirtualMachineGuestOsIdentifier.rhel3_64Guest); - s_mapper.put("Red Hat Enterprise Linux 4(32-bit)", VirtualMachineGuestOsIdentifier.rhel4Guest); - s_mapper.put("Red Hat Enterprise Linux 4(64-bit)", VirtualMachineGuestOsIdentifier.rhel4_64Guest); - s_mapper.put("Red Hat Enterprise Linux 5(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - s_mapper.put("Red Hat Enterprise Linux 6(32-bit)", 
VirtualMachineGuestOsIdentifier.rhel6Guest); - s_mapper.put("Red Hat Enterprise Linux 6(64-bit)", VirtualMachineGuestOsIdentifier.rhel6_64Guest); - - s_mapper.put("Red Hat Enterprise Linux 4.5 (32-bit)", VirtualMachineGuestOsIdentifier.rhel4Guest); - s_mapper.put("Red Hat Enterprise Linux 4.6 (32-bit)", VirtualMachineGuestOsIdentifier.rhel4Guest); - s_mapper.put("Red Hat Enterprise Linux 4.7 (32-bit)", VirtualMachineGuestOsIdentifier.rhel4Guest); - s_mapper.put("Red Hat Enterprise Linux 4.8 (32-bit)", VirtualMachineGuestOsIdentifier.rhel4Guest); - s_mapper.put("Red Hat Enterprise Linux 5.0(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5.0(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - s_mapper.put("Red Hat Enterprise Linux 5.1(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5.1(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - s_mapper.put("Red Hat Enterprise Linux 5.2(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5.2(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - s_mapper.put("Red Hat Enterprise Linux 5.3(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5.3(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - s_mapper.put("Red Hat Enterprise Linux 5.4(32-bit)", VirtualMachineGuestOsIdentifier.rhel5Guest); - s_mapper.put("Red Hat Enterprise Linux 5.4(64-bit)", VirtualMachineGuestOsIdentifier.rhel5_64Guest); - - s_mapper.put("Ubuntu 8.04 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Ubuntu 8.04 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Ubuntu 8.10 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Ubuntu 8.10 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Ubuntu 9.04 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - 
s_mapper.put("Ubuntu 9.04 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Ubuntu 9.10 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Ubuntu 9.10 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Ubuntu 10.04 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Ubuntu 10.04 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Ubuntu 10.10 (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Ubuntu 10.10 (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); - s_mapper.put("Other Ubuntu (32-bit)", VirtualMachineGuestOsIdentifier.ubuntuGuest); - s_mapper.put("Other Ubuntu (64-bit)", VirtualMachineGuestOsIdentifier.ubuntu64Guest); + s_mapper.put("Windows 2000 Professional", VirtualMachineGuestOsIdentifier.WIN_2000_PRO_GUEST); + s_mapper.put("Windows 2000 Server", VirtualMachineGuestOsIdentifier.WIN_2000_SERV_GUEST); + s_mapper.put("Windows 2000 Server SP4 (32-bit)", VirtualMachineGuestOsIdentifier.WIN_2000_SERV_GUEST); + s_mapper.put("Windows 2000 Advanced Server", VirtualMachineGuestOsIdentifier.WIN_2000_ADV_SERV_GUEST); - s_mapper.put("Other 2.6x Linux (32-bit)", VirtualMachineGuestOsIdentifier.other26xLinuxGuest); - s_mapper.put("Other 2.6x Linux (64-bit)", VirtualMachineGuestOsIdentifier.other26xLinux64Guest); - s_mapper.put("Other Linux (32-bit)", VirtualMachineGuestOsIdentifier.otherLinuxGuest); - s_mapper.put("Other Linux (64-bit)", VirtualMachineGuestOsIdentifier.otherLinux64Guest); - - s_mapper.put("Other (32-bit)", VirtualMachineGuestOsIdentifier.otherGuest); - s_mapper.put("Other (64-bit)", VirtualMachineGuestOsIdentifier.otherGuest64); + s_mapper.put("Windows Server 2003 Enterprise Edition(32-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_ENTERPRISE_GUEST); + s_mapper.put("Windows Server 2003 Enterprise Edition(64-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_ENTERPRISE_64_GUEST); + s_mapper.put("Windows Server 
2008 R2 (64-bit)", VirtualMachineGuestOsIdentifier.WIN_LONGHORN_64_GUEST); + s_mapper.put("Windows Server 2003 DataCenter Edition(32-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_DATACENTER_GUEST); + s_mapper.put("Windows Server 2003 DataCenter Edition(64-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_DATACENTER_64_GUEST); + s_mapper.put("Windows Server 2003 Standard Edition(32-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_STANDARD_GUEST); + s_mapper.put("Windows Server 2003 Standard Edition(64-bit)", VirtualMachineGuestOsIdentifier.WIN_NET_STANDARD_64_GUEST); + s_mapper.put("Windows Server 2003 Web Edition", VirtualMachineGuestOsIdentifier.WIN_NET_WEB_GUEST); + s_mapper.put("Microsoft Small Bussiness Server 2003", VirtualMachineGuestOsIdentifier.WIN_NET_BUSINESS_GUEST); + + s_mapper.put("Windows Server 2008 (32-bit)", VirtualMachineGuestOsIdentifier.WIN_LONGHORN_GUEST); + s_mapper.put("Windows Server 2008 (64-bit)", VirtualMachineGuestOsIdentifier.WIN_LONGHORN_64_GUEST); + + s_mapper.put("Windows 8", VirtualMachineGuestOsIdentifier.WINDOWS_8_GUEST); + s_mapper.put("Windows 8 (64 bit)", VirtualMachineGuestOsIdentifier.WINDOWS_8_64_GUEST); + s_mapper.put("Windows 8 Server (64 bit)", VirtualMachineGuestOsIdentifier.WINDOWS_8_SERVER_64_GUEST); + + s_mapper.put("Open Enterprise Server", VirtualMachineGuestOsIdentifier.OES_GUEST); + + s_mapper.put("Asianux 3(32-bit)", VirtualMachineGuestOsIdentifier.ASIANUX_3_GUEST); + s_mapper.put("Asianux 3(64-bit)", VirtualMachineGuestOsIdentifier.ASIANUX_3_64_GUEST); + + s_mapper.put("Debian GNU/Linux 5(64-bit)", VirtualMachineGuestOsIdentifier.DEBIAN_5_64_GUEST); + s_mapper.put("Debian GNU/Linux 5.0 (32-bit)", VirtualMachineGuestOsIdentifier.DEBIAN_5_GUEST); + s_mapper.put("Debian GNU/Linux 4(32-bit)", VirtualMachineGuestOsIdentifier.DEBIAN_4_GUEST); + s_mapper.put("Debian GNU/Linux 4(64-bit)", VirtualMachineGuestOsIdentifier.DEBIAN_4_64_GUEST); + + s_mapper.put("Novell Netware 6.x", 
VirtualMachineGuestOsIdentifier.NETWARE_6_GUEST); + s_mapper.put("Novell Netware 5.1", VirtualMachineGuestOsIdentifier.NETWARE_5_GUEST); + + s_mapper.put("Sun Solaris 10(32-bit)", VirtualMachineGuestOsIdentifier.SOLARIS_10_GUEST); + s_mapper.put("Sun Solaris 10(64-bit)", VirtualMachineGuestOsIdentifier.SOLARIS_10_64_GUEST); + s_mapper.put("Sun Solaris 9(Experimental)", VirtualMachineGuestOsIdentifier.SOLARIS_9_GUEST); + s_mapper.put("Sun Solaris 8(Experimental)", VirtualMachineGuestOsIdentifier.SOLARIS_8_GUEST); + + s_mapper.put("FreeBSD (32-bit)", VirtualMachineGuestOsIdentifier.FREEBSD_GUEST); + s_mapper.put("FreeBSD (64-bit)", VirtualMachineGuestOsIdentifier.FREEBSD_64_GUEST); + + s_mapper.put("SCO OpenServer 5", VirtualMachineGuestOsIdentifier.OTHER_GUEST); + s_mapper.put("SCO UnixWare 7", VirtualMachineGuestOsIdentifier.UNIX_WARE_7_GUEST); + + s_mapper.put("SUSE Linux Enterprise 8(32-bit)", VirtualMachineGuestOsIdentifier.SUSE_GUEST); + s_mapper.put("SUSE Linux Enterprise 8(64-bit)", VirtualMachineGuestOsIdentifier.SUSE_64_GUEST); + s_mapper.put("SUSE Linux Enterprise 9(32-bit)", VirtualMachineGuestOsIdentifier.SUSE_GUEST); + s_mapper.put("SUSE Linux Enterprise 9(64-bit)", VirtualMachineGuestOsIdentifier.SUSE_64_GUEST); + s_mapper.put("SUSE Linux Enterprise 10(32-bit)", VirtualMachineGuestOsIdentifier.SUSE_GUEST); + s_mapper.put("SUSE Linux Enterprise 10(64-bit)", VirtualMachineGuestOsIdentifier.SUSE_64_GUEST); + s_mapper.put("SUSE Linux Enterprise 10(32-bit)", VirtualMachineGuestOsIdentifier.SUSE_GUEST); + s_mapper.put("Other SUSE Linux(32-bit)", VirtualMachineGuestOsIdentifier.SUSE_GUEST); + s_mapper.put("Other SUSE Linux(64-bit)", VirtualMachineGuestOsIdentifier.SUSE_64_GUEST); + + s_mapper.put("CentOS 4.5 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 4.6 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 4.7 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 4.8 
(32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.0 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.0 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.1 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.1 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.2 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.2 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.3 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.3 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.4 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.4 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.5 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.5 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 5.6 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 5.6 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("CentOS 6.0 (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("CentOS 6.0 (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + s_mapper.put("Other CentOS (32-bit)", VirtualMachineGuestOsIdentifier.CENTOS_GUEST); + s_mapper.put("Other CentOS (64-bit)", VirtualMachineGuestOsIdentifier.CENTOS_64_GUEST); + + s_mapper.put("Red Hat Enterprise Linux 2", VirtualMachineGuestOsIdentifier.RHEL_2_GUEST); + s_mapper.put("Red Hat Enterprise Linux 3(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_3_GUEST); + s_mapper.put("Red Hat Enterprise Linux 3(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_3_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 4(32-bit)", 
VirtualMachineGuestOsIdentifier.RHEL_4_GUEST); + s_mapper.put("Red Hat Enterprise Linux 4(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_4_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 6(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_6_GUEST); + s_mapper.put("Red Hat Enterprise Linux 6(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_6_64_GUEST); + + s_mapper.put("Red Hat Enterprise Linux 4.5 (32-bit)", VirtualMachineGuestOsIdentifier.RHEL_4_GUEST); + s_mapper.put("Red Hat Enterprise Linux 4.6 (32-bit)", VirtualMachineGuestOsIdentifier.RHEL_4_GUEST); + s_mapper.put("Red Hat Enterprise Linux 4.7 (32-bit)", VirtualMachineGuestOsIdentifier.RHEL_4_GUEST); + s_mapper.put("Red Hat Enterprise Linux 4.8 (32-bit)", VirtualMachineGuestOsIdentifier.RHEL_4_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.0(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.0(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.1(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.1(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.2(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.2(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.3(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.3(64-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.4(32-bit)", VirtualMachineGuestOsIdentifier.RHEL_5_GUEST); + s_mapper.put("Red Hat Enterprise Linux 5.4(64-bit)", 
VirtualMachineGuestOsIdentifier.RHEL_5_64_GUEST); + + s_mapper.put("Ubuntu 8.04 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 8.04 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Ubuntu 8.10 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 8.10 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Ubuntu 9.04 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 9.04 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Ubuntu 9.10 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 9.10 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Ubuntu 10.04 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 10.04 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Ubuntu 10.10 (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Ubuntu 10.10 (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + s_mapper.put("Other Ubuntu (32-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_GUEST); + s_mapper.put("Other Ubuntu (64-bit)", VirtualMachineGuestOsIdentifier.UBUNTU_64_GUEST); + + s_mapper.put("Other 2.6x Linux (32-bit)", VirtualMachineGuestOsIdentifier.OTHER_26_X_LINUX_GUEST); + s_mapper.put("Other 2.6x Linux (64-bit)", VirtualMachineGuestOsIdentifier.OTHER_26_X_LINUX_64_GUEST); + s_mapper.put("Other Linux (32-bit)", VirtualMachineGuestOsIdentifier.OTHER_LINUX_GUEST); + s_mapper.put("Other Linux (64-bit)", VirtualMachineGuestOsIdentifier.OTHER_LINUX_64_GUEST); + + s_mapper.put("Other (32-bit)", VirtualMachineGuestOsIdentifier.OTHER_GUEST); + s_mapper.put("Other (64-bit)", VirtualMachineGuestOsIdentifier.OTHER_GUEST_64); } - + public static VirtualMachineGuestOsIdentifier getGuestOsIdentifier(String guestOsName) { return s_mapper.get(guestOsName); } diff --git 
a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 47ff8e20004..8e6947fd072 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.StringWriter; +import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -69,38 +70,38 @@ import com.vmware.vim25.VirtualVmxnet3; public class VmwareHelper { private static final Logger s_logger = Logger.getLogger(VmwareHelper.class); - + public static VirtualDevice prepareNicDevice(VirtualMachineMO vmMo, ManagedObjectReference morNetwork, VirtualEthernetCardType deviceType, String portGroupName, String macAddress, int deviceNumber, int contextNumber, boolean conntected, boolean connectOnStart) throws Exception { - + VirtualEthernetCard nic; switch(deviceType) { case E1000 : nic = new VirtualE1000(); break; - + case PCNet32 : nic = new VirtualPCNet32(); break; - + case Vmxnet2 : nic = new VirtualVmxnet2(); break; - + case Vmxnet3 : nic = new VirtualVmxnet3(); break; - + default : assert(false); nic = new VirtualE1000(); } - + VirtualEthernetCardNetworkBackingInfo nicBacking = new VirtualEthernetCardNetworkBackingInfo(); nicBacking.setDeviceName(portGroupName); nicBacking.setNetwork(morNetwork); nic.setBacking(nicBacking); - + VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); connectInfo.setAllowGuestControl(true); connectInfo.setConnected(conntected); @@ -112,7 +113,7 @@ public class VmwareHelper { nic.setKey(-contextNumber); return nic; } - + public static VirtualDevice prepareDvNicDevice(VirtualMachineMO vmMo, ManagedObjectReference morNetwork, VirtualEthernetCardType deviceType, String dvPortGroupName, String dvSwitchUuid, String macAddress, int 
deviceNumber, int contextNumber, boolean conntected, boolean connectOnStart) throws Exception { @@ -144,7 +145,7 @@ public class VmwareHelper { final VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); dvPortConnection.setSwitchUuid(dvSwitchUuid); - dvPortConnection.setPortgroupKey(morNetwork.get_value()); + dvPortConnection.setPortgroupKey(morNetwork.getValue()); dvPortBacking.setPort(dvPortConnection); nic.setBacking(dvPortBacking); nic.setKey(30); @@ -162,13 +163,13 @@ public class VmwareHelper { } // vmdkDatastorePath: [datastore name] vmdkFilePath - public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePath, + public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, int deviceNumber, int contextNumber) throws Exception { - + VirtualDisk disk = new VirtualDisk(); VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); - backingInfo.setDiskMode(VirtualDiskMode.persistent.toString()); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); backingInfo.setThinProvisioned(true); backingInfo.setEagerlyScrub(false); backingInfo.setDatastore(morDs); @@ -180,7 +181,7 @@ public class VmwareHelper { if(deviceNumber < 0) deviceNumber = vmMo.getNextDeviceNumber(controllerKey); disk.setControllerKey(controllerKey); - + disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); disk.setCapacityInKB(sizeInMb*1024); @@ -189,19 +190,19 @@ public class VmwareHelper { connectInfo.setConnected(true); connectInfo.setStartConnected(true); disk.setConnectable(connectInfo); - + return disk; } - + // vmdkDatastorePath: [datastore name] vmdkFilePath, create delta disk based on disk from template - public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePath, + public static VirtualDevice prepareDiskDevice(VirtualMachineMO 
vmMo, int controllerKey, String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, VirtualDisk templateDisk, int deviceNumber, int contextNumber) throws Exception { - + assert(templateDisk != null); VirtualDeviceBackingInfo parentBacking = templateDisk.getBacking(); assert(parentBacking != null); - - // TODO Not sure if we need to check if the disk in template and the new disk needs to share the + + // TODO Not sure if we need to check if the disk in template and the new disk needs to share the // same datastore VirtualDisk disk = new VirtualDisk(); if(parentBacking instanceof VirtualDiskFlatVer1BackingInfo) { @@ -242,13 +243,13 @@ public class VmwareHelper { } else { throw new Exception("Unsupported disk backing: " + parentBacking.getClass().getCanonicalName()); } - + if(controllerKey < 0) controllerKey = vmMo.getIDEDeviceControllerKey(); disk.setControllerKey(controllerKey); if(deviceNumber < 0) deviceNumber = vmMo.getNextDeviceNumber(controllerKey); - + disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); disk.setCapacityInKB(sizeInMb*1024); @@ -259,94 +260,94 @@ public class VmwareHelper { disk.setConnectable(connectInfo); return disk; } - + // vmdkDatastorePath: [datastore name] vmdkFilePath - public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePathChain[], + public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePathChain[], ManagedObjectReference morDs, int deviceNumber, int contextNumber) throws Exception { - + assert(vmdkDatastorePathChain != null); assert(vmdkDatastorePathChain.length >= 1); - + VirtualDisk disk = new VirtualDisk(); - + VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); backingInfo.setDatastore(morDs); backingInfo.setFileName(vmdkDatastorePathChain[0]); - backingInfo.setDiskMode(VirtualDiskMode.persistent.toString()); + 
backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); if(vmdkDatastorePathChain.length > 1) { String[] parentDisks = new String[vmdkDatastorePathChain.length - 1]; for(int i = 0; i < vmdkDatastorePathChain.length - 1; i++) parentDisks[i] = vmdkDatastorePathChain[i + 1]; - + setParentBackingInfo(backingInfo, morDs, parentDisks); } - + disk.setBacking(backingInfo); if(controllerKey < 0) controllerKey = vmMo.getIDEDeviceControllerKey(); if(deviceNumber < 0) deviceNumber = vmMo.getNextDeviceNumber(controllerKey); - + disk.setControllerKey(controllerKey); disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); - + VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); connectInfo.setConnected(true); connectInfo.setStartConnected(true); disk.setConnectable(connectInfo); - + return disk; } - - public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, - Pair[] vmdkDatastorePathChain, + + public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, + Pair[] vmdkDatastorePathChain, int deviceNumber, int contextNumber) throws Exception { - + assert(vmdkDatastorePathChain != null); assert(vmdkDatastorePathChain.length >= 1); - + VirtualDisk disk = new VirtualDisk(); - + VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); backingInfo.setDatastore(vmdkDatastorePathChain[0].second()); backingInfo.setFileName(vmdkDatastorePathChain[0].first()); - backingInfo.setDiskMode(VirtualDiskMode.persistent.toString()); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); if(vmdkDatastorePathChain.length > 1) { Pair[] parentDisks = new Pair[vmdkDatastorePathChain.length - 1]; for(int i = 0; i < vmdkDatastorePathChain.length - 1; i++) parentDisks[i] = vmdkDatastorePathChain[i + 1]; - + setParentBackingInfo(backingInfo, parentDisks); } - + disk.setBacking(backingInfo); if(controllerKey < 0) controllerKey = vmMo.getIDEDeviceControllerKey(); if(deviceNumber < 0) 
deviceNumber = vmMo.getNextDeviceNumber(controllerKey); - + disk.setControllerKey(controllerKey); disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); - + VirtualDeviceConnectInfo connectInfo = new VirtualDeviceConnectInfo(); connectInfo.setConnected(true); connectInfo.setStartConnected(true); disk.setConnectable(connectInfo); - + return disk; } - - private static void setParentBackingInfo(VirtualDiskFlatVer2BackingInfo backingInfo, + + private static void setParentBackingInfo(VirtualDiskFlatVer2BackingInfo backingInfo, ManagedObjectReference morDs, String[] parentDatastorePathList) { - + VirtualDiskFlatVer2BackingInfo parentBacking = new VirtualDiskFlatVer2BackingInfo(); parentBacking.setDatastore(morDs); - parentBacking.setDiskMode(VirtualDiskMode.persistent.toString()); - + parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + if(parentDatastorePathList.length > 1) { String[] nextDatastorePathList = new String[parentDatastorePathList.length -1]; for(int i = 0; i < parentDatastorePathList.length -1; i++) @@ -354,17 +355,17 @@ public class VmwareHelper { setParentBackingInfo(parentBacking, morDs, nextDatastorePathList); } parentBacking.setFileName(parentDatastorePathList[0]); - + backingInfo.setParent(parentBacking); } - - private static void setParentBackingInfo(VirtualDiskFlatVer2BackingInfo backingInfo, + + private static void setParentBackingInfo(VirtualDiskFlatVer2BackingInfo backingInfo, Pair[] parentDatastorePathList) { - + VirtualDiskFlatVer2BackingInfo parentBacking = new VirtualDiskFlatVer2BackingInfo(); parentBacking.setDatastore(parentDatastorePathList[0].second()); - parentBacking.setDiskMode(VirtualDiskMode.persistent.toString()); - + parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + if(parentDatastorePathList.length > 1) { Pair[] nextDatastorePathList = new Pair[parentDatastorePathList.length -1]; for(int i = 0; i < parentDatastorePathList.length -1; i++) @@ -372,33 +373,33 @@ public class VmwareHelper { 
setParentBackingInfo(parentBacking, nextDatastorePathList); } parentBacking.setFileName(parentDatastorePathList[0].first()); - + backingInfo.setParent(parentBacking); } - + public static Pair prepareIsoDevice(VirtualMachineMO vmMo, String isoDatastorePath, ManagedObjectReference morDs, boolean connect, boolean connectAtBoot, int deviceNumber, int contextNumber) throws Exception { - + boolean newCdRom = false; VirtualCdrom cdRom = (VirtualCdrom )vmMo.getIsoDevice(); if(cdRom == null) { newCdRom = true; cdRom = new VirtualCdrom(); - + assert(vmMo.getIDEDeviceControllerKey() >= 0); cdRom.setControllerKey(vmMo.getIDEDeviceControllerKey()); if(deviceNumber < 0) deviceNumber = vmMo.getNextIDEDeviceNumber(); - cdRom.setUnitNumber(deviceNumber); + cdRom.setUnitNumber(deviceNumber); cdRom.setKey(-contextNumber); } - + VirtualDeviceConnectInfo cInfo = new VirtualDeviceConnectInfo(); cInfo.setConnected(connect); cInfo.setStartConnected(connectAtBoot); cdRom.setConnectable(cInfo); - + if(isoDatastorePath != null) { VirtualCdromIsoBackingInfo backingInfo = new VirtualCdromIsoBackingInfo(); backingInfo.setFileName(isoDatastorePath); @@ -409,32 +410,32 @@ public class VmwareHelper { backingInfo.setDeviceName(""); cdRom.setBacking(backingInfo); } - + return new Pair(cdRom, newCdRom); } - + public static VirtualDisk getRootDisk(VirtualDisk[] disks) { if(disks.length == 1) return disks[0]; - + // TODO : for now, always return the first disk as root disk return disks[0]; } - - public static ManagedObjectReference findSnapshotInTree(VirtualMachineSnapshotTree[] snapTree, String findName) { + + public static ManagedObjectReference findSnapshotInTree(List snapTree, String findName) { assert(findName != null); - + ManagedObjectReference snapMor = null; - if (snapTree == null) + if (snapTree == null) return snapMor; - - for (int i = 0; i < snapTree.length && snapMor == null; i++) { - VirtualMachineSnapshotTree node = snapTree[i]; + + for (int i = 0; i < snapTree.size() && snapMor == null; 
i++) { + VirtualMachineSnapshotTree node = snapTree.get(i); if (node.getName().equals(findName)) { snapMor = node.getSnapshot(); } else { - VirtualMachineSnapshotTree[] childTree = node.getChildSnapshotList(); + List childTree = node.getChildSnapshotList(); snapMor = findSnapshotInTree(childTree, findName); } } @@ -442,94 +443,94 @@ public class VmwareHelper { } public static byte[] composeDiskInfo(List> diskInfo, int disksInChain, boolean includeBase) throws IOException { - + BufferedWriter out = null; ByteArrayOutputStream bos = new ByteArrayOutputStream(); - + try { - out = new BufferedWriter(new OutputStreamWriter(bos)); + out = new BufferedWriter(new OutputStreamWriter(bos)); out.write("disksInChain=" + disksInChain); out.newLine(); - + out.write("disksInBackup=" + diskInfo.size()); out.newLine(); - + out.write("baseDiskIncluded=" + includeBase); out.newLine(); - + int seq = disksInChain - 1; for(Ternary item : diskInfo) { out.write(String.format("disk%d.fileName=%s", seq, item.first())); out.newLine(); - + out.write(String.format("disk%d.baseFileName=%s", seq, item.second())); out.newLine(); - + if(item.third() != null) { out.write(String.format("disk%d.parentFileName=%s", seq, item.third())); out.newLine(); } seq--; } - + out.newLine(); } finally { if(out != null) out.close(); } - + return bos.toByteArray(); } - - public static OptionValue[] composeVncOptions(OptionValue[] optionsToMerge, + + public static OptionValue[] composeVncOptions(OptionValue[] optionsToMerge, boolean enableVnc, String vncPassword, int vncPort, String keyboardLayout) { - + int numOptions = 3; boolean needKeyboardSetup = false; if(keyboardLayout != null && !keyboardLayout.isEmpty()) { numOptions++; needKeyboardSetup = true; } - + if(optionsToMerge != null) numOptions += optionsToMerge.length; - + OptionValue[] options = new OptionValue[numOptions]; int i = 0; if(optionsToMerge != null) { for(int j = 0; j < optionsToMerge.length; j++) options[i++] = optionsToMerge[j]; } - + options[i] = 
new OptionValue(); options[i].setKey("RemoteDisplay.vnc.enabled"); options[i++].setValue(enableVnc ? "true" : "false"); - + options[i] = new OptionValue(); options[i].setKey("RemoteDisplay.vnc.password"); options[i++].setValue(vncPassword); - + options[i] = new OptionValue(); options[i].setKey("RemoteDisplay.vnc.port"); options[i++].setValue("" + vncPort); - + if(needKeyboardSetup) { options[i] = new OptionValue(); options[i].setKey("RemoteDisplay.vnc.keymap"); options[i++].setValue(keyboardLayout); } - + return options; } - + public static void setBasicVmConfig(VirtualMachineConfigSpec vmConfig, int cpuCount, int cpuSpeedMHz, int cpuReservedMhz, int memoryMB, int memoryReserveMB, String guestOsIdentifier, boolean limitCpuUse) { - + // VM config basics vmConfig.setMemoryMB((long)memoryMB); vmConfig.setNumCPUs(cpuCount); - + ResourceAllocationInfo cpuInfo = new ResourceAllocationInfo(); if (limitCpuUse) { cpuInfo.setLimit((long)(cpuSpeedMHz * cpuCount)); @@ -539,52 +540,52 @@ public class VmwareHelper { cpuInfo.setReservation((long)cpuReservedMhz); vmConfig.setCpuAllocation(cpuInfo); - + ResourceAllocationInfo memInfo = new ResourceAllocationInfo(); memInfo.setLimit((long)memoryMB); memInfo.setReservation((long)memoryReserveMB); vmConfig.setMemoryAllocation(memInfo); - + vmConfig.setGuestId(guestOsIdentifier); } - + public static ManagedObjectReference getDiskDeviceDatastore(VirtualDisk diskDevice) throws Exception { VirtualDeviceBackingInfo backingInfo = diskDevice.getBacking(); assert(backingInfo instanceof VirtualDiskFlatVer2BackingInfo); return ((VirtualDiskFlatVer2BackingInfo)backingInfo).getDatastore(); } - + public static Object getPropValue(ObjectContent oc, String name) { - DynamicProperty[] props = oc.getPropSet(); - + List props = oc.getPropSet(); + for(DynamicProperty prop : props) { if(prop.getName().equalsIgnoreCase(name)) return prop.getVal(); } - + return null; } - + public static String getFileExtension(String fileName, String defaultExtension) { 
int pos = fileName.lastIndexOf('.'); if(pos < 0) return defaultExtension; - - return fileName.substring(pos); + + return fileName.substring(pos); } - + public static boolean isSameHost(String ipAddress, String destName) { // TODO : may need to do DNS lookup to compare IP address exactly return ipAddress.equals(destName); } - + public static void deleteVolumeVmdkFiles(DatastoreMO dsMo, String volumeName, DatacenterMO dcMo) throws Exception { String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumeName); dsMo.deleteFile(volumeDatastorePath, dcMo.getMor(), true); - + volumeDatastorePath = String.format("[%s] %s-flat.vmdk", dsMo.getName(), volumeName); dsMo.deleteFile(volumeDatastorePath, dcMo.getMor(), true); - + volumeDatastorePath = String.format("[%s] %s-delta.vmdk", dsMo.getName(), volumeName); dsMo.deleteFile(volumeDatastorePath, dcMo.getMor(), true); } @@ -592,38 +593,50 @@ public class VmwareHelper { public static String getExceptionMessage(Throwable e) { return getExceptionMessage(e, false); } - + public static String getExceptionMessage(Throwable e, boolean printStack) { - if(e instanceof MethodFault) { - final StringWriter writer = new StringWriter(); - writer.append("Exception: " + e.getClass().getName() + "\n"); - writer.append("message: " + ((MethodFault)e).getFaultString() + "\n"); - - if(printStack) { - writer.append("stack: "); - e.printStackTrace(new PrintWriter(writer)); - } - return writer.toString(); - } - + //TODO: in vim 5.1, exceptions do not have a base exception class, MethodFault becomes a FaultInfo that we can only get + // from individual exception through getFaultInfo, so we have to use reflection here to get MethodFault information. 
+ try{ + Class cls = e.getClass(); + Method mth = cls.getDeclaredMethod("getFaultInfo", null); + if ( mth != null ){ + Object fault = mth.invoke(e, null); + if (fault instanceof MethodFault) { + final StringWriter writer = new StringWriter(); + writer.append("Exception: " + fault.getClass().getName() + "\n"); + writer.append("message: " + ((MethodFault)fault).getFaultMessage() + "\n"); + + if(printStack) { + writer.append("stack: "); + e.printStackTrace(new PrintWriter(writer)); + } + return writer.toString(); + } + } + } + catch (Exception ex){ + + } + return ExceptionUtil.toString(e, printStack); } - + public static VirtualMachineMO pickOneVmOnRunningHost(List vmList, boolean bFirstFit) throws Exception { List candidates = new ArrayList(); - + for(VirtualMachineMO vmMo : vmList) { HostMO hostMo = vmMo.getRunningHost(); if(hostMo.isHyperHostConnected()) candidates.add(vmMo); } - + if(candidates.size() == 0) return null; - + if(bFirstFit) return candidates.get(0); - + Random random = new Random(); return candidates.get(random.nextInt(candidates.size())); } From 481f4804723c57553e1464fb8876681ffa651cc0 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Mon, 4 Feb 2013 17:41:36 -0800 Subject: [PATCH 002/486] Upgrade to use Vsphere 5.1 SDK jar, fixed all compilation error with new wrapper class VmwareClient. 
--- .../vmware/VmwareServerDiscoverer.java | 84 +-- .../vmware/manager/VmwareManagerImpl.java | 125 ++--- .../manager/VmwareStorageManagerImpl.java | 262 ++++----- .../vmware/resource/VmwareContextFactory.java | 26 +- .../vmware/resource/VmwareResource.java | 454 ++++++++-------- .../VmwareSecondaryStorageContextFactory.java | 31 +- ...VmwareSecondaryStorageResourceHandler.java | 18 +- .../cloud/hypervisor/vmware/mo/BaseMO.java | 74 +-- .../cloud/hypervisor/vmware/mo/ClusterMO.java | 308 +++++------ .../vmware/mo/CustomFieldsManagerMO.java | 26 +- .../hypervisor/vmware/mo/DatacenterMO.java | 324 +++++------ .../hypervisor/vmware/mo/DatastoreMO.java | 186 +++---- .../vmware/mo/HostDatastoreBrowserMO.java | 58 +- .../vmware/mo/HostDatastoreSystemMO.java | 82 +-- .../vmware/mo/HostFirewallSystemMO.java | 14 +- .../cloud/hypervisor/vmware/mo/HostMO.java | 450 ++++++++-------- .../hypervisor/vmware/mo/HttpNfcLeaseMO.java | 89 +-- .../vmware/mo/HypervisorHostHelper.java | 211 ++++---- .../cloud/hypervisor/vmware/mo/NetworkMO.java | 10 +- .../hypervisor/vmware/mo/PerfManagerMO.java | 89 ++- .../cloud/hypervisor/vmware/mo/TaskMO.java | 24 +- .../vmware/mo/VirtualDiskManagerMO.java | 134 ++--- .../vmware/mo/VirtualMachineMO.java | 458 ++++++++-------- .../hypervisor/vmware/util/VmwareClient.java | 509 ++++++++++++++++++ .../hypervisor/vmware/util/VmwareContext.java | 334 ++++++------ 25 files changed, 2487 insertions(+), 1893 deletions(-) create mode 100644 vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 684df54ccd5..653d259bda8 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -11,7 +11,7 @@ // Unless required by 
applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.hypervisor.vmware; @@ -72,7 +72,7 @@ import com.vmware.vim25.ManagedObjectReference; @Local(value=Discoverer.class) public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { private static final Logger s_logger = Logger.getLogger(VmwareServerDiscoverer.class); - + @Inject ClusterDao _clusterDao; @Inject VmwareManager _vmwareMgr; @Inject AlertManager _alertMgr; @@ -85,27 +85,27 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer @Inject CiscoNexusVSMDeviceDao _nexusDao; @Inject NetworkModel _netmgr; - + @Override - public Map> find(long dcId, Long podId, Long clusterId, URI url, + public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) throws DiscoveryException { - + if(s_logger.isInfoEnabled()) s_logger.info("Discover host. 
dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); - + if(podId == null) { if(s_logger.isInfoEnabled()) - s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); + s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); return null; } - + ClusterVO cluster = _clusterDao.findById(clusterId); if(cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) { if(s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); + s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); return null; } - + List hosts = _resourceMgr.listAllHostsInCluster(clusterId); if(hosts.size() >= _vmwareMgr.getMaxHostsPerCluster()) { String msg = "VMware cluster " + cluster.getName() + " is too big to add new host now. (current configured cluster size: " + _vmwareMgr.getMaxHostsPerCluster() + ")"; @@ -117,12 +117,12 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer String publicTrafficLabel = null; String guestTrafficLabel = null; Map vsmCredentials = null; - + privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware); if (privateTrafficLabel != null) { s_logger.info("Detected private network label : " + privateTrafficLabel); } - + if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { DataCenterVO zone = _dcDao.findById(dcId); NetworkType zoneType = zone.getNetworkType(); @@ -145,7 +145,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer context = VmwareContextFactory.create(url.getHost(), username, password); if (privateTrafficLabel != null) context.registerStockObject("privateTrafficLabel", privateTrafficLabel); - + if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { if (vsmCredentials != null) { s_logger.info("Stocking credentials of Nexus VSM"); @@ -163,26 +163,26 @@ public 
class VmwareServerDiscoverer extends DiscovererBase implements Discoverer s_logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath())); return null; } - + ManagedObjectReference morCluster = null; Map clusterDetails = _clusterDetailsDao.findDetails(clusterId); if(clusterDetails.get("url") != null) { URI uriFromCluster = new URI(UriUtils.encodeURIComponent(clusterDetails.get("url"))); morCluster = context.getHostMorByPath(URLDecoder.decode(uriFromCluster.getPath())); - + if(morCluster == null || !morCluster.getType().equalsIgnoreCase("ClusterComputeResource")) { s_logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url")); return null; } else { ClusterMO clusterMo = new ClusterMO(context, morCluster); ClusterDasConfigInfo dasConfig = clusterMo.getDasConfig(); - if(dasConfig != null && dasConfig.getEnabled() != null && dasConfig.getEnabled().booleanValue()) { + if(dasConfig != null && dasConfig.isEnabled() != null && dasConfig.isEnabled().booleanValue()) { clusterDetails.put("NativeHA", "true"); _clusterDetailsDao.persist(clusterId, clusterDetails); } } } - + if(!validateDiscoveredHosts(context, morCluster, morHosts)) { if(morCluster == null) s_logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster"); @@ -195,14 +195,14 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer for(ManagedObjectReference morHost : morHosts) { Map details = new HashMap(); Map params = new HashMap(); - + HostMO hostMo = new HostMO(context, morHost); details.put("url", hostMo.getHostName()); details.put("username", username); details.put("password", password); - String guid = morHost.getType() + ":" + morHost.get_value() + "@"+ url.getHost(); + String guid = morHost.getType() + ":" + morHost.getValue() + "@"+ url.getHost(); details.put("guid", guid); - + params.put("url", hostMo.getHostName()); params.put("username", username); 
params.put("password", password); @@ -219,8 +219,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (guestTrafficLabel != null) { params.put("guest.network.vswitch.name", guestTrafficLabel); } - - VmwareResource resource = new VmwareResource(); + + VmwareResource resource = new VmwareResource(); try { resource.configure("VMware", params); } catch (ConfigurationException e) { @@ -228,14 +228,14 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer s_logger.warn("Unable to instantiate " + url.getHost(), e); } resource.start(); - + resources.put(resource, details); } - + // place a place holder guid derived from cluster ID cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString()); _clusterDao.update(clusterId, cluster); - + return resources; } catch (DiscoveredWithErrorException e) { throw e; @@ -247,59 +247,59 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer context.close(); } } - + private boolean validateDiscoveredHosts(VmwareContext context, ManagedObjectReference morCluster, List morHosts) throws Exception { if(morCluster == null) { for(ManagedObjectReference morHost : morHosts) { - ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent"); + ManagedObjectReference morParent = (ManagedObjectReference)context.getVimClient().getDynamicProperty(morHost, "parent"); if(morParent.getType().equalsIgnoreCase("ClusterComputeResource")) return false; } } else { for(ManagedObjectReference morHost : morHosts) { - ManagedObjectReference morParent = (ManagedObjectReference)context.getServiceUtil().getDynamicProperty(morHost, "parent"); + ManagedObjectReference morParent = (ManagedObjectReference)context.getVimClient().getDynamicProperty(morHost, "parent"); if(!morParent.getType().equalsIgnoreCase("ClusterComputeResource")) return false; - - 
if(!morParent.get_value().equals(morCluster.get_value())) + + if(!morParent.getValue().equals(morCluster.getValue())) return false; } } - + return true; } - + @Override public void postDiscovery(List hosts, long msId) { // do nothing } - + @Override public boolean matchHypervisor(String hypervisor) { if(hypervisor == null) return true; - + return Hypervisor.HypervisorType.VMware.toString().equalsIgnoreCase(hypervisor); } - + @Override public Hypervisor.HypervisorType getHypervisorType() { return Hypervisor.HypervisorType.VMware; } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { if(s_logger.isInfoEnabled()) s_logger.info("Configure VmwareServerDiscoverer, discover name: " + name); - + super.configure(name, params); - + ComponentLocator locator = ComponentLocator.getCurrentLocator(); ConfigurationDao configDao = locator.getDao(ConfigurationDao.class); if (configDao == null) { throw new ConfigurationException("Unable to get the configuration dao."); } - + createVmwareToolsIso(); if(s_logger.isInfoEnabled()) { @@ -308,7 +308,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; } - + private void createVmwareToolsIso() { String isoName = "vmware-tools.iso"; VMTemplateVO tmplt = _tmpltDao.findByTemplateName(isoName); @@ -354,11 +354,11 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer if (host.getType() != com.cloud.host.Host.Type.Routing || host.getHypervisorType() != HypervisorType.VMware) { return null; } - + _resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage); return new DeleteHostAnswer(true); } - + @Override public boolean stop() { _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java 
b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index c450312c1a7..dea6cca431c 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.hypervisor.vmware.manager; @@ -71,6 +71,7 @@ import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VmwareHostType; import com.cloud.utils.ssh.SshHelper; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.network.CiscoNexusVSMDeviceVO; import com.cloud.network.NetworkModel; @@ -94,7 +95,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.vm.DomainRouterVO; import com.google.gson.Gson; -import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.HostConnectSpec; import com.vmware.vim25.ManagedObjectReference; @@ -123,7 +123,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis @Inject SecondaryStorageVmManager _ssvmMgr; @Inject CiscoNexusVSMDeviceDao _nexusDao; @Inject ClusterVSMMapDao _vsmMapDao; - + ConfigurationServer _configServer; String _mountParent; @@ -141,15 +141,15 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis int _additionalPortRangeSize; int _maxHostsPerCluster; int _routerExtraPublicNics = 2; - + String 
_cpuOverprovisioningFactor = "1"; String _reserveCpu = "false"; - + String _memOverprovisioningFactor = "1"; String _reserveMem = "false"; - + String _rootDiskController = DiskControllerType.ide.toString(); - + Map _storageMounts = new HashMap(); Random _rand = new Random(System.currentTimeMillis()); @@ -217,7 +217,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis throw new ConfigurationException("Unable to find class " + value); } } - + value = configDao.getValue(Config.VmwareUseNexusVSwitch.key()); if(value == null) { _nexusVSwitchActive = false; @@ -261,30 +261,30 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis if(_serviceConsoleName == null) { _serviceConsoleName = "Service Console"; } - + _managemetPortGroupName = configDao.getValue(Config.VmwareManagementPortGroup.key()); if(_managemetPortGroupName == null) { _managemetPortGroupName = "Management Network"; } - + _defaultSystemVmNicAdapterType = configDao.getValue(Config.VmwareSystemVmNicDeviceType.key()); if(_defaultSystemVmNicAdapterType == null) _defaultSystemVmNicAdapterType = VirtualEthernetCardType.E1000.toString(); - + _additionalPortRangeStart = NumbersUtil.parseInt(configDao.getValue(Config.VmwareAdditionalVncPortRangeStart.key()), 59000); if(_additionalPortRangeStart > 65535) { s_logger.warn("Invalid port range start port (" + _additionalPortRangeStart + ") for additional VNC port allocation, reset it to default start port 59000"); _additionalPortRangeStart = 59000; } - + _additionalPortRangeSize = NumbersUtil.parseInt(configDao.getValue(Config.VmwareAdditionalVncPortRangeSize.key()), 1000); if(_additionalPortRangeSize < 0 || _additionalPortRangeStart + _additionalPortRangeSize > 65535) { s_logger.warn("Invalid port range size (" + _additionalPortRangeSize + " for range starts at " + _additionalPortRangeStart); _additionalPortRangeSize = Math.min(1000, 65535 - _additionalPortRangeStart); } - + _routerExtraPublicNics = 
NumbersUtil.parseInt(configDao.getValue(Config.RouterExtraPublicNics.key()), 2); - + _maxHostsPerCluster = NumbersUtil.parseInt(configDao.getValue(Config.VmwarePerClusterHostMax.key()), VmwareManager.MAX_HOSTS_PER_CLUSTER); _cpuOverprovisioningFactor = configDao.getValue(Config.CPUOverprovisioningFactor.key()); if(_cpuOverprovisioningFactor == null || _cpuOverprovisioningFactor.isEmpty()) @@ -293,22 +293,22 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis _memOverprovisioningFactor = configDao.getValue(Config.MemOverprovisioningFactor.key()); if(_memOverprovisioningFactor == null || _memOverprovisioningFactor.isEmpty()) _memOverprovisioningFactor = "1"; - + _reserveCpu = configDao.getValue(Config.VmwareReserveCpu.key()); if(_reserveCpu == null || _reserveCpu.isEmpty()) _reserveCpu = "false"; _reserveMem = configDao.getValue(Config.VmwareReserveMem.key()); if(_reserveMem == null || _reserveMem.isEmpty()) _reserveMem = "false"; - + _recycleHungWorker = configDao.getValue(Config.VmwareRecycleHungWorker.key()); if(_recycleHungWorker == null || _recycleHungWorker.isEmpty()) _recycleHungWorker = "false"; - + _rootDiskController = configDao.getValue(Config.VmwareRootDiskControllerType.key()); if(_rootDiskController == null || _rootDiskController.isEmpty()) _rootDiskController = DiskControllerType.ide.toString(); - + s_logger.info("Additional VNC port allocation range is settled at " + _additionalPortRangeStart + " to " + (_additionalPortRangeStart + _additionalPortRangeSize)); value = configDao.getValue("vmware.host.scan.interval"); @@ -319,7 +319,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis if(_configServer == null) _configServer = (ConfigurationServer)ComponentLocator.getComponent(ConfigurationServer.Name); - + _agentMgr.registerForHostEvents(this, true, true, true); s_logger.info("VmwareManagerImpl has been successfully configured"); @@ -352,6 +352,7 @@ public class VmwareManagerImpl implements 
VmwareManager, VmwareStorageMount, Lis return _name; } + @Override public boolean getNexusVSwitchGlobalParameter() { return _nexusVSwitchActive; } @@ -360,22 +361,22 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis public String composeWorkerName() { return UUID.randomUUID().toString().replace("-", ""); } - + @Override public String getPrivateVSwitchName(long dcId, HypervisorType hypervisorType) { return _netMgr.getDefaultManagementTrafficLabel(dcId, hypervisorType); } - + @Override public String getPublicVSwitchName(long dcId, HypervisorType hypervisorType) { return _netMgr.getDefaultPublicTrafficLabel(dcId, hypervisorType); } - + @Override public String getGuestVSwitchName(long dcId, HypervisorType hypervisorType) { return _netMgr.getDefaultGuestTrafficLabel(dcId, hypervisorType); } - + @Override public List addHostToPodCluster(VmwareContext serviceContext, long dcId, Long podId, Long clusterId, String hostInventoryPath) throws Exception { @@ -392,7 +393,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis List returnedHostList = new ArrayList(); if(mor.getType().equals("ComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getServiceUtil().getDynamicProperty(mor, "host"); + ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getVimClient().getDynamicProperty(mor, "host"); assert(hosts != null); // For ESX host, we need to enable host firewall to allow VNC access @@ -400,7 +401,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis HostFirewallSystemMO firewallMo = hostMo.getHostFirewallSystemMO(); if(firewallMo != null) { if(hostMo.getHostType() == VmwareHostType.ESX) { - + firewallMo.enableRuleset("vncServer"); firewallMo.refreshFirewall(); } @@ -424,15 +425,15 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis returnedHostList.add(hosts[0]); return returnedHostList; 
} else if(mor.getType().equals("ClusterComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getServiceUtil().getDynamicProperty(mor, "host"); + ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getVimClient().getDynamicProperty(mor, "host"); assert(hosts != null); - + if(hosts.length > _maxHostsPerCluster) { String msg = "vCenter cluster size is too big (current configured cluster size: " + _maxHostsPerCluster + ")"; s_logger.error(msg); throw new DiscoveredWithErrorException(msg); } - + for(ManagedObjectReference morHost: hosts) { // For ESX host, we need to enable host firewall to allow VNC access HostMO hostMo = new HostMO(serviceContext, morHost); @@ -450,8 +451,8 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis if(tokens.length == 2) vlanId = tokens[1]; } - - + + s_logger.info("Calling prepareNetwork : " + hostMo.getContext().toString()); // prepare at least one network on the vswitch to enable OVF importing if(!_nexusVSwitchActive) { @@ -493,7 +494,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis returnedHostList.add(mor); return returnedHostList; } else { - s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.get_value() + " from inventory path: " + hostInventoryPath); + s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); return null; } } @@ -506,8 +507,8 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis private ManagedObjectReference addHostToVCenterCluster(VmwareContext serviceContext, ManagedObjectReference morCluster, String host, String userName, String password) throws Exception { - ServiceUtil serviceUtil = serviceContext.getServiceUtil(); - ManagedObjectReference morHost = serviceUtil.getDecendentMoRef(morCluster, "HostSystem", host); + VmwareClient vclient = serviceContext.getVimClient(); + 
ManagedObjectReference morHost = vclient.getDecendentMoRef(morCluster, "HostSystem", host); if(morHost == null) { HostConnectSpec hostSpec = new HostConnectSpec(); hostSpec.setUserName(userName); @@ -515,16 +516,16 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis hostSpec.setHostName(host); hostSpec.setForce(true); // forcely take over the host - ManagedObjectReference morTask = serviceContext.getService().addHost_Task(morCluster, hostSpec, true, null, null); - String taskResult = serviceUtil.waitForTask(morTask); - if(!taskResult.equals("sucess")) { + ManagedObjectReference morTask = serviceContext.getService().addHostTask(morCluster, hostSpec, true, null, null); + boolean taskResult = vclient.waitForTask(morTask); + if(!taskResult) { s_logger.error("Unable to add host " + host + " to vSphere cluster due to " + TaskMO.getTaskFailureInfo(serviceContext, morTask)); throw new CloudRuntimeException("Unable to add host " + host + " to vSphere cluster due to " + taskResult); } serviceContext.waitForTaskProgressDone(morTask); // init morHost after it has been created - morHost = serviceUtil.getDecendentMoRef(morCluster, "HostSystem", host); + morHost = vclient.getDecendentMoRef(morCluster, "HostSystem", host); if(morHost == null) { throw new CloudRuntimeException("Successfully added host into vSphere but unable to find it later on?!. 
Please make sure you are either using IP address or full qualified domain name for host"); } @@ -545,25 +546,27 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis List secStorageHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(dcId); if(secStorageHosts.size() > 0) return secStorageHosts.get(0).getStorageUrl(); - + return null; } - public String getServiceConsolePortGroupName() { + @Override + public String getServiceConsolePortGroupName() { return _serviceConsoleName; } - - public String getManagementPortGroupName() { + + @Override + public String getManagementPortGroupName() { return _managemetPortGroupName; } - + @Override public String getManagementPortGroupByHost(HostMO hostMo) throws Exception { if(hostMo.getHostType() == VmwareHostType.ESXi) return this._managemetPortGroupName; return this._serviceConsoleName; } - + @Override public void setupResourceStartupParams(Map params) { params.put("private.network.vswitch.name", _privateNetworkVSwitchName); @@ -585,17 +588,17 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis return _storageMgr; } - + @Override public long pushCleanupCheckpoint(String hostGuid, String vmName) { return _checkPointMgr.pushCheckPoint(new VmwareCleanupMaid(hostGuid, vmName)); } - + @Override public void popCleanupCheckpoint(long checkpoint) { _checkPointMgr.popCheckPoint(checkpoint); } - + @Override public void gcLeftOverVMs(VmwareContext context) { VmwareCleanupMaid.gcLeftOverVMs(context); @@ -623,12 +626,12 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis if(!destIso.exists()) { s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); _configServer.updateKeyPairs(); - + try { FileUtil.copyfile(srcIso, destIso); } catch(IOException e) { s_logger.error("Unexpected exception ", e); - + String msg = "Unable to copy systemvm ISO on secondary storage. 
src location: " + srcIso.toString() + ", dest location: " + destIso; s_logger.error(msg); throw new CloudRuntimeException(msg); @@ -645,19 +648,19 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis lock.releaseRef(); } } - + @Override public String getSystemVMIsoFileNameOnDatastore() { String version = ComponentLocator.class.getPackage().getImplementationVersion(); String fileName = "systemvm-" + version + ".iso"; return fileName.replace(':', '-'); } - + @Override public String getSystemVMDefaultNicAdapterType() { return this._defaultSystemVmNicAdapterType; } - + private File getSystemVMPatchIsoFile() { // locate systemvm.iso URL url = ComponentLocator.class.getProtectionDomain().getCodeSource().getLocation(); @@ -866,7 +869,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis if(checkPointIdStr != null) { _checkPointMgr.popCheckPoint(Long.parseLong(checkPointIdStr)); } - + checkPointIdStr = answer.getContextParam("checkpoint2"); if(checkPointIdStr != null) { _checkPointMgr.popCheckPoint(Long.parseLong(checkPointIdStr)); @@ -897,9 +900,9 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis } } } - + protected final int DEFAULT_DOMR_SSHPORT = 3922; - + protected boolean shutdownRouterVM(DomainRouterVO router) { if (s_logger.isDebugEnabled()) { s_logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly."); @@ -943,27 +946,27 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis public boolean processTimeout(long agentId, long seq) { return false; } - + @Override public boolean beginExclusiveOperation(int timeOutSeconds) { return _exclusiveOpLock.lock(timeOutSeconds); } - + @Override public void endExclusiveOperation() { _exclusiveOpLock.unlock(); } - + @Override public Pair getAddiionalVncPortRange() { return new Pair(_additionalPortRangeStart, _additionalPortRangeSize); } - + @Override public int 
getMaxHostsPerCluster() { return this._maxHostsPerCluster; } - + @Override public int getRouterExtraPublicNics() { return this._routerExtraPublicNics; @@ -977,7 +980,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis vsmMapVO = _vsmMapDao.findByClusterId(clusterId); long vsmId = 0; if (vsmMapVO != null) { - vsmId = vsmMapVO.getVsmId(); + vsmId = vsmMapVO.getVsmId(); s_logger.info("vsmId is " + vsmId); nexusVSM = _nexusDao.findById(vsmId); s_logger.info("Fetching nexus vsm credentials from database."); @@ -985,7 +988,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis else { s_logger.info("Found empty vsmMapVO."); return null; - } + } Map nexusVSMCredentials = new HashMap(); if (nexusVSM != null) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 8650274719e..435db748680 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.hypervisor.vmware.manager; @@ -69,24 +69,24 @@ import com.vmware.vim25.VirtualSCSISharing; public class VmwareStorageManagerImpl implements VmwareStorageManager { private static final Logger s_logger = Logger.getLogger(VmwareStorageManagerImpl.class); - + private final VmwareStorageMount _mountService; private final StorageLayer _storage = new JavaStorageLayer(); - + private int _timeout; - + public VmwareStorageManagerImpl(VmwareStorageMount mountService) { assert(mountService != null); _mountService = mountService; } - + public void configure(Map params) { s_logger.info("Configure VmwareStorageManagerImpl"); - + String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; } - + @Override public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadCommand cmd) { String secondaryStorageUrl = cmd.getSecondaryStorageUrl(); @@ -116,18 +116,18 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } templateName = cmd.getName(); } - + VmwareContext context = hostService.getServiceContext(cmd); try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - - String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().get_value()).getBytes()).toString(); + + String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); // truncate template name to 32 chars to ensure they work well with vSphere API's. 
- templateUuidName = templateUuidName.replace("-", ""); - + templateUuidName = templateUuidName.replace("-", ""); + DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); - + if (templateMo == null) { if(s_logger.isInfoEnabled()) s_logger.info("Template " + templateName + " is not setup yet, setup template from secondary storage with uuid name: " + templateUuidName); @@ -153,7 +153,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new PrimaryStorageDownloadAnswer(msg); } } - + @Override public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd) { Long accountId = cmd.getAccountId(); @@ -184,15 +184,15 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if (vmMo == null) { if(s_logger.isDebugEnabled()) s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); - + vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName()); if(vmMo == null) { dsMo = new DatastoreMO(hyperHost.getContext(), morDs); - + workerVMName = hostService.getWorkerName(context, cmd, 0); - + // attach a volume to dummay wrapper VM for taking snapshot and exporting the VM for backup - if (!hyperHost.createBlankVm(workerVMName, 1, 512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier._otherGuest.toString(), morDs, false)) { + if (!hyperHost.createBlankVm(workerVMName, 1, 512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString(), morDs, false)) { String msg = "Unable to create worker VM to execute BackupSnapshotCommand"; s_logger.error(msg); throw new Exception(msg); @@ -202,17 +202,17 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception("Failed to find the newly create or relocated VM. 
vmName: " + workerVMName); } workerVm = vmMo; - + // attach volume to worker VM String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); - } - } - + } + } + if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + cmd.getSnapshotName(), false, false)) { throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName()); } - + snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid, hostService.getWorkerName(context, cmd, 1)); @@ -220,11 +220,11 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if (success) { details = "Successfully backedUp the snapshotUuid: " + snapshotUuid + " to secondary storage."; } - + } finally { if(vmMo != null) vmMo.removeAllSnapshots(); - + try { if (workerVm != null) { // detach volume and destroy worker vm @@ -233,7 +233,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } catch (Throwable e) { s_logger.warn("Failed to destroy worker VM: " + workerVMName); - } + } } } catch (Throwable e) { if (e instanceof RemoteException) { @@ -260,7 +260,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VmwareContext context = hostService.getServiceContext(cmd); try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - + VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName()); if (vmMo == null) { if(s_logger.isDebugEnabled()) @@ -276,7 +276,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { Ternary result = createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), - secondaryStoragePoolURL, volumePath, + secondaryStoragePoolURL, volumePath, hostService.getWorkerName(context, cmd, 0)); return new CreatePrivateTemplateAnswer(cmd, true, null, @@ -326,7 
+326,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CreatePrivateTemplateAnswer(cmd, false, details); } } - + @Override public Answer execute(VmwareHostService hostService, CopyVolumeCommand cmd) { Long volumeId = cmd.getVolumeId(); @@ -375,7 +375,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CopyVolumeAnswer(cmd, false, "CopyVolumeCommand failed due to exception: " + StringUtils.getExceptionStackInfo(e), null, null); } } - + @Override public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCommand cmd) { @@ -392,7 +392,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VmwareContext context = hostService.getServiceContext(cmd); try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - + ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); if (morPrimaryDs == null) { String msg = "Unable to find datastore: " + primaryStorageNameLabel; @@ -417,22 +417,22 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName); } - + // templateName: name in secondary storage // templateUuid: will be used at hypervisor layer private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { - - s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + + s_logger.info("Executing copyTemplateFromSecondaryToPrimary. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName); - + String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl); s_logger.info("Secondary storage mount point: " + secondaryMountPoint); - - String srcOVAFileName = secondaryMountPoint + "/" + templatePathAtSecondaryStorage + + + String srcOVAFileName = secondaryMountPoint + "/" + templatePathAtSecondaryStorage + templateName + "." + ImageFormat.OVA.getFileExtension(); - + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -447,40 +447,40 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception(msg); } } - + srcFileName = getOVFFilePath(srcOVAFileName); - if(srcFileName == null) { - String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; + if(srcFileName == null) { + String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; s_logger.error(msg); throw new Exception(msg); } - + String vmName = templateUuid; hyperHost.importVmFromOVF(srcFileName, vmName, datastoreMo, "thin"); - + VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); if(vmMo == null) { - String msg = "Failed to import OVA template. secondaryStorage: " + String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } - + if(vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) { vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, templateUuid); vmMo.markAsTemplate(); } else { vmMo.destroy(); - String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; + String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } } - - private Ternary createTemplateFromVolume(VirtualMachineMO vmMo, long accountId, long templateId, String templateUniqueName, + + private Ternary createTemplateFromVolume(VirtualMachineMO vmMo, long accountId, long templateId, String templateUniqueName, String secStorageUrl, String volumePath, String workerVmName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String installPath = getTemplateRelativeDirInSecStorage(accountId, templateId); String installFullPath = secondaryMountPoint + "/" + installPath; @@ -488,16 +488,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { Script command = new Script(false, "mkdir", _timeout, s_logger); command.add("-p"); command.add(installFullPath); - + String result = command.execute(); if(result != null) { - String msg = "unable to prepare template directory: " + String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; s_logger.error(msg); throw new Exception(msg); } } - + VirtualMachineMO clonedVm = null; try { Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath, false); @@ -506,15 +506,15 @@ public class VmwareStorageManagerImpl implements 
VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + if(!vmMo.createSnapshot(templateUniqueName, "Temporary snapshot for template creation", false, false)) { String msg = "Unable to take snapshot for creating template from volume. volume path: " + volumePath; s_logger.error(msg); throw new Exception(msg); } - + // 4 MB is the minimum requirement for VM memory in VMware - vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), + vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if(clonedVm == null) { @@ -522,9 +522,9 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); - + long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); VmdkProcessor processor = new VmdkProcessor(); Map params = new HashMap(); @@ -534,54 +534,54 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); return new Ternary(installPath + "/" + templateUniqueName + ".ova", physicalSize, virtualSize); - + } finally { if(clonedVm != null) { clonedVm.detachAllDisks(); clonedVm.destroy(); } - + vmMo.removeSnapshot(templateUniqueName, false); } } - - private Ternary createTemplateFromSnapshot(long accountId, long templateId, String templateUniqueName, + + private Ternary createTemplateFromSnapshot(long accountId, long templateId, String templateUniqueName, String secStorageUrl, long volumeId, String backedUpSnapshotUuid) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String installPath = getTemplateRelativeDirInSecStorage(accountId, 
templateId); String installFullPath = secondaryMountPoint + "/" + installPath; String installFullName = installFullPath + "/" + templateUniqueName + ".ova"; - String snapshotFullName = secondaryMountPoint + "/" + getSnapshotRelativeDirInSecStorage(accountId, volumeId) + String snapshotFullName = secondaryMountPoint + "/" + getSnapshotRelativeDirInSecStorage(accountId, volumeId) + "/" + backedUpSnapshotUuid + ".ova"; String result; Script command; - + synchronized(installPath.intern()) { command = new Script(false, "mkdir", _timeout, s_logger); command.add("-p"); command.add(installFullPath); - + result = command.execute(); if(result != null) { - String msg = "unable to prepare template directory: " + String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; s_logger.error(msg); throw new Exception(msg); } } - + try { command = new Script(false, "cp", _timeout, s_logger); command.add(snapshotFullName); command.add(installFullName); result = command.execute(); if(result != null) { - String msg = "unable to copy snapshot " + snapshotFullName + " to " + installFullPath; + String msg = "unable to copy snapshot " + snapshotFullName + " to " + installFullPath; s_logger.error(msg); throw new Exception(msg); } - + // untar OVA file at template directory command = new Script("tar", 0, s_logger); command.add("--no-same-owner"); @@ -590,12 +590,12 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.info("Executing command: " + command.toString()); result = command.execute(); if(result != null) { - String msg = "unable to untar snapshot " + snapshotFullName + " to " - + installFullPath; + String msg = "unable to untar snapshot " + snapshotFullName + " to " + + installFullPath; s_logger.error(msg); throw new Exception(msg); } - + long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); VmdkProcessor processor = new VmdkProcessor(); Map params = 
new HashMap(); @@ -605,45 +605,45 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); return new Ternary(installPath + "/" + templateUniqueName + ".ova", physicalSize, virtualSize); - + } catch(Exception e) { // TODO, clean up left over files throw e; } } - - private void postCreatePrivateTemplate(String installFullPath, long templateId, + + private void postCreatePrivateTemplate(String installFullPath, long templateId, String templateName, long size, long virtualSize) throws Exception { // TODO a bit ugly here BufferedWriter out = null; try { out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"))); - out.write("filename=" + templateName + ".ova"); + out.write("filename=" + templateName + ".ova"); out.newLine(); - out.write("description="); + out.write("description="); out.newLine(); - out.write("checksum="); + out.write("checksum="); out.newLine(); - out.write("hvm=false"); + out.write("hvm=false"); out.newLine(); - out.write("size=" + size); + out.write("size=" + size); out.newLine(); - out.write("ova=true"); + out.write("ova=true"); out.newLine(); - out.write("id=" + templateId); + out.write("id=" + templateId); out.newLine(); - out.write("public=false"); + out.write("public=false"); out.newLine(); - out.write("ova.filename=" + templateName + ".ova"); + out.write("ova.filename=" + templateName + ".ova"); out.newLine(); out.write("uniquename=" + templateName); out.newLine(); - out.write("ova.virtualsize=" + virtualSize); + out.write("ova.virtualsize=" + virtualSize); out.newLine(); - out.write("virtualsize=" + virtualSize); + out.write("virtualsize=" + virtualSize); out.newLine(); - out.write("ova.size=" + size); + out.write("ova.size=" + size); out.newLine(); } finally { if(out != null) @@ -651,21 +651,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } 
- private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, + private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, String secStorageUrl, String snapshotBackupUuid) throws Exception { - - restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, + + restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), snapshotBackupUuid); return null; } - - private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, + + private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, String secStorageUrl, String secStorageDir, String backupName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); - String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + "." 
+ ImageFormat.OVA.getFileExtension(); - + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -680,21 +680,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception(msg); } } - + srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { - String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; + String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; s_logger.error(msg); throw new Exception(msg); } - + VirtualMachineMO clonedVm = null; try { hyperHost.importVmFromOVF(srcFileName, newVolumeName, primaryDsMo, "thin"); clonedVm = hyperHost.findVmOnHyperHost(newVolumeName); if(clonedVm == null) throw new Exception("Unable to create container VM for volume creation"); - + clonedVm.moveAllVmDiskFiles(primaryDsMo, "", false); clonedVm.detachAllDisks(); } finally { @@ -704,24 +704,24 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } } - - private String backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, long accountId, long volumeId, - String volumePath, String snapshotUuid, String secStorageUrl, + + private String backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, long accountId, long volumeId, + String volumePath, String snapshotUuid, String secStorageUrl, String prevSnapshotUuid, String prevBackupUuid, String workerVmName) throws Exception { - + String backupUuid = UUID.randomUUID().toString(); - exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, + exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), backupUuid, workerVmName); return backupUuid; } - - private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, + + private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String 
secStorageDir, String exportName, String workerVmName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String exportPath = secondaryMountPoint + "/" + secStorageDir; - + synchronized(exportPath.intern()) { if(!new File(exportPath).exists()) { Script command = new Script(false, "mkdir", _timeout, s_logger); @@ -734,16 +734,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VirtualMachineMO clonedVm = null; try { - + Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath, false); if(volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; s_logger.error(msg); throw new Exception(msg); } - + // 4 MB is the minimum requirement for VM memory in VMware - vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), + vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if(clonedVm == null) { @@ -751,7 +751,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + clonedVm.exportVm(exportPath, exportName, true, true); } finally { if(clonedVm != null) { @@ -760,7 +760,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } } - + private String deleteSnapshotOnSecondaryStorge(long accountId, long volumeId, String secStorageUrl, String backupUuid) throws Exception { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); @@ -769,18 +769,18 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if(file.exists()) { if(file.delete()) return null; - + } else { return "Backup file does not exist. 
backupUuid: " + backupUuid; } - + return "Failed to delete snapshot backup file, backupUuid: " + backupUuid; } - - private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, - String vmName, long volumeId, String poolId, String volumePath, + + private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, + String vmName, long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName) throws Exception { - + String volumeFolder = String.valueOf(volumeId) + "/"; VirtualMachineMO workerVm=null; VirtualMachineMO vmMo=null; @@ -804,21 +804,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmConfig.setName(workerVmName); vmConfig.setMemoryMB((long) 4); vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier._otherGuest.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); - + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); + vmConfig.getDeviceChange().add(scsiControllerSpec); + hyperHost.createVm(vmConfig); workerVm = hyperHost.findVmOnHyperHost(workerVmName); if (workerVm == null) { @@ -826,7 +826,7 @@ public class 
VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + //attach volume to worker VM String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); workerVm.attachDisk(new String[] { datastoreVolumePath }, morDs); @@ -835,7 +835,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false); - exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, + exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1)); return new Pair(volumeFolder, exportName); @@ -849,16 +849,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, + private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName) throws Exception { String volumeFolder = String.valueOf(volumeId) + "/"; String newVolume = UUID.randomUUID().toString().replaceAll("-", ""); restoreVolumeFromSecStorage(hyperHost, dsMo, newVolume, secStorageUrl, "volumes/" + volumeFolder, exportName); - + return new Pair(volumeFolder, newVolume); } - + private String getOVFFilePath(String srcOVAFileName) { File file = new File(srcOVAFileName); assert(_storage != null); @@ -873,11 +873,11 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } return null; } - + private static String getTemplateRelativeDirInSecStorage(long accountId, long templateId) { return "template/tmpl/" + accountId + "/" + templateId; } - + private static String getSnapshotRelativeDirInSecStorage(long accountId, long volumeId) { return "snapshots/" + accountId + "/" + volumeId; } diff --git 
a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 053ed6eaf46..11a75d8217f 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.hypervisor.vmware.resource; @@ -19,22 +19,22 @@ package com.cloud.hypervisor.vmware.resource; import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.manager.VmwareManager; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentLocator; -import com.vmware.apputils.version.ExtendedAppUtil; public class VmwareContextFactory { - + private static final Logger s_logger = Logger.getLogger(VmwareContextFactory.class); - + private static volatile int s_seq = 1; private static VmwareManager s_vmwareMgr; - + static { // skip certificate check System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); - + ComponentLocator locator = ComponentLocator.getLocator("management-server"); s_vmwareMgr = locator.getManager(VmwareManager.class); } @@ -45,17 +45,17 @@ public class VmwareContextFactory { assert(vCenterPassword != null); String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - String[] params = new String[] {"--url", serviceUrl, 
"--username", vCenterUserName, "--password", vCenterPassword }; + //String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; if(s_logger.isDebugEnabled()) s_logger.debug("initialize VmwareContext. url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + StringUtils.getMaskedPasswordForDisplay(vCenterPassword)); - - ExtendedAppUtil appUtil = ExtendedAppUtil.initialize(vCenterAddress + "-" + s_seq++, params); - - appUtil.connect(); - VmwareContext context = new VmwareContext(appUtil, vCenterAddress); + + VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); + vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + + VmwareContext context = new VmwareContext(vimClient, vCenterAddress); context.registerStockObject(VmwareManager.CONTEXT_STOCK_NAME, s_vmwareMgr); - + context.registerStockObject("serviceconsole", s_vmwareMgr.getServiceConsolePortGroupName()); context.registerStockObject("manageportgroup", s_vmwareMgr.getManagementPortGroupName()); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 96c4348cbda..e527c2e5b39 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.hypervisor.vmware.resource; @@ -24,6 +24,7 @@ import java.net.URI; import java.nio.channels.SocketChannel; import java.rmi.RemoteException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -237,7 +238,9 @@ import com.vmware.vim25.PerfMetricSeries; import com.vmware.vim25.PerfQuerySpec; import com.vmware.vim25.PerfSampleInfo; import com.vmware.vim25.RuntimeFault; +import com.vmware.vim25.RuntimeFaultFaultMsg; import com.vmware.vim25.ToolsUnavailable; +import com.vmware.vim25.ToolsUnavailableFaultMsg; import com.vmware.vim25.VimPortType; import com.vmware.vim25.VirtualDevice; import com.vmware.vim25.VirtualDeviceConfigSpec; @@ -259,8 +262,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected String _name; protected final long _ops_timeout = 900000; // 15 minutes time out to time - protected final int _shutdown_waitMs = 300000; // wait up to 5 minutes for shutdown - + protected final int _shutdown_waitMs = 300000; // wait up to 5 minutes for shutdown + // out an operation protected final int _retry = 24; protected final int _sleep = 10000; @@ -281,14 +284,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected String _guestNetworkVSwitchName; protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch; protected boolean _nexusVSwitch = false; - + protected float _cpuOverprovisioningFactor = 1; protected boolean _reserveCpu = false; - + protected float _memOverprovisioningFactor = 1; protected boolean _reserveMem = false; protected boolean _recycleHungWorker = false; - protected DiskControllerType _rootDiskController = DiskControllerType.ide; + protected DiskControllerType _rootDiskController = DiskControllerType.ide; protected ManagedObjectReference _morHyperHost; protected VmwareContext _serviceContext; @@ -304,9 +307,9 @@ public class VmwareResource 
implements StoragePoolResource, ServerResource, Vmwa protected static HashMap s_statesTable; static { s_statesTable = new HashMap(); - s_statesTable.put(VirtualMachinePowerState.poweredOn, State.Running); - s_statesTable.put(VirtualMachinePowerState.poweredOff, State.Stopped); - s_statesTable.put(VirtualMachinePowerState.suspended, State.Stopped); + s_statesTable.put(VirtualMachinePowerState.POWERED_ON, State.Running); + s_statesTable.put(VirtualMachinePowerState.POWERED_OFF, State.Stopped); + s_statesTable.put(VirtualMachinePowerState.SUSPENDED, State.Stopped); } public VmwareResource() { @@ -317,7 +320,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa public Answer executeRequest(Command cmd) { if(s_logger.isTraceEnabled()) s_logger.trace("Begin executeRequest(), cmd: " + cmd.getClass().getSimpleName()); - + Answer answer = null; NDC.push(_hostName != null ? _hostName : _guid + "(" + ComponentLocator.class.getPackage().getImplementationVersion() + ")"); try { @@ -464,14 +467,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa synchronized (this) { try { - JmxUtil.registerMBean("VMware " + _morHyperHost.get_value(), "Command " + cmdSequence + "-" + cmd.getClass().getSimpleName(), mbean); + JmxUtil.registerMBean("VMware " + _morHyperHost.getValue(), "Command " + cmdSequence + "-" + cmd.getClass().getSimpleName(), mbean); _cmdMBeans.add(mbean); if (_cmdMBeans.size() >= MAX_CMD_MBEAN) { PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0); _cmdMBeans.remove(0); - JmxUtil.unregisterMBean("VMware " + _morHyperHost.get_value(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); + JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); } } catch (Exception e) { if(s_logger.isTraceEnabled()) @@ -485,10 +488,10 @@ public class VmwareResource implements StoragePoolResource, 
ServerResource, Vmwa if(s_logger.isTraceEnabled()) s_logger.trace("End executeRequest(), cmd: " + cmd.getClass().getSimpleName()); - + return answer; } - + protected Answer execute(CheckNetworkCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource CheckNetworkCommand " + _gson.toJson(cmd)); @@ -497,7 +500,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // TODO setup portgroup for private network needs to be done here now return new CheckNetworkAnswer(cmd, true , "Network Setup check by names is done"); } - + protected Answer execute(NetworkUsageCommand cmd) { if ( cmd.isForVpc() ) { return VPCNetworkUsage(cmd); @@ -549,7 +552,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (!resultPair.first()) { throw new Exception(" vpc network usage plugin call failed "); } - + if (option.equals("get") || option.equals("vpn")) { String result = resultPair.second(); if (result == null || result.isEmpty()) { @@ -568,7 +571,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } return new NetworkUsageAnswer(cmd, "success", 0L, 0L); } catch (Throwable e) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new NetworkUsageAnswer(cmd, "success", 0L, 0L); @@ -583,7 +586,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String args = ""; String[] results = new String[cmd.getRules().length]; int i = 0; - + boolean endResult = true; for (PortForwardingRuleTO rule : cmd.getRules()) { args += rule.revoked() ? 
" -D " : " -A "; @@ -616,7 +619,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new SetPortForwardingRulesAnswer(cmd, results, endResult); } - + protected SetFirewallRulesAnswer execute(SetFirewallRulesCommand cmd) { String controlIp = getRouterSshControlIp(cmd); String[] results = new String[cmd.getRules().length]; @@ -672,9 +675,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa for (int i=0; i < results.length; i++) { results[i] = "Failed"; } - + return new SetFirewallRulesAnswer(cmd, false, results); - } + } } catch (Throwable e) { s_logger.error("SetFirewallRulesCommand(args: " + args + ") failed on setting one rule due to " @@ -684,11 +687,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa results[i] = "Failed"; } return new SetFirewallRulesAnswer(cmd, false, results); - } + } return new SetFirewallRulesAnswer(cmd, true, results); - } - + } + protected Answer execute(SetStaticNatRulesCommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource SetFirewallRuleCommand: " + _gson.toJson(cmd)); @@ -704,11 +707,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa args += " -l " + rule.getSrcIp(); args += " -r " + rule.getDstIp(); - + if (rule.getProtocol() != null) { args += " -P " + rule.getProtocol().toLowerCase(); } - + args += " -d " + rule.getStringSrcPortRange(); args += " -G "; @@ -742,7 +745,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); String controlIp = getRouterSshControlIp(cmd); - + assert(controlIp != null); LoadBalancerConfigurator cfgtr = new HAProxyConfigurator(); @@ -794,7 +797,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa args += " -s " + sb.toString(); } - + Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, 
"root", mgr.getSystemVMKeyFile(), null, "scp " + tmpCfgFilePath + " /etc/haproxy/haproxy.cfg.new"); if (!result.first()) { @@ -1271,12 +1274,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(nic); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(nic); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure devices when running PlugNicCommand"); } @@ -1319,12 +1322,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new UnPlugNicAnswer(cmd, true, "success"); } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(nic); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.remove); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(nic); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.REMOVE); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new 
Exception("Failed to configure devices when running unplugNicCommand"); } @@ -1444,7 +1447,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa args += " -c "; args += "eth" + publicNicInfo.first(); - + args += " -g "; args += vlanGateway; @@ -1461,7 +1464,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } if (removeVif) { - + String nicMasksStr = vmMo.getCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK); int nicMasks = Integer.parseInt(nicMasksStr); nicMasks &= ~(1 << publicNicInfo.first().intValue()); @@ -1486,7 +1489,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private void plugPublicNic(VirtualMachineMO vmMo, final String vlanId, final String vifMacAddress) throws Exception { // TODO : probably need to set traffic shaping Pair networkInfo = null; - + if (!_nexusVSwitch) { networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public", vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true); @@ -1514,33 +1517,33 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(device); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.edit); - - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(device); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure devices when 
plugPublicNic"); } } catch(Exception e) { - + // restore allocation mask in case of exceptions String nicMasksStr = vmMo.getCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK); int nicMasks = Integer.parseInt(nicMasksStr); nicMasks &= ~(1 << nicIndex); vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMasks)); - + throw e; } } - + private int allocPublicNicIndex(VirtualMachineMO vmMo) throws Exception { String nicMasksStr = vmMo.getCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK); if(nicMasksStr == null || nicMasksStr.isEmpty()) { throw new Exception("Could not find NIC allocation info"); } - + int nicMasks = Integer.parseInt(nicMasksStr); VirtualDevice[] nicDevices = vmMo.getNicDevices(); for(int i = 3; i < nicDevices.length; i++) { @@ -1550,7 +1553,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return i; } } - + throw new Exception("Could not allocate a free public NIC"); } @@ -1606,7 +1609,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected Answer execute(SavePasswordCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SavePasswordCommand. vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + s_logger.info("Executing resource SavePasswordCommand. 
vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); } @@ -1620,7 +1623,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (s_logger.isDebugEnabled()) { s_logger.debug("Run command on domain router " + controlIp + ", /root/savepassword.sh " + args + " -p " + StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); } - + args += " -p " + password; @@ -1655,11 +1658,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String args = " " + cmd.getVmMac(); args += " " + cmd.getVmIpAddress(); args += " " + cmd.getVmName(); - + if (cmd.getDefaultRouter() != null) { args += " " + cmd.getDefaultRouter(); } - + if (cmd.getDefaultDns() != null) { args += " " + cmd.getDefaultDns(); } @@ -1667,7 +1670,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (cmd.getStaticRoutes() != null) { args += " " + cmd.getStaticRoutes(); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/edithosts.sh " + args); } @@ -1696,7 +1699,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new Answer(cmd); } - + protected CheckS2SVpnConnectionsAnswer execute(CheckS2SVpnConnectionsCommand cmd) { if (s_logger.isDebugEnabled()) { s_logger.debug("Executing resource CheckS2SVpnConnectionsCommand: " + _gson.toJson(cmd)); @@ -1759,7 +1762,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } return new CheckRouterAnswer(cmd, result.second(), true); } - + protected Answer execute(GetDomRVersionCmd cmd) { if (s_logger.isDebugEnabled()) { s_logger.debug("Executing resource GetDomRVersionCmd: " + _gson.toJson(cmd)); @@ -1793,7 +1796,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } return new 
GetDomRVersionAnswer(cmd, result.second(), lines[0], lines[1]); } - + protected Answer execute(BumpUpPriorityCommand cmd) { if (s_logger.isDebugEnabled()) { s_logger.debug("Executing resource BumpUpPriorityCommand: " + _gson.toJson(cmd)); @@ -1834,7 +1837,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String routerPrivateIpAddress = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); String controlIp = getRouterSshControlIp(cmd); - + String vmIpAddress = cmd.getVmIpAddress(); List vmData = cmd.getVmData(); String[] vmDataArgs = new String[vmData.size() * 2 + 4]; @@ -1960,7 +1963,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualMachineTO vmSpec = cmd.getVirtualMachine(); String vmName = vmSpec.getName(); - + State state = State.Stopped; VmwareContext context = getServiceContext(); try { @@ -1974,7 +1977,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.valueOf(vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER)); if(s_logger.isDebugEnabled()) s_logger.debug("VM " + vmName + " will be started with NIC device type: " + nicDeviceType); - + VmwareHypervisorHost hyperHost = getHyperHost(context); VolumeTO[] disks = validateDisks(vmSpec.getDisks()); assert (disks.length > 0); @@ -2020,7 +2023,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } assert (vmSpec.getSpeed() != null) && (rootDiskDataStoreDetails != null); - if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), + if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), getReserveCpuMHz(vmSpec.getSpeed().intValue()), vmSpec.getLimitCpuUse(), ramMb, getReserveMemMB(ramMb), translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) { throw new Exception("Failed to create VM. 
vmName: " + vmName); @@ -2052,10 +2055,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); - VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), + VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), getReserveCpuMHz(vmSpec.getSpeed().intValue()), ramMb, getReserveMemMB(ramMb), translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse()); - + VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; int i = 0; int ideControllerKey = vmMo.getIDEDeviceControllerKey(); @@ -2072,7 +2075,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception(msg); } mgr.prepareSecondaryStorageStore(secStoreUrl); - + ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl); if (morSecDs == null) { String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; @@ -2081,17 +2084,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs); deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, i, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - 
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } i++; } else { @@ -2107,11 +2110,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (isoInfo.second()) { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } else { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); @@ -2120,13 +2123,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (isoInfo.second()) { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } i++; @@ -2178,11 +2181,11 @@ public class VmwareResource implements 
StoragePoolResource, ServerResource, Vmwa device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1); } deviceConfigSpecArray[i].setDevice(device); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); if(s_logger.isDebugEnabled()) s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); - + i++; } } @@ -2206,32 +2209,32 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Preparing NIC device on network " + networkInfo.second()); nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i, i + 1, true, true); } - + deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(nic); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); - + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + if(s_logger.isDebugEnabled()) s_logger.debug("Prepare NIC at new device " + _gson.toJson(deviceConfigSpecArray[i])); - + // this is really a hacking for DomR, upon DomR startup, we will reset all the NIC allocation after eth3 if(nicCount < 3) nicMask |= (1 << nicCount); - + i++; nicCount++; } - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); // pass boot arguments through machine.id & perform customized options to VMX - + Map vmDetailOptions = validateVmDetails(vmSpec.getDetails()); OptionValue[] extraOptions = new OptionValue[2 + vmDetailOptions.size()]; extraOptions[0] = new OptionValue(); extraOptions[0].setKey("machine.id"); extraOptions[0].setValue(vmSpec.getBootArgs()); - + extraOptions[1] = new OptionValue(); extraOptions[1].setKey("devices.hotplug"); extraOptions[1].setValue("true"); @@ -2243,16 +2246,16 @@ public 
class VmwareResource implements StoragePoolResource, ServerResource, Vmwa extraOptions[j].setValue(entry.getValue()); j++; } - + String keyboardLayout = null; if(vmSpec.getDetails() != null) keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); - vmConfigSpec.setExtraConfig(configureVnc(extraOptions, hyperHost, vmName, vmSpec.getVncPassword(), keyboardLayout)); + vmConfigSpec.getExtraConfig().addAll(Arrays.asList(configureVnc(extraOptions, hyperHost, vmName, vmSpec.getVncPassword(), keyboardLayout))); if (!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure VM before start. vmName: " + vmName); } - + vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask)); if (!vmMo.powerOn()) { @@ -2280,10 +2283,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - + private Map validateVmDetails(Map vmDetails) { Map validatedDetails = new HashMap(); - + if(vmDetails != null && vmDetails.size() > 0) { for(Map.Entry entry : vmDetails.entrySet()) { if("machine.id".equalsIgnoreCase(entry.getKey())) @@ -2309,18 +2312,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(this._reserveCpu) { return (int)(cpuMHz / this._cpuOverprovisioningFactor); } - + return 0; } - + private int getReserveMemMB(int memMB) { if(this._reserveMem) { return (int)(memMB / this._memOverprovisioningFactor); } - + return 0; } - + private NicTO[] sortNicsByDeviceId(NicTO[] nics) { List listForSort = new ArrayList(); @@ -2343,7 +2346,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return listForSort.toArray(new NicTO[0]); } - + private VolumeTO[] sortVolumesByDeviceId(VolumeTO[] volumes) { List listForSort = new ArrayList(); @@ -2407,38 +2410,38 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo) throws Exception { - + Pair 
switchName = getTargetSwitch(nicTo); String namePrefix = getNetworkNamePrefix(nicTo); Pair networkInfo = null; s_logger.info("Prepare network on vSwitch: " + switchName + " with name prefix: " + namePrefix); - + if(!_nexusVSwitch) { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), - nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, !namePrefix.startsWith("cloud.private")); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout); } - + return networkInfo; } - + // return Pair private Pair getTargetSwitch(NicTO nicTo) throws Exception { if(nicTo.getName() != null && !nicTo.getName().isEmpty()) { String[] tokens = nicTo.getName().split(","); - + if(tokens.length == 2) { return new Pair(tokens[0], tokens[1]); } else { return new Pair(nicTo.getName(), Vlan.UNTAGGED); } } - + if (nicTo.getType() == Networks.TrafficType.Guest) { return new Pair(this._guestNetworkVSwitchName, Vlan.UNTAGGED); } else if (nicTo.getType() == Networks.TrafficType.Control || nicTo.getType() == Networks.TrafficType.Management) { @@ -2453,7 +2456,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception("Unsupported traffic type: " + nicTo.getType().toString()); } } - + private String getNetworkNamePrefix(NicTO nicTo) throws Exception { if (nicTo.getType() == Networks.TrafficType.Guest) { return "cloud.guest"; @@ -2606,7 +2609,7 @@ 
public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource ReadyCommand: " + _gson.toJson(cmd)); } - + try { VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); @@ -2628,7 +2631,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); - + HostStatsEntry hostStats = new HostStatsEntry(cmd.getHostId(), 0, 0, 0, "host", 0, 0, 0, 0); Answer answer = new GetHostStatsAnswer(cmd, hostStats); try { @@ -2735,9 +2738,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, "0"); - + if (getVmState(vmMo) != State.Stopped) { - + // before we stop VM, remove all possible snapshots on the VM to let // disk chain be collapsed s_logger.info("Remove all snapshot before stopping VM " + cmd.getVmName()); @@ -2815,7 +2818,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { vmMo.rebootGuest(); return new RebootAnswer(cmd, "reboot succeeded", true); - } catch(ToolsUnavailable e) { + } catch(ToolsUnavailableFaultMsg e) { s_logger.warn("VMware tools is not installed at guest OS, we will perform hard reset for reboot"); } catch(Exception e) { s_logger.warn("We are not able to perform gracefull guest reboot due to " + VmwareHelper.getExceptionMessage(e)); @@ -2919,7 +2922,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception(msg); } mgr.prepareSecondaryStorageStore(secStoreUrl); - + ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl); if (morSecDs == null) { String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; @@ -2997,11 +3000,11 @@ public class VmwareResource implements 
StoragePoolResource, ServerResource, Vmwa } private VmwareHypervisorHost getTargetHyperHost(DatacenterMO dcMo, String destIp) throws Exception { - + VmwareManager mgr = dcMo.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - - ObjectContent[] ocs = dcMo.getHostPropertiesOnDatacenterHostFolder(new String[] { "name", "parent" }); - if (ocs != null && ocs.length > 0) { + + List ocs = dcMo.getHostPropertiesOnDatacenterHostFolder(new String[] { "name", "parent" }); + if (ocs != null && ocs.size() > 0) { for (ObjectContent oc : ocs) { HostMO hostMo = new HostMO(dcMo.getContext(), oc.getObj()); VmwareHypervisorHostNetworkSummary netSummary = hostMo.getHyperHostNetworkSummary(mgr.getManagementPortGroupByHost(hostMo)); @@ -3034,7 +3037,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morDatastore = null; morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); if(morDatastore == null) - morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), + morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); assert (morDatastore != null); @@ -3065,7 +3068,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { // We will leave datastore cleanup management to vCenter. Since for cluster VMFS datastore, it will always // be mounted by vCenter. 
- + // VmwareHypervisorHost hyperHost = this.getHyperHost(getServiceContext()); // hyperHost.unmountDatastore(pool.getUuid()); Answer answer = new Answer(cmd, true, "success"); @@ -3439,7 +3442,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception("Unable to find VM in vSphere, vm: " + cmd.getName()); } } - + Pair portInfo = vmMo.getVncPort(mgr.getManagementPortGroupByHost((HostMO)hyperHost)); if (s_logger.isTraceEnabled()) { @@ -3486,7 +3489,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(result.first()) return new Answer(cmd); } catch (Exception e) { - s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd,false,"PingTestCommand failed"); @@ -3547,11 +3550,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa /* * DestroyCommand content example - * + * * {"volume": {"id":5,"name":"Volume1", "mountPoint":"/export/home/kelven/vmware-test/primary", * "path":"6bb8762f-c34c-453c-8e03-26cc246ceec4", "size":0,"type":"DATADISK","resourceType": * "STORAGE_POOL","storagePoolType":"NetworkFilesystem", "poolId":0,"deviceId":0 } } - * + * * {"volume": {"id":1, "name":"i-2-1-KY-ROOT", "mountPoint":"/export/home/kelven/vmware-test/primary", * "path":"i-2-1-KY-ROOT","size":0,"type":"ROOT", "resourceType":"STORAGE_POOL", "storagePoolType":"NetworkFilesystem", * "poolId":0,"deviceId":0 } } @@ -3574,7 +3577,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ClusterMO clusterMo = new ClusterMO(context, morCluster); - + if 
(cmd.getVolume().getType() == Volume.Type.ROOT) { String vmName = cmd.getVmName(); if (vmName != null) { @@ -3586,13 +3589,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa HostMO hostMo = vmMo.getRunningHost(); List networks = vmMo.getNetworksWithDetails(); - + // tear down all devices first before we destroy the VM to avoid accidently delete disk backing files if (getVmState(vmMo) != State.Stopped) vmMo.safePowerOff(_shutdown_waitMs); vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); vmMo.destroy(); - + for (NetworkDetails netDetails : networks) { if (netDetails.getGCTag() != null && netDetails.getGCTag().equalsIgnoreCase("true")) { if (netDetails.getVMMorsOnNetwork() == null || netDetails.getVMMorsOnNetwork().length == 1) { @@ -3600,12 +3603,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - } - + } + if (s_logger.isInfoEnabled()) s_logger.info("Destroy volume by original name: " + cmd.getVolume().getPath() + ".vmdk"); dsMo.deleteFile(cmd.getVolume().getPath() + ".vmdk", morDc, true); - + // root volume may be created via linked-clone, delete the delta disk as well if (s_logger.isInfoEnabled()) s_logger.info("Destroy volume by derived name: " + cmd.getVolume().getPath() + "-delta.vmdk"); @@ -3622,7 +3625,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (vmMo != null) { if (s_logger.isInfoEnabled()) s_logger.info("Destroy template volume " + cmd.getVolume().getPath()); - + vmMo.destroy(); return new Answer(cmd, true, "Success"); } @@ -3645,7 +3648,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Empty disk chain info, fall back to try to delete by original backing file name"); } dsMo.deleteFile(cmd.getVolume().getPath() + ".vmdk", morDc, true); - + if (s_logger.isInfoEnabled()) { s_logger.info("Destroy volume by derived name: " + cmd.getVolume().getPath() + 
"-flat.vmdk"); } @@ -3656,7 +3659,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Destroy volume by original name: " + cmd.getVolume().getPath() + ".vmdk"); } dsMo.deleteFile(cmd.getVolume().getPath() + ".vmdk", morDc, true); - + if (s_logger.isInfoEnabled()) { s_logger.info("Destroy volume by derived name: " + cmd.getVolume().getPath() + "-flat.vmdk"); } @@ -3683,7 +3686,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * NetworkMO(hostMo.getContext(), netDetails.getNetworkMor()); ManagedObjectReference[] vms = * networkMo.getVMsOnNetwork(); if(vms == null || vms.length == 0) { if(s_logger.isInfoEnabled()) { * s_logger.info("Cleanup network as it is currently not in use: " + netDetails.getName()); } - * + * * hostMo.deletePortGroup(netDetails.getName()); } } } catch(Throwable e) { * s_logger.warn("Unable to cleanup network due to exception, skip for next time"); } */ @@ -3767,7 +3770,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.warn("Template host in vSphere is not in connected state, request template reload"); return new CreateAnswer(cmd, "Template host in vSphere is not in connected state, request template reload", true); } - + ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ManagedObjectReference morBaseSnapshot = vmTemplate.getSnapshotMor("cloud.template.base"); @@ -3796,7 +3799,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa dsMo.moveDatastoreFile(String.format("[%s] %s/%s.vmdk", dsMo.getName(), vmdkName, vmdkName), dcMo.getMor(), dsMo.getMor(), String.format("[%s] %s.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); - + dsMo.moveDatastoreFile(String.format("[%s] %s/%s-delta.vmdk", dsMo.getName(), vmdkName, vmdkName), dcMo.getMor(), dsMo.getMor(), String.format("[%s] %s-delta.vmdk", 
dsMo.getName(), vmdkName), dcMo.getMor(), true); @@ -3861,21 +3864,21 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmConfig.setName(vmName); vmConfig.setMemoryMB((long) 4); // vmware request minimum of 4 MB vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier._otherGuest.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); + vmConfig.getDeviceChange().add(scsiControllerSpec ); hyperHost.createVm(vmConfig); vmMo = hyperHost.findVmOnHyperHost(vmName); return vmMo; @@ -3902,25 +3905,25 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); VmwareManager mgr = hyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - + if(hyperHost.isHyperHostConnected()) { mgr.gcLeftOverVMs(context); - + if(_recycleHungWorker) { s_logger.info("Scan hung worker VM to recycle"); - + // GC worker that has been running for too long ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost( new String[] {"name", "config.template", "runtime.powerState", "runtime.bootTime"}); 
if(ocs != null) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { String name = null; boolean template = false; - VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; + VirtualMachinePowerState powerState = VirtualMachinePowerState.POWERED_OFF; GregorianCalendar bootTime = null; - + for(DynamicProperty prop : props) { if(prop.getName().equals("name")) name = prop.getVal().toString(); @@ -3928,22 +3931,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa template = (Boolean)prop.getVal(); else if(prop.getName().equals("runtime.powerState")) powerState = (VirtualMachinePowerState)prop.getVal(); - else if(prop.getName().equals("runtime.bootTime")) + else if(prop.getName().equals("runtime.bootTime")) bootTime = (GregorianCalendar)prop.getVal(); } - + if(!template && name.matches("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")) { boolean recycle = false; - + // recycle stopped worker VM and VM that has been running for too long (hard-coded 10 hours for now) - if(powerState == VirtualMachinePowerState.poweredOff) + if(powerState == VirtualMachinePowerState.POWERED_OFF) recycle = true; else if(bootTime != null && (new Date().getTime() - bootTime.getTimeInMillis() > 10*3600*1000)) recycle = true; - + if(recycle) { s_logger.info("Recycle pending worker VM: " + name); - + VirtualMachineMO vmMo = new VirtualMachineMO(hyperHost.getContext(), oc.getObj()); vmMo.powerOff(); vmMo.destroy(); @@ -3984,17 +3987,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Host " + hyperHost.getHyperHostName() + " is not in connected state"); return null; } - + AboutInfo aboutInfo = ((HostMO)hyperHost).getHostAboutInfo(); hostApiVersion = aboutInfo.getApiVersion(); - + } catch (Exception e) { String msg = "VmwareResource intialize() failed due to : " + VmwareHelper.getExceptionMessage(e); 
s_logger.error(msg); invalidateServiceContext(); return null; } - + StartupRoutingCommand cmd = new StartupRoutingCommand(); fillHostInfo(cmd); @@ -4040,7 +4043,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreSummary dsSummary = dsMo.getSummary(); String address = hostMo.getHostName(); - StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().get_value(), "", StoragePoolType.LVM, dsSummary.getCapacity(), dsSummary.getFreeSpace()); + StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.LVM, dsSummary.getCapacity(), dsSummary.getFreeSpace()); StartupStorageCommand cmd = new StartupStorageCommand(); cmd.setName(poolUuid); cmd.setPoolInfo(pInfo); @@ -4078,7 +4081,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa fillHostHardwareInfo(serviceContext, cmd); fillHostNetworkInfo(serviceContext, cmd); fillHostDetailsInfo(serviceContext, details); - } catch (RuntimeFault e) { + } catch (RuntimeFaultFaultMsg e) { s_logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); throw new CloudRuntimeException("RuntimeFault while retrieving host info"); } catch (RemoteException e) { @@ -4100,7 +4103,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa cmd.setVersion(VmwareResource.class.getPackage().getImplementationVersion()); } - private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFault, RemoteException, Exception { + private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFaultFaultMsg, RemoteException, Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); VmwareHypervisorHostResourceSummary summary = hyperHost.getHyperHostResourceSummary(); @@ -4116,14 +4119,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa 
cmd.setMemory(summary.getMemoryBytes()); } - private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFault, RemoteException { + private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFaultFaultMsg, RemoteException { try { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); - + assert(hyperHost instanceof HostMO); VmwareManager mgr = hyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - + VmwareHypervisorHostNetworkSummary summary = hyperHost.getHyperHostNetworkSummary(mgr.getManagementPortGroupByHost((HostMO)hyperHost)); if (summary == null) { throw new Exception("No ESX(i) host found"); @@ -4152,7 +4155,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); ClusterDasConfigInfo dasConfig = hyperHost.getDasConfig(); - if (dasConfig != null && dasConfig.getEnabled() != null && dasConfig.getEnabled().booleanValue()) { + if (dasConfig != null && dasConfig.isEnabled() != null && dasConfig.isEnabled().booleanValue()) { details.put("NativeHA", "true"); } } @@ -4223,7 +4226,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (s_logger.isDebugEnabled()) { s_logger.debug("VM " + vm + " is now missing from host report but we detected that it might be migrated to other host by vCenter"); } - + if(oldState != State.Starting && oldState != State.Migrating) { s_logger.debug("VM " + vm + " is now missing from host report and VM is not at starting/migrating state, remove it from host VM-sync map, oldState: " + oldState); _vms.remove(vm); @@ -4270,22 +4273,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return hyperHost.findVmOnPeerHyperHost(vmName) != null; } - protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, + 
protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception { - + VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); VmwareManager mgr = hyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); if(!mgr.beginExclusiveOperation(600)) throw new Exception("Unable to begin exclusive operation, lock time out"); - + try { int maxVncPorts = 64; int vncPort = 0; Random random = new Random(); - + HostMO vmOwnerHost = vmMo.getRunningHost(); - + ManagedObjectReference morParent = vmOwnerHost.getParentMor(); HashMap portInfo; if(morParent.getType().equalsIgnoreCase("ClusterComputeResource")) { @@ -4294,7 +4297,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } else { portInfo = vmOwnerHost.getVmVncPortsOnHost(); } - + // allocate first at 5900 - 5964 range Collection existingPorts = portInfo.values(); int val = random.nextInt(maxVncPorts); @@ -4304,10 +4307,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vncPort = 5900 + val; break; } - + val = (++val) % maxVncPorts; } while (val != startVal); - + if(vncPort == 0) { s_logger.info("we've run out of range for ports between 5900-5964 for the cluster, we will try port range at 59000-60000"); @@ -4320,19 +4323,19 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vncPort = additionalRange.first() + val; break; } - + val = (++val) % maxVncPorts; } while (val != startVal); } - + if (vncPort == 0) { throw new Exception("Unable to find an available VNC port on host"); } - + if (s_logger.isInfoEnabled()) { s_logger.info("Configure VNC port for VM " + vmName + ", port: " + vncPort + ", host: " + vmOwnerHost.getHyperHostName()); } - + return VmwareHelper.composeVncOptions(optionsToMerge, true, vncPassword, vncPort, keyboardLayout); } finally { try { @@ -4356,9 +4359,9 @@ public class 
VmwareResource implements StoragePoolResource, ServerResource, Vmwa } if (cpuArchitecture.equalsIgnoreCase("x86_64")) { - return VirtualMachineGuestOsIdentifier.otherGuest64; + return VirtualMachineGuestOsIdentifier.OTHER_GUEST_64; } - return VirtualMachineGuestOsIdentifier.otherGuest; + return VirtualMachineGuestOsIdentifier.OTHER_GUEST; } private void prepareNetworkForVmTargetHost(HostMO hostMo, VirtualMachineMO vmMo) throws Exception { @@ -4377,7 +4380,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length == 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) { + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) (shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } String vlanId = null; @@ -4393,7 +4396,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length == 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) { + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) (shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } String vlanId = null; @@ -4409,7 +4412,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length >= 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) { + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) 
(shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } @@ -4427,7 +4430,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - + private HashMap getVmStates() throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost(new String[] { "name", "runtime.powerState", "config.template" }); @@ -4435,12 +4438,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa HashMap newStates = new HashMap(); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if (objProps != null) { boolean isTemplate = false; String name = null; - VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; + VirtualMachinePowerState powerState = VirtualMachinePowerState.POWERED_OFF; for (DynamicProperty objProp : objProps) { if (objProp.getName().equals("config.template")) { if (objProp.getVal().toString().equalsIgnoreCase("true")) { @@ -4467,14 +4470,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private HashMap getVmStats(List vmNames) throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); HashMap vmResponseMap = new HashMap(); - ManagedObjectReference perfMgr = getServiceContext().getServiceConnection().getServiceContent().getPerfManager(); - VimPortType service = getServiceContext().getServiceConnection().getService(); + ManagedObjectReference perfMgr = getServiceContext().getServiceContent().getPerfManager(); + VimPortType service = getServiceContext().getService(); PerfCounterInfo rxPerfCounterInfo = null; PerfCounterInfo txPerfCounterInfo = null; - PerfCounterInfo[] cInfo = (PerfCounterInfo[]) getServiceContext().getServiceUtil().getDynamicProperty(perfMgr, "perfCounter"); + PerfCounterInfo[] cInfo = (PerfCounterInfo[]) 
getServiceContext().getVimClient().getDynamicProperty(perfMgr, "perfCounter"); for(int i=0; i 0) { for (ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if (objProps != null) { String name = null; String numberCPUs = null; @@ -4511,13 +4514,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa assert(vmMor!=null); ArrayList vmNetworkMetrics = new ArrayList(); - // get all the metrics from the available sample period - PerfMetricId[] perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null); + // get all the metrics from the available sample period + List perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null); if(perfMetrics != null) { - for(int index=0; index < perfMetrics.length; ++index) { - if ( ((rxPerfCounterInfo != null) && (perfMetrics[index].getCounterId() == rxPerfCounterInfo.getKey())) || - ((txPerfCounterInfo != null) && (perfMetrics[index].getCounterId() == txPerfCounterInfo.getKey())) ) { - vmNetworkMetrics.add(perfMetrics[index]); + for(int index=0; index < perfMetrics.size(); ++index) { + if ( ((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) || + ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey())) ) { + vmNetworkMetrics.add(perfMetrics.get(index)); } } } @@ -4530,23 +4533,26 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa PerfQuerySpec qSpec = new PerfQuerySpec(); qSpec.setEntity(vmMor); PerfMetricId[] availableMetricIds = (PerfMetricId[]) vmNetworkMetrics.toArray(new PerfMetricId[0]); - qSpec.setMetricId(availableMetricIds); - PerfQuerySpec[] qSpecs = new PerfQuerySpec[] {qSpec}; - PerfEntityMetricBase[] values = service.queryPerf(perfMgr, qSpecs); + qSpec.getMetricId().addAll(Arrays.asList(availableMetricIds)); + List qSpecs = new ArrayList(); + qSpecs.add(qSpec); + List 
values = service.queryPerf(perfMgr, qSpecs); - for(int i=0; i infos = ((PerfEntityMetric)values.get(i)).getSampleInfo(); + int endMs = infos.get(infos.size()-1).getTimestamp().getSecond() * 1000 + infos.get(infos.size()-1).getTimestamp().getMillisecond(); + int beginMs = infos.get(0).getTimestamp().getSecond() * 1000 + infos.get(0).getTimestamp().getMillisecond(); + sampleDuration = (endMs - beginMs) /1000; + List vals = ((PerfEntityMetric)values.get(i)).getValue(); + for(int vi = 0; ((vals!= null) && (vi < vals.size())); ++vi){ + if(vals.get(vi) instanceof PerfMetricIntSeries) { + PerfMetricIntSeries val = (PerfMetricIntSeries)vals.get(vi); + List perfValues = val.getValue(); + if (vals.get(vi).getId().getCounterId() == rxPerfCounterInfo.getKey()) { + networkReadKBs = sampleDuration * perfValues.get(3); //get the average RX rate multiplied by sampled duration } - if (vals[vi].getId().getCounterId() == txPerfCounterInfo.getKey()) { - networkWriteKBs = sampleDuration * perfValues[3];//get the average TX rate multiplied by sampled duration + if (vals.get(vi).getId().getCounterId() == txPerfCounterInfo.getKey()) { + networkWriteKBs = sampleDuration * perfValues.get(3);//get the average TX rate multiplied by sampled duration } } } @@ -4556,8 +4562,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - return vmResponseMap; - } + return vmResponseMap; + } protected String networkUsage(final String privateIpAddress, final String option, final String ethName) { String args = null; @@ -4590,7 +4596,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return result.second(); } catch (Throwable e) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. failure due to " + s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. 
failure due to " + VmwareHelper.getExceptionMessage(e), e); } @@ -4681,7 +4687,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ComputeResourceSummary hardwareSummary = hyperHost.getHyperHostHardwareSummary(); if(hardwareSummary == null) return null; - + HostStatsEntry entry = new HostStatsEntry(); entry.setEntityType("host"); @@ -4692,19 +4698,19 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return entry; } - + private static String getRouterSshControlIp(NetworkElementCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); String routerGuestIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP); String zoneNetworkType = cmd.getAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE); - + if(routerGuestIp != null && zoneNetworkType != null && NetworkType.valueOf(zoneNetworkType) == NetworkType.Basic) { if(s_logger.isDebugEnabled()) s_logger.debug("In Basic zone mode, use router's guest IP for SSH control. guest IP : " + routerGuestIp); - + return routerGuestIp; } - + if(s_logger.isDebugEnabled()) s_logger.debug("Use router's private IP for SSH control. 
IP : " + routerIp); return routerIp; @@ -4724,15 +4730,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _dcId = (String) params.get("zone"); _pod = (String) params.get("pod"); _cluster = (String) params.get("cluster"); - + _guid = (String) params.get("guid"); String[] tokens = _guid.split("@"); _vCenterAddress = tokens[1]; _morHyperHost = new ManagedObjectReference(); String[] hostTokens = tokens[0].split(":"); _morHyperHost.setType(hostTokens[0]); - _morHyperHost.set_value(hostTokens[1]); - + _morHyperHost.setValue(hostTokens[1]); + VmwareContext context = getServiceContext(); try { VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); @@ -4770,14 +4776,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(_privateNetworkVSwitchName == null) { _privateNetworkVSwitchName = (String) params.get("private.network.vswitch.name"); - } + } if(_publicNetworkVSwitchName == null) { _publicNetworkVSwitchName = (String) params.get("public.network.vswitch.name"); } if(_guestNetworkVSwitchName == null) { _guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name"); } - + String value = (String) params.get("cpu.overprovisioning.factor"); if(value != null) _cpuOverprovisioningFactor = Float.parseFloat(value); @@ -4785,7 +4791,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa value = (String) params.get("vmware.reserve.cpu"); if(value != null && value.equalsIgnoreCase("true")) _reserveCpu = true; - + value = (String) params.get("vmware.recycle.hung.wokervm"); if(value != null && value.equalsIgnoreCase("true")) _recycleHungWorker = true; @@ -4797,7 +4803,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa value = (String) params.get("vmware.reserve.mem"); if(value != null && value.equalsIgnoreCase("true")) _reserveMem = true; - + value = (String)params.get("vmware.root.disk.controller"); if(value != 
null && value.equalsIgnoreCase("scsi")) _rootDiskController = DiskControllerType.scsi; @@ -4848,7 +4854,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _serviceContext = VmwareContextFactory.create(_vCenterAddress, _username, _password); VmwareHypervisorHost hyperHost = getHyperHost(_serviceContext, cmd); assert(hyperHost instanceof HostMO); - + HostFirewallSystemMO firewallMo = ((HostMO)hyperHost).getHostFirewallSystemMO(); boolean bRefresh = false; if(firewallMo != null) { @@ -4864,7 +4870,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - + if(bRefresh) firewallMo.refreshFirewall(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java index fc298c895c8..646ef633fc7 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.storage.resource; @@ -20,45 +20,44 @@ import java.util.HashMap; import java.util.Iterator; import java.util.Map; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.vmware.apputils.version.ExtendedAppUtil; public class VmwareSecondaryStorageContextFactory { private static volatile int s_seq = 1; - + private static Map s_contextMap = new HashMap(); - + public static void initFactoryEnvironment() { System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); } - + public static VmwareContext create(String vCenterAddress, String vCenterUserName, String vCenterPassword) throws Exception { assert(vCenterAddress != null); assert(vCenterUserName != null); assert(vCenterPassword != null); - + VmwareContext context = null; - + synchronized(s_contextMap) { context = s_contextMap.get(vCenterAddress); if(context == null) { String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; - ExtendedAppUtil appUtil = ExtendedAppUtil.initialize(vCenterAddress + "-" + s_seq++, params); - - appUtil.connect(); - context = new VmwareContext(appUtil, vCenterAddress); + //String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; + VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); + vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + context = new VmwareContext(vimClient, vCenterAddress); context.registerStockObject("username", vCenterUserName); context.registerStockObject("password", vCenterPassword); - + s_contextMap.put(vCenterAddress, context); } } - + assert(context != null); return context; } - + public static void invalidate(VmwareContext context) { synchronized(s_contextMap) { for(Iterator> entryIter = 
s_contextMap.entrySet().iterator(); entryIter.hasNext();) { @@ -68,7 +67,7 @@ public class VmwareSecondaryStorageContextFactory { } } } - + context.close(); } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java index 2abed160dfc..566e750c3fe 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.storage.resource; @@ -85,11 +85,11 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe if(cmd.getContextParam("execid") != null) { answer.setContextParam("execid", cmd.getContextParam("execid")); } - + if(cmd.getContextParam("checkpoint") != null) { answer.setContextParam("checkpoint", cmd.getContextParam("checkpoint")); } - + if(cmd.getContextParam("checkpoint2") != null) { answer.setContextParam("checkpoint2", cmd.getContextParam("checkpoint2")); } @@ -219,23 +219,23 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe } morHyperHost.setType(hostTokens[0]); - morHyperHost.set_value(hostTokens[1]); + morHyperHost.setValue(hostTokens[1]); if(morHyperHost.getType().equalsIgnoreCase("HostSystem")) { HostMO hostMo = new HostMO(context, morHyperHost); try { - + ManagedObjectReference mor = hostMo.getHyperHostCluster(); ClusterMO clusterMo = new ClusterMO(hostMo.getContext(), mor); List> hostsInCluster = clusterMo.getClusterHosts(); for(Pair hostPair : hostsInCluster) { HostMO hostIteratorMo = new HostMO(hostMo.getContext(), hostPair.first()); - + VmwareHypervisorHostNetworkSummary netSummary = hostIteratorMo.getHyperHostNetworkSummary( hostIteratorMo.getHostType() == VmwareHostType.ESXi ? 
cmd.getContextParam("manageportgroup") : cmd.getContextParam("serviceconsole")); _resource.ensureOutgoingRuleForAddress(netSummary.getHostIp()); - + s_logger.info("Setup firewall rule for host: " + netSummary.getHostIp()); } } catch(Throwable e) { @@ -253,7 +253,7 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe public String getWorkerName(VmwareContext context, Command cmd, int workerSequence) { assert(cmd.getContextParam("worker") != null); assert(workerSequence < 2); - + if(workerSequence == 0) return cmd.getContextParam("worker"); return cmd.getContextParam("worker2"); @@ -276,7 +276,7 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe assert(hostTokens.length == 2); morHyperHost.setType(hostTokens[0]); - morHyperHost.set_value(hostTokens[1]); + morHyperHost.setValue(hostTokens[1]); if(morHyperHost.getType().equalsIgnoreCase("HostSystem")) { HostMO hostMo = new HostMO(context, morHyperHost); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/BaseMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/BaseMO.java index ff7e00436ea..0ac93e4cd6a 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/BaseMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/BaseMO.java @@ -25,55 +25,55 @@ import com.vmware.vim25.ManagedObjectReference; public class BaseMO { private static final Logger s_logger = Logger.getLogger(BaseMO.class); - + protected VmwareContext _context; protected ManagedObjectReference _mor; private String _name; - + public BaseMO(VmwareContext context, ManagedObjectReference mor) { assert(context != null); - + _context = context; _mor = mor; } - + public BaseMO(VmwareContext context, String morType, String morValue) { assert(context != null); assert(morType != null); assert(morValue != null); - + _context = context; _mor = new ManagedObjectReference(); _mor.setType(morType); - _mor.set_value(morValue); + _mor.setValue(morValue); } - + public VmwareContext 
getContext() { return _context; } - + public ManagedObjectReference getMor() { assert(_mor != null); return _mor; } - + public ManagedObjectReference getParentMor() throws Exception { - return (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty(_mor, "parent"); + return (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "parent"); } public String getName() throws Exception { if(_name == null) - _name = (String)_context.getServiceUtil().getDynamicProperty(_mor, "name"); - + _name = (String)_context.getVimClient().getDynamicProperty(_mor, "name"); + return _name; } - + public boolean destroy() throws Exception { - ManagedObjectReference morTask = _context.getService().destroy_Task(_mor); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().destroyTask(_mor); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -81,16 +81,16 @@ public class BaseMO { } return false; } - + public void reload() throws Exception { _context.getService().reload(_mor); } - + public boolean rename(String newName) throws Exception { - ManagedObjectReference morTask = _context.getService().rename_Task(_mor, newName); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().renameTask(_mor, newName); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -98,11 +98,11 @@ public class BaseMO { } return false; } - + public void setCustomFieldValue(String fieldName, String value) throws Exception { - CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, 
_context.getServiceContent().getCustomFieldsManager()); - + int key = getCustomFieldKey(fieldName); if(key == 0) { try { @@ -114,36 +114,36 @@ public class BaseMO { key = getCustomFieldKey(fieldName); } } - + if(key == 0) throw new Exception("Unable to setup custom field facility"); - + cfmMo.setField(getMor(), key, value); } - + public String getCustomFieldValue(String fieldName) throws Exception { int key = getCustomFieldKey(fieldName); if(key == 0) return null; - - CustomFieldStringValue cfValue = (CustomFieldStringValue)_context.getServiceUtil().getDynamicProperty(getMor(), + + CustomFieldStringValue cfValue = (CustomFieldStringValue)_context.getVimClient().getDynamicProperty(getMor(), String.format("value[%d]", key)); if(cfValue != null) return cfValue.getValue(); - + return null; } - + public int getCustomFieldKey(String fieldName) throws Exception { return getCustomFieldKey(getMor().getType(), fieldName); } - + public int getCustomFieldKey(String morType, String fieldName) throws Exception { assert(morType != null); - - CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, + + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); - + return cfmMo.getCustomFieldKey(morType, fieldName); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java index 2ce6695befb..10265545bf4 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java @@ -26,7 +26,6 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.Pair; import com.google.gson.Gson; -import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.ArrayOfHostIpRouteEntry; import com.vmware.vim25.ClusterComputeResourceSummary; import com.vmware.vim25.ClusterConfigInfoEx; @@ -47,24 +46,25 @@ import 
com.vmware.vim25.ObjectSpec; import com.vmware.vim25.OptionValue; import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; -import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VirtualMachineConfigSpec; +import edu.emory.mathcs.backport.java.util.Arrays; + // // interface. This has changed as ClusterMO no longer works as a special host anymore. Need to refactor accordingly // public class ClusterMO extends BaseMO implements VmwareHypervisorHost { private static final Logger s_logger = Logger.getLogger(ClusterMO.class); - + public ClusterMO(VmwareContext context, ManagedObjectReference morCluster) { super(context, morCluster); } - + public ClusterMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + @Override public String getHyperHostName() throws Exception { return getName(); @@ -73,40 +73,39 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { @Override public ClusterDasConfigInfo getDasConfig() throws Exception { // Note getDynamicProperty() with "configurationEx.dasConfig" does not work here because of that dasConfig is a property in subclass - ClusterConfigInfoEx configInfo = (ClusterConfigInfoEx)_context.getServiceUtil().getDynamicProperty(_mor, "configurationEx"); + ClusterConfigInfoEx configInfo = (ClusterConfigInfoEx)_context.getVimClient().getDynamicProperty(_mor, "configurationEx"); return configInfo.getDasConfig(); } - + @Override public ManagedObjectReference getHyperHostDatacenter() throws Exception { Pair dcPair = DatacenterMO.getOwnerDatacenter(getContext(), getMor()); assert(dcPair != null); return dcPair.first().getMor(); } - + @Override public ManagedObjectReference getHyperHostOwnerResourcePool() throws Exception { - ServiceUtil serviceUtil = _context.getServiceUtil(); - return (ManagedObjectReference)serviceUtil.getDynamicProperty(getMor(), "resourcePool"); + return 
(ManagedObjectReference)_context.getVimClient().getDynamicProperty(getMor(), "resourcePool"); } - + @Override public ManagedObjectReference getHyperHostCluster() throws Exception { return _mor; } - + @Override public VirtualMachineMO findVmOnHyperHost(String name) throws Exception { ObjectContent[] ocs = getVmPropertiesOnHyperHost(new String[] { "name" }); return HypervisorHostHelper.findVmFromObjectContent(_context, ocs, name); } - + @Override public VirtualMachineMO findVmOnPeerHyperHost(String name) throws Exception { ObjectContent[] ocs = getVmPropertiesOnHyperHost(new String[] { "name" }); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { for(DynamicProperty prop : props) { if(prop.getVal().toString().equals(name)) @@ -117,54 +116,54 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { } return null; } - + @Override public ObjectContent[] getVmPropertiesOnHyperHost(String[] propertyPaths) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - retrieveProperties() for VM properties. target MOR: " + _mor.get_value() + ", properties: " + new Gson().toJson(propertyPaths)); - + s_logger.trace("vCenter API trace - retrieveProperties() for VM properties. 
target MOR: " + _mor.getValue() + ", properties: " + new Gson().toJson(propertyPaths)); + PropertySpec pSpec = new PropertySpec(); pSpec.setType("VirtualMachine"); - pSpec.setPathSet(propertyPaths); + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); TraversalSpec host2VmFolderTraversal = new TraversalSpec(); host2VmFolderTraversal.setType("HostSystem"); host2VmFolderTraversal.setPath("vm"); host2VmFolderTraversal.setName("host2VmFolderTraversal"); - + TraversalSpec cluster2HostFolderTraversal = new TraversalSpec(); cluster2HostFolderTraversal.setType("ClusterComputeResource"); cluster2HostFolderTraversal.setPath("host"); cluster2HostFolderTraversal.setName("cluster2HostFolderTraversal"); - cluster2HostFolderTraversal.setSelectSet(new SelectionSpec[] { host2VmFolderTraversal }); - + cluster2HostFolderTraversal.getSelectSet().add(host2VmFolderTraversal); + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(getMor()); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { cluster2HostFolderTraversal }); - + oSpec.getSelectSet().add(cluster2HostFolderTraversal); + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] properties = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List properties = _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - retrieveProperties() done"); - return properties; + return properties.toArray(new ObjectContent[properties.size()]); } @Override public ObjectContent[] getDatastorePropertiesOnHyperHost(String[] propertyPaths) throws Exception { if(s_logger.isTraceEnabled()) - 
s_logger.trace("vCenter API trace - retrieveProperties() on Datastore properties. target MOR: " + _mor.get_value() + ", properties: " + new Gson().toJson(propertyPaths)); - + s_logger.trace("vCenter API trace - retrieveProperties() on Datastore properties. target MOR: " + _mor.getValue() + ", properties: " + new Gson().toJson(propertyPaths)); + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec cluster2DatastoreTraversal = new TraversalSpec(); cluster2DatastoreTraversal.setType("ClusterComputeResource"); cluster2DatastoreTraversal.setPath("datastore"); @@ -173,29 +172,30 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { cluster2DatastoreTraversal }); + oSpec.getSelectSet().add(cluster2DatastoreTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] properties = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List properties = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - retrieveProperties() done"); - return properties; + return properties.toArray(new ObjectContent[properties.size()]); } - + private ObjectContent[] getHostPropertiesOnCluster(String[] propertyPaths) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - retrieveProperties() on Host properties. 
target MOR: " + _mor.get_value() + ", properties: " + new Gson().toJson(propertyPaths)); + s_logger.trace("vCenter API trace - retrieveProperties() on Host properties. target MOR: " + _mor.getValue() + ", properties: " + new Gson().toJson(propertyPaths)); PropertySpec pSpec = new PropertySpec(); pSpec.setType("HostSystem"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec cluster2HostTraversal = new TraversalSpec(); cluster2HostTraversal.setType("ClusterComputeResource"); cluster2HostTraversal.setPath("host"); @@ -204,63 +204,65 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { cluster2HostTraversal }); + oSpec.getSelectSet().add(cluster2HostTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] properties = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List properties = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - retrieveProperties() done"); - return properties; + return properties.toArray(new ObjectContent[properties.size()]); } @Override public boolean createVm(VirtualMachineConfigSpec vmSpec) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - createVM_Task(). target MOR: " + _mor.get_value() + ", VirtualMachineConfigSpec: " + new Gson().toJson(vmSpec)); - + s_logger.trace("vCenter API trace - createVM_Task(). 
target MOR: " + _mor.getValue() + ", VirtualMachineConfigSpec: " + new Gson().toJson(vmSpec)); + assert(vmSpec != null); DatacenterMO dcMo = new DatacenterMO(_context, getHyperHostDatacenter()); ManagedObjectReference morPool = getHyperHostOwnerResourcePool(); - - ManagedObjectReference morTask = _context.getService().createVM_Task( + + ManagedObjectReference morTask = _context.getService().createVMTask( dcMo.getVmFolder(), vmSpec, morPool, null); - String result = _context.getServiceUtil().waitForTask(morTask); - - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + + if(result) { _context.waitForTaskProgressDone(morTask); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createVM_Task() done(successfully)"); return true; } else { s_logger.error("VMware createVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createVM_Task() done(failed)"); return false; } - + @Override public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - importVmFromOVF(). target MOR: " + _mor.get_value() + ", ovfFilePath: " + ovfFilePath + ", vmName: " + vmName - + ", datastore: " + dsMo.getMor().get_value() + ", diskOption: " + diskOption); + s_logger.trace("vCenter API trace - importVmFromOVF(). target MOR: " + _mor.getValue() + ", ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + + ", datastore: " + dsMo.getMor().getValue() + ", diskOption: " + diskOption); ManagedObjectReference morRp = getHyperHostOwnerResourcePool(); assert(morRp != null); - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - importVmFromOVF(). resource pool: " + morRp.get_value()); - + s_logger.trace("vCenter API trace - importVmFromOVF(). 
resource pool: " + morRp.getValue()); + HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, null); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF() done"); } @@ -268,83 +270,83 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { @Override public boolean createBlankVm(String vmName, int cpuCount, int cpuSpeedMHz, int cpuReservedMHz, boolean limitCpuUse, int memoryMB, int memoryReserveMB, String guestOsIdentifier, ManagedObjectReference morDs, boolean snapshotDirToParent) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - createBlankVm(). target MOR: " + _mor.get_value() + ", vmName: " + vmName + ", cpuCount: " + cpuCount - + ", cpuSpeedMhz: " + cpuSpeedMHz + ", cpuReservedMHz: " + cpuReservedMHz + ", limitCpu: " + limitCpuUse + ", memoryMB: " + memoryMB - + ", guestOS: " + guestOsIdentifier + ", datastore: " + morDs.get_value() + ", snapshotDirToParent: " + snapshotDirToParent); - + s_logger.trace("vCenter API trace - createBlankVm(). 
target MOR: " + _mor.getValue() + ", vmName: " + vmName + ", cpuCount: " + cpuCount + + ", cpuSpeedMhz: " + cpuSpeedMHz + ", cpuReservedMHz: " + cpuReservedMHz + ", limitCpu: " + limitCpuUse + ", memoryMB: " + memoryMB + + ", guestOS: " + guestOsIdentifier + ", datastore: " + morDs.getValue() + ", snapshotDirToParent: " + snapshotDirToParent); + boolean result = HypervisorHostHelper.createBlankVm(this, vmName, cpuCount, cpuSpeedMHz, cpuReservedMHz, limitCpuUse, memoryMB, memoryReserveMB, guestOsIdentifier, morDs, snapshotDirToParent); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createBlankVm() done"); - + return result; } @Override - public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolHostAddress, + public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolHostAddress, int poolHostPort, String poolPath, String poolUuid) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - mountDatastore(). target MOR: " + _mor.get_value() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + s_logger.trace("vCenter API trace - mountDatastore(). 
target MOR: " + _mor.getValue() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + ", poolHostPort: " + poolHostPort + ", poolPath: " + poolPath + ", poolUuid: " + poolUuid); - + ManagedObjectReference morDs = null; ManagedObjectReference morDsFirst = null; - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(_mor, "host"); + ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); if(hosts != null && hosts.length > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); morDs = hostMo.mountDatastore(vmfsDatastore, poolHostAddress, poolHostPort, poolPath, poolUuid); if(morDsFirst == null) morDsFirst = morDs; - + // assume datastore is in scope of datacenter - assert(morDsFirst.get_value().equals(morDs.get_value())); + assert(morDsFirst.getValue().equals(morDs.getValue())); } } - + if(morDs == null) { String msg = "Failed to mount datastore in all hosts within the cluster"; s_logger.error(msg); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); throw new Exception(msg); } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(successfully)"); - + return morDs; } - + @Override public void unmountDatastore(String poolUuid) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - unmountDatastore(). target MOR: " + _mor.get_value() + ", poolUuid: " + poolUuid); - - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(_mor, "host"); + s_logger.trace("vCenter API trace - unmountDatastore(). 
target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid); + + ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); if(hosts != null && hosts.length > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); hostMo.unmountDatastore(poolUuid); } } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - unmountDatastore() done"); } - + @Override public ManagedObjectReference findDatastore(String poolUuid) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - findDatastore(). target MOR: " + _mor.get_value() + ", poolUuid: " + poolUuid); - - CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, + s_logger.trace("vCenter API trace - findDatastore(). target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid); + + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); int key = cfmMo.getCustomFieldKey("Datastore", CustomFieldConstants.CLOUD_UUID); assert(key != 0); @@ -352,16 +354,16 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] {"name", String.format("value[%d]", key)}); if(ocs != null) { for(ObjectContent oc : ocs) { - if(oc.getPropSet(0).getVal().equals(poolUuid)) + if(oc.getPropSet().get(0).getVal().equals(poolUuid)) return oc.getObj(); - - if(oc.getPropSet().length > 1) { - DynamicProperty prop = oc.getPropSet(1); + + if(oc.getPropSet().size() > 1) { + DynamicProperty prop = oc.getPropSet().get(1); if(prop != null && prop.getVal() != null) { if(prop.getVal() instanceof CustomFieldStringValue) { String val = ((CustomFieldStringValue)prop.getVal()).getValue(); if(val.equalsIgnoreCase(poolUuid)) { - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findDatastore() done(successfully)"); return oc.getObj(); @@ -371,31 +373,31 
@@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { } } } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findDatastore() done(failed)"); return null; } - + @Override public ManagedObjectReference findDatastoreByExportPath(String exportPath) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - findDatastoreByExportPath(). target MOR: " + _mor.get_value() + ", exportPath: " + exportPath); - + s_logger.trace("vCenter API trace - findDatastoreByExportPath(). target MOR: " + _mor.getValue() + ", exportPath: " + exportPath); + ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] {"info"}); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DatastoreInfo dsInfo = (DatastoreInfo)oc.getPropSet(0).getVal(); + DatastoreInfo dsInfo = (DatastoreInfo)oc.getPropSet().get(0).getVal(); if(dsInfo != null && dsInfo instanceof NasDatastoreInfo) { NasDatastoreInfo info = (NasDatastoreInfo)dsInfo; if(info != null) { String vmwareUrl = info.getUrl(); if(vmwareUrl.charAt(vmwareUrl.length() - 1) == '/') vmwareUrl = vmwareUrl.substring(0, vmwareUrl.length() - 1); - + URI uri = new URI(vmwareUrl); if(uri.getPath().equals("/" + exportPath)) { - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findDatastoreByExportPath() done(successfully)"); return oc.getObj(); @@ -404,49 +406,49 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { } } } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findDatastoreByExportPath() done(failed)"); return null; } - + @Override public ManagedObjectReference findMigrationTarget(VirtualMachineMO vmMo) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - findMigrationTarget(). target MOR: " + _mor.get_value() + ", vm: " + vmMo.getName()); + s_logger.trace("vCenter API trace - findMigrationTarget(). 
target MOR: " + _mor.getValue() + ", vm: " + vmMo.getName()); - ClusterHostRecommendation[] candidates = recommendHostsForVm(vmMo); - if(candidates != null && candidates.length > 0) { + List candidates = recommendHostsForVm(vmMo); + if(candidates != null && candidates.size() > 0) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findMigrationTarget() done(successfully)"); - return candidates[0].getHost(); + return candidates.get(0).getHost(); } if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - findMigrationTarget() done(failed)"); return null; } - + @Override public boolean isHyperHostConnected() throws Exception { ObjectContent[] ocs = getHostPropertiesOnCluster(new String[] {"runtime"}); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - HostRuntimeInfo runtimeInfo = (HostRuntimeInfo)oc.getPropSet(0).getVal(); - // as long as we have one host connected, we assume the cluster is up - if(runtimeInfo.getConnectionState() == HostSystemConnectionState.connected) + HostRuntimeInfo runtimeInfo = (HostRuntimeInfo)oc.getPropSet().get(0).getVal(); + // as long as we have one host connected, we assume the cluster is up + if(runtimeInfo.getConnectionState() == HostSystemConnectionState.CONNECTED) return true; } } return false; } - + @Override public String getHyperHostDefaultGateway() throws Exception { ObjectContent[] ocs = getHostPropertiesOnCluster(new String[] {"config.network.routeTableInfo.ipRoute"}); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - ArrayOfHostIpRouteEntry entries = (ArrayOfHostIpRouteEntry)oc.getPropSet(0).getVal(); + ArrayOfHostIpRouteEntry entries = (ArrayOfHostIpRouteEntry)oc.getPropSet().get(0).getVal(); if(entries != null) { for(HostIpRouteEntry entry : entries.getHostIpRouteEntry()) { if(entry.getNetwork().equalsIgnoreCase("0.0.0.0")) @@ -455,28 +457,28 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { } } } - + throw new Exception("Could not find host 
default gateway, host is not properly configured?"); } - + @Override public VmwareHypervisorHostResourceSummary getHyperHostResourceSummary() throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostResourceSummary(). target MOR: " + _mor.get_value()); + s_logger.trace("vCenter API trace - getHyperHostResourceSummary(). target MOR: " + _mor.getValue()); VmwareHypervisorHostResourceSummary summary = new VmwareHypervisorHostResourceSummary(); - - ComputeResourceSummary vmwareSummary = (ComputeResourceSummary)_context.getServiceUtil().getDynamicProperty( + + ComputeResourceSummary vmwareSummary = (ComputeResourceSummary)_context.getVimClient().getDynamicProperty( _mor, "summary"); - - // TODO, need to use traversal to optimize retrieve of + + // TODO, need to use traversal to optimize retrieve of int cpuNumInCpuThreads = 1; - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(_mor, "host"); + ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); if(hosts != null && hosts.length > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); HostHardwareSummary hardwareSummary = hostMo.getHostHardwareSummary(); - + if(hardwareSummary.getNumCpuCores()*hardwareSummary.getNumCpuThreads() > cpuNumInCpuThreads) cpuNumInCpuThreads = hardwareSummary.getNumCpuCores()*hardwareSummary.getNumCpuThreads(); } @@ -484,74 +486,74 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { summary.setCpuCount(cpuNumInCpuThreads); summary.setCpuSpeed(vmwareSummary.getTotalCpu()); summary.setMemoryBytes(vmwareSummary.getTotalMemory()); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostResourceSummary() done"); return summary; } - + @Override public VmwareHypervisorHostNetworkSummary getHyperHostNetworkSummary(String esxServiceConsolePort) throws 
Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostNetworkSummary(). target MOR: " + _mor.get_value() + ", mgmtPortgroup: " + esxServiceConsolePort); - - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(_mor, "host"); + s_logger.trace("vCenter API trace - getHyperHostNetworkSummary(). target MOR: " + _mor.getValue() + ", mgmtPortgroup: " + esxServiceConsolePort); + + ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); if(hosts != null && hosts.length > 0) { VmwareHypervisorHostNetworkSummary summary = new HostMO(_context, hosts[0]).getHyperHostNetworkSummary(esxServiceConsolePort); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostResourceSummary() done(successfully)"); return summary; } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostResourceSummary() done(failed)"); return null; } - + @Override public ComputeResourceSummary getHyperHostHardwareSummary() throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostHardwareSummary(). target MOR: " + _mor.get_value()); + s_logger.trace("vCenter API trace - getHyperHostHardwareSummary(). 
target MOR: " + _mor.getValue()); ClusterComputeResourceSummary hardwareSummary = (ClusterComputeResourceSummary) - _context.getServiceUtil().getDynamicProperty(_mor, "summary"); + _context.getVimClient().getDynamicProperty(_mor, "summary"); if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostHardwareSummary() done"); return hardwareSummary; } - public ClusterHostRecommendation[] recommendHostsForVm(VirtualMachineMO vmMo) throws Exception { - return _context.getService().recommendHostsForVm(_mor, vmMo.getMor(), + public List recommendHostsForVm(VirtualMachineMO vmMo) throws Exception { + return _context.getService().recommendHostsForVm(_mor, vmMo.getMor(), getHyperHostOwnerResourcePool()); } - + public List> getClusterHosts() throws Exception { List> hosts = new ArrayList>(); - + ObjectContent[] ocs = getHostPropertiesOnCluster(new String[] {"name"}); if(ocs != null) { for(ObjectContent oc : ocs) { ManagedObjectReference morHost = oc.getObj(); - String name = (String)oc.getPropSet(0).getVal(); - + String name = (String)oc.getPropSet().get(0).getVal(); + hosts.add(new Pair(morHost, name)); } } return hosts; } - + public HashMap getVmVncPortsOnCluster() throws Exception { ObjectContent[] ocs = getVmPropertiesOnHyperHost( new String[] { "name", "config.extraConfig[\"RemoteDisplay.vnc.port\"]" } ); - + HashMap portInfo = new HashMap(); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if(objProps != null) { String name = null; String value = null; @@ -561,16 +563,16 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { } else { OptionValue optValue = (OptionValue)objProp.getVal(); value = (String)optValue.getValue(); - } + } } - + if(name != null && value != null) { portInfo.put(name, Integer.parseInt(value)); } } } } - + return portInfo; } } diff --git 
a/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java index 44de683bb05..08932c5e5ba 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java @@ -26,32 +26,32 @@ public class CustomFieldsManagerMO extends BaseMO { public CustomFieldsManagerMO(VmwareContext context, ManagedObjectReference mor) { super(context, mor); } - + public CustomFieldsManagerMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - - public CustomFieldDef addCustomerFieldDef(String fieldName, String morType, + + public CustomFieldDef addCustomerFieldDef(String fieldName, String morType, PrivilegePolicyDef fieldDefPolicy, PrivilegePolicyDef fieldPolicy) throws Exception { return _context.getService().addCustomFieldDef(getMor(), fieldName, morType, fieldDefPolicy, fieldPolicy); } - + public void removeCustomFieldDef(int key) throws Exception { _context.getService().removeCustomFieldDef(getMor(), key); } - + public void renameCustomFieldDef(int key, String name) throws Exception { _context.getService().renameCustomFieldDef(getMor(), key, name); } - + public void setField(ManagedObjectReference morEntity, int key, String value) throws Exception { _context.getService().setField(getMor(), morEntity, key, value); } - + public CustomFieldDef[] getFields() throws Exception { - return (CustomFieldDef[])_context.getServiceUtil().getDynamicProperty(getMor(), "field"); + return (CustomFieldDef[])_context.getVimClient().getDynamicProperty(getMor(), "field"); } - + public int getCustomFieldKey(String morType, String fieldName) throws Exception { CustomFieldDef[] fields = getFields(); if(fields != null) { @@ -62,12 +62,12 @@ public class CustomFieldsManagerMO extends BaseMO { } return 0; } - + public int ensureCustomFieldDef(String morType, String fieldName) 
throws Exception { int key = getCustomFieldKey(morType, fieldName); if(key > 0) return key; - + try { CustomFieldDef field = addCustomerFieldDef(fieldName, morType, null, null); return field.getKey(); @@ -75,10 +75,10 @@ public class CustomFieldsManagerMO extends BaseMO { // assuming that someone is adding it key = getCustomFieldKey(morType, fieldName); } - + if(key == 0) throw new Exception("Unable to setup custom field facility for " + morType + ":" + fieldName); - + return key; } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java index eb10e167732..4cb0a619401 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatacenterMO.java @@ -22,7 +22,6 @@ import java.util.List; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.Pair; -import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DVPortgroupConfigInfo; import com.vmware.vim25.DistributedVirtualSwitchPortConnection; @@ -34,59 +33,58 @@ import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; -import com.vmware.vim25.VirtualDeviceBackingInfo; import com.vmware.vim25.VirtualEthernetCardDistributedVirtualPortBackingInfo; -import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo; +import edu.emory.mathcs.backport.java.util.Arrays; public class DatacenterMO extends BaseMO { - + public DatacenterMO(VmwareContext context, ManagedObjectReference morDc) { super(context, morDc); } - + public DatacenterMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public DatacenterMO(VmwareContext context, String dcName) throws Exception { super(context, null); - - _mor = 
_context.getServiceUtil().getDecendentMoRef(_context.getRootFolder(), "Datacenter", dcName); + + _mor = _context.getVimClient().getDecendentMoRef(_context.getRootFolder(), "Datacenter", dcName); assert(_mor != null); } - - public String getName() throws Exception { - return (String)_context.getServiceUtil().getDynamicProperty(_mor, "name"); + + @Override + public String getName() throws Exception { + return (String)_context.getVimClient().getDynamicProperty(_mor, "name"); } - - public void registerTemplate(ManagedObjectReference morHost, String datastoreName, + + public void registerTemplate(ManagedObjectReference morHost, String datastoreName, String templateName, String templateFileName) throws Exception { - - ServiceUtil serviceUtil = _context.getServiceUtil(); - - ManagedObjectReference morFolder = (ManagedObjectReference)serviceUtil.getDynamicProperty( + + + ManagedObjectReference morFolder = (ManagedObjectReference)_context.getVimClient().getDynamicProperty( _mor, "vmFolder"); assert(morFolder != null); - - ManagedObjectReference morTask = _context.getService().registerVM_Task( - morFolder, + + ManagedObjectReference morTask = _context.getService().registerVMTask( + morFolder, String.format("[%s] %s/%s", datastoreName, templateName, templateFileName), - templateName, true, + templateName, true, null, morHost); - - String result = serviceUtil.waitForTask(morTask); - if (!result.equalsIgnoreCase("Sucess")) { + + boolean result = _context.getVimClient().waitForTask(morTask); + if (!result) { throw new Exception("Unable to register template due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } else { _context.waitForTaskProgressDone(morTask); } } - + public VirtualMachineMO findVm(String vmName) throws Exception { - ObjectContent[] ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name" }); - if(ocs != null && ocs.length > 0) { + List ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name" }); + if(ocs != null && ocs.size() > 0) { 
for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { for(DynamicProperty prop : props) { if(prop.getVal().toString().equals(vmName)) @@ -97,19 +95,19 @@ public class DatacenterMO extends BaseMO { } return null; } - + public List findVmByNameAndLabel(String vmLabel) throws Exception { - CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); int key = cfmMo.getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_UUID); assert(key != 0); - + List list = new ArrayList(); - - ObjectContent[] ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name", String.format("value[%d]", key)}); - if(ocs != null && ocs.length > 0) { + + List ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name", String.format("value[%d]", key)}); + if(ocs != null && ocs.size() > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { for(DynamicProperty prop : props) { if(prop.getVal() != null) { @@ -135,209 +133,217 @@ public class DatacenterMO extends BaseMO { public List> getAllVmsOnDatacenter() throws Exception { List> vms = new ArrayList>(); - - ObjectContent[] ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name" }); + + List ocs = getVmPropertiesOnDatacenterVmFolder(new String[] { "name" }); if(ocs != null) { for(ObjectContent oc : ocs) { - String vmName = oc.getPropSet(0).getVal().toString(); + String vmName = oc.getPropSet().get(0).getVal().toString(); vms.add(new Pair(oc.getObj(), vmName)); } } - + return vms; - } - + } + public ManagedObjectReference findDatastore(String name) throws Exception { assert(name != null); - - ObjectContent[] ocs = getDatastorePropertiesOnDatacenter(new String[] { "name" }); + + List ocs = getDatastorePropertiesOnDatacenter(new String[] { "name" 
}); if(ocs != null) { for(ObjectContent oc : ocs) { - if(oc.getPropSet(0).getVal().toString().equals(name)) { + if(oc.getPropSet().get(0).getVal().toString().equals(name)) { return oc.getObj(); } } } return null; } - + public ManagedObjectReference findHost(String name) throws Exception { - ObjectContent[] ocs= getHostPropertiesOnDatacenterHostFolder(new String[] { "name" }); - + List ocs= getHostPropertiesOnDatacenterHostFolder(new String[] { "name" }); + if(ocs != null) { for(ObjectContent oc : ocs) { - if(oc.getPropSet(0).getVal().toString().equals(name)) { + if(oc.getPropSet().get(0).getVal().toString().equals(name)) { return oc.getObj(); } } } return null; } - + public ManagedObjectReference getVmFolder() throws Exception { - return (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty(_mor, "vmFolder"); + return (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "vmFolder"); } - - public ObjectContent[] getHostPropertiesOnDatacenterHostFolder(String[] propertyPaths) throws Exception { + + public List getHostPropertiesOnDatacenterHostFolder(String[] propertyPaths) throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("HostSystem"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec computeResource2HostTraversal = new TraversalSpec(); computeResource2HostTraversal.setType("ComputeResource"); computeResource2HostTraversal.setPath("host"); computeResource2HostTraversal.setName("computeResource2HostTraversal"); - + SelectionSpec recurseFolders = new SelectionSpec(); recurseFolders.setName("folder2childEntity"); - + TraversalSpec folder2childEntity = new TraversalSpec(); folder2childEntity.setType("Folder"); folder2childEntity.setPath("childEntity"); folder2childEntity.setName(recurseFolders.getName()); - folder2childEntity.setSelectSet(new SelectionSpec[] { recurseFolders, computeResource2HostTraversal }); - + 
folder2childEntity.getSelectSet().add(recurseFolders); + folder2childEntity.getSelectSet().add(computeResource2HostTraversal); + TraversalSpec dc2HostFolderTraversal = new TraversalSpec(); dc2HostFolderTraversal.setType("Datacenter"); dc2HostFolderTraversal.setPath("hostFolder"); dc2HostFolderTraversal.setName("dc2HostFolderTraversal"); - dc2HostFolderTraversal.setSelectSet(new SelectionSpec[] { folder2childEntity } ); - + dc2HostFolderTraversal.getSelectSet().add(folder2childEntity); + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { dc2HostFolderTraversal }); + oSpec.getSelectSet().add(dc2HostFolderTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - return _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + return _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); + } - - public ObjectContent[] getDatastorePropertiesOnDatacenter(String[] propertyPaths) throws Exception { - + + public List getDatastorePropertiesOnDatacenter(String[] propertyPaths) throws Exception { + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec dc2DatastoreTraversal = new TraversalSpec(); dc2DatastoreTraversal.setType("Datacenter"); dc2DatastoreTraversal.setPath("datastore"); dc2DatastoreTraversal.setName("dc2DatastoreTraversal"); - + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { dc2DatastoreTraversal }); + 
oSpec.getSelectSet().add(dc2DatastoreTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - return _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + return _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); + } - - public ObjectContent[] getVmPropertiesOnDatacenterVmFolder(String[] propertyPaths) throws Exception { + + public List getVmPropertiesOnDatacenterVmFolder(String[] propertyPaths) throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("VirtualMachine"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec dc2VmFolderTraversal = new TraversalSpec(); dc2VmFolderTraversal.setType("Datacenter"); dc2VmFolderTraversal.setPath("vmFolder"); dc2VmFolderTraversal.setName("dc2VmFolderTraversal"); - + + SelectionSpec recurseFolders = new SelectionSpec(); recurseFolders.setName("folder2childEntity"); - + TraversalSpec folder2childEntity = new TraversalSpec(); folder2childEntity.setType("Folder"); folder2childEntity.setPath("childEntity"); folder2childEntity.setName(recurseFolders.getName()); - folder2childEntity.setSelectSet(new SelectionSpec[] { recurseFolders }); - dc2VmFolderTraversal.setSelectSet(new SelectionSpec[] { folder2childEntity } ); + folder2childEntity.getSelectSet().add(recurseFolders); + dc2VmFolderTraversal.getSelectSet().add(folder2childEntity); ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { dc2VmFolderTraversal }); + oSpec.getSelectSet().add(dc2VmFolderTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - 
pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - return _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + return _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr); } - - public static Pair getOwnerDatacenter(VmwareContext context, + + public static Pair getOwnerDatacenter(VmwareContext context, ManagedObjectReference morEntity) throws Exception { - + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datacenter"); - pSpec.setPathSet(new String[] { "name" }); - + pSpec.getPathSet().add("name"); + TraversalSpec entityParentTraversal = new TraversalSpec(); entityParentTraversal.setType("ManagedEntity"); entityParentTraversal.setPath("parent"); entityParentTraversal.setName("entityParentTraversal"); - entityParentTraversal.setSelectSet(new SelectionSpec[] { new SelectionSpec(null, null, "entityParentTraversal") }); + SelectionSpec selSpec = new SelectionSpec(); + selSpec.setName("entityParentTraversal"); + entityParentTraversal.getSelectSet().add(selSpec); ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(morEntity); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { entityParentTraversal }); + oSpec.getSelectSet().add(entityParentTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] ocs = context.getService().retrieveProperties( - context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - - assert(ocs != null); - assert(ocs[0].getObj() != null); - assert(ocs[0].getPropSet(0) != null); - assert(ocs[0].getPropSet(0).getVal() != null); - - String dcName = 
ocs[0].getPropSet(0).getVal().toString(); - return new Pair(new DatacenterMO(context, ocs[0].getObj()), dcName); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = context.getService().retrieveProperties( + context.getPropertyCollector(), pfSpecArr); + + assert(ocs != null && ocs.size() > 0); + assert(ocs.get(0).getObj() != null); + assert(ocs.get(0).getPropSet().get(0) != null); + assert(ocs.get(0).getPropSet().get(0).getVal() != null); + + String dcName = ocs.get(0).getPropSet().get(0).getVal().toString(); + return new Pair(new DatacenterMO(context, ocs.get(0).getObj()), dcName); } - + public ManagedObjectReference getDvPortGroupMor(String dvPortGroupName) throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("DistributedVirtualPortgroup"); - pSpec.setPathSet(new String[] {"name"}); - + pSpec.getPathSet().add("name"); + TraversalSpec datacenter2DvPortGroupTraversal = new TraversalSpec(); datacenter2DvPortGroupTraversal.setType("Datacenter"); datacenter2DvPortGroupTraversal.setPath("network"); datacenter2DvPortGroupTraversal.setName("datacenter2DvPortgroupTraversal"); - + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { datacenter2DvPortGroupTraversal }); + oSpec.getSelectSet().add(datacenter2DvPortGroupTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + if(ocs != 
null) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { for(DynamicProperty prop : props) { if(prop.getVal().equals(dvPortGroupName)) @@ -347,22 +353,23 @@ public class DatacenterMO extends BaseMO { } } return null; - } + } public boolean hasDvPortGroup(String dvPortGroupName) throws Exception { ManagedObjectReference morNetwork = getDvPortGroupMor(dvPortGroupName); if(morNetwork != null) return true; - return false; + return false; } - + public DVPortgroupConfigInfo getDvPortGroupSpec(String dvPortGroupName) throws Exception { DVPortgroupConfigInfo configSpec = null; String nameProperty = null; PropertySpec pSpec = new PropertySpec(); pSpec.setType("DistributedVirtualPortgroup"); - pSpec.setPathSet(new String[] {"name", "config"}); - + pSpec.getPathSet().add("name"); + pSpec.getPathSet().add("config"); + TraversalSpec datacenter2DvPortGroupTraversal = new TraversalSpec(); datacenter2DvPortGroupTraversal.setType("Datacenter"); datacenter2DvPortGroupTraversal.setPath("network"); @@ -371,21 +378,22 @@ public class DatacenterMO extends BaseMO { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { datacenter2DvPortGroupTraversal }); + oSpec.getSelectSet().add(datacenter2DvPortGroupTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + if(ocs != null) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = 
oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { - assert(props.length == 2); + assert(props.size() == 2); for(DynamicProperty prop : props) { if(prop.getName().equals("config")) { configSpec = (DVPortgroupConfigInfo) prop.getVal(); @@ -395,7 +403,7 @@ public class DatacenterMO extends BaseMO { } } if(nameProperty.equalsIgnoreCase(dvPortGroupName)) { - return configSpec; + return configSpec; } } } @@ -408,7 +416,8 @@ public class DatacenterMO extends BaseMO { ManagedObjectReference dvSwitchMor = null; PropertySpec pSpec = new PropertySpec(); pSpec.setType("DistributedVirtualPortgroup"); - pSpec.setPathSet(new String[] { "key", "config.distributedVirtualSwitch" }); + pSpec.getPathSet().add("key"); + pSpec.getPathSet().add("config.distributedVirtualSwitch"); TraversalSpec datacenter2DvPortGroupTraversal = new TraversalSpec(); datacenter2DvPortGroupTraversal.setType("Datacenter"); @@ -418,21 +427,22 @@ public class DatacenterMO extends BaseMO { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { datacenter2DvPortGroupTraversal }); + oSpec.getSelectSet().add(datacenter2DvPortGroupTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); if (ocs != null) { for (ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if (props != null) { - assert (props.length == 2); + assert (props.size() == 2); for (DynamicProperty prop : props) { if 
(prop.getName().equals("key")) { dvPortGroupKey = (String) prop.getVal(); @@ -441,7 +451,7 @@ public class DatacenterMO extends BaseMO { dvSwitchMor = (ManagedObjectReference) prop.getVal(); } } - if ((dvPortGroupKey != null) && dvPortGroupKey.equals(dvPortGroupMor.get_value())) { + if ((dvPortGroupKey != null) && dvPortGroupKey.equals(dvPortGroupMor.getValue())) { return dvSwitchMor; } } @@ -452,7 +462,7 @@ public class DatacenterMO extends BaseMO { public String getDvSwitchUuid(ManagedObjectReference dvSwitchMor) throws Exception { assert (dvSwitchMor != null); - return (String) _context.getServiceUtil().getDynamicProperty(dvSwitchMor, "uuid"); + return (String) _context.getVimClient().getDynamicProperty(dvSwitchMor, "uuid"); } public VirtualEthernetCardDistributedVirtualPortBackingInfo getDvPortBackingInfo(Pair networkInfo) @@ -464,7 +474,7 @@ public class DatacenterMO extends BaseMO { ManagedObjectReference dvsMor = getDvSwitchMor(networkInfo.first()); String dvSwitchUuid = getDvSwitchUuid(dvsMor); dvPortConnection.setSwitchUuid(dvSwitchUuid); - dvPortConnection.setPortgroupKey(networkInfo.first().get_value()); + dvPortConnection.setPortgroupKey(networkInfo.first().getValue()); dvPortBacking.setPort(dvPortConnection); System.out.println("Plugging NIC device into network " + networkInfo.second() + " backed by dvSwitch: " + dvSwitchUuid); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java index 57e83b9799b..c79605d08ab 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DatastoreMO.java @@ -36,95 +36,98 @@ import com.vmware.vim25.TraversalSpec; public class DatastoreMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(DatastoreMO.class); - - private String _name; - private Pair _ownerDc; - + + private String _name; + private Pair _ownerDc; + public DatastoreMO(VmwareContext 
context, ManagedObjectReference morDatastore) { super(context, morDatastore); } - + public DatastoreMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } public String getName() throws Exception { - if(_name == null) - _name = (String)_context.getServiceUtil().getDynamicProperty(_mor, "name"); - + if(_name == null) + _name = (String)_context.getVimClient().getDynamicProperty(_mor, "name"); + return _name; } - + public DatastoreSummary getSummary() throws Exception { - return (DatastoreSummary)_context.getServiceUtil().getDynamicProperty(_mor, "summary"); + return (DatastoreSummary)_context.getVimClient().getDynamicProperty(_mor, "summary"); } - + public HostDatastoreBrowserMO getHostDatastoreBrowserMO() throws Exception { - return new HostDatastoreBrowserMO(_context, - (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty(_mor, "browser")); + return new HostDatastoreBrowserMO(_context, + (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "browser")); } - + public String getInventoryPath() throws Exception { Pair dcInfo = getOwnerDatacenter(); return dcInfo.second() + "/" + getName(); } - + public Pair getOwnerDatacenter() throws Exception { if(_ownerDc != null) return _ownerDc; - + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datacenter"); - pSpec.setPathSet(new String[] { "name" }); - + pSpec.getPathSet().add("name"); + TraversalSpec folderParentTraversal = new TraversalSpec(); folderParentTraversal.setType("Folder"); folderParentTraversal.setPath("parent"); folderParentTraversal.setName("folderParentTraversal"); - folderParentTraversal.setSelectSet(new SelectionSpec[] { new SelectionSpec(null, null, "folderParentTraversal") }); - + SelectionSpec sSpec = new SelectionSpec(); + sSpec.setName("folderParentTraversal"); + folderParentTraversal.getSelectSet().add(sSpec); + TraversalSpec dsParentTraversal = new TraversalSpec(); dsParentTraversal.setType("Datastore"); 
dsParentTraversal.setPath("parent"); dsParentTraversal.setName("dsParentTraversal"); - dsParentTraversal.setSelectSet(new SelectionSpec[] { folderParentTraversal }); + dsParentTraversal.getSelectSet().add(folderParentTraversal); ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(getMor()); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { dsParentTraversal }); + oSpec.getSelectSet().add(dsParentTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - - assert(ocs != null); - assert(ocs[0].getObj() != null); - assert(ocs[0].getPropSet() != null); - String dcName = ocs[0].getPropSet()[0].getVal().toString(); - _ownerDc = new Pair(new DatacenterMO(_context, ocs[0].getObj()), dcName); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + + assert(ocs != null && ocs.size() > 0); + assert(ocs.get(0).getObj() != null); + assert(ocs.get(0).getPropSet() != null); + String dcName = ocs.get(0).getPropSet().get(0).getVal().toString(); + _ownerDc = new Pair(new DatacenterMO(_context, ocs.get(0).getObj()), dcName); return _ownerDc; } - + public void makeDirectory(String path, ManagedObjectReference morDc) throws Exception { String datastoreName = getName(); ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager(); - + String fullPath = path; - if(!DatastoreFile.isFullDatastorePath(fullPath)) + if(!DatastoreFile.isFullDatastorePath(fullPath)) fullPath = String.format("[%s] %s", datastoreName, path); - + _context.getService().makeDirectory(morFileManager, fullPath, morDc, true); } 
- + public boolean deleteFile(String path, ManagedObjectReference morDc, boolean testExistence) throws Exception { String datastoreName = getName(); ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager(); - + String fullPath = path; if(!DatastoreFile.isFullDatastorePath(fullPath)) fullPath = String.format("[%s] %s", datastoreName, path); @@ -136,12 +139,11 @@ public class DatastoreMO extends BaseMO { s_logger.info("Unable to test file existence due to exception " + e.getClass().getName() + ", skip deleting of it"); return true; } - - ManagedObjectReference morTask = _context.getService().deleteDatastoreFile_Task(morFileManager, - fullPath, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + + ManagedObjectReference morTask = _context.getService().deleteDatastoreFileTask(morFileManager, fullPath, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -149,29 +151,29 @@ public class DatastoreMO extends BaseMO { } return false; } - + public boolean copyDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, - ManagedObjectReference morDestDs, String destFilePath, ManagedObjectReference morDestDc, + ManagedObjectReference morDestDs, String destFilePath, ManagedObjectReference morDestDc, boolean forceOverwrite) throws Exception { - + String srcDsName = getName(); DatastoreMO destDsMo = new DatastoreMO(_context, morDestDs); String destDsName = destDsMo.getName(); - + ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager(); String srcFullPath = srcFilePath; if(!DatastoreFile.isFullDatastorePath(srcFullPath)) srcFullPath = String.format("[%s] %s", srcDsName, srcFilePath); - + String destFullPath = destFilePath; if(!DatastoreFile.isFullDatastorePath(destFullPath)) destFullPath = String.format("[%s] %s", destDsName, destFilePath); - - 
ManagedObjectReference morTask = _context.getService().copyDatastoreFile_Task(morFileManager, + + ManagedObjectReference morTask = _context.getService().copyDatastoreFileTask(morFileManager, srcFullPath, morSrcDc, destFullPath, morDestDc, forceOverwrite); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -179,29 +181,29 @@ public class DatastoreMO extends BaseMO { } return false; } - + public boolean moveDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, - ManagedObjectReference morDestDs, String destFilePath, ManagedObjectReference morDestDc, + ManagedObjectReference morDestDs, String destFilePath, ManagedObjectReference morDestDc, boolean forceOverwrite) throws Exception { - + String srcDsName = getName(); DatastoreMO destDsMo = new DatastoreMO(_context, morDestDs); String destDsName = destDsMo.getName(); - + ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager(); String srcFullPath = srcFilePath; if(!DatastoreFile.isFullDatastorePath(srcFullPath)) srcFullPath = String.format("[%s] %s", srcDsName, srcFilePath); - + String destFullPath = destFilePath; if(!DatastoreFile.isFullDatastorePath(destFullPath)) destFullPath = String.format("[%s] %s", destDsName, destFilePath); - - ManagedObjectReference morTask = _context.getService().moveDatastoreFile_Task(morFileManager, + + ManagedObjectReference morTask = _context.getService().moveDatastoreFileTask(morFileManager, srcFullPath, morSrcDc, destFullPath, morDestDc, forceOverwrite); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -209,23 +211,23 @@ public class DatastoreMO extends 
BaseMO { } return false; } - + public String[] getVmdkFileChain(String rootVmdkDatastoreFullPath) throws Exception { Pair dcPair = getOwnerDatacenter(); - + List files = new ArrayList(); files.add(rootVmdkDatastoreFullPath); - + String currentVmdkFullPath = rootVmdkDatastoreFullPath; while(true) { String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), currentVmdkFullPath); byte[] content = getContext().getResourceContent(url); if(content == null || content.length == 0) break; - + VmdkFileDescriptor descriptor = new VmdkFileDescriptor(); descriptor.parse(content); - + String parentFileName = descriptor.getParentFileName(); if(parentFileName == null) break; @@ -239,8 +241,8 @@ public class DatastoreMO extends BaseMO { dir = dir.substring(0, dir.lastIndexOf('/')); else dir = ""; - - currentVmdkFullPath = new DatastoreFile(dsFile.getDatastoreName(), dir, + + currentVmdkFullPath = new DatastoreFile(dsFile.getDatastoreName(), dir, parentFileName.substring(parentFileName.lastIndexOf('/') + 1)).getPath(); files.add(currentVmdkFullPath); } else { @@ -248,7 +250,7 @@ public class DatastoreMO extends BaseMO { files.add(currentVmdkFullPath); } } - + return files.toArray(new String[0]); } @@ -257,59 +259,59 @@ public class DatastoreMO extends BaseMO { String fullPath = path; if(!DatastoreFile.isFullDatastorePath(fullPath)) fullPath = String.format("[%s] %s", getName(), fullPath); - + Pair dcPair = getOwnerDatacenter(); String url = getContext().composeDatastoreBrowseUrl(dcPair.second(), fullPath); - + // TODO, VMware currently does not have a formal API to list Datastore directory content, // folloing hacking may have performance hit if datastore has a large number of files return _context.listDatastoreDirContent(url); } - + public boolean fileExists(String fileFullPath) throws Exception { DatastoreFile file = new DatastoreFile(fileFullPath); DatastoreFile dirFile = new DatastoreFile(file.getDatastoreName(), file.getDir()); - + HostDatastoreBrowserMO browserMo = 
getHostDatastoreBrowserMO(); - + s_logger.info("Search file " + file.getFileName() + " on " + dirFile.getPath()); HostDatastoreBrowserSearchResults results = browserMo.searchDatastore(dirFile.getPath(), file.getFileName(), true); if(results != null) { - FileInfo[] info = results.getFile(); - if(info != null && info.length > 0) { + List info = results.getFile(); + if(info != null && info.size() > 0) { s_logger.info("File " + fileFullPath + " exists on datastore"); return true; } } - + s_logger.info("File " + fileFullPath + " does not exist on datastore"); return false; -/* +/* String[] fileNames = listDirContent(dirFile.getPath()); - + String fileName = file.getFileName(); for(String name : fileNames) { if(name.equalsIgnoreCase(fileName)) return true; - } - + } + return false; -*/ +*/ } - + public boolean folderExists(String folderParentDatastorePath, String folderName) throws Exception { HostDatastoreBrowserMO browserMo = getHostDatastoreBrowserMO(); - + HostDatastoreBrowserSearchResults results = browserMo.searchDatastore(folderParentDatastorePath, folderName, true); if(results != null) { - FileInfo[] info = results.getFile(); - if(info != null && info.length > 0) { + List info = results.getFile(); + if(info != null && info.size() > 0) { s_logger.info("Folder " + folderName + " exists on datastore"); return true; } } - + s_logger.info("Folder " + folderName + " does not exist on datastore"); return false; } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreBrowserMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreBrowserMO.java index ec0a1810701..59e754c951d 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreBrowserMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreBrowserMO.java @@ -24,39 +24,39 @@ import com.vmware.vim25.HostDatastoreBrowserSearchSpec; import com.vmware.vim25.ManagedObjectReference; public class HostDatastoreBrowserMO extends BaseMO { - + private static final 
Logger s_logger = Logger.getLogger(HostDatastoreBrowserMO.class); - + public HostDatastoreBrowserMO(VmwareContext context, ManagedObjectReference morHostDatastoreBrowser) { super(context, morHostDatastoreBrowser); } - + public HostDatastoreBrowserMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public void DeleteFile(String datastoreFullPath) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - deleteFile(). target mor: " + _mor.get_value() + ", file datastore path: " + datastoreFullPath); - + s_logger.trace("vCenter API trace - deleteFile(). target mor: " + _mor.getValue() + ", file datastore path: " + datastoreFullPath); + _context.getService().deleteFile(_mor, datastoreFullPath); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - deleteFile() done"); } - + public HostDatastoreBrowserSearchResults searchDatastore(String datastorePath, HostDatastoreBrowserSearchSpec searchSpec) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - searchDatastore(). target mor: " + _mor.get_value() + ", file datastore path: " + datastorePath); + s_logger.trace("vCenter API trace - searchDatastore(). 
target mor: " + _mor.getValue() + ", file datastore path: " + datastorePath); try { - ManagedObjectReference morTask = _context.getService().searchDatastore_Task(_mor, datastorePath, searchSpec); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().searchDatastoreTask(_mor, datastorePath, searchSpec); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); - - return (HostDatastoreBrowserSearchResults)_context.getServiceUtil().getDynamicProperty(morTask, "info.result"); + + return (HostDatastoreBrowserSearchResults)_context.getVimClient().getDynamicProperty(morTask, "info.result"); } else { s_logger.error("VMware searchDaastore_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } @@ -64,30 +64,30 @@ public class HostDatastoreBrowserMO extends BaseMO { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - searchDatastore() done"); } - + return null; } - + public HostDatastoreBrowserSearchResults searchDatastore(String datastorePath, String fileName, boolean caseInsensitive) throws Exception { HostDatastoreBrowserSearchSpec spec = new HostDatastoreBrowserSearchSpec(); spec.setSearchCaseInsensitive(caseInsensitive); - spec.setMatchPattern(new String[] { fileName }); + spec.getMatchPattern().add(fileName); return searchDatastore(datastorePath, spec); } - + public HostDatastoreBrowserSearchResults searchDatastoreSubFolders(String datastorePath, HostDatastoreBrowserSearchSpec searchSpec) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - searchDatastoreSubFolders(). target mor: " + _mor.get_value() + ", file datastore path: " + datastorePath); + s_logger.trace("vCenter API trace - searchDatastoreSubFolders(). 
target mor: " + _mor.getValue() + ", file datastore path: " + datastorePath); try { - ManagedObjectReference morTask = _context.getService().searchDatastoreSubFolders_Task(_mor, datastorePath, searchSpec); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().searchDatastoreSubFoldersTask(_mor, datastorePath, searchSpec); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); - - return (HostDatastoreBrowserSearchResults)_context.getServiceUtil().getDynamicProperty(morTask, "info.result"); + + return (HostDatastoreBrowserSearchResults)_context.getVimClient().getDynamicProperty(morTask, "info.result"); } else { s_logger.error("VMware searchDaastoreSubFolders_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } @@ -95,14 +95,14 @@ public class HostDatastoreBrowserMO extends BaseMO { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - searchDatastore() done"); } - + return null; } - + public HostDatastoreBrowserSearchResults searchDatastoreSubFolders(String datastorePath, String folderName, boolean caseInsensitive) throws Exception { HostDatastoreBrowserSearchSpec spec = new HostDatastoreBrowserSearchSpec(); spec.setSearchCaseInsensitive(caseInsensitive); - spec.setMatchPattern(new String[] { folderName }); + spec.getMatchPattern().add(folderName); return searchDatastore(datastorePath, spec); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java index d94102bb2fb..7d967a9b532 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java @@ -17,6 +17,9 @@ package com.cloud.hypervisor.vmware.mo; import java.net.URI; +import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.List; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.CustomFieldStringValue; @@ -37,27 +40,27 @@ public class HostDatastoreSystemMO extends BaseMO { public HostDatastoreSystemMO(VmwareContext context, ManagedObjectReference morHostDatastore) { super(context, morHostDatastore); } - + public HostDatastoreSystemMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public ManagedObjectReference findDatastore(String name) throws Exception { // added cloud.com specific name convention, we will use custom field "cloud.uuid" as datastore name as well - CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, + CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(_context, _context.getServiceContent().getCustomFieldsManager()); int key = cfmMo.getCustomFieldKey("Datastore", CustomFieldConstants.CLOUD_UUID); assert(key != 0); - ObjectContent[] ocs = getDatastorePropertiesOnHostDatastoreSystem( + List ocs = getDatastorePropertiesOnHostDatastoreSystem( new String[] { "name", String.format("value[%d]", key) }); if(ocs != null) { for(ObjectContent oc : ocs) { - if(oc.getPropSet(0).getVal().equals(name)) + if(oc.getPropSet().get(0).getVal().equals(name)) return oc.getObj(); - - if(oc.getPropSet().length > 1) { - DynamicProperty prop = oc.getPropSet(1); + + if(oc.getPropSet().size() > 1) { + DynamicProperty prop = oc.getPropSet().get(1); if(prop != null && prop.getVal() != null) { if(prop.getVal() instanceof CustomFieldStringValue) { String val = ((CustomFieldStringValue)prop.getVal()).getValue(); @@ -70,11 +73,11 @@ public class HostDatastoreSystemMO extends BaseMO { } return null; } - + // storeUrl in nfs://host/exportpath format public ManagedObjectReference findDatastoreByUrl(String storeUrl) throws Exception { assert(storeUrl != null); - + ManagedObjectReference[] datastores = getDatastores(); if(datastores != null && datastores.length > 0) { 
for(ManagedObjectReference morDatastore : datastores) { @@ -87,7 +90,7 @@ public class HostDatastoreSystemMO extends BaseMO { } } } - + return null; } @@ -95,30 +98,30 @@ public class HostDatastoreSystemMO extends BaseMO { // we should be able to find the datastore by name public ManagedObjectReference findDatastoreByExportPath(String exportPath) throws Exception { assert(exportPath != null); - + ManagedObjectReference[] datastores = getDatastores(); if(datastores != null && datastores.length > 0) { for(ManagedObjectReference morDatastore : datastores) { DatastoreMO dsMo = new DatastoreMO(_context, morDatastore); - if(dsMo.getInventoryPath().equals(exportPath)) + if(dsMo.getInventoryPath().equals(exportPath)) return morDatastore; - + NasDatastoreInfo info = getNasDatastoreInfo(morDatastore); if(info != null) { String vmwareUrl = info.getUrl(); if(vmwareUrl.charAt(vmwareUrl.length() - 1) == '/') vmwareUrl = vmwareUrl.substring(0, vmwareUrl.length() - 1); - + URI uri = new URI(vmwareUrl); if(uri.getPath().equals("/" + exportPath)) return morDatastore; } } } - + return null; } - + public boolean deleteDatastore(String name) throws Exception { ManagedObjectReference morDatastore = findDatastore(name); if(morDatastore != null) { @@ -127,59 +130,60 @@ public class HostDatastoreSystemMO extends BaseMO { } return false; } - - public ManagedObjectReference createNfsDatastore(String host, int port, + + public ManagedObjectReference createNfsDatastore(String host, int port, String exportPath, String uuid) throws Exception { - + HostNasVolumeSpec spec = new HostNasVolumeSpec(); spec.setRemoteHost(host); spec.setRemotePath(exportPath); spec.setType("nfs"); spec.setLocalPath(uuid); - + // readOnly/readWrite spec.setAccessMode("readWrite"); return _context.getService().createNasDatastore(_mor, spec); } - + public ManagedObjectReference[] getDatastores() throws Exception { - return (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty( + return 
(ManagedObjectReference[])_context.getVimClient().getDynamicProperty( _mor, "datastore"); } - + public DatastoreInfo getDatastoreInfo(ManagedObjectReference morDatastore) throws Exception { - return (DatastoreInfo)_context.getServiceUtil().getDynamicProperty(morDatastore, "info"); + return (DatastoreInfo)_context.getVimClient().getDynamicProperty(morDatastore, "info"); } - + public NasDatastoreInfo getNasDatastoreInfo(ManagedObjectReference morDatastore) throws Exception { - DatastoreInfo info = (DatastoreInfo)_context.getServiceUtil().getDynamicProperty(morDatastore, "info"); + DatastoreInfo info = (DatastoreInfo)_context.getVimClient().getDynamicProperty(morDatastore, "info"); if(info instanceof NasDatastoreInfo) return (NasDatastoreInfo)info; return null; } - - public ObjectContent[] getDatastorePropertiesOnHostDatastoreSystem(String[] propertyPaths) throws Exception { - + + public List getDatastorePropertiesOnHostDatastoreSystem(String[] propertyPaths) throws Exception { + PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec hostDsSys2DatastoreTraversal = new TraversalSpec(); hostDsSys2DatastoreTraversal.setType("HostDatastoreSystem"); hostDsSys2DatastoreTraversal.setPath("datastore"); hostDsSys2DatastoreTraversal.setName("hostDsSys2DatastoreTraversal"); - + ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { hostDsSys2DatastoreTraversal }); + oSpec.getSelectSet().add(hostDsSys2DatastoreTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + return _context.getService().retrieveProperties( - 
_context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + _context.getPropertyCollector(), pfSpecArr); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostFirewallSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostFirewallSystemMO.java index 2d2b35390ce..df159261c43 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostFirewallSystemMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostFirewallSystemMO.java @@ -29,27 +29,27 @@ public class HostFirewallSystemMO extends BaseMO { public HostFirewallSystemMO(VmwareContext context, ManagedObjectReference morFirewallSystem) { super(context, morFirewallSystem); } - + public HostFirewallSystemMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public HostFirewallInfo getFirewallInfo() throws Exception { - return (HostFirewallInfo)_context.getServiceUtil().getDynamicProperty(_mor, "firewallInfo"); + return (HostFirewallInfo)_context.getVimClient().getDynamicProperty(_mor, "firewallInfo"); } - + public void updateDefaultPolicy(HostFirewallDefaultPolicy policy) throws Exception { _context.getService().updateDefaultPolicy(_mor, policy); } - + public void enableRuleset(String rulesetName) throws Exception { _context.getService().enableRuleset(_mor, rulesetName); } - + public void disableRuleset(String rulesetName) throws Exception { _context.getService().disableRuleset(_mor, rulesetName); } - + public void refreshFirewall() throws Exception { _context.getService().refreshFirewall(_mor); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java index a765b42fd78..c164cc22f23 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java @@ -27,8 +27,8 @@ import com.cloud.hypervisor.vmware.util.VmwareContext; import 
com.cloud.hypervisor.vmware.util.VmwareHelper; import com.cloud.utils.Pair; import com.google.gson.Gson; -import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.AboutInfo; +import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.ClusterDasConfigInfo; import com.vmware.vim25.ComputeResourceSummary; import com.vmware.vim25.DatastoreSummary; @@ -55,63 +55,64 @@ import com.vmware.vim25.ObjectSpec; import com.vmware.vim25.OptionValue; import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; -import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualNicManagerNetConfig; import com.vmware.vim25.NasDatastoreInfo; +import edu.emory.mathcs.backport.java.util.Arrays; + public class HostMO extends BaseMO implements VmwareHypervisorHost { private static final Logger s_logger = Logger.getLogger(HostMO.class); Map _vmCache = new HashMap(); - + public HostMO (VmwareContext context, ManagedObjectReference morHost) { super(context, morHost); } - + public HostMO (VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public HostHardwareSummary getHostHardwareSummary() throws Exception { HostConnectInfo hostInfo = _context.getService().queryHostConnectionInfo(_mor); HostHardwareSummary hardwareSummary = hostInfo.getHost().getHardware(); return hardwareSummary; } - + public HostConfigManager getHostConfigManager() throws Exception { - return (HostConfigManager)_context.getServiceUtil().getDynamicProperty(_mor, "configManager"); + return (HostConfigManager)_context.getVimClient().getDynamicProperty(_mor, "configManager"); } - + public VirtualNicManagerNetConfig[] getHostVirtualNicManagerNetConfig() throws Exception { - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getServiceUtil().getDynamicProperty(_mor, + VirtualNicManagerNetConfig[] netConfigs = 
(VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); - return netConfigs; + return netConfigs; } - + public HostIpRouteEntry[] getHostIpRouteEntries() throws Exception { - HostIpRouteEntry[] entries = (HostIpRouteEntry[])_context.getServiceUtil().getDynamicProperty(_mor, + HostIpRouteEntry[] entries = (HostIpRouteEntry[])_context.getVimClient().getDynamicProperty(_mor, "config.network.routeTableInfo.ipRoute"); - return entries; + return entries; } - + public HostListSummaryQuickStats getHostQuickStats() throws Exception { - return (HostListSummaryQuickStats)_context.getServiceUtil().getDynamicProperty(_mor, "summary.quickStats"); + return (HostListSummaryQuickStats)_context.getVimClient().getDynamicProperty(_mor, "summary.quickStats"); } - + public HostHyperThreadScheduleInfo getHostHyperThreadInfo() throws Exception { - return (HostHyperThreadScheduleInfo)_context.getServiceUtil().getDynamicProperty(_mor, "config.hyperThread"); + return (HostHyperThreadScheduleInfo)_context.getVimClient().getDynamicProperty(_mor, "config.hyperThread"); } - + public HostNetworkInfo getHostNetworkInfo() throws Exception { - return (HostNetworkInfo)_context.getServiceUtil().getDynamicProperty(_mor, "config.network"); + return (HostNetworkInfo)_context.getVimClient().getDynamicProperty(_mor, "config.network"); } - + public HostPortGroupSpec getHostPortGroupSpec(String portGroupName) throws Exception { - + HostNetworkInfo hostNetInfo = getHostNetworkInfo(); - - HostPortGroup[] portGroups = hostNetInfo.getPortgroup(); + + List portGroups = hostNetInfo.getPortgroup(); if(portGroups != null) { for(HostPortGroup portGroup : portGroups) { HostPortGroupSpec spec = portGroup.getSpec(); @@ -119,15 +120,15 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return spec; } } - + return null; } - + @Override public String getHyperHostName() throws Exception { return getName(); } - + @Override public 
ClusterDasConfigInfo getDasConfig() throws Exception { ManagedObjectReference morParent = getParentMor(); @@ -135,10 +136,10 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { ClusterMO clusterMo = new ClusterMO(_context, morParent); return clusterMo.getDasConfig(); } - + return null; } - + @Override public String getHyperHostDefaultGateway() throws Exception { HostIpRouteEntry[] entries = getHostIpRouteEntries(); @@ -146,20 +147,20 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { if(entry.getNetwork().equalsIgnoreCase("0.0.0.0")) return entry.getGateway(); } - + throw new Exception("Could not find host default gateway, host is not properly configured?"); } - + public HostDatastoreSystemMO getHostDatastoreSystemMO() throws Exception { return new HostDatastoreSystemMO(_context, - (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty( + (ManagedObjectReference)_context.getVimClient().getDynamicProperty( _mor, "configManager.datastoreSystem") ); } - + public HostDatastoreBrowserMO getHostDatastoreBrowserMO() throws Exception { return new HostDatastoreBrowserMO(_context, - (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty( + (ManagedObjectReference)_context.getVimClient().getDynamicProperty( _mor, "datastoreBrowser") ); } @@ -168,7 +169,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] { "name"} ); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if(objProps != null) { for(DynamicProperty objProp : objProps) { if(objProp.getVal().toString().equals(datastoreName)) @@ -179,22 +180,22 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } return null; } - + public HostNetworkSystemMO getHostNetworkSystemMO() throws Exception { HostConfigManager configMgr = getHostConfigManager(); return 
new HostNetworkSystemMO(_context, configMgr.getNetworkSystem()); } - + public HostFirewallSystemMO getHostFirewallSystemMO() throws Exception { HostConfigManager configMgr = getHostConfigManager(); ManagedObjectReference morFirewall = configMgr.getFirewallSystem(); - + // only ESX hosts have firewall manager if(morFirewall != null) return new HostFirewallSystemMO(_context, morFirewall); return null; } - + @Override public ManagedObjectReference getHyperHostDatacenter() throws Exception { Pair dcPair = DatacenterMO.getOwnerDatacenter(getContext(), getMor()); @@ -204,43 +205,40 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { @Override public ManagedObjectReference getHyperHostOwnerResourcePool() throws Exception { - ServiceUtil serviceUtil = _context.getServiceUtil(); - ManagedObjectReference morComputerResource = (ManagedObjectReference)serviceUtil.getDynamicProperty(_mor, "parent"); - return (ManagedObjectReference)serviceUtil.getDynamicProperty(morComputerResource, "resourcePool"); + ManagedObjectReference morComputerResource = (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "parent"); + return (ManagedObjectReference)_context.getVimClient().getDynamicProperty(morComputerResource, "resourcePool"); } @Override public ManagedObjectReference getHyperHostCluster() throws Exception { - ServiceUtil serviceUtil = _context.getServiceUtil(); - ManagedObjectReference morParent = (ManagedObjectReference)serviceUtil.getDynamicProperty(_mor, "parent"); - + ManagedObjectReference morParent = (ManagedObjectReference)_context.getVimClient().getDynamicProperty(_mor, "parent"); + if(morParent.getType().equalsIgnoreCase("ClusterComputeResource")) { return morParent; } - + assert(false); throw new Exception("Standalone host is not supported"); } - + public ManagedObjectReference[] getHostLocalDatastore() throws Exception { - ServiceUtil serviceUtil = _context.getServiceUtil(); - ManagedObjectReference[] datastores = 
(ManagedObjectReference[])serviceUtil.getDynamicProperty( + ManagedObjectReference[] datastores = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty( _mor, "datastore"); List l = new ArrayList(); if(datastores != null) { for(ManagedObjectReference mor : datastores) { - DatastoreSummary summary = (DatastoreSummary)serviceUtil.getDynamicProperty(mor, "summary"); - if(summary.getType().equalsIgnoreCase("VMFS") && !summary.getMultipleHostAccess()) + DatastoreSummary summary = (DatastoreSummary)_context.getVimClient().getDynamicProperty(mor, "summary"); + if(summary.getType().equalsIgnoreCase("VMFS") && !summary.isMultipleHostAccess()) l.add(mor); } } return l.toArray(new ManagedObjectReference[1]); } - + public HostVirtualSwitch getHostVirtualSwitchByName(String name) throws Exception { - HostVirtualSwitch[] switches = (HostVirtualSwitch[])_context.getServiceUtil().getDynamicProperty( + HostVirtualSwitch[] switches = (HostVirtualSwitch[])_context.getVimClient().getDynamicProperty( _mor, "config.network.vswitch"); - + if(switches != null) { for(HostVirtualSwitch vswitch : switches) { if(vswitch.getName().equals(name)) @@ -249,44 +247,44 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } return null; } - + public HostVirtualSwitch[] getHostVirtualSwitch() throws Exception { - return (HostVirtualSwitch[])_context.getServiceUtil().getDynamicProperty(_mor, "config.network.vswitch"); + return (HostVirtualSwitch[])_context.getVimClient().getDynamicProperty(_mor, "config.network.vswitch"); } - + public AboutInfo getHostAboutInfo() throws Exception { - return (AboutInfo)_context.getServiceUtil().getDynamicProperty(_mor, "config.product"); + return (AboutInfo)_context.getVimClient().getDynamicProperty(_mor, "config.product"); } - + public VmwareHostType getHostType() throws Exception { AboutInfo aboutInfo = getHostAboutInfo(); if("VMware ESXi".equals(aboutInfo.getName())) return VmwareHostType.ESXi; else if("VMware 
ESX".equals(aboutInfo.getName())) return VmwareHostType.ESX; - + throw new Exception("Unrecognized VMware host type " + aboutInfo.getName()); } - + // default virtual switch is which management network residents on public HostVirtualSwitch getHostDefaultVirtualSwitch() throws Exception { String managementPortGroup = getPortGroupNameByNicType(HostVirtualNicType.management); if(managementPortGroup != null) return getPortGroupVirtualSwitch(managementPortGroup); - + return null; } - + public HostVirtualSwitch getPortGroupVirtualSwitch(String portGroupName) throws Exception { String vSwitchName = getPortGroupVirtualSwitchName(portGroupName); if(vSwitchName != null) return getVirtualSwitchByName(vSwitchName); - + return null; } - + public HostVirtualSwitch getVirtualSwitchByName(String vSwitchName) throws Exception { - + HostVirtualSwitch[] vSwitchs = getHostVirtualSwitch(); if(vSwitchs != null) { for(HostVirtualSwitch vSwitch: vSwitchs) { @@ -294,13 +292,13 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return vSwitch; } } - + return null; } - + public String getPortGroupVirtualSwitchName(String portGroupName) throws Exception { HostNetworkInfo hostNetInfo = getHostNetworkInfo(); - HostPortGroup[] portGroups = hostNetInfo.getPortgroup(); + List portGroups = hostNetInfo.getPortgroup(); if(portGroups != null) { for(HostPortGroup portGroup : portGroups) { HostPortGroupSpec spec = portGroup.getSpec(); @@ -308,13 +306,13 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return spec.getVswitchName(); } } - + return null; } - + public HostPortGroupSpec getPortGroupSpec(String portGroupName) throws Exception { HostNetworkInfo hostNetInfo = getHostNetworkInfo(); - HostPortGroup[] portGroups = hostNetInfo.getPortgroup(); + List portGroups = hostNetInfo.getPortgroup(); if(portGroups != null) { for(HostPortGroup portGroup : portGroups) { HostPortGroupSpec spec = portGroup.getSpec(); @@ -322,20 +320,20 @@ public class HostMO extends 
BaseMO implements VmwareHypervisorHost { return spec; } } - + return null; } - + public String getPortGroupNameByNicType(HostVirtualNicType nicType) throws Exception { assert(nicType != null); - - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getServiceUtil().getDynamicProperty(_mor, + + VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); - + if(netConfigs != null) { for(VirtualNicManagerNetConfig netConfig : netConfigs) { if(netConfig.getNicType().equals(nicType.toString())) { - HostVirtualNic[] nics = netConfig.getCandidateVnic(); + List nics = netConfig.getCandidateVnic(); if(nics != null) { for(HostVirtualNic nic : nics) { return nic.getPortgroup(); @@ -344,36 +342,36 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } } } - + if(nicType == HostVirtualNicType.management) { // ESX management network is configured in service console HostNetworkInfo netInfo = getHostNetworkInfo(); assert(netInfo != null); - HostVirtualNic[] nics = netInfo.getConsoleVnic(); + List nics = netInfo.getConsoleVnic(); if(nics != null) { for(HostVirtualNic nic : nics) { return nic.getPortgroup(); } } } - + return null; } - + public boolean hasPortGroup(HostVirtualSwitch vSwitch, String portGroupName) throws Exception { ManagedObjectReference morNetwork = getNetworkMor(portGroupName); if(morNetwork != null) return true; return false; } - + public void createPortGroup(HostVirtualSwitch vSwitch, String portGroupName, Integer vlanId, HostNetworkSecurityPolicy secPolicy, HostNetworkTrafficShapingPolicy shapingPolicy) throws Exception { assert(portGroupName != null); HostNetworkSystemMO hostNetMo = getHostNetworkSystemMO(); assert(hostNetMo != null); - + HostPortGroupSpec spec = new HostPortGroupSpec(); - + spec.setName(portGroupName); if(vlanId != null) spec.setVlanId(vlanId.intValue()); @@ -385,14 +383,14 @@ public class 
HostMO extends BaseMO implements VmwareHypervisorHost { spec.setVswitchName(vSwitch.getName()); hostNetMo.addPortGroup(spec); } - + public void updatePortGroup(HostVirtualSwitch vSwitch, String portGroupName, Integer vlanId, HostNetworkSecurityPolicy secPolicy, HostNetworkTrafficShapingPolicy shapingPolicy) throws Exception { assert(portGroupName != null); HostNetworkSystemMO hostNetMo = getHostNetworkSystemMO(); assert(hostNetMo != null); - + HostPortGroupSpec spec = new HostPortGroupSpec(); - + spec.setName(portGroupName); if(vlanId != null) spec.setVlanId(vlanId.intValue()); @@ -404,19 +402,19 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { spec.setVswitchName(vSwitch.getName()); hostNetMo.updatePortGroup(portGroupName, spec); } - + public void deletePortGroup(String portGroupName) throws Exception { assert(portGroupName != null); HostNetworkSystemMO hostNetMo = getHostNetworkSystemMO(); assert(hostNetMo != null); - hostNetMo.removePortGroup(portGroupName); + hostNetMo.removePortGroup(portGroupName); } - + public ManagedObjectReference getNetworkMor(String portGroupName) throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("Network"); - pSpec.setPathSet(new String[] {"summary.name"}); - + pSpec.getPathSet().add("summary.name"); + TraversalSpec host2NetworkTraversal = new TraversalSpec(); host2NetworkTraversal.setType("HostSystem"); host2NetworkTraversal.setPath("network"); @@ -425,19 +423,20 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { host2NetworkTraversal }); + oSpec.getSelectSet().add(host2NetworkTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] ocs = _context.getService().retrieveProperties( - 
_context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + if(ocs != null) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { for(DynamicProperty prop : props) { if(prop.getVal().equals(portGroupName)) @@ -448,62 +447,62 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } return null; } - + public ManagedObjectReference[] getVmMorsOnNetwork(String portGroupName) throws Exception { ManagedObjectReference morNetwork = getNetworkMor(portGroupName); if(morNetwork != null) - return (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(morNetwork, "vm"); + return (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(morNetwork, "vm"); return null; } - + public String getHostName() throws Exception { - return (String)_context.getServiceUtil().getDynamicProperty(_mor, "name"); + return (String)_context.getVimClient().getDynamicProperty(_mor, "name"); } @Override public synchronized VirtualMachineMO findVmOnHyperHost(String name) throws Exception { if(s_logger.isDebugEnabled()) s_logger.debug("find VM " + name + " on host"); - + VirtualMachineMO vmMo = _vmCache.get(name); if(vmMo != null) { if(s_logger.isDebugEnabled()) s_logger.debug("VM " + name + " found in host cache"); return vmMo; } - + loadVmCache(); return _vmCache.get(name); } - + private void loadVmCache() throws Exception { if(s_logger.isDebugEnabled()) s_logger.debug("load VM cache on host"); - + _vmCache.clear(); - + ObjectContent[] ocs = getVmPropertiesOnHyperHost(new String[] { "name" }); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - String vmName = oc.getPropSet()[0].getVal().toString(); - 
+ String vmName = oc.getPropSet().get(0).getVal().toString(); + if(s_logger.isTraceEnabled()) s_logger.trace("put " + vmName + " into host cache"); - + _vmCache.put(vmName, new VirtualMachineMO(_context, oc.getObj())); } } } - + @Override public VirtualMachineMO findVmOnPeerHyperHost(String name) throws Exception { ManagedObjectReference morParent = getParentMor(); - + if(morParent.getType().equals("ClusterComputeResource")) { ClusterMO clusterMo = new ClusterMO(_context, morParent); return clusterMo.findVmOnHyperHost(name); } else { - // we don't support standalone host, all hosts have to be managed by + // we don't support standalone host, all hosts have to be managed by // a cluster within vCenter assert(false); return null; @@ -515,30 +514,30 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { assert(vmSpec != null); DatacenterMO dcMo = new DatacenterMO(_context, getHyperHostDatacenter()); ManagedObjectReference morPool = getHyperHostOwnerResourcePool(); - - ManagedObjectReference morTask = _context.getService().createVM_Task( + + ManagedObjectReference morTask = _context.getService().createVMTask( dcMo.getVmFolder(), vmSpec, morPool, _mor); - String result = _context.getServiceUtil().waitForTask(morTask); - - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { s_logger.error("VMware createVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } - + return false; } - + public HashMap getVmVncPortsOnHost() throws Exception { ObjectContent[] ocs = getVmPropertiesOnHyperHost( new String[] { "name", "config.extraConfig[\"RemoteDisplay.vnc.port\"]" } ); - + HashMap portInfo = new HashMap(); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if(objProps != null) { String name = null; String value = null; @@ 
-548,27 +547,28 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } else { OptionValue optValue = (OptionValue)objProp.getVal(); value = (String)optValue.getValue(); - } + } } - + if(name != null && value != null) { portInfo.put(name, Integer.parseInt(value)); } } } } - + return portInfo; } - - public ObjectContent[] getVmPropertiesOnHyperHost(String[] propertyPaths) throws Exception { + + @Override + public ObjectContent[] getVmPropertiesOnHyperHost(String[] propertyPaths) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - retrieveProperties() for VM properties. target MOR: " + _mor.get_value() + ", properties: " + new Gson().toJson(propertyPaths)); - + s_logger.trace("vCenter API trace - retrieveProperties() for VM properties. target MOR: " + _mor.getValue() + ", properties: " + new Gson().toJson(propertyPaths)); + PropertySpec pSpec = new PropertySpec(); pSpec.setType("VirtualMachine"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec host2VmTraversal = new TraversalSpec(); host2VmTraversal.setType("HostSystem"); host2VmTraversal.setPath("vm"); @@ -577,30 +577,31 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { host2VmTraversal }); + oSpec.getSelectSet().add(host2VmTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] properties = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List properties = _context.getService().retrieveProperties( + 
_context.getPropertyCollector(), pfSpecArr); if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - retrieveProperties() done"); - return properties; + return properties.toArray(new ObjectContent[properties.size()]); } @Override public ObjectContent[] getDatastorePropertiesOnHyperHost(String[] propertyPaths) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - retrieveProperties() on Datastore properties. target MOR: " + _mor.get_value() + ", properties: " + new Gson().toJson(propertyPaths)); + s_logger.trace("vCenter API trace - retrieveProperties() on Datastore properties. target MOR: " + _mor.getValue() + ", properties: " + new Gson().toJson(propertyPaths)); PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); - pSpec.setPathSet(propertyPaths); - + pSpec.getPathSet().addAll(Arrays.asList(propertyPaths)); + TraversalSpec host2DatastoreTraversal = new TraversalSpec(); host2DatastoreTraversal.setType("HostSystem"); host2DatastoreTraversal.setPath("datastore"); @@ -609,87 +610,88 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { host2DatastoreTraversal }); + oSpec.getSelectSet().add(host2DatastoreTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ObjectContent[] properties = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List properties = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - retrieveProperties() 
done"); - return properties; + return properties.toArray(new ObjectContent[properties.size()]); } - + public List> getDatastoreMountsOnHost() throws Exception { List> mounts = new ArrayList>(); - + ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] { - String.format("host[\"%s\"].mountInfo.path", _mor.get_value()) }); + String.format("host[\"%s\"].mountInfo.path", _mor.getValue()) }); if(ocs != null) { for(ObjectContent oc : ocs) { Pair mount = new Pair( - oc.getObj(), oc.getPropSet(0).getVal().toString()); + oc.getObj(), oc.getPropSet().get(0).getVal().toString()); mounts.add(mount); } } return mounts; } - + public List> getLocalDatastoreOnHost() throws Exception { List> dsList = new ArrayList>(); - + ObjectContent[] ocs = getDatastorePropertiesOnHyperHost(new String[] { "name", "summary" }); if(ocs != null) { for(ObjectContent oc : ocs) { DatastoreSummary dsSummary = (DatastoreSummary)VmwareHelper.getPropValue(oc, "summary"); - if(dsSummary.getMultipleHostAccess() == false && dsSummary.isAccessible() && dsSummary.getType().equalsIgnoreCase("vmfs")) { + if(dsSummary.isMultipleHostAccess() == false && dsSummary.isAccessible() && dsSummary.getType().equalsIgnoreCase("vmfs")) { ManagedObjectReference morDs = oc.getObj(); String name = (String)VmwareHelper.getPropValue(oc, "name"); - + dsList.add(new Pair(morDs, name)); } } } return dsList; } - + public void importVmFromOVF(String ovfFilePath, String vmName, String datastoreName, String diskOption) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - importVmFromOVF(). target MOR: " + _mor.get_value() + ", ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + s_logger.trace("vCenter API trace - importVmFromOVF(). 
target MOR: " + _mor.getValue() + ", ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ",datastoreName: " + datastoreName + ", diskOption: " + diskOption); - + DatastoreMO dsMo = getHostDatastoreMO(datastoreName); if(dsMo == null) throw new Exception("Invalid datastore name: " + datastoreName); - + importVmFromOVF(ovfFilePath, vmName, dsMo, diskOption); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - importVmFromOVF() done"); } - + @Override public void importVmFromOVF(String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption) throws Exception { ManagedObjectReference morRp = getHyperHostOwnerResourcePool(); assert(morRp != null); - + HypervisorHostHelper.importVmFromOVF(this, ovfFilePath, vmName, dsMo, diskOption, morRp, _mor); } - + @Override public boolean createBlankVm(String vmName, int cpuCount, int cpuSpeedMHz, int cpuReservedMHz, boolean limitCpuUse, int memoryMB, int memoryReserveMB, String guestOsIdentifier, ManagedObjectReference morDs, boolean snapshotDirToParent) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - createBlankVm(). target MOR: " + _mor.get_value() + ", vmName: " + vmName + ", cpuCount: " + cpuCount - + ", cpuSpeedMhz: " + cpuSpeedMHz + ", cpuReservedMHz: " + cpuReservedMHz + ", limitCpu: " + limitCpuUse + ", memoryMB: " + memoryMB - + ", guestOS: " + guestOsIdentifier + ", datastore: " + morDs.get_value() + ", snapshotDirToParent: " + snapshotDirToParent); - + s_logger.trace("vCenter API trace - createBlankVm(). 
target MOR: " + _mor.getValue() + ", vmName: " + vmName + ", cpuCount: " + cpuCount + + ", cpuSpeedMhz: " + cpuSpeedMHz + ", cpuReservedMHz: " + cpuReservedMHz + ", limitCpu: " + limitCpuUse + ", memoryMB: " + memoryMB + + ", guestOS: " + guestOsIdentifier + ", datastore: " + morDs.getValue() + ", snapshotDirToParent: " + snapshotDirToParent); + boolean result = HypervisorHostHelper.createBlankVm(this, vmName, cpuCount, cpuSpeedMHz, cpuReservedMHz, limitCpuUse, memoryMB, memoryReserveMB, guestOsIdentifier, morDs, snapshotDirToParent); @@ -732,13 +734,13 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } @Override - public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolHostAddress, + public ManagedObjectReference mountDatastore(boolean vmfsDatastore, String poolHostAddress, int poolHostPort, String poolPath, String poolUuid) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - mountDatastore(). target MOR: " + _mor.get_value() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + s_logger.trace("vCenter API trace - mountDatastore(). target MOR: " + _mor.getValue() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + ", poolHostPort: " + poolHostPort + ", poolPath: " + poolPath + ", poolUuid: " + poolUuid); - + HostDatastoreSystemMO hostDatastoreSystemMo = getHostDatastoreSystemMO(); ManagedObjectReference morDatastore = hostDatastoreSystemMo.findDatastore(poolUuid); if(morDatastore == null) { @@ -749,9 +751,9 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { poolHostPort, poolPath, poolUuid); - } catch (com.vmware.vim25.AlreadyExists e) { + } catch (AlreadyExistsFaultMsg e) { s_logger.info("Creation of NFS datastore on vCenter failed since datastore already exists." + - " Details: vCenter API trace - mountDatastore(). target MOR: " + _mor.get_value() + ", vmfs: " + + " Details: vCenter API trace - mountDatastore(). 
target MOR: " + _mor.getValue() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + ", poolHostPort: " + poolHostPort + ", poolPath: " + poolPath + ", poolUuid: " + poolUuid); // Retrieve the morDatastore and return it. @@ -759,16 +761,16 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { poolHostPort, poolPath, poolUuid, hostDatastoreSystemMo)); } catch (Exception e) { s_logger.info("Creation of NFS datastore on vCenter failed. " + - " Details: vCenter API trace - mountDatastore(). target MOR: " + _mor.get_value() + ", vmfs: " + + " Details: vCenter API trace - mountDatastore(). target MOR: " + _mor.getValue() + ", vmfs: " + vmfsDatastore + ", poolHost: " + poolHostAddress + ", poolHostPort: " + poolHostPort + ", poolPath: " + poolPath + ", poolUuid: " + poolUuid + ". Exception mesg: " + e.getMessage()); throw new Exception("Creation of NFS datastore on vCenter failed."); } if(morDatastore == null) { - String msg = "Unable to create NFS datastore. host: " + poolHostAddress + ", port: " + String msg = "Unable to create NFS datastore. host: " + poolHostAddress + ", port: " + poolHostPort + ", path: " + poolPath + ", uuid: " + poolUuid; s_logger.error(msg); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); throw new Exception(msg); @@ -776,32 +778,32 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } else { morDatastore = _context.getDatastoreMorByPath(poolPath); if(morDatastore == null) { - String msg = "Unable to create VMFS datastore. host: " + poolHostAddress + ", port: " + String msg = "Unable to create VMFS datastore. 
host: " + poolHostAddress + ", port: " + poolHostPort + ", path: " + poolPath + ", uuid: " + poolUuid; s_logger.error(msg); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(failed)"); throw new Exception(msg); } - + DatastoreMO dsMo = new DatastoreMO(_context, morDatastore); dsMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, poolUuid); } } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - mountDatastore() done(successfully)"); - + return morDatastore; } - + @Override public void unmountDatastore(String poolUuid) throws Exception { - + if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - unmountDatastore(). target MOR: " + _mor.get_value() + ", poolUuid: " + poolUuid); - + s_logger.trace("vCenter API trace - unmountDatastore(). target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid); + HostDatastoreSystemMO hostDatastoreSystemMo = getHostDatastoreSystemMO(); if(!hostDatastoreSystemMo.deleteDatastore(poolUuid)) { String msg = "Unable to unmount datastore. 
uuid: " + poolUuid; @@ -811,35 +813,35 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { s_logger.trace("vCenter API trace - unmountDatastore() done(failed)"); throw new Exception(msg); } - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - unmountDatastore() done"); } - + @Override public ManagedObjectReference findDatastore(String poolUuid) throws Exception { HostDatastoreSystemMO hostDsMo = getHostDatastoreSystemMO(); return hostDsMo.findDatastore(poolUuid); } - + @Override public ManagedObjectReference findDatastoreByExportPath(String exportPath) throws Exception { HostDatastoreSystemMO datastoreSystemMo = getHostDatastoreSystemMO(); return datastoreSystemMo.findDatastoreByExportPath(exportPath); } - + @Override public ManagedObjectReference findMigrationTarget(VirtualMachineMO vmMo) throws Exception { return _mor; } - + @Override public VmwareHypervisorHostResourceSummary getHyperHostResourceSummary() throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostResourceSummary(). target MOR: " + _mor.get_value()); - + s_logger.trace("vCenter API trace - getHyperHostResourceSummary(). target MOR: " + _mor.getValue()); + VmwareHypervisorHostResourceSummary summary = new VmwareHypervisorHostResourceSummary(); - + HostHardwareSummary hardwareSummary = getHostHardwareSummary(); // TODO: not sure how hyper-thread is counted in VMware resource pool summary.setCpuCount(hardwareSummary.getNumCpuCores()); @@ -850,19 +852,19 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { s_logger.trace("vCenter API trace - getHyperHostResourceSummary() done"); return summary; } - + @Override public VmwareHypervisorHostNetworkSummary getHyperHostNetworkSummary(String managementPortGroup) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostNetworkSummary(). 
target MOR: " + _mor.get_value() + ", mgmtPortgroup: " + managementPortGroup); - + s_logger.trace("vCenter API trace - getHyperHostNetworkSummary(). target MOR: " + _mor.getValue() + ", mgmtPortgroup: " + managementPortGroup); + VmwareHypervisorHostNetworkSummary summary = new VmwareHypervisorHostNetworkSummary(); - + if(this.getHostType() == VmwareHostType.ESXi) { - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getServiceUtil().getDynamicProperty(_mor, + VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); assert(netConfigs != null); - + for(int i = 0; i < netConfigs.length; i++) { if(netConfigs[i].getNicType().equals("management")) { for(HostVirtualNic nic : netConfigs[i].getCandidateVnic()) { @@ -870,7 +872,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { summary.setHostIp(nic.getSpec().getIp().getIpAddress()); summary.setHostNetmask(nic.getSpec().getIp().getSubnetMask()); summary.setHostMacAddress(nic.getSpec().getMac()); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostNetworkSummary() done(successfully)"); return summary; @@ -880,16 +882,16 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } } else { // try with ESX path - HostVirtualNic[] hostVNics = (HostVirtualNic[])_context.getServiceUtil().getDynamicProperty(_mor, + HostVirtualNic[] hostVNics = (HostVirtualNic[])_context.getVimClient().getDynamicProperty(_mor, "config.network.consoleVnic"); - + if(hostVNics != null) { for(HostVirtualNic vnic : hostVNics) { if(vnic.getPortgroup().equals(managementPortGroup)) { summary.setHostIp(vnic.getSpec().getIp().getIpAddress()); summary.setHostNetmask(vnic.getSpec().getIp().getSubnetMask()); summary.setHostMacAddress(vnic.getSpec().getMac()); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostNetworkSummary() 
done(successfully)"); return summary; @@ -902,29 +904,29 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { s_logger.trace("vCenter API trace - getHyperHostNetworkSummary() done(failed)"); throw new Exception("Uanble to find management port group " + managementPortGroup); } - + @Override public ComputeResourceSummary getHyperHostHardwareSummary() throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getHyperHostHardwareSummary(). target MOR: " + _mor.get_value()); + s_logger.trace("vCenter API trace - getHyperHostHardwareSummary(). target MOR: " + _mor.getValue()); // // This is to adopt the model when using Cluster as a big host while ComputeResourceSummary is used // directly from VMware resource pool // - // When we break cluster hosts into individual hosts used in our resource allocator, + // When we break cluster hosts into individual hosts used in our resource allocator, // we will have to populate ComputeResourceSummary by ourselves here // HostHardwareSummary hardwareSummary = getHostHardwareSummary(); - + ComputeResourceSummary resourceSummary = new ComputeResourceSummary(); - + // TODO: not sure how hyper-threading is counted in VMware resourceSummary.setNumCpuCores(hardwareSummary.getNumCpuCores()); - + // Note: memory here is in Byte unit resourceSummary.setTotalMemory(hardwareSummary.getMemorySize()); - + // Total CPU is based on (# of cores) x Mhz int totalCpu = hardwareSummary.getCpuMhz() * hardwareSummary.getNumCpuCores(); resourceSummary.setTotalCpu(totalCpu); @@ -932,21 +934,21 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { HostListSummaryQuickStats stats = getHostQuickStats(); if(stats.getOverallCpuUsage() == null || stats.getOverallMemoryUsage() == null) throw new Exception("Unable to get valid overal CPU/Memory usage data, host may be disconnected"); - + resourceSummary.setEffectiveCpu(totalCpu - stats.getOverallCpuUsage()); - + // Note effective memory is in MB unit 
resourceSummary.setEffectiveMemory(hardwareSummary.getMemorySize()/(1024*1024) - stats.getOverallMemoryUsage()); - + if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostHardwareSummary() done"); - + return resourceSummary; } - + @Override public boolean isHyperHostConnected() throws Exception { - HostRuntimeInfo runtimeInfo = (HostRuntimeInfo)_context.getServiceUtil().getDynamicProperty(_mor, "runtime"); - return runtimeInfo.getConnectionState() == HostSystemConnectionState.connected; + HostRuntimeInfo runtimeInfo = (HostRuntimeInfo)_context.getVimClient().getDynamicProperty(_mor, "runtime"); + return runtimeInfo.getConnectionState() == HostSystemConnectionState.CONNECTED; } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java index 90d690c1101..1198b3365d7 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java @@ -20,6 +20,7 @@ import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.util.List; import org.apache.log4j.Logger; @@ -33,108 +34,108 @@ import com.vmware.vim25.OvfFileItem; public class HttpNfcLeaseMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(HttpNfcLeaseMO.class); - + public HttpNfcLeaseMO(VmwareContext context, ManagedObjectReference morHttpNfcLease) { super(context, morHttpNfcLease); } - + public HttpNfcLeaseMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } public HttpNfcLeaseState getState() throws Exception { - return (HttpNfcLeaseState)_context.getServiceUtil().getDynamicProperty(_mor, "state"); + return (HttpNfcLeaseState)_context.getVimClient().getDynamicProperty(_mor, "state"); } - + public HttpNfcLeaseState waitState(HttpNfcLeaseState[] states) throws Exception { 
assert(states != null); assert(states.length > 0); - + HttpNfcLeaseState state; while(true) { state = getState(); - if(state == HttpNfcLeaseState.ready || state == HttpNfcLeaseState.error) + if(state == HttpNfcLeaseState.READY || state == HttpNfcLeaseState.ERROR) return state; } } - + public HttpNfcLeaseInfo getLeaseInfo() throws Exception { - return (HttpNfcLeaseInfo)_context.getServiceUtil().getDynamicProperty(_mor, "info"); + return (HttpNfcLeaseInfo)_context.getVimClient().getDynamicProperty(_mor, "info"); } - - public HttpNfcLeaseManifestEntry[] getLeaseManifest() throws Exception { + + public List getLeaseManifest() throws Exception { return _context.getService().httpNfcLeaseGetManifest(_mor); } - + public void completeLease() throws Exception { _context.getService().httpNfcLeaseComplete(_mor); } - + public void abortLease() throws Exception { _context.getService().httpNfcLeaseAbort(_mor, null); } - + public void updateLeaseProgress(int percent) throws Exception { // make sure percentage is in right range if(percent < 0) percent = 0; else if(percent > 100) percent = 100; - + _context.getService().httpNfcLeaseProgress(_mor, percent); } - + public ProgressReporter createProgressReporter() { return new ProgressReporter(); } - - public static long calcTotalBytes(OvfCreateImportSpecResult ovfImportResult) { - OvfFileItem[] fileItemArr = ovfImportResult.getFileItem(); - long totalBytes = 0; - if (fileItemArr != null) { - for (OvfFileItem fi : fileItemArr) { - totalBytes += fi.getSize(); - } - } - return totalBytes; - } - - public static String readOvfContent(String ovfFilePath) throws IOException { - StringBuffer strContent = new StringBuffer(); - BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(ovfFilePath))); - String lineStr; - while ((lineStr = in.readLine()) != null) { - strContent.append(lineStr); - } - in.close(); - return strContent.toString(); + public static long calcTotalBytes(OvfCreateImportSpecResult ovfImportResult) { + 
List fileItemArr = ovfImportResult.getFileItem(); + long totalBytes = 0; + if (fileItemArr != null) { + for (OvfFileItem fi : fileItemArr) { + totalBytes += fi.getSize(); + } + } + return totalBytes; } - + + public static String readOvfContent(String ovfFilePath) throws IOException { + StringBuffer strContent = new StringBuffer(); + BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(ovfFilePath))); + String lineStr; + while ((lineStr = in.readLine()) != null) { + strContent.append(lineStr); + } + + in.close(); + return strContent.toString(); + } + public class ProgressReporter extends Thread { volatile int _percent; volatile boolean _done; - + public ProgressReporter() { _percent = 0; _done = false; - + setDaemon(true); start(); } - + public void reportProgress(int percent) { _percent = percent; } - + public void close() { if(s_logger.isInfoEnabled()) s_logger.info("close ProgressReporter, interrupt reporter runner to let it quit"); - + _done = true; interrupt(); } - + @Override public void run() { while(!_done) { @@ -149,7 +150,7 @@ public class HttpNfcLeaseMO extends BaseMO { s_logger.warn("Unexpected exception ", e); } } - + if(s_logger.isInfoEnabled()) s_logger.info("ProgressReporter stopped"); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 50f95413bd2..abc391fa035 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -69,16 +69,16 @@ public class HypervisorHostHelper { private static final Logger s_logger = Logger.getLogger(HypervisorHostHelper.class); private static final int DEFAULT_LOCK_TIMEOUT_SECONDS = 600; private static final String s_policyNamePrefix = "cloud.policy."; - + // make vmware-base loosely coupled with cloud-specific stuff, duplicate VLAN.UNTAGGED constant here private static 
final String UNTAGGED_VLAN_NAME = "untagged"; - - public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, + + public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name) { - + if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty prop = oc.getPropSet(0); + DynamicProperty prop = oc.getPropSet().get(0); assert(prop != null); if(prop.getVal().toString().equals(name)) return new VirtualMachineMO(context, oc.getObj()); @@ -86,20 +86,20 @@ public class HypervisorHostHelper { } return null; } - + public static ManagedObjectReference findDatastoreWithBackwardsCompatibility(VmwareHypervisorHost hyperHost, String uuidName) throws Exception { ManagedObjectReference morDs = hyperHost.findDatastore(uuidName.replace("-", "")); if(morDs == null) morDs = hyperHost.findDatastore(uuidName); - + return morDs; } - + public static DatastoreMO getHyperHostDatastoreMO(VmwareHypervisorHost hyperHost, String datastoreName) throws Exception { ObjectContent[] ocs = hyperHost.getDatastorePropertiesOnHyperHost(new String[] { "name"} ); if(ocs != null && ocs.length > 0) { for(ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if(objProps != null) { for(DynamicProperty objProp : objProps) { if(objProp.getVal().toString().equals(datastoreName)) @@ -110,7 +110,7 @@ public class HypervisorHostHelper { } return null; } - + public static String getPublicNetworkNamePrefix(String vlanId) { if (UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { return "cloud.public.untagged"; @@ -118,24 +118,24 @@ public class HypervisorHostHelper { return "cloud.public." 
+ vlanId; } } - + public static String composeCloudNetworkName(String prefix, String vlanId, Integer networkRateMbps, String vSwitchName) { StringBuffer sb = new StringBuffer(prefix); if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) sb.append(".untagged"); else sb.append(".").append(vlanId); - + if(networkRateMbps != null && networkRateMbps.intValue() > 0) sb.append(".").append(String.valueOf(networkRateMbps)); else sb.append(".0"); sb.append(".").append(VersioningContants.PORTGROUP_NAMING_VERSION); sb.append("-").append(vSwitchName); - + return sb.toString(); } - + public static Map getValidatedVsmCredentials(VmwareContext context) throws Exception { Map vsmCredentials = context.getStockObject("vsmcredentials"); String msg; @@ -415,7 +415,7 @@ public class HypervisorHostHelper { boolean createGCTag = false; String networkName; Integer vid = null; - + if(vlanId != null && !UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { createGCTag = true; vid = Integer.parseInt(vlanId); @@ -466,8 +466,8 @@ public class HypervisorHostHelper { bWaitPortGroupReady = true; updatePortProfile(context, ethPortProfileName, networkName, vid, networkRateMbps, peakBandwidth, burstSize); } - // Wait for dvPortGroup on vCenter - if(bWaitPortGroupReady) + // Wait for dvPortGroup on vCenter + if(bWaitPortGroupReady) morNetwork = waitForDvPortGroupReady(dataCenterMo, networkName, timeOutMs); else morNetwork = dataCenterMo.getDvPortGroupMor(networkName); @@ -476,7 +476,7 @@ public class HypervisorHostHelper { s_logger.error(msg); throw new Exception(msg); } - + if(createGCTag) { NetworkMO networkMo = new NetworkMO(hostMo.getContext(), morNetwork); networkMo.setCustomFieldValue(CustomFieldConstants.CLOUD_GC_DVP, "true"); @@ -511,39 +511,39 @@ public class HypervisorHostHelper { currentTrafficShapingPolicy = spec.getDefaultPortConfig().getInShapingPolicy(); // TODO(sateesh): Extract and compare vendor specific configuration specification as well. 
// DistributedVirtualSwitchKeyedOpaqueBlob[] vendorSpecificConfig = spec.getVendorSpecificConfig(); - + assert(currentTrafficShapingPolicy != null); - + LongPolicy averageBandwidth = currentTrafficShapingPolicy.getAverageBandwidth(); LongPolicy burstSize = currentTrafficShapingPolicy.getBurstSize(); LongPolicy peakBandwidth = currentTrafficShapingPolicy.getPeakBandwidth(); BoolPolicy isEnabled = currentTrafficShapingPolicy.getEnabled(); - - if(!isEnabled.getValue()) + + if(!isEnabled.isValue()) return false; - + if(averageBandwidth != null && !averageBandwidth.equals(shapingPolicy.getAverageBandwidth())) { if(s_logger.isInfoEnabled()) { s_logger.info("Average bandwidth setting in shaping policy doesn't match with existing setting."); - } + } return false; } else if(burstSize != null && !burstSize.equals(shapingPolicy.getBurstSize())) { if(s_logger.isInfoEnabled()) { s_logger.info("Burst size setting in shaping policy doesn't match with existing setting."); - } + } return false; } else if(peakBandwidth != null && !peakBandwidth.equals(shapingPolicy.getPeakBandwidth())) { if(s_logger.isInfoEnabled()) { s_logger.info("Peak bandwidth setting in shaping policy doesn't match with existing setting."); - } + } return false; } - + return true; } public static Pair prepareNetwork(String vSwitchName, String namePrefix, - HostMO hostMo, String vlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, + HostMO hostMo, String vlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, boolean syncPeerHosts) throws Exception { HostVirtualSwitch vSwitch; @@ -558,12 +558,12 @@ public class HypervisorHostHelper { boolean createGCTag = false; String networkName; Integer vid = null; - + if(vlanId != null && !UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { createGCTag = true; vid = Integer.parseInt(vlanId); } - + networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, vSwitchName); HostNetworkSecurityPolicy secPolicy = null; if 
(namePrefix.equalsIgnoreCase("cloud.private")) { @@ -577,11 +577,11 @@ public class HypervisorHostHelper { shapingPolicy = new HostNetworkTrafficShapingPolicy(); shapingPolicy.setEnabled(true); shapingPolicy.setAverageBandwidth((long)networkRateMbps.intValue()*1024L*1024L); - - // + + // // TODO : people may have different opinion on how to set the following // - + // give 50% premium to peek shapingPolicy.setPeakBandwidth((long)(shapingPolicy.getAverageBandwidth()*1.5)); @@ -602,7 +602,7 @@ public class HypervisorHostHelper { } ManagedObjectReference morNetwork; - if(bWaitPortGroupReady) + if(bWaitPortGroupReady) morNetwork = waitForNetworkReady(hostMo, networkName, timeOutMs); else morNetwork = hostMo.getNetworkMor(networkName); @@ -611,24 +611,24 @@ public class HypervisorHostHelper { s_logger.error(msg); throw new Exception(msg); } - + if(createGCTag) { NetworkMO networkMo = new NetworkMO(hostMo.getContext(), morNetwork); networkMo.setCustomFieldValue(CustomFieldConstants.CLOUD_GC, "true"); } - + if(syncPeerHosts) { ManagedObjectReference morParent = hostMo.getParentMor(); if(morParent != null && morParent.getType().equals("ClusterComputeResource")) { // to be conservative, lock cluster - GlobalLock lock = GlobalLock.getInternLock("ClusterLock." + morParent.get_value()); + GlobalLock lock = GlobalLock.getInternLock("ClusterLock." 
+ morParent.getValue()); try { if(lock.lock(DEFAULT_LOCK_TIMEOUT_SECONDS)) { try { - ManagedObjectReference[] hosts = (ManagedObjectReference[])hostMo.getContext().getServiceUtil().getDynamicProperty(morParent, "host"); + ManagedObjectReference[] hosts = (ManagedObjectReference[])hostMo.getContext().getVimClient().getDynamicProperty(morParent, "host"); if(hosts != null) { for(ManagedObjectReference otherHost: hosts) { - if(!otherHost.get_value().equals(hostMo.getMor().get_value())) { + if(!otherHost.getValue().equals(hostMo.getMor().getValue())) { HostMO otherHostMo = new HostMO(hostMo.getContext(), otherHost); try { if(s_logger.isDebugEnabled()) @@ -655,7 +655,7 @@ public class HypervisorHostHelper { s_logger.info("Network " + networkName + " is ready on vSwitch " + vSwitchName); return new Pair(morNetwork, networkName); } - + private static boolean isSpecMatch(HostPortGroupSpec spec, Integer vlanId, HostNetworkTrafficShapingPolicy shapingPolicy) { // check VLAN configuration if(vlanId != null) { @@ -670,17 +670,17 @@ public class HypervisorHostHelper { HostNetworkTrafficShapingPolicy policyInSpec = null; if(spec.getPolicy() != null) policyInSpec = spec.getPolicy().getShapingPolicy(); - + if(policyInSpec != null && shapingPolicy == null || policyInSpec == null && shapingPolicy != null) return false; - + if(policyInSpec == null && shapingPolicy == null) return true; - + // so far policyInSpec and shapingPolicy should both not be null - if(policyInSpec.getEnabled() == null || !policyInSpec.getEnabled().booleanValue()) + if(policyInSpec.isEnabled() == null || !policyInSpec.isEnabled().booleanValue()) return false; - + if(policyInSpec.getAverageBandwidth() == null || policyInSpec.getAverageBandwidth().longValue() != shapingPolicy.getAverageBandwidth().longValue()) return false; @@ -689,10 +689,10 @@ public class HypervisorHostHelper { if(policyInSpec.getBurstSize() == null || policyInSpec.getBurstSize().longValue() != shapingPolicy.getBurstSize().longValue()) return 
false; - + return true; } - + public static ManagedObjectReference waitForNetworkReady(HostMO hostMo, String networkName, long timeOutMs) throws Exception { @@ -713,14 +713,14 @@ public class HypervisorHostHelper { return morNetwork; } - - public static boolean createBlankVm(VmwareHypervisorHost host, String vmName, - int cpuCount, int cpuSpeedMHz, int cpuReservedMHz, boolean limitCpuUse, int memoryMB, int memoryReserveMB, String guestOsIdentifier, + + public static boolean createBlankVm(VmwareHypervisorHost host, String vmName, + int cpuCount, int cpuSpeedMHz, int cpuReservedMHz, boolean limitCpuUse, int memoryMB, int memoryReserveMB, String guestOsIdentifier, ManagedObjectReference morDs, boolean snapshotDirToParent) throws Exception { - + if(s_logger.isInfoEnabled()) s_logger.info("Create blank VM. cpuCount: " + cpuCount + ", cpuSpeed(MHz): " + cpuSpeedMHz + ", mem(Mb): " + memoryMB); - + // VM config basics VirtualMachineConfigSpec vmConfig = new VirtualMachineConfigSpec(); vmConfig.setName(vmName); @@ -728,62 +728,63 @@ public class HypervisorHostHelper { // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); DatastoreMO dsMo = new DatastoreMO(host.getContext(), morDs); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); - + VirtualMachineVideoCard videoCard = new VirtualMachineVideoCard(); videoCard.setControllerKey(100); videoCard.setUseAutoDetect(true); - + 
VirtualDeviceConfigSpec videoDeviceSpec = new VirtualDeviceConfigSpec(); videoDeviceSpec.setDevice(videoCard); - videoDeviceSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec, videoDeviceSpec }); + videoDeviceSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); + + vmConfig.getDeviceChange().add(scsiControllerSpec); + vmConfig.getDeviceChange().add(videoDeviceSpec); if(host.createVm(vmConfig)) { VirtualMachineMO vmMo = host.findVmOnHyperHost(vmName); assert(vmMo != null); - + int ideControllerKey = -1; while(ideControllerKey < 0) { ideControllerKey = vmMo.tryGetIDEDeviceControllerKey(); if(ideControllerKey >= 0) break; - + s_logger.info("Waiting for IDE controller be ready in VM: " + vmName); Thread.sleep(1000); } - + if(snapshotDirToParent) { String snapshotDir = String.format("/vmfs/volumes/%s/", dsMo.getName()); - + s_logger.info("Switch snapshot working directory to " + snapshotDir + " for " + vmName); vmMo.setSnapshotDirectory(snapshotDir); - + // Don't have a good way to test if the VM is really ready for use through normal API after configuration file manipulation, // delay 3 seconds Thread.sleep(3000); } - + s_logger.info("Blank VM: " + vmName + " is ready for use"); return true; } return false; } - + public static String resolveHostNameInUrl(DatacenterMO dcMo, String url) { - + s_logger.info("Resolving host name in url through vCenter, url: " + url); - + URI uri; try { uri = new URI(url); @@ -791,13 +792,13 @@ public class HypervisorHostHelper { s_logger.warn("URISyntaxException on url " + url); return url; } - + String host = uri.getHost(); if(NetUtils.isValidIp(host)) { s_logger.info("host name in url is already in IP address, url: " + url); return url; } - + try { ManagedObjectReference morHost = dcMo.findHost(host); if(morHost != null) { @@ -807,18 +808,18 @@ public class HypervisorHostHelper { managementPortGroupName = 
(String)dcMo.getContext().getStockObject("manageportgroup"); else managementPortGroupName = (String)dcMo.getContext().getStockObject("serviceconsole"); - + VmwareHypervisorHostNetworkSummary summary = hostMo.getHyperHostNetworkSummary(managementPortGroupName); if(summary == null) { s_logger.warn("Unable to resolve host name in url through vSphere, url: " + url); return url; } - + String hostIp = summary.getHostIp(); - + try { URI resolvedUri = new URI(uri.getScheme(), uri.getUserInfo(), hostIp, uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment()); - + s_logger.info("url " + url + " is resolved to " + resolvedUri.toString() + " through vCenter"); return resolvedUri.toString(); } catch (URISyntaxException e) { @@ -829,77 +830,77 @@ public class HypervisorHostHelper { } catch(Exception e) { s_logger.warn("Unexpected exception ", e); } - + return url; } - - public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, + + public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath, String vmName, DatastoreMO dsMo, String diskOption, ManagedObjectReference morRp, ManagedObjectReference morHost) throws Exception { - + assert(morRp != null); - - OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams(); - importSpecParams.setHostSystem(morHost); - importSpecParams.setLocale("US"); - importSpecParams.setEntityName(vmName); + + OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams(); + importSpecParams.setHostSystem(morHost); + importSpecParams.setLocale("US"); + importSpecParams.setEntityName(vmName); importSpecParams.setDeploymentOption(""); importSpecParams.setDiskProvisioning(diskOption); // diskOption: thin, thick, etc - importSpecParams.setPropertyMapping(null); - + //importSpecParams.setPropertyMapping(null); + String ovfDescriptor = HttpNfcLeaseMO.readOvfContent(ovfFilePath); VmwareContext context = host.getContext(); 
OvfCreateImportSpecResult ovfImportResult = context.getService().createImportSpec( - context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, + context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(), importSpecParams); - + if(ovfImportResult == null) { - String msg = "createImportSpec() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + String msg = "createImportSpec() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ", diskOption: " + diskOption; s_logger.error(msg); throw new Exception(msg); } - + DatacenterMO dcMo = new DatacenterMO(context, host.getHyperHostDatacenter()); - ManagedObjectReference morLease = context.getService().importVApp(morRp, + ManagedObjectReference morLease = context.getService().importVApp(morRp, ovfImportResult.getImportSpec(), dcMo.getVmFolder(), morHost); if(morLease == null) { - String msg = "importVApp() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + String msg = "importVApp() failed. ovfFilePath: " + ovfFilePath + ", vmName: " + vmName + ", diskOption: " + diskOption; s_logger.error(msg); throw new Exception(msg); } final HttpNfcLeaseMO leaseMo = new HttpNfcLeaseMO(context, morLease); HttpNfcLeaseState state = leaseMo.waitState( - new HttpNfcLeaseState[] { HttpNfcLeaseState.ready, HttpNfcLeaseState.error }); + new HttpNfcLeaseState[] { HttpNfcLeaseState.READY, HttpNfcLeaseState.ERROR }); try { - if(state == HttpNfcLeaseState.ready) { + if(state == HttpNfcLeaseState.READY) { final long totalBytes = HttpNfcLeaseMO.calcTotalBytes(ovfImportResult); - File ovfFile = new File(ovfFilePath); - + File ovfFile = new File(ovfFilePath); + HttpNfcLeaseInfo httpNfcLeaseInfo = leaseMo.getLeaseInfo(); - HttpNfcLeaseDeviceUrl[] deviceUrls = httpNfcLeaseInfo.getDeviceUrl(); + List deviceUrls = httpNfcLeaseInfo.getDeviceUrl(); long bytesAlreadyWritten = 0; - + final HttpNfcLeaseMO.ProgressReporter progressReporter = leaseMo.createProgressReporter(); try { for (HttpNfcLeaseDeviceUrl deviceUrl 
: deviceUrls) { - String deviceKey = deviceUrl.getImportKey(); + String deviceKey = deviceUrl.getImportKey(); for (OvfFileItem ovfFileItem : ovfImportResult.getFileItem()) { - if (deviceKey.equals(ovfFileItem.getDeviceId())) { + if (deviceKey.equals(ovfFileItem.getDeviceId())) { String absoluteFile = ovfFile.getParent() + File.separator + ovfFileItem.getPath(); String urlToPost = deviceUrl.getUrl(); urlToPost = resolveHostNameInUrl(dcMo, urlToPost); - - context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, + + context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate () { public void action(Long param) { progressReporter.reportProgress((int)(param * 100 / totalBytes)); } - }); - + }); + bytesAlreadyWritten += ovfFileItem.getSize(); - } - } + } + } } } finally { progressReporter.close(); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java index dadd7d12a01..b8e3ab42b5b 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java @@ -23,17 +23,17 @@ public class NetworkMO extends BaseMO { public NetworkMO(VmwareContext context, ManagedObjectReference morCluster) { super(context, morCluster); } - + public NetworkMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public void destroyNetwork() throws Exception { - _context.getService().destroyNetwork(_mor); + _context.getService().destroyNetwork(_mor); } - + public ManagedObjectReference[] getVMsOnNetwork() throws Exception { - ManagedObjectReference[] vms = (ManagedObjectReference[])_context.getServiceUtil().getDynamicProperty(_mor, "vm"); + ManagedObjectReference[] vms = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "vm"); return vms; } } diff --git 
a/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java index 5d57bd1fb09..6c2b7bb2ab7 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java @@ -16,7 +16,13 @@ // under the License. package com.cloud.hypervisor.vmware.mo; +import java.util.ArrayList; import java.util.Calendar; +import java.util.List; + +import javax.xml.datatype.DatatypeConfigurationException; +import javax.xml.datatype.DatatypeFactory; +import javax.xml.datatype.XMLGregorianCalendar; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.ManagedObjectReference; @@ -28,58 +34,91 @@ import com.vmware.vim25.PerfMetricId; import com.vmware.vim25.PerfProviderSummary; import com.vmware.vim25.PerfQuerySpec; +import edu.emory.mathcs.backport.java.util.Arrays; + public class PerfManagerMO extends BaseMO { public PerfManagerMO(VmwareContext context, ManagedObjectReference mor) { super(context, mor); } - + public PerfManagerMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public void createPerfInterval(PerfInterval interval) throws Exception { _context.getService().createPerfInterval(_mor, interval); } - - public PerfMetricId[] queryAvailablePerfMetric(ManagedObjectReference morEntity, Calendar beginTime, + + /** + * Converts Calendar object into XMLGregorianCalendar + * + * @param calendar Object to be converted + * @return XMLGregorianCalendar + */ + private XMLGregorianCalendar calendarToXMLGregorianCalendar(Calendar calendar) throws DatatypeConfigurationException { + + DatatypeFactory dtf = DatatypeFactory.newInstance(); + XMLGregorianCalendar xgc = dtf.newXMLGregorianCalendar(); + xgc.setYear(calendar.get(Calendar.YEAR)); + xgc.setMonth(calendar.get(Calendar.MONTH) + 1); + xgc.setDay(calendar.get(Calendar.DAY_OF_MONTH)); + 
xgc.setHour(calendar.get(Calendar.HOUR_OF_DAY)); + xgc.setMinute(calendar.get(Calendar.MINUTE)); + xgc.setSecond(calendar.get(Calendar.SECOND)); + xgc.setMillisecond(calendar.get(Calendar.MILLISECOND)); + + // Calendar ZONE_OFFSET and DST_OFFSET fields are in milliseconds. + int offsetInMinutes = (calendar.get(Calendar.ZONE_OFFSET) + calendar.get(Calendar.DST_OFFSET)) / (60 * 1000); + xgc.setTimezone(offsetInMinutes); + return xgc; + } + + public List queryAvailablePerfMetric(ManagedObjectReference morEntity, Calendar beginTime, Calendar endTime, Integer intervalId) throws Exception { - - return _context.getService().queryAvailablePerfMetric(_mor, morEntity, beginTime, endTime, intervalId); + + return _context.getService().queryAvailablePerfMetric(_mor, morEntity, calendarToXMLGregorianCalendar(beginTime), + calendarToXMLGregorianCalendar(endTime), intervalId); } public PerfCompositeMetric queryPerfComposite(PerfQuerySpec spec) throws Exception { return _context.getService().queryPerfComposite(_mor, spec); } - - public PerfCounterInfo[] queryPerfCounter(int[] counterId) throws Exception { - return _context.getService().queryPerfCounter(_mor, counterId); - } - - public PerfCounterInfo[] queryPerfCounterByLevel(int level) throws Exception { - return _context.getService().queryPerfCounterByLevel(_mor, level); - } - - public PerfProviderSummary queryPerfProviderSummary(ManagedObjectReference morEntity) throws Exception { - return _context.getService().queryPerfProviderSummary(_mor, morEntity); + + public List queryPerfCounter(int[] counterId) throws Exception { + List counterArr = new ArrayList(); + if ( counterId != null){ + for (int i = 0; i < counterId.length; i++ ){ + counterArr.add(counterId[i]); + } + } + return _context.getService().queryPerfCounter(_mor, counterArr); } - public PerfEntityMetricBase[] queryPerf(PerfQuerySpec[] specs) throws Exception { - return _context.getService().queryPerf(_mor, specs); + public List queryPerfCounterByLevel(int level) throws 
Exception { + return _context.getService().queryPerfCounterByLevel(_mor, level); } - + + public PerfProviderSummary queryPerfProviderSummary(ManagedObjectReference morEntity) throws Exception { + return _context.getService().queryPerfProviderSummary(_mor, morEntity); + } + + public List queryPerf(PerfQuerySpec[] specs) throws Exception { + return _context.getService().queryPerf(_mor, Arrays.asList(specs)); + } + public void removePerfInterval(int samplePeriod) throws Exception { _context.getService().removePerfInterval(_mor, samplePeriod); } - + public void updatePerfInterval(PerfInterval interval) throws Exception { _context.getService().updatePerfInterval(_mor, interval); } - + public PerfCounterInfo[] getCounterInfo() throws Exception { - return (PerfCounterInfo[])_context.getServiceUtil().getDynamicProperty(_mor, "perfCounter"); + return (PerfCounterInfo[])_context.getVimClient().getDynamicProperty(_mor, "perfCounter"); } - + public PerfInterval[] getIntervalInfo() throws Exception { - return (PerfInterval[])_context.getServiceUtil().getDynamicProperty(_mor, "historicalInterval"); + return (PerfInterval[])_context.getVimClient().getDynamicProperty(_mor, "historicalInterval"); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/TaskMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/TaskMO.java index 3b0b153dc7f..694734bf09a 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/TaskMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/TaskMO.java @@ -27,48 +27,48 @@ public class TaskMO extends BaseMO { public TaskMO(VmwareContext context, ManagedObjectReference morTask) { super(context, morTask); } - + public TaskMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public TaskInfo getTaskInfo() throws Exception { - return (TaskInfo)getContext().getServiceUtil().getDynamicProperty(_mor, "info"); + return (TaskInfo)getContext().getVimClient().getDynamicProperty(_mor, "info"); } - + 
public void setTaskDescription(LocalizableMessage description) throws Exception { _context.getService().setTaskDescription(_mor, description); } - + public void setTaskState(TaskInfoState state, Object result, LocalizedMethodFault fault) throws Exception { _context.getService().setTaskState(_mor, state, result, fault); } - + public void updateProgress(int percentDone) throws Exception { _context.getService().updateProgress(_mor, percentDone); } - + public void cancelTask() throws Exception { _context.getService().cancelTask(_mor); } - + public static String getTaskFailureInfo(VmwareContext context, ManagedObjectReference morTask) { StringBuffer sb = new StringBuffer(); - + try { - TaskInfo info = (TaskInfo)context.getServiceUtil().getDynamicProperty(morTask, "info"); + TaskInfo info = (TaskInfo)context.getVimClient().getDynamicProperty(morTask, "info"); if(info != null) { LocalizedMethodFault fault = info.getError(); if(fault != null) { sb.append(fault.getLocalizedMessage()).append(" "); - + if(fault.getFault() != null) sb.append(fault.getFault().getClass().getName()); } } } catch(Exception e) { } - + return sb.toString(); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualDiskManagerMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualDiskManagerMO.java index aab82100421..e21d06adf62 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualDiskManagerMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualDiskManagerMO.java @@ -25,132 +25,132 @@ import com.vmware.vim25.VirtualDiskSpec; public class VirtualDiskManagerMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(VirtualDiskManagerMO.class); - + public VirtualDiskManagerMO(VmwareContext context, ManagedObjectReference morDiskMgr) { super(context, morDiskMgr); } - + public VirtualDiskManagerMO(VmwareContext context, String morType, String morValue) { super(context, morType, morValue); } - + public void copyVirtualDisk(String srcName, 
ManagedObjectReference morSrcDc, - String destName, ManagedObjectReference morDestDc, VirtualDiskSpec diskSpec, + String destName, ManagedObjectReference morDestDc, VirtualDiskSpec diskSpec, boolean force) throws Exception { - - ManagedObjectReference morTask = _context.getService().copyVirtualDisk_Task(_mor, srcName, morSrcDc, destName, morDestDc, diskSpec, force); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) - throw new Exception("Unable to copy virtual disk " + srcName + " to " + destName - + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); - - _context.waitForTaskProgressDone(morTask); - } - - public void createVirtualDisk(String name, ManagedObjectReference morDc, VirtualDiskSpec diskSpec) throws Exception { - ManagedObjectReference morTask = _context.getService().createVirtualDisk_Task(_mor, name, morDc, diskSpec); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) - throw new Exception("Unable to create virtual disk " + name + + ManagedObjectReference morTask = _context.getService().copyVirtualDiskTask(_mor, srcName, morSrcDc, destName, morDestDc, diskSpec, force); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) + throw new Exception("Unable to copy virtual disk " + srcName + " to " + destName + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + + public void createVirtualDisk(String name, ManagedObjectReference morDc, VirtualDiskSpec diskSpec) throws Exception { + ManagedObjectReference morTask = _context.getService().createVirtualDiskTask(_mor, name, morDc, diskSpec); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) + throw new Exception("Unable to create virtual disk " + name + + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); + + _context.waitForTaskProgressDone(morTask); + } + public void 
defragmentVirtualDisk(String name, ManagedObjectReference morDc) throws Exception { - ManagedObjectReference morTask = _context.getService().defragmentVirtualDisk_Task(_mor, name, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().defragmentVirtualDiskTask(_mor, name, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to defragment virtual disk " + name + " due to " + result); _context.waitForTaskProgressDone(morTask); } - + public void deleteVirtualDisk(String name, ManagedObjectReference morDc) throws Exception { - ManagedObjectReference morTask = _context.getService().deleteVirtualDisk_Task(_mor, name, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().deleteVirtualDiskTask(_mor, name, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to delete virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void eagerZeroVirtualDisk(String name, ManagedObjectReference morDc) throws Exception { - ManagedObjectReference morTask = _context.getService().eagerZeroVirtualDisk_Task(_mor, name, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().eagerZeroVirtualDiskTask(_mor, name, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to eager zero virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void extendVirtualDisk(String name, ManagedObjectReference morDc, long newCapacityKb, 
boolean eagerZero) throws Exception { - ManagedObjectReference morTask = _context.getService().extendVirtualDisk_Task(_mor, name, morDc, newCapacityKb, eagerZero); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().extendVirtualDiskTask(_mor, name, morDc, newCapacityKb, eagerZero); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to extend virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void inflateVirtualDisk(String name, ManagedObjectReference morDc) throws Exception { - ManagedObjectReference morTask = _context.getService().inflateVirtualDisk_Task(_mor, name, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().inflateVirtualDiskTask(_mor, name, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to inflate virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void shrinkVirtualDisk(String name, ManagedObjectReference morDc, boolean copy) throws Exception { - ManagedObjectReference morTask = _context.getService().shrinkVirtualDisk_Task(_mor, name, morDc, copy); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().shrinkVirtualDiskTask(_mor, name, morDc, copy); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to shrink virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void zeroFillVirtualDisk(String name, 
ManagedObjectReference morDc) throws Exception { - ManagedObjectReference morTask = _context.getService().zeroFillVirtualDisk_Task(_mor, name, morDc); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) + ManagedObjectReference morTask = _context.getService().zeroFillVirtualDiskTask(_mor, name, morDc); + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) throw new Exception("Unable to zero fill virtual disk " + name + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public void moveVirtualDisk(String srcName, ManagedObjectReference morSrcDc, String destName, ManagedObjectReference morDestDc, boolean force) throws Exception { - - ManagedObjectReference morTask = _context.getService().moveVirtualDisk_Task(_mor, srcName, morSrcDc, + + ManagedObjectReference morTask = _context.getService().moveVirtualDiskTask(_mor, srcName, morSrcDc, destName, morDestDc, force); - - String result = _context.getServiceUtil().waitForTask(morTask); - if(!result.equals("sucess")) - throw new Exception("Unable to move virtual disk " + srcName + " to " + destName + + boolean result = _context.getVimClient().waitForTask(morTask); + if(!result) + throw new Exception("Unable to move virtual disk " + srcName + " to " + destName + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); _context.waitForTaskProgressDone(morTask); } - + public int queryVirtualDiskFragmentation(String name, ManagedObjectReference morDc) throws Exception { return _context.getService().queryVirtualDiskFragmentation(_mor, name, morDc); } - + public HostDiskDimensionsChs queryVirtualDiskGeometry(String name, ManagedObjectReference morDc) throws Exception { return _context.getService().queryVirtualDiskGeometry(_mor, name, morDc); } - + public String queryVirtualDiskUuid(String name, ManagedObjectReference morDc) throws Exception { return _context.getService().queryVirtualDiskUuid(_mor, 
name, morDc); } - + public void setVirtualDiskUuid(String name, ManagedObjectReference morDc, String uuid) throws Exception { _context.getService().setVirtualDiskUuid(_mor, name, morDc, uuid); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 0dc41a1f597..76072cd614a 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -97,6 +97,8 @@ import com.vmware.vim25.VirtualPCIController; import com.vmware.vim25.VirtualSCSIController; import com.vmware.vim25.VirtualSCSISharing; +import edu.emory.mathcs.backport.java.util.Arrays; + public class VirtualMachineMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(VirtualMachineMO.class); @@ -117,7 +119,7 @@ public class VirtualMachineMO extends BaseMO { PropertySpec pSpec = new PropertySpec(); pSpec.setType("Datastore"); - pSpec.setPathSet(new String[] { "name" }); + pSpec.getPathSet().add("name"); TraversalSpec vmDatastoreTraversal = new TraversalSpec(); vmDatastoreTraversal.setType("VirtualMachine"); @@ -127,19 +129,21 @@ public class VirtualMachineMO extends BaseMO { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { vmDatastoreTraversal }); + oSpec.getSelectSet().add(vmDatastoreTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new 
PropertyFilterSpec[] { pfSpec }); if(ocs != null) { for(ObjectContent oc : ocs) { - DynamicProperty prop = oc.getPropSet(0); + DynamicProperty prop = oc.getPropSet().get(0); if(prop.getVal().toString().equals(dsName)) { return new Pair(new DatastoreMO(_context, oc.getObj()), dsName); } @@ -155,11 +159,11 @@ public class VirtualMachineMO extends BaseMO { } public String getVmName() throws Exception { - return (String)getContext().getServiceUtil().getDynamicProperty(_mor, "name"); + return (String)getContext().getVimClient().getDynamicProperty(_mor, "name"); } public GuestInfo getVmGuestInfo() throws Exception { - return (GuestInfo)getContext().getServiceUtil().getDynamicProperty(_mor, "guest"); + return (GuestInfo)getContext().getVimClient().getDynamicProperty(_mor, "guest"); } public boolean isVMwareToolsRunning() throws Exception { @@ -172,13 +176,13 @@ public class VirtualMachineMO extends BaseMO { } public boolean powerOn() throws Exception { - if(getPowerState() == VirtualMachinePowerState.poweredOn) + if(getPowerState() == VirtualMachinePowerState.POWERED_ON) return true; - ManagedObjectReference morTask = _context.getService().powerOnVM_Task(_mor, null); + ManagedObjectReference morTask = _context.getService().powerOnVMTask(_mor, null); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -189,7 +193,7 @@ public class VirtualMachineMO extends BaseMO { } public boolean powerOff() throws Exception { - if(getPowerState() == VirtualMachinePowerState.poweredOff) + if(getPowerState() == VirtualMachinePowerState.POWERED_OFF) return true; return powerOffNoCheck(); @@ -197,7 +201,7 @@ public class VirtualMachineMO extends BaseMO { public boolean safePowerOff(int shutdownWaitMs) throws Exception { - if(getPowerState() == VirtualMachinePowerState.poweredOff) + if(getPowerState() 
== VirtualMachinePowerState.POWERED_OFF) return true; if(isVMwareToolsRunning()) { @@ -208,14 +212,14 @@ public class VirtualMachineMO extends BaseMO { shutdown(); long startTick = System.currentTimeMillis(); - while(getPowerState() != VirtualMachinePowerState.poweredOff && System.currentTimeMillis() - startTick < shutdownWaitMs) { + while(getPowerState() != VirtualMachinePowerState.POWERED_OFF && System.currentTimeMillis() - startTick < shutdownWaitMs) { try { Thread.sleep(1000); } catch(InterruptedException e) { } } - if(getPowerState() != VirtualMachinePowerState.poweredOff) { + if(getPowerState() != VirtualMachinePowerState.POWERED_OFF) { s_logger.info("can not gracefully shutdown VM within " + (shutdownWaitMs/1000) + " seconds, we will perform force power off on VM " + vmName); return powerOffNoCheck(); } @@ -230,17 +234,17 @@ public class VirtualMachineMO extends BaseMO { } private boolean powerOffNoCheck() throws Exception { - ManagedObjectReference morTask = _context.getService().powerOffVM_Task(_mor); + ManagedObjectReference morTask = _context.getService().powerOffVMTask(_mor); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); // It seems that even if a power-off task is returned done, VM state may still not be marked, // wait up to 5 seconds to make sure to avoid race conditioning for immediate following on operations // that relies on a powered-off VM long startTick = System.currentTimeMillis(); - while(getPowerState() != VirtualMachinePowerState.poweredOff && System.currentTimeMillis() - startTick < 5000) { + while(getPowerState() != VirtualMachinePowerState.POWERED_OFF && System.currentTimeMillis() - startTick < 5000) { try { Thread.sleep(1000); } catch(InterruptedException e) { @@ -248,7 +252,7 @@ public class VirtualMachineMO extends BaseMO { } return true; } else { - 
if(getPowerState() == VirtualMachinePowerState.poweredOff) { + if(getPowerState() == VirtualMachinePowerState.POWERED_OFF) { // to help deal with possible race-condition s_logger.info("Current power-off task failed. However, VM has been switched to the state we are expecting for"); return true; @@ -262,7 +266,7 @@ public class VirtualMachineMO extends BaseMO { public VirtualMachinePowerState getPowerState() throws Exception { - VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; + VirtualMachinePowerState powerState = VirtualMachinePowerState.POWERED_OFF; // This is really ugly, there is a case that when windows guest VM is doing sysprep, the temporary // rebooting process may let us pick up a "poweredOff" state during VMsync process, this can trigger @@ -272,8 +276,8 @@ public class VirtualMachineMO extends BaseMO { // In the future, VMsync should not kick off CloudStack action (this is not a HA case) based on VM // state report, until then we can remove this hacking fix for(int i = 0; i < 3; i++) { - powerState = (VirtualMachinePowerState)getContext().getServiceUtil().getDynamicProperty(_mor, "runtime.powerState"); - if(powerState == VirtualMachinePowerState.poweredOff) { + powerState = (VirtualMachinePowerState)getContext().getVimClient().getDynamicProperty(_mor, "runtime.powerState"); + if(powerState == VirtualMachinePowerState.POWERED_OFF) { try { Thread.sleep(1000); } catch(InterruptedException e) { @@ -287,10 +291,10 @@ public class VirtualMachineMO extends BaseMO { } public boolean reset() throws Exception { - ManagedObjectReference morTask = _context.getService().resetVM_Task(_mor); + ManagedObjectReference morTask = _context.getService().resetVMTask(_mor); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -317,11 +321,11 @@ public class 
VirtualMachineMO extends BaseMO { } public boolean migrate(ManagedObjectReference morRp, ManagedObjectReference morTargetHost) throws Exception { - ManagedObjectReference morTask = _context.getService().migrateVM_Task(_mor, - morRp, morTargetHost, VirtualMachineMovePriority.defaultPriority, null); + ManagedObjectReference morTask = _context.getService().migrateVMTask(_mor, + morRp, morTargetHost, VirtualMachineMovePriority.DEFAULT_PRIORITY, null); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -335,11 +339,11 @@ public class VirtualMachineMO extends BaseMO { VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); relocateSpec.setHost(morTargetHost); - ManagedObjectReference morTask = _context.getService().relocateVM_Task(_mor, + ManagedObjectReference morTask = _context.getService().relocateVMTask(_mor, relocateSpec, null); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -350,17 +354,17 @@ public class VirtualMachineMO extends BaseMO { } public VirtualMachineSnapshotInfo getSnapshotInfo() throws Exception { - return (VirtualMachineSnapshotInfo)_context.getServiceUtil().getDynamicProperty(_mor, "snapshot"); + return (VirtualMachineSnapshotInfo)_context.getVimClient().getDynamicProperty(_mor, "snapshot"); } public boolean createSnapshot(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception { - ManagedObjectReference morTask = _context.getService().createSnapshot_Task(_mor, snapshotName, + ManagedObjectReference morTask = _context.getService().createSnapshotTask(_mor, snapshotName, snapshotDescription, 
dumpMemory, quiesce); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); ManagedObjectReference morSnapshot = null; @@ -393,9 +397,9 @@ public class VirtualMachineMO extends BaseMO { return false; } - ManagedObjectReference morTask = _context.getService().removeSnapshot_Task(morSnapshot, removeChildren); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().removeSnapshotTask(morSnapshot, removeChildren, true); + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -409,11 +413,11 @@ public class VirtualMachineMO extends BaseMO { VirtualMachineSnapshotInfo snapshotInfo = getSnapshotInfo(); if(snapshotInfo != null && snapshotInfo.getRootSnapshotList() != null) { - VirtualMachineSnapshotTree[] tree = snapshotInfo.getRootSnapshotList(); + List tree = snapshotInfo.getRootSnapshotList(); for(VirtualMachineSnapshotTree treeNode : tree) { - ManagedObjectReference morTask = _context.getService().removeSnapshot_Task(treeNode.getSnapshot(), true); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + ManagedObjectReference morTask = _context.getService().removeSnapshotTask(treeNode.getSnapshot(), true, true); + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); } else { s_logger.error("VMware removeSnapshot_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -474,29 +478,31 @@ public class VirtualMachineMO extends BaseMO { public String getSnapshotDescriptorDatastorePath() throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("VirtualMachine"); - 
pSpec.setPathSet(new String[] { "name", "config.files" }); + pSpec.getPathSet().add("name"); + pSpec.getPathSet().add("config.files"); ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.FALSE); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); assert(ocs != null); String vmName = null; VirtualMachineFileInfo fileInfo = null; - assert(ocs.length == 1); + assert(ocs.size() == 1); for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { - assert(props.length == 2); + assert(props.size() == 2); for(DynamicProperty prop : props) { if(prop.getName().equals("name")) { @@ -535,10 +541,10 @@ public class VirtualMachineMO extends BaseMO { relocSpec.setDatastore(morDs); relocSpec.setPool(morResourcePool); - ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); + ManagedObjectReference morTask = _context.getService().cloneVMTask(_mor, morFolder, cloneName, cloneSpec); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -560,18 +566,19 @@ public class VirtualMachineMO extends BaseMO { VirtualDisk[] independentDisks = getAllIndependentDiskDevice(); VirtualMachineRelocateSpec rSpec = new VirtualMachineRelocateSpec(); if(independentDisks.length > 0) { 
- VirtualMachineRelocateSpecDiskLocator[] diskLocator = new VirtualMachineRelocateSpecDiskLocator[independentDisks.length]; - for(int i = 0; i < diskLocator.length; i++) { - diskLocator[i] = new VirtualMachineRelocateSpecDiskLocator(); - diskLocator[i].setDatastore(morDs); - diskLocator[i].setDiskId(independentDisks[i].getKey()); - diskLocator[i].setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._moveAllDiskBackingsAndDisallowSharing); + List diskLocator = new ArrayList(independentDisks.length); + for(int i = 0; i < independentDisks.length; i++) { + VirtualMachineRelocateSpecDiskLocator loc = new VirtualMachineRelocateSpecDiskLocator(); + loc.setDatastore(morDs); + loc.setDiskId(independentDisks[i].getKey()); + loc.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.MOVE_ALL_DISK_BACKINGS_AND_DISALLOW_SHARING.toString()); + diskLocator.add(loc); } - rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._createNewChildDiskBacking); - rSpec.setDisk(diskLocator); + rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.toString()); + rSpec.getDisk().addAll(diskLocator); } else { - rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions._createNewChildDiskBacking); + rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.toString()); } rSpec.setPool(morResourcePool); @@ -581,10 +588,10 @@ public class VirtualMachineMO extends BaseMO { cloneSpec.setLocation(rSpec); cloneSpec.setSnapshot(morBaseSnapshot); - ManagedObjectReference morTask = _context.getService().cloneVM_Task(_mor, morFolder, cloneName, cloneSpec); + ManagedObjectReference morTask = _context.getService().cloneVMTask(_mor, morFolder, cloneName, cloneSpec); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -595,34 +602,34 @@ public 
class VirtualMachineMO extends BaseMO { } public VirtualMachineRuntimeInfo getRuntimeInfo() throws Exception { - return (VirtualMachineRuntimeInfo)_context.getServiceUtil().getDynamicProperty( + return (VirtualMachineRuntimeInfo)_context.getVimClient().getDynamicProperty( _mor, "runtime"); } public VirtualMachineConfigInfo getConfigInfo() throws Exception { - return (VirtualMachineConfigInfo)_context.getServiceUtil().getDynamicProperty( + return (VirtualMachineConfigInfo)_context.getVimClient().getDynamicProperty( _mor, "config"); } public VirtualMachineConfigSummary getConfigSummary() throws Exception { - return (VirtualMachineConfigSummary)_context.getServiceUtil().getDynamicProperty( + return (VirtualMachineConfigSummary)_context.getVimClient().getDynamicProperty( _mor, "summary.config"); } public VirtualMachineFileInfo getFileInfo() throws Exception { - return (VirtualMachineFileInfo)_context.getServiceUtil().getDynamicProperty( + return (VirtualMachineFileInfo)_context.getVimClient().getDynamicProperty( _mor, "config.files"); } public ManagedObjectReference getParentMor() throws Exception { - return (ManagedObjectReference)_context.getServiceUtil().getDynamicProperty( + return (ManagedObjectReference)_context.getVimClient().getDynamicProperty( _mor, "parent"); } public String[] getNetworks() throws Exception { PropertySpec pSpec = new PropertySpec(); pSpec.setType("Network"); - pSpec.setPathSet(new String[] {"name"}); + pSpec.getPathSet().add("name"); TraversalSpec vm2NetworkTraversal = new TraversalSpec(); vm2NetworkTraversal.setType("VirtualMachine"); @@ -632,20 +639,21 @@ public class VirtualMachineMO extends BaseMO { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { vm2NetworkTraversal }); + oSpec.getSelectSet().add(vm2NetworkTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] 
{ oSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); List networks = new ArrayList(); - if(ocs != null && ocs.length > 0) { + if(ocs != null && ocs.size() > 0) { for(ObjectContent oc : ocs) { - networks.add(oc.getPropSet(0).getVal().toString()); + networks.add(oc.getPropSet().get(0).getVal().toString()); } } return networks.toArray(new String[0]); @@ -663,7 +671,9 @@ public class VirtualMachineMO extends BaseMO { PropertySpec pSpec = new PropertySpec(); pSpec.setType("Network"); - pSpec.setPathSet(new String[] {"name", "vm", String.format("value[%d]", gcTagKey)}); + pSpec.getPathSet().add("name"); + pSpec.getPathSet().add("vm"); + pSpec.getPathSet().add(String.format("value[%d]", gcTagKey)); TraversalSpec vm2NetworkTraversal = new TraversalSpec(); vm2NetworkTraversal.setType("VirtualMachine"); @@ -673,17 +683,19 @@ public class VirtualMachineMO extends BaseMO { ObjectSpec oSpec = new ObjectSpec(); oSpec.setObj(_mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { vm2NetworkTraversal }); + oSpec.getSelectSet().add(vm2NetworkTraversal); PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); - ObjectContent[] ocs = _context.getService().retrieveProperties( - _context.getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); + List ocs = _context.getService().retrieveProperties( + _context.getPropertyCollector(), pfSpecArr); - if(ocs != null && 
ocs.length > 0) { + + if(ocs != null && ocs.size() > 0) { for(ObjectContent oc : ocs) { ArrayOfManagedObjectReference morVms = null; String gcTagValue = null; @@ -702,7 +714,7 @@ public class VirtualMachineMO extends BaseMO { } NetworkDetails details = new NetworkDetails(name, oc.getObj(), - (morVms != null ? morVms.getManagedObjectReference() : null), + (morVms != null ? morVms.getManagedObjectReference().toArray(new ManagedObjectReference[morVms.getManagedObjectReference().size()]) : null), gcTagValue); networks.add(details); @@ -755,11 +767,11 @@ public class VirtualMachineMO extends BaseMO { public boolean setVncConfigInfo(boolean enableVnc, String vncPassword, int vncPort, String keyboard) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); OptionValue[] vncOptions = VmwareHelper.composeVncOptions(null, enableVnc, vncPassword, vncPort, keyboard); - vmConfigSpec.setExtraConfig(vncOptions); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, vmConfigSpec); + vmConfigSpec.getExtraConfig().addAll(Arrays.asList(vncOptions)); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, vmConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -769,10 +781,10 @@ public class VirtualMachineMO extends BaseMO { } public boolean configureVm(VirtualMachineConfigSpec vmConfigSpec) throws Exception { - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, vmConfigSpec); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, vmConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { 
_context.waitForTaskProgressDone(morTask); return true; } else { @@ -796,12 +808,12 @@ public class VirtualMachineMO extends BaseMO { deviceConfigSpec.setFileOperation(deviceTernary.third()); deviceConfigSpecArray[i++] = deviceConfigSpec; } - configSpec.setDeviceChange(deviceConfigSpecArray); + configSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, configSpec); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, configSpec); - String result = _context.getServiceUtil().waitForTask(morTask); - if(result.equals("sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if(result) { _context.waitForTaskProgressDone(morTask); return true; } else { @@ -815,7 +827,7 @@ public class VirtualMachineMO extends BaseMO { VmwareHypervisorHostNetworkSummary summary = hostMo.getHyperHostNetworkSummary(hostNetworkName); VirtualMachineConfigInfo configInfo = getConfigInfo(); - OptionValue[] values = configInfo.getExtraConfig(); + List values = configInfo.getExtraConfig(); if(values != null) { for(OptionValue option : values) { @@ -832,7 +844,7 @@ public class VirtualMachineMO extends BaseMO { // vmdkDatastorePath: [datastore name] vmdkFilePath public void createDisk(String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, int controllerKey) throws Exception { - createDisk(vmdkDatastorePath, VirtualDiskType.thin, VirtualDiskMode.persistent, null, sizeInMb, morDs, controllerKey); + createDisk(vmdkDatastorePath, VirtualDiskType.THIN, VirtualDiskMode.PERSISTENT, null, sizeInMb, morDs, controllerKey); } // vmdkDatastorePath: [datastore name] vmdkFilePath @@ -840,9 +852,9 @@ public class VirtualMachineMO extends BaseMO { String rdmDeviceName, int sizeInMb, ManagedObjectReference morDs, int controllerKey) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - createDisk(). 
target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + vmdkDatastorePath + s_logger.trace("vCenter API trace - createDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + vmdkDatastorePath + ", sizeInMb: " + sizeInMb + ", diskType: " + diskType + ", diskMode: " + diskMode + ", rdmDeviceName: " + rdmDeviceName - + ", datastore: " + morDs.get_value() + ", controllerKey: " + controllerKey); + + ", datastore: " + morDs.getValue() + ", controllerKey: " + controllerKey); assert(vmdkDatastorePath != null); assert(morDs != null); @@ -852,18 +864,18 @@ public class VirtualMachineMO extends BaseMO { } VirtualDisk newDisk = new VirtualDisk(); - if(diskType == VirtualDiskType.thin || diskType == VirtualDiskType.preallocated - || diskType == VirtualDiskType.eagerZeroedThick) { + if(diskType == VirtualDiskType.THIN || diskType == VirtualDiskType.PREALLOCATED + || diskType == VirtualDiskType.EAGER_ZEROED_THICK) { VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); - backingInfo.setDiskMode(diskMode.persistent.toString()); - if(diskType == VirtualDiskType.thin) { + backingInfo.setDiskMode(diskMode.PERSISTENT.toString()); + if(diskType == VirtualDiskType.THIN) { backingInfo.setThinProvisioned(true); } else { backingInfo.setThinProvisioned(false); } - if(diskType == VirtualDiskType.eagerZeroedThick) { + if(diskType == VirtualDiskType.EAGER_ZEROED_THICK) { backingInfo.setEagerlyScrub(true); } else { backingInfo.setEagerlyScrub(false); @@ -872,17 +884,17 @@ public class VirtualMachineMO extends BaseMO { backingInfo.setDatastore(morDs); backingInfo.setFileName(vmdkDatastorePath); newDisk.setBacking(backingInfo); - } else if(diskType == VirtualDiskType.rdm || diskType == VirtualDiskType.rdmp) { + } else if(diskType == VirtualDiskType.RDM || diskType == VirtualDiskType.RDMP) { VirtualDiskRawDiskMappingVer1BackingInfo backingInfo = new VirtualDiskRawDiskMappingVer1BackingInfo(); - if(diskType == VirtualDiskType.rdm) { + if(diskType == 
VirtualDiskType.RDM) { backingInfo.setCompatibilityMode("virtualMode"); } else { backingInfo.setCompatibilityMode("physicalMode"); } backingInfo.setDeviceName(rdmDeviceName); - if(diskType == VirtualDiskType.rdm) { - backingInfo.setDiskMode(diskMode.persistent.toString()); + if(diskType == VirtualDiskType.RDM) { + backingInfo.setDiskMode(diskMode.PERSISTENT.toString()); } backingInfo.setDatastore(morDs); @@ -898,20 +910,20 @@ public class VirtualMachineMO extends BaseMO { newDisk.setCapacityInKB(sizeInMb*1024); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(newDisk); - deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.create); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.CREATE); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - createDisk() done(failed)"); throw new Exception("Unable to create disk " + vmdkDatastorePath + " due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -926,25 +938,25 @@ 
public class VirtualMachineMO extends BaseMO { public void attachDisk(String[] vmdkDatastorePathChain, ManagedObjectReference morDs) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " - + new Gson().toJson(vmdkDatastorePathChain) + ", datastore: " + morDs.get_value()); + s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + + new Gson().toJson(vmdkDatastorePathChain) + ", datastore: " + morDs.getValue()); VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, getScsiDeviceControllerKey(), vmdkDatastorePathChain, morDs, -1, 1); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(newDisk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - attachDisk() done(failed)"); throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -959,25 +971,25 @@ public 
class VirtualMachineMO extends BaseMO { public void attachDisk(Pair[] vmdkDatastorePathChain, int controllerKey) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - attachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + new Gson().toJson(vmdkDatastorePathChain)); VirtualDevice newDisk = VmwareHelper.prepareDiskDevice(this, controllerKey, vmdkDatastorePathChain, -1, 1); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(newDisk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - attachDisk() done(failed)"); throw new Exception("Failed to attach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -993,7 +1005,7 @@ public class VirtualMachineMO extends BaseMO { public List> detachDisk(String vmdkDatastorePath, boolean deleteBackingFile) throws Exception { if(s_logger.isTraceEnabled()) - 
s_logger.trace("vCenter API trace - detachDisk(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - detachDisk(). target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + vmdkDatastorePath + ", deleteBacking: " + deleteBackingFile); // Note: if VM has been taken snapshot, original backing file will be renamed, therefore, when we try to find the matching @@ -1008,22 +1020,22 @@ public class VirtualMachineMO extends BaseMO { List> chain = getDiskDatastorePathChain(deviceInfo.first(), true); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(deviceInfo.first()); if(deleteBackingFile) { - deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.destroy); + deviceConfigSpec.setFileOperation(VirtualDeviceConfigSpecFileOperation.DESTROY); } - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.remove); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.REMOVE); - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachDisk() done (failed)"); @@ -1059,7 +1071,7 @@ public class VirtualMachineMO extends BaseMO { public void 
detachAllDisks() throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachAllDisk(). target MOR: " + _mor.get_value()); + s_logger.trace("vCenter API trace - detachAllDisk(). target MOR: " + _mor.getValue()); VirtualDisk[] disks = getAllDiskDevice(); if(disks.length > 0) { @@ -1069,14 +1081,14 @@ public class VirtualMachineMO extends BaseMO { for(int i = 0; i < disks.length; i++) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(disks[i]); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.remove); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.REMOVE); } - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + reConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachAllDisk() done(failed)"); throw new Exception("Failed to detach disk due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -1094,8 +1106,8 @@ public class VirtualMachineMO extends BaseMO { boolean connect, boolean connectAtBoot) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.get_value() + ", isoDatastorePath: " - + isoDatastorePath + ", datastore: " + morDs.get_value() + ", connect: " + connect + ", connectAtBoot: " + connectAtBoot); + s_logger.trace("vCenter API trace - detachIso(). 
target MOR: " + _mor.getValue() + ", isoDatastorePath: " + + isoDatastorePath + ", datastore: " + morDs.getValue() + ", connect: " + connect + ", connectAtBoot: " + connectAtBoot); assert(isoDatastorePath != null); assert(morDs != null); @@ -1123,23 +1135,23 @@ public class VirtualMachineMO extends BaseMO { cdRom.setBacking(backingInfo); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(cdRom); if(newCdRom) { - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); } - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(failed)"); throw new Exception("Failed to attach ISO due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -1153,7 +1165,7 @@ public class VirtualMachineMO extends BaseMO { public void detachIso(String isoDatastorePath) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - detachIso(). 
target MOR: " + _mor.get_value() + ", isoDatastorePath: " + s_logger.trace("vCenter API trace - detachIso(). target MOR: " + _mor.getValue() + ", isoDatastorePath: " + isoDatastorePath); VirtualDevice device = getIsoDevice(); @@ -1168,19 +1180,19 @@ public class VirtualMachineMO extends BaseMO { device.setBacking(backingInfo); VirtualMachineConfigSpec reConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(device); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); - deviceConfigSpecArray[0] = deviceConfigSpec; - reConfigSpec.setDeviceChange(deviceConfigSpecArray); + //deviceConfigSpecArray[0] = deviceConfigSpec; + reConfigSpec.getDeviceChange().add(deviceConfigSpec); - ManagedObjectReference morTask = _context.getService().reconfigVM_Task(_mor, reConfigSpec); - String result = _context.getServiceUtil().waitForTask(morTask); + ManagedObjectReference morTask = _context.getService().reconfigVMTask(_mor, reConfigSpec); + boolean result = _context.getVimClient().waitForTask(morTask); - if(!result.equals("sucess")) { + if(!result) { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - detachIso() done(failed)"); throw new Exception("Failed to detachIso due to " + TaskMO.getTaskFailureInfo(_context, morTask)); @@ -1194,7 +1206,7 @@ public class VirtualMachineMO extends BaseMO { public Pair getVmdkFileInfo(String vmdkDatastorePath) throws Exception { if(s_logger.isTraceEnabled()) - s_logger.trace("vCenter API trace - getVmdkFileInfo(). target MOR: " + _mor.get_value() + ", vmdkDatastorePath: " + s_logger.trace("vCenter API trace - getVmdkFileInfo(). 
target MOR: " + _mor.getValue() + ", vmdkDatastorePath: " + vmdkDatastorePath); Pair dcPair = getOwnerDatacenter(); @@ -1222,7 +1234,7 @@ public class VirtualMachineMO extends BaseMO { DatacenterMO dcMo = new DatacenterMO(_context, hostMo.getHyperHostDatacenter()); - if(runtimeInfo.getPowerState() != VirtualMachinePowerState.poweredOff) { + if(runtimeInfo.getPowerState() != VirtualMachinePowerState.POWERED_OFF) { String msg = "Unable to export VM because it is not at powerdOff state. vmName: " + vmName + ", host: " + hostName; s_logger.error(msg); throw new Exception(msg); @@ -1235,10 +1247,10 @@ public class VirtualMachineMO extends BaseMO { } HttpNfcLeaseMO leaseMo = new HttpNfcLeaseMO(_context, morLease); - HttpNfcLeaseState state = leaseMo.waitState(new HttpNfcLeaseState[] { HttpNfcLeaseState.ready, HttpNfcLeaseState.error }); + HttpNfcLeaseState state = leaseMo.waitState(new HttpNfcLeaseState[] { HttpNfcLeaseState.READY, HttpNfcLeaseState.ERROR }); try { - if(state == HttpNfcLeaseState.ready) { + if(state == HttpNfcLeaseState.READY) { final HttpNfcLeaseMO.ProgressReporter progressReporter = leaseMo.createProgressReporter(); boolean success = false; @@ -1248,12 +1260,12 @@ public class VirtualMachineMO extends BaseMO { final long totalBytes = leaseInfo.getTotalDiskCapacityInKB() * 1024; long totalBytesDownloaded = 0; - HttpNfcLeaseDeviceUrl[] deviceUrls = leaseInfo.getDeviceUrl(); + List deviceUrls = leaseInfo.getDeviceUrl(); if(deviceUrls != null) { - OvfFile[] ovfFiles = new OvfFile[deviceUrls.length]; - for (int i = 0; i < deviceUrls.length; i++) { - String deviceId = deviceUrls[i].getKey(); - String deviceUrlStr = deviceUrls[i].getUrl(); + OvfFile[] ovfFiles = new OvfFile[deviceUrls.size()]; + for (int i = 0; i < deviceUrls.size(); i++) { + String deviceId = deviceUrls.get(i).getKey(); + String deviceUrlStr = deviceUrls.get(i).getUrl(); String orgDiskFileName = deviceUrlStr.substring(deviceUrlStr.lastIndexOf("/") + 1); String diskFileName = 
String.format("%s-disk%d%s", exportName, i, VmwareHelper.getFileExtension(orgDiskFileName, ".vmdk")); String diskUrlStr = deviceUrlStr.replace("*", hostName); @@ -1285,7 +1297,7 @@ public class VirtualMachineMO extends BaseMO { // write OVF descriptor file OvfCreateDescriptorParams ovfDescParams = new OvfCreateDescriptorParams(); - ovfDescParams.setOvfFiles(ovfFiles); + ovfDescParams.getOvfFiles().addAll(Arrays.asList(ovfFiles)); OvfCreateDescriptorResult ovfCreateDescriptorResult = _context.getService().createDescriptor(morOvf, getMor(), ovfDescParams); String ovfPath = exportDir + File.separator + exportName + ".ovf"; fileNames.add(ovfPath); @@ -1513,14 +1525,14 @@ public class VirtualMachineMO extends BaseMO { boolean bSuccess = false; try { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); VirtualDevice device = VmwareHelper.prepareDiskDevice(clonedVmMo, -1, disks, morDs, -1, 1); - deviceConfigSpecArray[0].setDevice(device); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + deviceConfigSpec.setDevice(device); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); clonedVmMo.configureVm(vmConfigSpec); bSuccess = true; } finally { @@ -1533,12 +1545,12 @@ public class VirtualMachineMO extends BaseMO { public void plugDevice(VirtualDevice device) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - 
deviceConfigSpecArray[0].setDevice(device); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(device); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to add devices"); } @@ -1546,12 +1558,12 @@ public class VirtualMachineMO extends BaseMO { public void tearDownDevice(VirtualDevice device) throws Exception { VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(device); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.remove); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(device); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.REMOVE); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to detach devices"); } @@ -1566,10 +1578,10 @@ public class VirtualMachineMO extends BaseMO { for(int i = 0; i < devices.length; i++) { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(devices[i]); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.remove); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.REMOVE); } - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + 
vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); if(!configureVm(vmConfigSpec)) { throw new Exception("Failed to detach devices"); } @@ -1651,7 +1663,7 @@ public class VirtualMachineMO extends BaseMO { } public int getScsiDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { @@ -1667,7 +1679,7 @@ public class VirtualMachineMO extends BaseMO { } public int getScsiDeviceControllerKeyNoException() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { @@ -1688,14 +1700,14 @@ public class VirtualMachineMO extends BaseMO { // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); + vmConfig.getDeviceChange().add(scsiControllerSpec); if(configureVm(vmConfig)) { throw new Exception("Unable to add Scsi controller"); } @@ -1704,7 +1716,7 @@ public class VirtualMachineMO extends BaseMO { // return pair of VirtualDisk and disk device bus name(ide0:0, etc) public Pair getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception { - VirtualDevice[] 
devices = (VirtualDevice[])_context.getServiceUtil().getDynamicProperty(_mor, "config.hardware.device"); + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath); DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath); @@ -1842,7 +1854,7 @@ public class VirtualMachineMO extends BaseMO { public VirtualDisk[] getAllDiskDevice() throws Exception { List deviceList = new ArrayList(); - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil().getDynamicProperty(_mor, "config.hardware.device"); + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualDisk) { @@ -1882,7 +1894,7 @@ public class VirtualMachineMO extends BaseMO { } public int tryGetIDEDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { @@ -1897,7 +1909,7 @@ public class VirtualMachineMO extends BaseMO { } public int getIDEDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { @@ -1918,7 +1930,7 @@ public class VirtualMachineMO extends BaseMO { } public VirtualDevice getIsoDevice() throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { for(VirtualDevice device : devices) { @@ -1931,7 +1943,7 @@ public class VirtualMachineMO extends BaseMO { } public int getPCIDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null && devices.length > 0) { @@ -1952,7 +1964,7 @@ public class VirtualMachineMO extends BaseMO { } public int getNextDeviceNumber(int controllerKey) throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); int deviceNumber = -1; @@ -1969,7 +1981,7 @@ public class VirtualMachineMO extends BaseMO { } public VirtualDevice[] getNicDevices() throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); List nics = new ArrayList(); @@ -1985,7 +1997,7 @@ public class VirtualMachineMO extends BaseMO { } public Pair getNicDeviceIndex(String networkNamePrefix) throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); List nics = new ArrayList(); @@ -2035,9 +2047,9 @@ public class VirtualMachineMO extends BaseMO { DistributedVirtualSwitchPortConnection dvsPort = (DistributedVirtualSwitchPortConnection) dvpBackingInfo.getPort(); String dvPortGroupKey = dvsPort.getPortgroupKey(); ManagedObjectReference dvPortGroupMor = new ManagedObjectReference(); - dvPortGroupMor.set_value(dvPortGroupKey); + dvPortGroupMor.setValue(dvPortGroupKey); dvPortGroupMor.setType("DistributedVirtualPortgroup"); - return (String) _context.getServiceUtil().getDynamicProperty(dvPortGroupMor, "name"); + return (String) _context.getVimClient().getDynamicProperty(dvPortGroupMor, "name"); } public VirtualDevice[] getMatchedDevices(Class[] deviceClasses) throws Exception { @@ -2045,7 +2057,7 @@ public class VirtualMachineMO extends BaseMO { List returnList = new ArrayList(); - VirtualDevice[] devices = (VirtualDevice[])_context.getServiceUtil(). + VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); if(devices != null) { @@ -2086,14 +2098,14 @@ public class VirtualMachineMO extends BaseMO { _context.getService().unregisterVM(_mor); - ManagedObjectReference morTask = _context.getService().registerVM_Task( + ManagedObjectReference morTask = _context.getService().registerVMTask( morFolder, vmFileInfo.getVmPathName(), vmName, false, morPool, hostMo.getMor()); - String result = _context.getServiceUtil().waitForTask(morTask); - if (!result.equalsIgnoreCase("Sucess")) { + boolean result = _context.getVimClient().waitForTask(morTask); + if (!result) { throw new Exception("Unable to register template due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } else { _context.waitForTaskProgressDone(morTask); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java new file mode 100644 index 00000000000..3fbe5c8c34d --- /dev/null +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -0,0 +1,509 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.hypervisor.vmware.util; + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLSession; +import javax.net.ssl.HttpsURLConnection; +import javax.xml.ws.BindingProvider; + +import com.vmware.vim25.DynamicProperty; +import com.vmware.vim25.InvalidCollectorVersionFaultMsg; +import com.vmware.vim25.InvalidPropertyFaultMsg; +import com.vmware.vim25.LocalizedMethodFault; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.ObjectSpec; +import com.vmware.vim25.ObjectUpdate; +import com.vmware.vim25.ObjectUpdateKind; +import com.vmware.vim25.PropertyChange; +import com.vmware.vim25.PropertyChangeOp; +import com.vmware.vim25.PropertyFilterSpec; +import com.vmware.vim25.PropertyFilterUpdate; +import com.vmware.vim25.PropertySpec; +import com.vmware.vim25.RuntimeFaultFaultMsg; +import com.vmware.vim25.SelectionSpec; +import com.vmware.vim25.ServiceContent; +import com.vmware.vim25.TaskInfoState; +import com.vmware.vim25.TraversalSpec; +import com.vmware.vim25.UpdateSet; +import com.vmware.vim25.VimPortType; +import com.vmware.vim25.VimService; +import com.vmware.vim25.ObjectContent; + +/** + * A wrapper class to handle Vmware vsphere connection and disconnection. + * + * @author minc + * + */ +public class VmwareClient { + + private ManagedObjectReference SVC_INST_REF = new ManagedObjectReference(); + private ManagedObjectReference propCollectorRef; + private ManagedObjectReference rootRef; + private VimService vimService; + private VimPortType vimPort; + private ServiceContent serviceContent; + private final String SVC_INST_NAME = "ServiceInstance"; + + private boolean isConnected = false; + + public VmwareClient(String name) { + + } + + /** + * Establishes session with the virtual center server. 
+ * + * @throws Exception + * the exception + */ + public void connect(String url, String userName, String password) throws Exception { + + HostnameVerifier hv = new HostnameVerifier() { + @Override + public boolean verify(String urlHostName, SSLSession session) { + return true; + } + }; + HttpsURLConnection.setDefaultHostnameVerifier(hv); + + SVC_INST_REF.setType(SVC_INST_NAME); + SVC_INST_REF.setValue(SVC_INST_NAME); + + vimService = new VimService(); + vimPort = vimService.getVimPort(); + Map ctxt = ((BindingProvider) vimPort).getRequestContext(); + + ctxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, url); + ctxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true); + + serviceContent = vimPort.retrieveServiceContent(SVC_INST_REF); + vimPort.login(serviceContent.getSessionManager(), userName, password, null); + isConnected = true; + + propCollectorRef = serviceContent.getPropertyCollector(); + rootRef = serviceContent.getRootFolder(); + } + + /** + * Disconnects the user session. + * + * @throws Exception + */ + public void disconnect() throws Exception { + if (isConnected) { + vimPort.logout(serviceContent.getSessionManager()); + } + isConnected = false; + } + + /** + * @return Service instance + */ + public VimPortType getService() { + return vimPort; + } + + /** + * @return Service instance content + */ + public ServiceContent getServiceContent() { + return serviceContent; + } + + /** + * @return Service property collector + */ + public ManagedObjectReference getPropCol() { + return propCollectorRef; + } + + /** + * @return Root folder + */ + public ManagedObjectReference getRootFolder() { + return rootRef; + } + + /** + * Get the property value of a managed object. + * + * @param mor + * managed object reference + * @param propertyName + * property name. + * @return property value. + * @throws Exception + * in case of error. 
+ */ + public Object getDynamicProperty(ManagedObjectReference mor, String propertyName) throws Exception { + List props = new ArrayList(); + props.add(propertyName); + List objContent = this.getObjectProperties(mor, props); + + Object propertyValue = null; + if (objContent != null && objContent.size() > 0) { + List dynamicProperty = objContent.get(0).getPropSet(); + if (dynamicProperty != null && dynamicProperty.size() > 0) { + DynamicProperty dp = dynamicProperty.get(0); + propertyValue = dp.getVal(); + /* + * If object is ArrayOfXXX object, then get the XXX[] by + * invoking getXXX() on the object. + * For Ex: + * ArrayOfManagedObjectReference.getManagedObjectReference() + * returns ManagedObjectReference[] array. + */ + Class dpCls = propertyValue.getClass(); + String dynamicPropertyName = dpCls.getName(); + if (dynamicPropertyName.startsWith("ArrayOf")) { + String methodName = "get" + + dynamicPropertyName + .substring(dynamicPropertyName.indexOf("ArrayOf") + "ArrayOf".length(), dynamicPropertyName.length()); + + Method getMorMethod = dpCls.getDeclaredMethod(methodName, null); + propertyValue = getMorMethod.invoke(propertyValue, (Object[]) null); + } + } + } + return propertyValue; + } + + private List getObjectProperties(ManagedObjectReference mObj, List props) throws Exception { + PropertySpec pSpec = new PropertySpec(); + pSpec.setAll(false); + pSpec.setType(mObj.getType()); + pSpec.getPathSet().addAll(props); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(mObj); + oSpec.setSkip(false); + PropertyFilterSpec spec = new PropertyFilterSpec(); + spec.getPropSet().add(pSpec); + spec.getObjectSet().add(oSpec); + List specArr = new ArrayList(); + specArr.add(spec); + + return vimPort.retrieveProperties(propCollectorRef, specArr); + } + + /** + * This method returns a boolean value specifying whether the Task is + * succeeded or failed. + * + * @param task + * ManagedObjectReference representing the Task. 
+ * + * @return boolean value representing the Task result. + * @throws InvalidCollectorVersionFaultMsg + * @throws RuntimeFaultFaultMsg + * @throws InvalidPropertyFaultMsg + */ + public boolean waitForTask(ManagedObjectReference task) throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg, InvalidCollectorVersionFaultMsg { + + boolean retVal = false; + + // info has a property - state for state of the task + Object[] result = waitForValues(task, new String[] { "info.state", "info.error" }, new String[] { "state" }, new Object[][] { new Object[] { + TaskInfoState.SUCCESS, TaskInfoState.ERROR } }); + + if (result[0].equals(TaskInfoState.SUCCESS)) { + retVal = true; + } + if (result[1] instanceof LocalizedMethodFault) { + throw new RuntimeException(((LocalizedMethodFault) result[1]).getLocalizedMessage()); + } + return retVal; + } + + /** + * Handle Updates for a single object. waits till expected values of + * properties to check are reached Destroys the ObjectFilter when done. + * + * @param objmor + * MOR of the Object to wait for + * @param filterProps + * Properties list to filter + * @param endWaitProps + * Properties list to check for expected values these be + * properties of a property in the filter properties list + * @param expectedVals + * values for properties to end the wait + * @return true indicating expected values were met, and false otherwise + * @throws RuntimeFaultFaultMsg + * @throws InvalidPropertyFaultMsg + * @throws InvalidCollectorVersionFaultMsg + */ + private Object[] waitForValues(ManagedObjectReference objmor, String[] filterProps, String[] endWaitProps, Object[][] expectedVals) + throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg, InvalidCollectorVersionFaultMsg { + // version string is initially null + String version = ""; + Object[] endVals = new Object[endWaitProps.length]; + Object[] filterVals = new Object[filterProps.length]; + + PropertyFilterSpec spec = new PropertyFilterSpec(); + ObjectSpec oSpec = new ObjectSpec(); + 
oSpec.setObj(objmor); + oSpec.setSkip(Boolean.FALSE); + spec.getObjectSet().add(oSpec); + + PropertySpec pSpec = new PropertySpec(); + pSpec.getPathSet().addAll(Arrays.asList(filterProps)); + pSpec.setType(objmor.getType()); + spec.getPropSet().add(pSpec); + + ManagedObjectReference filterSpecRef = vimPort.createFilter(propCollectorRef, spec, true); + + boolean reached = false; + + UpdateSet updateset = null; + List filtupary = null; + List objupary = null; + List propchgary = null; + while (!reached) { + updateset = vimPort.waitForUpdates(propCollectorRef, version); + if (updateset == null || updateset.getFilterSet() == null) { + continue; + } + version = updateset.getVersion(); + + // Make this code more general purpose when PropCol changes later. + filtupary = updateset.getFilterSet(); + + for (PropertyFilterUpdate filtup : filtupary) { + objupary = filtup.getObjectSet(); + for (ObjectUpdate objup : objupary) { + // TODO: Handle all "kind"s of updates. + if (objup.getKind() == ObjectUpdateKind.MODIFY || objup.getKind() == ObjectUpdateKind.ENTER + || objup.getKind() == ObjectUpdateKind.LEAVE) { + propchgary = objup.getChangeSet(); + for (PropertyChange propchg : propchgary) { + updateValues(endWaitProps, endVals, propchg); + updateValues(filterProps, filterVals, propchg); + } + } + } + } + + Object expctdval = null; + // Check if the expected values have been reached and exit the loop + // if done. + // Also exit the WaitForUpdates loop if this is the case. + for (int chgi = 0; chgi < endVals.length && !reached; chgi++) { + for (int vali = 0; vali < expectedVals[chgi].length && !reached; vali++) { + expctdval = expectedVals[chgi][vali]; + + reached = expctdval.equals(endVals[chgi]) || reached; + } + } + } + + // Destroy the filter when we are done. 
+ vimPort.destroyPropertyFilter(filterSpecRef); + return filterVals; + } + + private void updateValues(String[] props, Object[] vals, PropertyChange propchg) { + for (int findi = 0; findi < props.length; findi++) { + if (propchg.getName().lastIndexOf(props[findi]) >= 0) { + if (propchg.getOp() == PropertyChangeOp.REMOVE) { + vals[findi] = ""; + } else { + vals[findi] = propchg.getVal(); + } + } + } + } + + private SelectionSpec getSelectionSpec(String name) { + SelectionSpec genericSpec = new SelectionSpec(); + genericSpec.setName(name); + return genericSpec; + } + + /* + * @return An array of SelectionSpec covering VM, Host, Resource pool, + * Cluster Compute Resource and Datastore. + */ + private List buildFullTraversal() { + // Terminal traversal specs + + // RP -> VM + TraversalSpec rpToVm = new TraversalSpec(); + rpToVm.setName("rpToVm"); + rpToVm.setType("ResourcePool"); + rpToVm.setPath("vm"); + rpToVm.setSkip(Boolean.FALSE); + + // vApp -> VM + TraversalSpec vAppToVM = new TraversalSpec(); + vAppToVM.setName("vAppToVM"); + vAppToVM.setType("VirtualApp"); + vAppToVM.setPath("vm"); + + // HostSystem -> VM + TraversalSpec hToVm = new TraversalSpec(); + hToVm.setType("HostSystem"); + hToVm.setPath("vm"); + hToVm.setName("hToVm"); + hToVm.getSelectSet().add(getSelectionSpec("VisitFolders")); + hToVm.setSkip(Boolean.FALSE); + + // DC -> DS + TraversalSpec dcToDs = new TraversalSpec(); + dcToDs.setType("Datacenter"); + dcToDs.setPath("datastore"); + dcToDs.setName("dcToDs"); + dcToDs.setSkip(Boolean.FALSE); + + // Recurse through all ResourcePools + TraversalSpec rpToRp = new TraversalSpec(); + rpToRp.setType("ResourcePool"); + rpToRp.setPath("resourcePool"); + rpToRp.setSkip(Boolean.FALSE); + rpToRp.setName("rpToRp"); + rpToRp.getSelectSet().add(getSelectionSpec("rpToRp")); + + TraversalSpec crToRp = new TraversalSpec(); + crToRp.setType("ComputeResource"); + crToRp.setPath("resourcePool"); + crToRp.setSkip(Boolean.FALSE); + crToRp.setName("crToRp"); + 
crToRp.getSelectSet().add(getSelectionSpec("rpToRp")); + + TraversalSpec crToH = new TraversalSpec(); + crToH.setSkip(Boolean.FALSE); + crToH.setType("ComputeResource"); + crToH.setPath("host"); + crToH.setName("crToH"); + + TraversalSpec dcToHf = new TraversalSpec(); + dcToHf.setSkip(Boolean.FALSE); + dcToHf.setType("Datacenter"); + dcToHf.setPath("hostFolder"); + dcToHf.setName("dcToHf"); + dcToHf.getSelectSet().add(getSelectionSpec("VisitFolders")); + + TraversalSpec vAppToRp = new TraversalSpec(); + vAppToRp.setName("vAppToRp"); + vAppToRp.setType("VirtualApp"); + vAppToRp.setPath("resourcePool"); + vAppToRp.getSelectSet().add(getSelectionSpec("rpToRp")); + + TraversalSpec dcToVmf = new TraversalSpec(); + dcToVmf.setType("Datacenter"); + dcToVmf.setSkip(Boolean.FALSE); + dcToVmf.setPath("vmFolder"); + dcToVmf.setName("dcToVmf"); + dcToVmf.getSelectSet().add(getSelectionSpec("VisitFolders")); + + // For Folder -> Folder recursion + TraversalSpec visitFolders = new TraversalSpec(); + visitFolders.setType("Folder"); + visitFolders.setPath("childEntity"); + visitFolders.setSkip(Boolean.FALSE); + visitFolders.setName("VisitFolders"); + List sspecarrvf = new ArrayList(); + sspecarrvf.add(getSelectionSpec("crToRp")); + sspecarrvf.add(getSelectionSpec("crToH")); + sspecarrvf.add(getSelectionSpec("dcToVmf")); + sspecarrvf.add(getSelectionSpec("dcToHf")); + sspecarrvf.add(getSelectionSpec("vAppToRp")); + sspecarrvf.add(getSelectionSpec("vAppToVM")); + sspecarrvf.add(getSelectionSpec("dcToDs")); + sspecarrvf.add(getSelectionSpec("hToVm")); + sspecarrvf.add(getSelectionSpec("rpToVm")); + sspecarrvf.add(getSelectionSpec("VisitFolders")); + + visitFolders.getSelectSet().addAll(sspecarrvf); + + List resultspec = new ArrayList(); + resultspec.add(visitFolders); + resultspec.add(crToRp); + resultspec.add(crToH); + resultspec.add(dcToVmf); + resultspec.add(dcToHf); + resultspec.add(vAppToRp); + resultspec.add(vAppToVM); + resultspec.add(dcToDs); + resultspec.add(hToVm); + 
resultspec.add(rpToVm); + resultspec.add(rpToRp); + + return resultspec; + } + + + /** + * Get the ManagedObjectReference for an item under the + * specified root folder that has the type and name specified. + * + * @param root a root folder if available, or null for default + * @param type type of the managed object + * @param name name to match + * + * @return First ManagedObjectReference of the type / name pair found + */ + public ManagedObjectReference getDecendentMoRef(ManagedObjectReference root, String type, String name) throws Exception { + if (name == null || name.length() == 0) { + return null; + } + + // Create PropertySpecs + PropertySpec pSpec = new PropertySpec(); + pSpec.setType(type); + pSpec.setAll(false); + pSpec.getPathSet().add(name); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(root); + oSpec.setSkip(false); + oSpec.getSelectSet().addAll(buildFullTraversal()); + + PropertyFilterSpec spec = new PropertyFilterSpec(); + spec.getPropSet().add(pSpec); + spec.getObjectSet().add(oSpec); + List specArr = new ArrayList(); + specArr.add(spec); + + List ocary = vimPort.retrieveProperties(propCollectorRef, specArr); + + if (ocary == null || ocary.size() == 0) { + return null; + } + + for (ObjectContent oc : ocary) { + ManagedObjectReference mor = oc.getObj(); + List propary = oc.getPropSet(); + if (type == null || type.equals(mor.getType())) { + if (propary.size() > 0) { + String propval = (String) propary.get(0).getVal(); + if (propval != null && name.equals(propval)) + return mor; + } + } + } + return null; + } +} diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContext.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContext.java index 6dd6475ae11..7f9aacb538d 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContext.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareContext.java @@ -26,7 +26,6 @@ import java.io.FileOutputStream; import java.io.InputStream; import 
java.io.InputStreamReader; import java.io.OutputStream; -import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.URL; import java.net.URLEncoder; @@ -44,15 +43,11 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.DatastoreFile; import com.cloud.utils.ActionDelegate; -import com.vmware.apputils.version.ExtendedAppUtil; -import com.vmware.apputils.vim25.ServiceConnection; -import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.ManagedObjectReference; import com.vmware.vim25.ObjectContent; import com.vmware.vim25.ObjectSpec; import com.vmware.vim25.PropertyFilterSpec; import com.vmware.vim25.PropertySpec; -import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.ServiceContent; import com.vmware.vim25.TaskInfo; import com.vmware.vim25.TraversalSpec; @@ -63,30 +58,30 @@ public class VmwareContext { private static int MAX_CONNECT_RETRY = 5; private static int CONNECT_RETRY_INTERVAL = 1000; - - private ExtendedAppUtil _appUtil; + + private VmwareClient _vimClient; private String _serverAddress; - + private Map _stockMap = new HashMap(); private int _CHUNKSIZE = 1*1024*1024; // 1M - + static { try { - javax.net.ssl.TrustManager[] trustAllCerts = new javax.net.ssl.TrustManager[1]; - javax.net.ssl.TrustManager tm = new TrustAllManager(); - trustAllCerts[0] = tm; - javax.net.ssl.SSLContext sc = javax.net.ssl.SSLContext.getInstance("SSL"); - sc.init(null, trustAllCerts, null); + javax.net.ssl.TrustManager[] trustAllCerts = new javax.net.ssl.TrustManager[1]; + javax.net.ssl.TrustManager tm = new TrustAllManager(); + trustAllCerts[0] = tm; + javax.net.ssl.SSLContext sc = javax.net.ssl.SSLContext.getInstance("SSL"); + sc.init(null, trustAllCerts, null); javax.net.ssl.HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); } catch (Exception e) { s_logger.error("Unexpected exception ", e); } } - - public VmwareContext(ExtendedAppUtil appUtil, 
String address) { - assert(appUtil != null) : "Invalid parameter in constructing VmwareContext object"; - - _appUtil = appUtil; + + public VmwareContext(VmwareClient client, String address) { + assert(client != null) : "Invalid parameter in constructing VmwareContext object"; + + _vimClient = client; _serverAddress = address; } @@ -95,132 +90,136 @@ public class VmwareContext { _stockMap.put(name, obj); } } - + public void uregisterStockObject(String name) { synchronized(_stockMap) { _stockMap.remove(name); } } - + @SuppressWarnings("unchecked") public T getStockObject(String name) { synchronized(_stockMap) { return (T)_stockMap.get(name); } } - + public String getServerAddress() { return _serverAddress; } - + + /* public ServiceConnection getServiceConnection() { - return _appUtil.getServiceConnection3(); + return _vimClient.getServiceConnection3(); } - + */ + public VimPortType getService() { - return getServiceConnection().getService(); + return _vimClient.getService(); } - + public ServiceContent getServiceContent() { - return getServiceConnection().getServiceContent(); + return _vimClient.getServiceContent(); } - + + /* public ServiceUtil getServiceUtil() { - return _appUtil.getServiceUtil3(); + return _vimClient.getServiceUtil3(); } - + */ + + public ManagedObjectReference getPropertyCollector(){ + return _vimClient.getPropCol(); + } + public ManagedObjectReference getRootFolder() { - return getServiceContent().getRootFolder(); + return _vimClient.getRootFolder(); } - + + public VmwareClient getVimClient(){ + return _vimClient; + } + + public ManagedObjectReference getHostMorByPath(String inventoryPath) throws Exception { assert(inventoryPath != null); - + String[] tokens; if(inventoryPath.startsWith("/")) tokens = inventoryPath.substring(1).split("/"); else tokens = inventoryPath.split("/"); - + ManagedObjectReference mor = getRootFolder(); for(int i=0; i < tokens.length;i++) { String token = tokens[i]; - ObjectContent[] ocs; + List ocs; + PropertySpec pSpec 
= null; + ObjectSpec oSpec = null; if(mor.getType().equalsIgnoreCase("Datacenter")) { - PropertySpec pSpec = new PropertySpec(); + pSpec = new PropertySpec(); + pSpec.setAll(false); pSpec.setType("ManagedEntity"); - pSpec.setPathSet(new String[] { "name" }); - + pSpec.getPathSet().add("name"); + TraversalSpec dcHostFolderTraversal = new TraversalSpec(); dcHostFolderTraversal.setType("Datacenter"); dcHostFolderTraversal.setPath("hostFolder"); dcHostFolderTraversal.setName("dcHostFolderTraversal"); - ObjectSpec oSpec = new ObjectSpec(); + oSpec = new ObjectSpec(); oSpec.setObj(mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { dcHostFolderTraversal }); + oSpec.getSelectSet().add(dcHostFolderTraversal); - PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - ocs = getService().retrieveProperties( - getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); - } else if(mor.getType().equalsIgnoreCase("Folder")) { - PropertySpec pSpec = new PropertySpec(); + pSpec = new PropertySpec(); + pSpec.setAll(false); pSpec.setType("ManagedEntity"); - pSpec.setPathSet(new String[] { "name" }); - + pSpec.getPathSet().add("name"); + TraversalSpec folderChildrenTraversal = new TraversalSpec(); folderChildrenTraversal.setType("Folder"); folderChildrenTraversal.setPath("childEntity"); folderChildrenTraversal.setName("folderChildrenTraversal"); - ObjectSpec oSpec = new ObjectSpec(); + oSpec = new ObjectSpec(); oSpec.setObj(mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { folderChildrenTraversal }); + oSpec.getSelectSet().add(folderChildrenTraversal); + - PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ocs = getService().retrieveProperties( - getServiceContent().getPropertyCollector(), - new 
PropertyFilterSpec[] { pfSpec }); } else if(mor.getType().equalsIgnoreCase("ClusterComputeResource")) { - PropertySpec pSpec = new PropertySpec(); + pSpec = new PropertySpec(); pSpec.setType("ManagedEntity"); - pSpec.setPathSet(new String[] { "name" }); - + pSpec.getPathSet().add("name"); + TraversalSpec clusterHostTraversal = new TraversalSpec(); clusterHostTraversal.setType("ClusterComputeResource"); clusterHostTraversal.setPath("host"); clusterHostTraversal.setName("folderChildrenTraversal"); - ObjectSpec oSpec = new ObjectSpec(); + oSpec = new ObjectSpec(); oSpec.setObj(mor); oSpec.setSkip(Boolean.TRUE); - oSpec.setSelectSet(new SelectionSpec[] { clusterHostTraversal }); + oSpec.getSelectSet().add(clusterHostTraversal); - PropertyFilterSpec pfSpec = new PropertyFilterSpec(); - pfSpec.setPropSet(new PropertySpec[] { pSpec }); - pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); - - ocs = getService().retrieveProperties( - getServiceContent().getPropertyCollector(), - new PropertyFilterSpec[] { pfSpec }); } else { s_logger.error("Invalid inventory path, path element can only be datacenter and folder"); return null; } - - if(ocs != null && ocs.length > 0) { + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.getPropSet().add(pSpec); + pfSpec.getObjectSet().add(oSpec); + List pfSpecArr = new ArrayList(); + pfSpecArr.add(pfSpec); + ocs = getService().retrieveProperties(getPropertyCollector(), pfSpecArr); + + if(ocs != null && ocs.size() > 0) { boolean found = false; for(ObjectContent oc : ocs) { - String name = oc.getPropSet()[0].getVal().toString(); + String name = oc.getPropSet().get(0).getVal().toString(); if(name.equalsIgnoreCase(token) || name.equalsIgnoreCase("host")) { mor = oc.getObj(); found = true; @@ -244,44 +243,44 @@ public class VmwareContext { // path in format of / public ManagedObjectReference getDatastoreMorByPath(String inventoryPath) throws Exception { assert(inventoryPath != null); - + String[] tokens; 
if(inventoryPath.startsWith("/")) tokens = inventoryPath.substring(1).split("/"); else tokens = inventoryPath.split("/"); - + if(tokens == null || tokens.length != 2) { s_logger.error("Invalid datastore inventory path. path: " + inventoryPath); return null; } - + DatacenterMO dcMo = new DatacenterMO(this, tokens[0]); if(dcMo.getMor() == null) { s_logger.error("Unable to locate the datacenter specified in path: " + inventoryPath); return null; } - + return dcMo.findDatastore(tokens[1]); } - + public void waitForTaskProgressDone(ManagedObjectReference morTask) throws Exception { while(true) { - TaskInfo tinfo = (TaskInfo)getServiceUtil().getDynamicProperty(morTask, "info"); + TaskInfo tinfo = (TaskInfo)_vimClient.getDynamicProperty(morTask, "info"); Integer progress = tinfo.getProgress(); if(progress == null) break; - + if(progress.intValue() >= 100) break; - + Thread.sleep(1000); } } - + public void getFile(String urlString, String localFileFullName) throws Exception { HttpURLConnection conn = getHTTPConnection(urlString); - + InputStream in = conn.getInputStream(); OutputStream out = new FileOutputStream(new File(localFileFullName)); byte[] buf = new byte[_CHUNKSIZE]; @@ -290,19 +289,19 @@ public class VmwareContext { out.write(buf, 0, len); } in.close(); - out.close(); + out.close(); } - + public void uploadFile(String urlString, String localFileFullName) throws Exception { uploadFile(urlString, new File(localFileFullName)); } - + public void uploadFile(String urlString, File localFile) throws Exception { HttpURLConnection conn = getHTTPConnection(urlString, "PUT"); OutputStream out = null; InputStream in = null; BufferedReader br = null; - + try { out = conn.getOutputStream(); in = new FileInputStream(localFile); @@ -312,7 +311,7 @@ public class VmwareContext { out.write(buf, 0, len); } out.flush(); - + br = new BufferedReader(new InputStreamReader(conn.getInputStream())); String line; while ((line = br.readLine()) != null) { @@ -322,41 +321,41 @@ public class 
VmwareContext { } finally { if(in != null) in.close(); - + if(out != null) out.close(); - + if(br != null) br.close(); } } - - public void uploadVmdkFile(String httpMethod, String urlString, String localFileName, + + public void uploadVmdkFile(String httpMethod, String urlString, String localFileName, long totalBytesUpdated, ActionDelegate progressUpdater) throws Exception { - + HttpURLConnection conn = getRawHTTPConnection(urlString); - + conn.setDoOutput(true); conn.setUseCaches(false); - - conn.setChunkedStreamingMode(_CHUNKSIZE); - conn.setRequestMethod(httpMethod); - conn.setRequestProperty("Connection", "Keep-Alive"); - conn.setRequestProperty("Content-Type", "application/x-vnd.vmware-streamVmdk"); + + conn.setChunkedStreamingMode(_CHUNKSIZE); + conn.setRequestMethod(httpMethod); + conn.setRequestProperty("Connection", "Keep-Alive"); + conn.setRequestProperty("Content-Type", "application/x-vnd.vmware-streamVmdk"); conn.setRequestProperty("Content-Length", Long.toString(new File(localFileName).length())); connectWithRetry(conn); - + BufferedOutputStream bos = null; BufferedInputStream is = null; try { - bos = new BufferedOutputStream(conn.getOutputStream()); - is = new BufferedInputStream(new FileInputStream(localFileName)); - int bufferSize = _CHUNKSIZE; - byte[] buffer = new byte[bufferSize]; - while (true) { - int bytesRead = is.read(buffer, 0, bufferSize); - if (bytesRead == -1) { - break; + bos = new BufferedOutputStream(conn.getOutputStream()); + is = new BufferedInputStream(new FileInputStream(localFileName)); + int bufferSize = _CHUNKSIZE; + byte[] buffer = new byte[bufferSize]; + while (true) { + int bytesRead = is.read(buffer, 0, bufferSize); + if (bytesRead == -1) { + break; } bos.write(buffer, 0, bytesRead); totalBytesUpdated += bytesRead; @@ -370,54 +369,54 @@ public class VmwareContext { is.close(); if(bos != null) bos.close(); - + conn.disconnect(); } } - - public long downloadVmdkFile(String urlString, String localFileName, + + public long 
downloadVmdkFile(String urlString, String localFileName, long totalBytesDownloaded, ActionDelegate progressUpdater) throws Exception { HttpURLConnection conn = getRawHTTPConnection(urlString); - + String cookieString = getServiceCookie(); conn.setRequestProperty(org.apache.axis.transport.http.HTTPConstants.HEADER_COOKIE, cookieString); conn.setDoInput(true); - conn.setDoOutput(true); + conn.setDoOutput(true); conn.setAllowUserInteraction(true); connectWithRetry(conn); - long bytesWritten = 0; - InputStream in = null; - OutputStream out = null; + long bytesWritten = 0; + InputStream in = null; + OutputStream out = null; try { - in = conn.getInputStream(); - out = new FileOutputStream(new File(localFileName)); - - byte[] buf = new byte[_CHUNKSIZE]; - int len = 0; - while ((len = in.read(buf)) > 0) { - out.write(buf, 0, len); + in = conn.getInputStream(); + out = new FileOutputStream(new File(localFileName)); + + byte[] buf = new byte[_CHUNKSIZE]; + int len = 0; + while ((len = in.read(buf)) > 0) { + out.write(buf, 0, len); bytesWritten += len; totalBytesDownloaded += len; - + if(progressUpdater != null) progressUpdater.action(new Long(totalBytesDownloaded)); - } + } } finally { if(in != null) in.close(); if(out != null) out.close(); - + conn.disconnect(); } - return bytesWritten; + return bytesWritten; } - + public byte[] getResourceContent(String urlString) throws Exception { HttpURLConnection conn = getHTTPConnection(urlString); InputStream in = conn.getInputStream(); - + ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] buf = new byte[_CHUNKSIZE]; int len = 0; @@ -425,10 +424,10 @@ public class VmwareContext { out.write(buf, 0, len); } in.close(); - out.close(); + out.close(); return out.toByteArray(); } - + public void uploadResourceContent(String urlString, byte[] content) throws Exception { // vSphere does not support POST HttpURLConnection conn = getHTTPConnection(urlString, "PUT"); @@ -436,7 +435,7 @@ public class VmwareContext { OutputStream 
out = conn.getOutputStream(); out.write(content); out.flush(); - + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream())); String line; while ((line = in.readLine()) != null) { @@ -446,15 +445,15 @@ public class VmwareContext { out.close(); in.close(); } - + /* * Sample content returned by query a datastore directory - * + * * Url for the query * https://vsphere-1.lab.vmops.com/folder/Fedora-clone-test?dcPath=cupertino&dsName=NFS+datastore * * Returned conent from vSphere - * + * @@ -485,7 +484,7 @@ public class VmwareContext { - */ + */ public String[] listDatastoreDirContent(String urlString) throws Exception { List fileList = new ArrayList(); String content = new String(getResourceContent(urlString)); @@ -497,7 +496,7 @@ public class VmwareContext { int beginPos = content.lastIndexOf('>', parsePos -1); if(beginPos < 0) beginPos = 0; - + fileList.add((content.substring(beginPos + 1, parsePos))); parsePos += marker.length(); } else { @@ -511,11 +510,11 @@ public class VmwareContext { DatastoreFile dsFile = new DatastoreFile(fullPath); return composeDatastoreBrowseUrl(dcName, dsFile.getDatastoreName(), dsFile.getRelativePath()); } - + public String composeDatastoreBrowseUrl(String dcName, String datastoreName, String relativePath) { assert(relativePath != null); assert(datastoreName != null); - + StringBuffer sb = new StringBuffer(); sb.append("https://"); sb.append(_serverAddress); @@ -525,23 +524,24 @@ public class VmwareContext { sb.append(URLEncoder.encode(datastoreName)); return sb.toString(); } - + public HttpURLConnection getHTTPConnection(String urlString) throws Exception { return getHTTPConnection(urlString, "GET"); } - + public HttpURLConnection getHTTPConnection(String urlString, String httpMethod) throws Exception { String cookieString = getServiceCookie(); HostnameVerifier hv = new HostnameVerifier() { - public boolean verify(String urlHostName, SSLSession session) { + @Override + public boolean verify(String urlHostName, 
SSLSession session) { return true; } }; - - HttpsURLConnection.setDefaultHostnameVerifier(hv); - URL url = new URL(urlString); + + HttpsURLConnection.setDefaultHostnameVerifier(hv); + URL url = new URL(urlString); HttpURLConnection conn = (HttpURLConnection)url.openConnection(); - + conn.setDoInput(true); conn.setDoOutput(true); conn.setAllowUserInteraction(true); @@ -550,16 +550,17 @@ public class VmwareContext { connectWithRetry(conn); return conn; } - + public HttpURLConnection getRawHTTPConnection(String urlString) throws Exception { HostnameVerifier hv = new HostnameVerifier() { - public boolean verify(String urlHostName, SSLSession session) { + @Override + public boolean verify(String urlHostName, SSLSession session) { return true; } }; - - HttpsURLConnection.setDefaultHostnameVerifier(hv); - URL url = new URL(urlString); + + HttpsURLConnection.setDefaultHostnameVerifier(hv); + URL url = new URL(urlString); return (HttpURLConnection)url.openConnection(); } @@ -571,7 +572,7 @@ public class VmwareContext { String cookieString = (String)msgContext.getProperty(org.apache.axis.transport.http.HTTPConstants.HEADER_COOKIE); return cookieString; } - + private static void connectWithRetry(HttpURLConnection conn) throws Exception { boolean connected = false; for(int i = 0; i < MAX_CONNECT_RETRY && !connected; i++) { @@ -581,45 +582,48 @@ public class VmwareContext { s_logger.info("Connected, conn: " + conn.toString() + ", retry: " + i); } catch (Exception e) { s_logger.warn("Unable to connect, conn: " + conn.toString() + ", message: " + e.toString() + ", retry: " + i); - + try { Thread.sleep(CONNECT_RETRY_INTERVAL); } catch(InterruptedException ex) { } } } - + if(!connected) throw new Exception("Unable to connect to " + conn.toString()); } - + public void close() { try { - _appUtil.disConnect(); + _vimClient.disconnect(); } catch(Exception e) { s_logger.warn("Unexpected exception: ", e); } } public static class TrustAllManager implements javax.net.ssl.TrustManager, 
javax.net.ssl.X509TrustManager { - public java.security.cert.X509Certificate[] getAcceptedIssuers() { + @Override + public java.security.cert.X509Certificate[] getAcceptedIssuers() { return null; } - + public boolean isServerTrusted(java.security.cert.X509Certificate[] certs) { return true; } - + public boolean isClientTrusted(java.security.cert.X509Certificate[] certs) { return true; } - - public void checkServerTrusted(java.security.cert.X509Certificate[] certs, String authType) + + @Override + public void checkServerTrusted(java.security.cert.X509Certificate[] certs, String authType) throws java.security.cert.CertificateException { return; } - - public void checkClientTrusted(java.security.cert.X509Certificate[] certs, String authType) + + @Override + public void checkClientTrusted(java.security.cert.X509Certificate[] certs, String authType) throws java.security.cert.CertificateException { return; } From 1ce4d62ace9ffd5976eda30652ada78a89fdae80 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Tue, 5 Feb 2013 21:44:38 -0800 Subject: [PATCH 003/486] Fix cast error from List to array. 
--- .../vmware/manager/VmwareManagerImpl.java | 12 ++--- .../vmware/resource/VmwareContextFactory.java | 17 +++++-- .../vmware/resource/VmwareResource.java | 14 +++--- .../cloud/hypervisor/vmware/mo/ClusterMO.java | 18 +++---- .../vmware/mo/CustomFieldsManagerMO.java | 8 ++-- .../vmware/mo/HostDatastoreSystemMO.java | 12 ++--- .../cloud/hypervisor/vmware/mo/HostMO.java | 48 +++++++++---------- .../vmware/mo/HypervisorHostHelper.java | 2 +- .../cloud/hypervisor/vmware/mo/NetworkMO.java | 7 +-- .../hypervisor/vmware/mo/PerfManagerMO.java | 8 ++-- .../vmware/mo/VirtualMachineMO.java | 10 ++-- .../hypervisor/vmware/util/VmwareClient.java | 3 +- .../vmware/mo/TestVmwareContextFactory.java | 14 +++--- .../hypervisor/vmware/mo/TestVmwareMO.java | 1 - .../test/com/cloud/vmware/TestVMWare.java | 1 - 15 files changed, 92 insertions(+), 83 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 24c89d3624d..21cd914689d 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -390,20 +390,20 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw List returnedHostList = new ArrayList(); if(mor.getType().equals("ComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getVimClient().getDynamicProperty(mor, "host"); - assert(hosts != null); + List hosts = (List)serviceContext.getVimClient().getDynamicProperty(mor, "host"); + assert(hosts != null && hosts.size() > 0); // For ESX host, we need to enable host firewall to allow VNC access - HostMO hostMo = new HostMO(serviceContext, hosts[0]); + HostMO hostMo = new HostMO(serviceContext, hosts.get(0)); prepareHost(hostMo, privateTrafficLabel); - 
returnedHostList.add(hosts[0]); + returnedHostList.add(hosts.get(0)); return returnedHostList; } else if(mor.getType().equals("ClusterComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getVimClient().getDynamicProperty(mor, "host"); + List hosts = (List)serviceContext.getVimClient().getDynamicProperty(mor, "host"); assert(hosts != null); - if(hosts.length > _maxHostsPerCluster) { + if(hosts.size() > _maxHostsPerCluster) { String msg = "vCenter cluster size is too big (current configured cluster size: " + _maxHostsPerCluster + ")"; s_logger.error(msg); throw new DiscoveredWithErrorException(msg); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 20399cd737e..5db9da3c02d 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -16,16 +16,18 @@ // under the License. 
package com.cloud.hypervisor.vmware.resource; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.hypervisor.vmware.manager.VmwareManager; -import com.cloud.hypervisor.vmware.manager.VmwareManagerImpl; import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentContext; - +@Component public class VmwareContextFactory { private static final Logger s_logger = Logger.getLogger(VmwareContextFactory.class); @@ -33,10 +35,17 @@ public class VmwareContextFactory { private static volatile int s_seq = 1; private static VmwareManager s_vmwareMgr; + @Inject VmwareManager _vmwareMgr; + static { // skip certificate check System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); - s_vmwareMgr = ComponentContext.inject(VmwareManagerImpl.class); + //s_vmwareMgr = ComponentContext.inject(VmwareManagerImpl.class); + } + + @PostConstruct + void init() { + s_vmwareMgr = _vmwareMgr; } public static VmwareContext create(String vCenterAddress, String vCenterUserName, String vCenterPassword) throws Exception { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 397408af7ff..7b97d3d1d1b 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4494,14 +4494,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa PerfCounterInfo rxPerfCounterInfo = null; PerfCounterInfo txPerfCounterInfo = null; - PerfCounterInfo[] cInfo = (PerfCounterInfo[]) 
getServiceContext().getVimClient().getDynamicProperty(perfMgr, "perfCounter"); - for(int i=0; i cInfo = (List) getServiceContext().getVimClient().getDynamicProperty(perfMgr, "perfCounter"); + for(PerfCounterInfo info : cInfo) { + if ("net".equalsIgnoreCase(info.getGroupInfo().getKey())) { + if ("transmitted".equalsIgnoreCase(info.getNameInfo().getKey())) { + txPerfCounterInfo = info; } - if ("received".equalsIgnoreCase(cInfo[i].getNameInfo().getKey())) { - rxPerfCounterInfo = cInfo[i]; + if ("received".equalsIgnoreCase(info.getNameInfo().getKey())) { + rxPerfCounterInfo = info; } } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java index 10265545bf4..d112c34fb9e 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/ClusterMO.java @@ -295,8 +295,8 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { ManagedObjectReference morDs = null; ManagedObjectReference morDsFirst = null; - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); - if(hosts != null && hosts.length > 0) { + List hosts = (List)_context.getVimClient().getDynamicProperty(_mor, "host"); + if(hosts != null && hosts.size() > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); morDs = hostMo.mountDatastore(vmfsDatastore, poolHostAddress, poolHostPort, poolPath, poolUuid); @@ -328,8 +328,8 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - unmountDatastore(). 
target MOR: " + _mor.getValue() + ", poolUuid: " + poolUuid); - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); - if(hosts != null && hosts.length > 0) { + List hosts = (List)_context.getVimClient().getDynamicProperty(_mor, "host"); + if(hosts != null && hosts.size() > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); hostMo.unmountDatastore(poolUuid); @@ -473,8 +473,8 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { // TODO, need to use traversal to optimize retrieve of int cpuNumInCpuThreads = 1; - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); - if(hosts != null && hosts.length > 0) { + List hosts = (List)_context.getVimClient().getDynamicProperty(_mor, "host"); + if(hosts != null && hosts.size() > 0) { for(ManagedObjectReference morHost : hosts) { HostMO hostMo = new HostMO(_context, morHost); HostHardwareSummary hardwareSummary = hostMo.getHostHardwareSummary(); @@ -498,9 +498,9 @@ public class ClusterMO extends BaseMO implements VmwareHypervisorHost { if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostNetworkSummary(). 
target MOR: " + _mor.getValue() + ", mgmtPortgroup: " + esxServiceConsolePort); - ManagedObjectReference[] hosts = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "host"); - if(hosts != null && hosts.length > 0) { - VmwareHypervisorHostNetworkSummary summary = new HostMO(_context, hosts[0]).getHyperHostNetworkSummary(esxServiceConsolePort); + List hosts = (List)_context.getVimClient().getDynamicProperty(_mor, "host"); + if(hosts != null && hosts.size() > 0) { + VmwareHypervisorHostNetworkSummary summary = new HostMO(_context, hosts.get(0)).getHyperHostNetworkSummary(esxServiceConsolePort); if(s_logger.isTraceEnabled()) s_logger.trace("vCenter API trace - getHyperHostResourceSummary() done(successfully)"); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java index 08932c5e5ba..3082a7143b5 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/CustomFieldsManagerMO.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.hypervisor.vmware.mo; +import java.util.List; + import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.CustomFieldDef; import com.vmware.vim25.ManagedObjectReference; @@ -48,12 +50,12 @@ public class CustomFieldsManagerMO extends BaseMO { _context.getService().setField(getMor(), morEntity, key, value); } - public CustomFieldDef[] getFields() throws Exception { - return (CustomFieldDef[])_context.getVimClient().getDynamicProperty(getMor(), "field"); + public List getFields() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(getMor(), "field"); } public int getCustomFieldKey(String morType, String fieldName) throws Exception { - CustomFieldDef[] fields = getFields(); + List fields = getFields(); if(fields != null) { for(CustomFieldDef field : fields) { if(field.getName().equals(fieldName) && field.getManagedObjectType().equals(morType)) diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java index 7d967a9b532..3dcd7249060 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostDatastoreSystemMO.java @@ -78,8 +78,8 @@ public class HostDatastoreSystemMO extends BaseMO { public ManagedObjectReference findDatastoreByUrl(String storeUrl) throws Exception { assert(storeUrl != null); - ManagedObjectReference[] datastores = getDatastores(); - if(datastores != null && datastores.length > 0) { + List datastores = getDatastores(); + if(datastores != null && datastores.size() > 0) { for(ManagedObjectReference morDatastore : datastores) { NasDatastoreInfo info = getNasDatastoreInfo(morDatastore); if(info != null) { @@ -99,8 +99,8 @@ public class HostDatastoreSystemMO extends BaseMO { public ManagedObjectReference findDatastoreByExportPath(String exportPath) throws Exception { assert(exportPath != null); - 
ManagedObjectReference[] datastores = getDatastores(); - if(datastores != null && datastores.length > 0) { + List datastores = getDatastores(); + if(datastores != null && datastores.size() > 0) { for(ManagedObjectReference morDatastore : datastores) { DatastoreMO dsMo = new DatastoreMO(_context, morDatastore); if(dsMo.getInventoryPath().equals(exportPath)) @@ -145,8 +145,8 @@ public class HostDatastoreSystemMO extends BaseMO { return _context.getService().createNasDatastore(_mor, spec); } - public ManagedObjectReference[] getDatastores() throws Exception { - return (ManagedObjectReference[])_context.getVimClient().getDynamicProperty( + public List getDatastores() throws Exception { + return (List)_context.getVimClient().getDynamicProperty( _mor, "datastore"); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java index c164cc22f23..d80680292a5 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HostMO.java @@ -84,16 +84,14 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return (HostConfigManager)_context.getVimClient().getDynamicProperty(_mor, "configManager"); } - public VirtualNicManagerNetConfig[] getHostVirtualNicManagerNetConfig() throws Exception { - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, + public List getHostVirtualNicManagerNetConfig() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); - return netConfigs; } - public HostIpRouteEntry[] getHostIpRouteEntries() throws Exception { - HostIpRouteEntry[] entries = (HostIpRouteEntry[])_context.getVimClient().getDynamicProperty(_mor, + public List getHostIpRouteEntries() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, 
"config.network.routeTableInfo.ipRoute"); - return entries; } public HostListSummaryQuickStats getHostQuickStats() throws Exception { @@ -142,7 +140,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { @Override public String getHyperHostDefaultGateway() throws Exception { - HostIpRouteEntry[] entries = getHostIpRouteEntries(); + List entries = getHostIpRouteEntries(); for(HostIpRouteEntry entry : entries) { if(entry.getNetwork().equalsIgnoreCase("0.0.0.0")) return entry.getGateway(); @@ -222,7 +220,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } public ManagedObjectReference[] getHostLocalDatastore() throws Exception { - ManagedObjectReference[] datastores = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty( + List datastores = (List)_context.getVimClient().getDynamicProperty( _mor, "datastore"); List l = new ArrayList(); if(datastores != null) { @@ -236,7 +234,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } public HostVirtualSwitch getHostVirtualSwitchByName(String name) throws Exception { - HostVirtualSwitch[] switches = (HostVirtualSwitch[])_context.getVimClient().getDynamicProperty( + List switches = (List)_context.getVimClient().getDynamicProperty( _mor, "config.network.vswitch"); if(switches != null) { @@ -248,8 +246,8 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return null; } - public HostVirtualSwitch[] getHostVirtualSwitch() throws Exception { - return (HostVirtualSwitch[])_context.getVimClient().getDynamicProperty(_mor, "config.network.vswitch"); + public List getHostVirtualSwitch() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "config.network.vswitch"); } public AboutInfo getHostAboutInfo() throws Exception { @@ -285,7 +283,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { public HostVirtualSwitch getVirtualSwitchByName(String vSwitchName) throws Exception { - 
HostVirtualSwitch[] vSwitchs = getHostVirtualSwitch(); + List vSwitchs = getHostVirtualSwitch(); if(vSwitchs != null) { for(HostVirtualSwitch vSwitch: vSwitchs) { if(vSwitch.getName().equals(vSwitchName)) @@ -327,7 +325,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { public String getPortGroupNameByNicType(HostVirtualNicType nicType) throws Exception { assert(nicType != null); - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, + List netConfigs = (List)_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); if(netConfigs != null) { @@ -448,10 +446,10 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return null; } - public ManagedObjectReference[] getVmMorsOnNetwork(String portGroupName) throws Exception { + public List getVmMorsOnNetwork(String portGroupName) throws Exception { ManagedObjectReference morNetwork = getNetworkMor(portGroupName); if(morNetwork != null) - return (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(morNetwork, "vm"); + return (List)_context.getVimClient().getDynamicProperty(morNetwork, "vm"); return null; } @@ -703,7 +701,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { public ManagedObjectReference getExistingDataStoreOnHost(boolean vmfsDatastore, String hostAddress, int hostPort, String path, String uuid, HostDatastoreSystemMO hostDatastoreSystemMo) { // First retrieve the list of Datastores on the host. - ManagedObjectReference[] morArray; + List morArray; try { morArray = hostDatastoreSystemMo.getDatastores(); } catch (Exception e) { @@ -711,17 +709,17 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { return null; } // Next, get all the NAS datastores from this array of datastores. 
- if (morArray.length > 0) { + if (morArray.size() > 0) { int i; - for (i = 0; i < morArray.length; i++) { + for (i = 0; i < morArray.size(); i++) { NasDatastoreInfo nasDS; try { - nasDS = hostDatastoreSystemMo.getNasDatastoreInfo(morArray[i]); + nasDS = hostDatastoreSystemMo.getNasDatastoreInfo(morArray.get(i)); if (nasDS != null) { //DatastoreInfo info = (DatastoreInfo)_context.getServiceUtil().getDynamicProperty(morDatastore, "info"); if (nasDS.getNas().getRemoteHost().equalsIgnoreCase(hostAddress) && nasDS.getNas().getRemotePath().equalsIgnoreCase(path)) { - return morArray[i]; + return morArray.get(i); } } } catch (Exception e) { @@ -861,13 +859,13 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { VmwareHypervisorHostNetworkSummary summary = new VmwareHypervisorHostNetworkSummary(); if(this.getHostType() == VmwareHostType.ESXi) { - VirtualNicManagerNetConfig[] netConfigs = (VirtualNicManagerNetConfig[])_context.getVimClient().getDynamicProperty(_mor, + List netConfigs = (List)_context.getVimClient().getDynamicProperty(_mor, "config.virtualNicManagerInfo.netConfig"); assert(netConfigs != null); - for(int i = 0; i < netConfigs.length; i++) { - if(netConfigs[i].getNicType().equals("management")) { - for(HostVirtualNic nic : netConfigs[i].getCandidateVnic()) { + for(VirtualNicManagerNetConfig netConfig : netConfigs) { + if(netConfig.getNicType().equals("management")) { + for(HostVirtualNic nic : netConfig.getCandidateVnic()) { if(nic.getPortgroup().equals(managementPortGroup)) { summary.setHostIp(nic.getSpec().getIp().getIpAddress()); summary.setHostNetmask(nic.getSpec().getIp().getSubnetMask()); @@ -882,7 +880,7 @@ public class HostMO extends BaseMO implements VmwareHypervisorHost { } } else { // try with ESX path - HostVirtualNic[] hostVNics = (HostVirtualNic[])_context.getVimClient().getDynamicProperty(_mor, + List hostVNics = (List)_context.getVimClient().getDynamicProperty(_mor, "config.network.consoleVnic"); if(hostVNics != null) { diff 
--git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index abc391fa035..e4a9485c32c 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -625,7 +625,7 @@ public class HypervisorHostHelper { try { if(lock.lock(DEFAULT_LOCK_TIMEOUT_SECONDS)) { try { - ManagedObjectReference[] hosts = (ManagedObjectReference[])hostMo.getContext().getVimClient().getDynamicProperty(morParent, "host"); + List hosts = (List)hostMo.getContext().getVimClient().getDynamicProperty(morParent, "host"); if(hosts != null) { for(ManagedObjectReference otherHost: hosts) { if(!otherHost.getValue().equals(hostMo.getMor().getValue())) { diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java index b8e3ab42b5b..df82f155cf1 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/NetworkMO.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.hypervisor.vmware.mo; +import java.util.List; + import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.ManagedObjectReference; @@ -32,8 +34,7 @@ public class NetworkMO extends BaseMO { _context.getService().destroyNetwork(_mor); } - public ManagedObjectReference[] getVMsOnNetwork() throws Exception { - ManagedObjectReference[] vms = (ManagedObjectReference[])_context.getVimClient().getDynamicProperty(_mor, "vm"); - return vms; + public List getVMsOnNetwork() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "vm"); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java index 6c2b7bb2ab7..bb4fb56ebbc 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/PerfManagerMO.java @@ -114,11 +114,11 @@ public class PerfManagerMO extends BaseMO { _context.getService().updatePerfInterval(_mor, interval); } - public PerfCounterInfo[] getCounterInfo() throws Exception { - return (PerfCounterInfo[])_context.getVimClient().getDynamicProperty(_mor, "perfCounter"); + public List getCounterInfo() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "perfCounter"); } - public PerfInterval[] getIntervalInfo() throws Exception { - return (PerfInterval[])_context.getVimClient().getDynamicProperty(_mor, "historicalInterval"); + public List getIntervalInfo() throws Exception { + return (List)_context.getVimClient().getDynamicProperty(_mor, "historicalInterval"); } } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 76072cd614a..6f836c442b5 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -1716,13 
+1716,13 @@ public class VirtualMachineMO extends BaseMO { // return pair of VirtualDisk and disk device bus name(ide0:0, etc) public Pair getDiskDevice(String vmdkDatastorePath, boolean matchExactly) throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); + List devices = (List)_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); s_logger.info("Look for disk device info from volume : " + vmdkDatastorePath); DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath); String srcBaseName = dsSrcFile.getFileBaseName(); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualDisk) { s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber()); @@ -1837,7 +1837,7 @@ public class VirtualMachineMO extends BaseMO { return pathList; } - private String getDeviceBusName(VirtualDevice[] allDevices, VirtualDevice theDevice) throws Exception { + private String getDeviceBusName(List allDevices, VirtualDevice theDevice) throws Exception { for(VirtualDevice device : allDevices) { if(device.getKey() == theDevice.getControllerKey().intValue()) { if(device instanceof VirtualIDEController) { @@ -1854,8 +1854,8 @@ public class VirtualMachineMO extends BaseMO { public VirtualDisk[] getAllDiskDevice() throws Exception { List deviceList = new ArrayList(); - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + List devices = (List)_context.getVimClient().getDynamicProperty(_mor, "config.hardware.device"); + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualDisk) { deviceList.add((VirtualDisk)device); diff --git 
a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java index 3fbe5c8c34d..2e0238d8acb 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -97,6 +97,7 @@ public class VmwareClient { ctxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, url); ctxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true); + ctxt.put("com.sun.xml.internal.ws.request.timeout", 60000); serviceContent = vimPort.retrieveServiceContent(SVC_INST_REF); vimPort.login(serviceContent.getSessionManager(), userName, password, null); @@ -177,7 +178,7 @@ public class VmwareClient { */ Class dpCls = propertyValue.getClass(); String dynamicPropertyName = dpCls.getName(); - if (dynamicPropertyName.startsWith("ArrayOf")) { + if (dynamicPropertyName.indexOf("ArrayOf") != -1) { String methodName = "get" + dynamicPropertyName .substring(dynamicPropertyName.indexOf("ArrayOf") + "ArrayOf".length(), dynamicPropertyName.length()); diff --git a/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java b/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java index a857986de72..81a62d2993a 100644 --- a/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java +++ b/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareContextFactory.java @@ -16,12 +16,13 @@ // under the License. 
package com.cloud.hypervisor.vmware.mo; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.vmware.apputils.version.ExtendedAppUtil; + public class TestVmwareContextFactory { private static volatile int s_seq = 1; - + static { // skip certificate check System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); @@ -33,11 +34,10 @@ public class TestVmwareContextFactory { assert(vCenterPassword != null); String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; - ExtendedAppUtil appUtil = ExtendedAppUtil.initialize(vCenterAddress + "-" + s_seq++, params); - - appUtil.connect(); - VmwareContext context = new VmwareContext(appUtil, vCenterAddress); + VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); + vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + + VmwareContext context = new VmwareContext(vimClient, vCenterAddress); return context; } } diff --git a/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareMO.java b/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareMO.java index 2417ddf99de..c9807f443f1 100644 --- a/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareMO.java +++ b/vmware-base/test/com/cloud/hypervisor/vmware/mo/TestVmwareMO.java @@ -22,7 +22,6 @@ import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.mo.SnapshotDescriptor.SnapshotInfo; import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.cloud.serializer.GsonHelper; import com.cloud.utils.Pair; import com.cloud.utils.testcase.Log4jEnabledTestCase; import com.google.gson.Gson; diff --git a/vmware-base/test/com/cloud/vmware/TestVMWare.java b/vmware-base/test/com/cloud/vmware/TestVMWare.java index 43c784ce2e6..b0ec68d69ae 100644 --- 
a/vmware-base/test/com/cloud/vmware/TestVMWare.java +++ b/vmware-base/test/com/cloud/vmware/TestVMWare.java @@ -37,7 +37,6 @@ import javax.net.ssl.SSLSession; import org.apache.log4j.xml.DOMConfigurator; import com.cloud.utils.PropertiesUtil; -import com.vmware.apputils.version.ExtendedAppUtil; import com.vmware.vim25.HostIpConfig; import com.vmware.vim25.HostVirtualNicSpec; import com.vmware.vim25.HostConfigManager; From 640fa6be760177d11bca2d03805462a9bdf41e8e Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 7 Feb 2013 12:23:57 -0800 Subject: [PATCH 004/486] Fix a bug in converting Enum type to String. --- .../manager/VmwareStorageManagerImpl.java | 4 ++-- .../vmware/resource/VmwareResource.java | 6 ++--- .../hypervisor/vmware/mo/HttpNfcLeaseMO.java | 22 ++++++++++++++++++- .../vmware/mo/VirtualMachineMO.java | 14 ++++++------ .../hypervisor/vmware/util/VmwareHelper.java | 10 ++++----- 5 files changed, 38 insertions(+), 18 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 435db748680..fd3afe8e5c9 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -192,7 +192,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { workerVMName = hostService.getWorkerName(context, cmd, 0); // attach a volume to dummay wrapper VM for taking snapshot and exporting the VM for backup - if (!hyperHost.createBlankVm(workerVMName, 1, 512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString(), morDs, false)) { + if (!hyperHost.createBlankVm(workerVMName, 1, 512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier.OTHER_GUEST.value(), morDs, false)) { String msg = "Unable to create worker VM to execute 
BackupSnapshotCommand"; s_logger.error(msg); throw new Exception(msg); @@ -804,7 +804,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmConfig.setName(workerVmName); vmConfig.setMemoryMB((long) 4); vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.value()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 51b463ef47c..21f17328c53 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -2094,7 +2094,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), getReserveCpuMHz(vmSpec.getSpeed().intValue()), vmSpec.getLimitCpuUse(), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) { + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), rootDiskDataStoreDetails.first(), false)) { throw new Exception("Failed to create VM. 
vmName: " + vmName); } } @@ -2126,7 +2126,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), getReserveCpuMHz(vmSpec.getSpeed().intValue()), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse()); + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), vmSpec.getLimitCpuUse()); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; int i = 0; @@ -3935,7 +3935,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmConfig.setName(vmName); vmConfig.setMemoryMB((long) 4); // vmware request minimum of 4 MB vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.value()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java index 1198b3365d7..ca362809f3a 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HttpNfcLeaseMO.java @@ -20,17 +20,23 @@ import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.rmi.RemoteException; +import java.util.Arrays; import java.util.List; import org.apache.log4j.Logger; +import org.w3c.dom.Element; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.HttpNfcLeaseInfo; import com.vmware.vim25.HttpNfcLeaseManifestEntry; import 
com.vmware.vim25.HttpNfcLeaseState; import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.ObjectSpec; import com.vmware.vim25.OvfCreateImportSpecResult; import com.vmware.vim25.OvfFileItem; +import com.vmware.vim25.PropertyFilterSpec; +import com.vmware.vim25.PropertySpec; public class HttpNfcLeaseMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(HttpNfcLeaseMO.class); @@ -44,7 +50,19 @@ public class HttpNfcLeaseMO extends BaseMO { } public HttpNfcLeaseState getState() throws Exception { - return (HttpNfcLeaseState)_context.getVimClient().getDynamicProperty(_mor, "state"); + Object stateProp = _context.getVimClient().getDynamicProperty(_mor, "state"); + // Due to some issue in JAX-WS De-serialization getting the information + // from the nodes + assert (stateProp.toString().contains("val: null")); + String stateVal = null; + Element stateElement = (Element) stateProp; + if (stateElement != null && stateElement.getFirstChild() != null) { + stateVal = stateElement.getFirstChild().getTextContent(); + } + if (stateVal != null) { + return HttpNfcLeaseState.fromValue(stateVal); + } + return HttpNfcLeaseState.ERROR; } public HttpNfcLeaseState waitState(HttpNfcLeaseState[] states) throws Exception { @@ -59,6 +77,8 @@ public class HttpNfcLeaseMO extends BaseMO { } } + + public HttpNfcLeaseInfo getLeaseInfo() throws Exception { return (HttpNfcLeaseInfo)_context.getVimClient().getDynamicProperty(_mor, "info"); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 6f836c442b5..2b07f9ad66d 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -571,14 +571,14 @@ public class VirtualMachineMO extends BaseMO { VirtualMachineRelocateSpecDiskLocator loc = new VirtualMachineRelocateSpecDiskLocator(); loc.setDatastore(morDs); 
loc.setDiskId(independentDisks[i].getKey()); - loc.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.MOVE_ALL_DISK_BACKINGS_AND_DISALLOW_SHARING.toString()); + loc.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.MOVE_ALL_DISK_BACKINGS_AND_DISALLOW_SHARING.value()); diskLocator.add(loc); } - rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.toString()); + rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.value()); rSpec.getDisk().addAll(diskLocator); } else { - rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.toString()); + rSpec.setDiskMoveType(VirtualMachineRelocateDiskMoveOptions.CREATE_NEW_CHILD_DISK_BACKING.value()); } rSpec.setPool(morResourcePool); @@ -868,7 +868,7 @@ public class VirtualMachineMO extends BaseMO { || diskType == VirtualDiskType.EAGER_ZEROED_THICK) { VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); - backingInfo.setDiskMode(diskMode.PERSISTENT.toString()); + backingInfo.setDiskMode(diskMode.PERSISTENT.value()); if(diskType == VirtualDiskType.THIN) { backingInfo.setThinProvisioned(true); } else { @@ -894,7 +894,7 @@ public class VirtualMachineMO extends BaseMO { } backingInfo.setDeviceName(rdmDeviceName); if(diskType == VirtualDiskType.RDM) { - backingInfo.setDiskMode(diskMode.PERSISTENT.toString()); + backingInfo.setDiskMode(diskMode.PERSISTENT.value()); } backingInfo.setDatastore(morDs); @@ -1894,10 +1894,10 @@ public class VirtualMachineMO extends BaseMO { } public int tryGetIDEDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). + List devices = (List)_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualIDEController) { return ((VirtualIDEController)device).getKey(); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 8e6947fd072..32e36a63f3d 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -169,7 +169,7 @@ public class VmwareHelper { VirtualDisk disk = new VirtualDisk(); VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); - backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value()); backingInfo.setThinProvisioned(true); backingInfo.setEagerlyScrub(false); backingInfo.setDatastore(morDs); @@ -273,7 +273,7 @@ public class VmwareHelper { VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); backingInfo.setDatastore(morDs); backingInfo.setFileName(vmdkDatastorePathChain[0]); - backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value()); if(vmdkDatastorePathChain.length > 1) { String[] parentDisks = new String[vmdkDatastorePathChain.length - 1]; for(int i = 0; i < vmdkDatastorePathChain.length - 1; i++) @@ -313,7 +313,7 @@ public class VmwareHelper { VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); backingInfo.setDatastore(vmdkDatastorePathChain[0].second()); backingInfo.setFileName(vmdkDatastorePathChain[0].first()); - backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + backingInfo.setDiskMode(VirtualDiskMode.PERSISTENT.value()); if(vmdkDatastorePathChain.length > 1) { Pair[] parentDisks = new 
Pair[vmdkDatastorePathChain.length - 1]; for(int i = 0; i < vmdkDatastorePathChain.length - 1; i++) @@ -346,7 +346,7 @@ public class VmwareHelper { VirtualDiskFlatVer2BackingInfo parentBacking = new VirtualDiskFlatVer2BackingInfo(); parentBacking.setDatastore(morDs); - parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.value()); if(parentDatastorePathList.length > 1) { String[] nextDatastorePathList = new String[parentDatastorePathList.length -1]; @@ -364,7 +364,7 @@ public class VmwareHelper { VirtualDiskFlatVer2BackingInfo parentBacking = new VirtualDiskFlatVer2BackingInfo(); parentBacking.setDatastore(parentDatastorePathList[0].second()); - parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.toString()); + parentBacking.setDiskMode(VirtualDiskMode.PERSISTENT.value()); if(parentDatastorePathList.length > 1) { Pair[] nextDatastorePathList = new Pair[parentDatastorePathList.length -1]; From 14ef9151a54201e2c12f68a353df0da4eac1df1d Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 7 Feb 2013 16:23:56 -0800 Subject: [PATCH 005/486] Fix ClassCastException. 
--- .../src/com/cloud/configuration/Config.java | 6 ++-- .../vmware/mo/VirtualMachineMO.java | 30 +++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 7592b6bdfca..5002ca14722 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -258,8 +258,8 @@ public enum Config { VmwareAdditionalVncPortRangeSize("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.size", "1000", "Start port number of additional VNC port range", null), //VmwareGuestNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.guest.nic.device.type", "E1000", "Ethernet card type used in guest VM, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null), VmwarePerClusterHostMax("Advanced", ManagementServer.class, Integer.class, "vmware.percluster.host.max", "8", "maxmium hosts per vCenter cluster(do not let it grow over 8)", "1-8"), - VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "false", "Specify whether or not to reserve CPU based on CPU overprovisioning factor", null), - VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "false", "Specify whether or not to reserve memory based on memory overprovisioning factor", null), + VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "true", "Specify whether or not to reserve CPU based on CPU overprovisioning factor", null), + VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "true", "Specify whether or not to reserve memory based on memory overprovisioning factor", null), VmwareRootDiskControllerType("Advanced", ManagementServer.class, String.class, "vmware.root.disk.controller", "ide", "Specify the default disk controller for root volumes, valid values are scsi, 
ide", null), VmwareSystemVmNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.systemvm.nic.device.type", "E1000", "Specify the default network device type for system VMs, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null), VmwareRecycleHungWorker("Advanced", ManagementServer.class, Boolean.class, "vmware.recycle.hung.wokervm", "false", "Specify whether or not to recycle hung worker VMs", null), @@ -358,7 +358,7 @@ public enum Config { ConcurrentSnapshotsThresholdPerHost("Advanced", ManagementServer.class, Long.class, "concurrent.snapshots.threshold.perhost", null, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", null), NetworkIPv6SearchRetryMax("Network", ManagementServer.class, Integer.class, "network.ipv6.search.retry.max", "10000", "The maximum number of retrying times to search for an available IPv6 address in the table", null), - + ExternalBaremetalSystemUrl("Advanced", ManagementServer.class, String.class, "external.baremetal.system.url", null, "url of external baremetal system that CloudStack will talk to", null), ExternalBaremetalResourceClassName("Advanced", ManagementServer.class, String.class, "external,baremetal.resource.classname", null, "class name for handling external baremetal resource", null), EnableBaremetalSecurityGroupAgentEcho("Advanced", ManagementServer.class, Boolean.class, "enable.baremetal.securitygroup.agent.echo", "false", "After starting provision process, periodcially echo security agent installed in the template. 
Treat provisioning as success only if echo successfully", null), diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 2b07f9ad66d..0dfb2344e38 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -1663,10 +1663,10 @@ public class VirtualMachineMO extends BaseMO { } public int getScsiDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualLsiLogicController) { return device.getKey(); @@ -1679,10 +1679,10 @@ public class VirtualMachineMO extends BaseMO { } public int getScsiDeviceControllerKeyNoException() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualLsiLogicController) { return device.getKey(); @@ -1909,10 +1909,10 @@ public class VirtualMachineMO extends BaseMO { } public int getIDEDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). + List devices = (List)_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualIDEController) { return ((VirtualIDEController)device).getKey(); @@ -1930,9 +1930,9 @@ public class VirtualMachineMO extends BaseMO { } public VirtualDevice getIsoDevice() throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualCdrom) { return device; @@ -1943,10 +1943,10 @@ public class VirtualMachineMO extends BaseMO { } public int getPCIDeviceControllerKey() throws Exception { - VirtualDevice[] devices = (VirtualDevice [])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device instanceof VirtualPCIController) { return ((VirtualPCIController)device).getKey(); @@ -1964,11 +1964,11 @@ public class VirtualMachineMO extends BaseMO { } public int getNextDeviceNumber(int controllerKey) throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). + List devices = (List)_context.getVimClient(). 
getDynamicProperty(_mor, "config.hardware.device"); int deviceNumber = -1; - if(devices != null && devices.length > 0) { + if(devices != null && devices.size() > 0) { for(VirtualDevice device : devices) { if(device.getControllerKey() != null && device.getControllerKey().intValue() == controllerKey) { if(device.getUnitNumber() != null && device.getUnitNumber().intValue() > deviceNumber) { @@ -1981,7 +1981,7 @@ public class VirtualMachineMO extends BaseMO { } public VirtualDevice[] getNicDevices() throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); List nics = new ArrayList(); @@ -1997,7 +1997,7 @@ public class VirtualMachineMO extends BaseMO { } public Pair getNicDeviceIndex(String networkNamePrefix) throws Exception { - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); List nics = new ArrayList(); @@ -2057,7 +2057,7 @@ public class VirtualMachineMO extends BaseMO { List returnList = new ArrayList(); - VirtualDevice[] devices = (VirtualDevice[])_context.getVimClient(). + List devices = (List)_context.getVimClient(). getDynamicProperty(_mor, "config.hardware.device"); if(devices != null) { From c0442e2556a7b10cfb363a2d4d55b7fb9c381297 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Tue, 12 Feb 2013 16:56:02 -0800 Subject: [PATCH 006/486] Fix a bug in register template, cannot find template adapter. 
--- client/tomcatconf/componentContext.xml.in | 8 ++ .../tomcatconf/nonossComponentContext.xml.in | 8 ++ .../baremetal/BareMetalTemplateAdapter.java | 33 ++++---- ...er.java => HypervisorTemplateAdapter.java} | 75 +++++++++---------- 4 files changed, 69 insertions(+), 55 deletions(-) rename server/src/com/cloud/template/{HyervisorTemplateAdapter.java => HypervisorTemplateAdapter.java} (97%) diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 43d31fb4b95..fc6c3e346c0 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -95,6 +95,14 @@ + + + + + + + + diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index 5532becba18..57f7ad5516f 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ b/client/tomcatconf/nonossComponentContext.xml.in @@ -104,6 +104,14 @@ + + + + + + + + diff --git a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java b/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java index 965c912a41e..33ab4684a63 100755 --- a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java +++ b/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java @@ -48,17 +48,16 @@ import com.cloud.user.Account; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; -@Component @Local(value=TemplateAdapter.class) public class BareMetalTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { private final static Logger s_logger = Logger.getLogger(BareMetalTemplateAdapter.class); @Inject HostDao _hostDao; @Inject ResourceManager _resourceMgr; - + @Override public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); - + if (profile.getZoneId() == null || profile.getZoneId() == -1) { List dcs = _dcDao.listAllIncludingRemoved(); for (DataCenterVO dc : dcs) { @@ 
-73,15 +72,15 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem throw new CloudRuntimeException("Please add PXE server before adding baremetal template in zone " + profile.getZoneId()); } } - + return profile; } - + @Override public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { throw new CloudRuntimeException("Baremetal doesn't support ISO template"); } - + private void templateCreateUsage(VMTemplateVO template, HostVO host) { if (template.getAccountId() != Account.ACCOUNT_ID_SYSTEM) { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_TEMPLATE_CREATE, template.getAccountId(), host.getDataCenterId(), @@ -89,12 +88,12 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem template.getClass().getName(), template.getUuid()); } } - + @Override public VMTemplateVO create(TemplateProfile profile) { VMTemplateVO template = persistTemplate(profile); Long zoneId = profile.getZoneId(); - + /* There is no secondary storage vm for baremetal, we use pxe server id. * Tempalte is not bound to pxeserver right now, and we assume the pxeserver * cannot be removed once it was added. 
so we use host id of first found pxe @@ -122,7 +121,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltHostDao.persist(vmTemplateHost); templateCreateUsage(template, pxe); } - + _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template); return template; } @@ -130,7 +129,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem public TemplateProfile prepareDelete(DeleteIsoCmd cmd) { throw new CloudRuntimeException("Baremetal doesn't support ISO, how the delete get here???"); } - + @Override @DB public boolean delete(TemplateProfile profile) { VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); @@ -138,7 +137,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem boolean success = true; String zoneName; boolean isAllZone; - + if (!template.isCrossZones() && profile.getZoneId() != null) { isAllZone = false; zoneName = profile.getZoneId().toString(); @@ -146,12 +145,12 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem zoneName = "all zones"; isAllZone = true; } - + s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); String eventType = EventTypes.EVENT_TEMPLATE_DELETE; List templateHostVOs = _tmpltHostDao.listByTemplateId(templateId); - + for (VMTemplateHostVO vo : templateHostVOs) { VMTemplateHostVO lock = null; try { @@ -182,13 +181,13 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem } } } - + s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - + // If there are no more non-destroyed template host entries for this template, delete it if (success && (_tmpltHostDao.listByTemplateId(templateId).size() == 0)) { long accountId = 
template.getAccountId(); - + VMTemplateVO lock = _tmpltDao.acquireInLockTable(templateId); try { @@ -207,7 +206,7 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem } s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); } - + return success; } } diff --git a/server/src/com/cloud/template/HyervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java similarity index 97% rename from server/src/com/cloud/template/HyervisorTemplateAdapter.java rename to server/src/com/cloud/template/HypervisorTemplateAdapter.java index 089f6508d7e..b93d7e5ba42 100755 --- a/server/src/com/cloud/template/HyervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -63,18 +63,17 @@ import javax.ejb.Local; import java.net.*; import java.util.List; -@Component @Local(value=TemplateAdapter.class) -public class HyervisorTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(HyervisorTemplateAdapter.class); +public class HypervisorTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { + private final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class); @Inject DownloadMonitor _downloadMonitor; @Inject SecondaryStorageVmManager _ssvmMgr; @Inject AgentManager _agentMgr; - + private String validateUrl(String url) { try { URI uri = new URI(url); - if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") + if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { throw new IllegalArgumentException("Unsupported scheme for url: " + url); } @@ -95,34 +94,34 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } catch (UnknownHostException uhe) { throw 
new IllegalArgumentException("Unable to resolve " + host); } - + return uri.toString(); } catch (URISyntaxException e) { throw new IllegalArgumentException("Invalid URL " + url); } } - + @Override public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - + if((!url.toLowerCase().endsWith("iso"))&&(!url.toLowerCase().endsWith("iso.zip"))&&(!url.toLowerCase().endsWith("iso.bz2")) &&(!url.toLowerCase().endsWith("iso.gz"))){ throw new InvalidParameterValueException("Please specify a valid iso"); } - + profile.setUrl(validateUrl(url)); return profile; } - + @Override public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - + if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) - &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) + &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) @@ -130,40 +129,40 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ throw new InvalidParameterValueException("Please specify a valid "+ cmd.getFormat().toLowerCase()); } - + if ((cmd.getFormat().equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) || (cmd.getFormat().equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && 
!url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) || (cmd.getFormat().equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) || (cmd.getFormat().equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) { throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is an invalid for the format " + cmd.getFormat().toLowerCase()); } - + profile.setUrl(validateUrl(url)); return profile; } - + @Override public VMTemplateVO create(TemplateProfile profile) { VMTemplateVO template = persistTemplate(profile); - + if (template == null) { throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate()); } - + _downloadMonitor.downloadTemplateToStorage(template, profile.getZoneId()); _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template); - + return template; } @Override @DB public boolean delete(TemplateProfile profile) { boolean success = true; - + VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); Long zoneId = profile.getZoneId(); Long templateId = template.getId(); - + String zoneName; List secondaryStorageHosts; if (!template.isCrossZones() && zoneId != null) { @@ -174,9 +173,9 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem zoneName = "(all zones)"; secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInAllZones(); } - + s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - + // Make sure the template is downloaded to all the necessary secondary storage hosts for (HostVO secondaryStorageHost : secondaryStorageHosts) { long hostId = secondaryStorageHost.getId(); @@ -189,16 +188,16 @@ public class HyervisorTemplateAdapter extends 
TemplateAdapterBase implements Tem } } } - + Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); String eventType = ""; - + if (template.getFormat().equals(ImageFormat.ISO)){ eventType = EventTypes.EVENT_ISO_DELETE; } else { eventType = EventTypes.EVENT_TEMPLATE_DELETE; } - + // Iterate through all necessary secondary storage hosts and mark the template on each host as destroyed for (HostVO secondaryStorageHost : secondaryStorageHosts) { long hostId = secondaryStorageHost.getId(); @@ -229,7 +228,7 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltHostDao.remove(templateHostVO.getId()); } VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(sZoneId, templateId); - + if (templateZone != null) { _tmpltZoneDao.remove(templateZone.getId()); } @@ -239,18 +238,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } } } - + if (!success) { break; } } - + s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - + // If there are no more non-destroyed template host entries for this template, delete it if (success && (_tmpltHostDao.listByTemplateId(templateId).size() == 0)) { long accountId = template.getAccountId(); - + VMTemplateVO lock = _tmpltDao.acquireInLockTable(templateId); try { @@ -267,18 +266,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltDao.releaseFromLockTable(lock.getId()); } } - + s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); } - + return success; } - + public TemplateProfile prepareDelete(DeleteTemplateCmd cmd) { TemplateProfile profile = super.prepareDelete(cmd); VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); Long zoneId = profile.getZoneId(); - + if (template.getTemplateType() == TemplateType.SYSTEM) { throw new 
InvalidParameterValueException("The DomR template cannot be deleted."); } @@ -286,18 +285,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem if (zoneId != null && (_ssvmMgr.findSecondaryStorageHost(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage host in the specified zone."); } - + return profile; } - + public TemplateProfile prepareDelete(DeleteIsoCmd cmd) { TemplateProfile profile = super.prepareDelete(cmd); Long zoneId = profile.getZoneId(); - + if (zoneId != null && (_ssvmMgr.findSecondaryStorageHost(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage host in the specified zone."); } - + return profile; } } From c20ea048b4a01ddbbf80077b65f6e75d8f58e7bd Mon Sep 17 00:00:00 2001 From: Min Chen Date: Tue, 12 Feb 2013 16:57:20 -0800 Subject: [PATCH 007/486] Add new windows 8 guest os entries to DB. --- setup/db/db/schema-40to410.sql | 8 ++++++++ setup/db/templates.sql | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index bb9c815af05..a51ddb3fe8c 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -1299,3 +1299,11 @@ INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/a ALTER TABLE `cloud`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; ALTER TABLE `cloud`.`user` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; ALTER TABLE `cloud`.`domain` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; + + +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (208, UUID(), 6, 'Windows 8'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (209, UUID(), 6, 'Windows 8 (64 bit)'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (210, UUID(), 6, 'Windows 8 Server (64 bit)'); +INSERT INTO 
`cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8', 208); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 (64 bit)', 209); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 Server (64 bit)', 210); diff --git a/setup/db/templates.sql b/setup/db/templates.sql index 9980b159630..34ec32d0ce7 100755 --- a/setup/db/templates.sql +++ b/setup/db/templates.sql @@ -219,6 +219,11 @@ INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (201 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (202, UUID(), 5, 'Other SUSE Linux(32-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (203, UUID(), 5, 'Other SUSE Linux(64-bit)'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (208, UUID(), 6, 'Windows 8'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (209, UUID(), 6, 'Windows 8 (64 bit)'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (210, UUID(), 6, 'Windows 8 Server (64 bit)'); + + INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'CentOS 4.5 (32-bit)', 1); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'CentOS 4.6 (32-bit)', 2); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'CentOS 4.7 (32-bit)', 3); @@ -323,6 +328,10 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows NT 4', 64); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 
'Microsoft Windows 3.1', 65); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8', 208); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 (64 bit)', 209); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 Server (64 bit)', 210); + INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Red Hat Enterprise Linux 5.0(32-bit)', 30); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Red Hat Enterprise Linux 5.1(32-bit)', 32); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Red Hat Enterprise Linux 5.2(32-bit)', 34); From d346f632c3a4e3c880fe2cf57e10b7102615f39f Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 13 Feb 2013 15:27:03 -0800 Subject: [PATCH 008/486] add missing file, fix build --- .../com/cloud/vm/snapshot/VMSnapshotVO.java | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 server/src/com/cloud/vm/snapshot/VMSnapshotVO.java diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java b/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java new file mode 100644 index 00000000000..03d4945fda0 --- /dev/null +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -0,0 +1,224 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm.snapshot; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.TableGenerator; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "vm_snapshots") +public class VMSnapshotVO implements VMSnapshot { + @Id + @TableGenerator(name = "vm_snapshots_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshots_seq", allocationSize = 1) + @GeneratedValue(strategy = GenerationType.TABLE) + @Column(name = "id") + long id; + + @Column(name = "uuid") + String uuid = UUID.randomUUID().toString(); + + @Column(name = "name") + String name; + + @Column(name = "display_name") + String displayName; + + @Column(name = "description") + String description; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "account_id") + long accountId; + + @Column(name = "domain_id") + long domainId; + + @Column(name = "vm_snapshot_type") + @Enumerated(EnumType.STRING) + VMSnapshot.Type type; + + @Column(name = "state", updatable = true, nullable = false) + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = 
GenericDao.REMOVED_COLUMN) + Date removed; + + @Column(name = "current") + Boolean current; + + @Column(name = "parent") + Long parent; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + public Long getParent() { + return parent; + } + + public void setParent(Long parent) { + this.parent = parent; + } + + public VMSnapshotVO() { + + } + + public Date getRemoved() { + return removed; + } + + public VMSnapshotVO(Long accountId, Long domainId, Long vmId, + String description, String vmSnapshotName, String vsDisplayName, + Long serviceOfferingId, Type type, Boolean current) { + this.accountId = accountId; + this.domainId = domainId; + this.vmId = vmId; + this.state = State.Allocated; + this.description = description; + this.name = vmSnapshotName; + this.displayName = vsDisplayName; + this.type = type; + this.current = current; + } + + public String getDescription() { + return description; + } + + @Override + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + @Override + public long getId() { + return id; + } + + @Override + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + public Boolean 
getCurrent() { + return current; + } + + public void setCurrent(Boolean current) { + this.current = current; + } + + @Override + public long getUpdatedCount() { + return updatedCount; + } + + @Override + public void incrUpdatedCount() { + this.updatedCount++; + } + + @Override + public Date getUpdated() { + return updated; + } + + @Override + public Type getType() { + return type; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } +} From f542c320d340fc8ff235a79df5411f4a4e97601f Mon Sep 17 00:00:00 2001 From: Alex Huang Date: Wed, 13 Feb 2013 15:41:06 -0800 Subject: [PATCH 009/486] Removed a bunch of missing files and useless imports --- core/src/com/cloud/resource/DiskPreparer.java | 42 ----------- .../com/cloud/resource/NetworkPreparer.java | 29 -------- .../storage/PrimaryStorageHeadResource.java | 52 -------------- core/src/com/cloud/vm/VirtualEnvironment.java | 46 ------------ core/src/com/cloud/vm/VirtualNetwork.java | 72 ------------------- plugins/parent/pom.xml | 42 ----------- .../cloud/vm/VirtualMachineManagerImpl.java | 42 ++++------- 7 files changed, 13 insertions(+), 312 deletions(-) delete mode 100644 core/src/com/cloud/resource/DiskPreparer.java delete mode 100644 core/src/com/cloud/resource/NetworkPreparer.java delete mode 100644 core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java delete mode 100644 core/src/com/cloud/vm/VirtualEnvironment.java delete mode 100644 core/src/com/cloud/vm/VirtualNetwork.java delete mode 100644 plugins/parent/pom.xml diff --git a/core/src/com/cloud/resource/DiskPreparer.java b/core/src/com/cloud/resource/DiskPreparer.java deleted file mode 100644 index 77b8f7c1b7f..00000000000 --- a/core/src/com/cloud/resource/DiskPreparer.java +++ /dev/null @@ -1,42 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.resource; - -import com.cloud.storage.VolumeVO; -import com.cloud.template.VirtualMachineTemplate.BootloaderType; -import com.cloud.utils.component.Adapter; - -/** - * DiskMounter mounts and unmounts disk for VMs - * to consume. - * - */ -public interface DiskPreparer extends Adapter { - /** - * Mounts a volumeVO and returns a path. - * - * @param vol - * @return - */ - public String mount(String vmName, VolumeVO vol, BootloaderType type); - - /** - * Unmounts - */ - public boolean unmount(String path); - -} diff --git a/core/src/com/cloud/resource/NetworkPreparer.java b/core/src/com/cloud/resource/NetworkPreparer.java deleted file mode 100644 index d7034535e9f..00000000000 --- a/core/src/com/cloud/resource/NetworkPreparer.java +++ /dev/null @@ -1,29 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.resource; - -import com.cloud.utils.component.Adapter; - -/** - * Prepares the network for VM. - */ -public interface NetworkPreparer extends Adapter { - - String setup(String vnet); - - void cleanup(String vnet); -} diff --git a/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java b/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java deleted file mode 100644 index 65297a39b96..00000000000 --- a/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java +++ /dev/null @@ -1,52 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.resource.storage; - -import com.cloud.agent.api.storage.CreateAnswer; -import com.cloud.agent.api.storage.CreateCommand; -import com.cloud.agent.api.storage.DestroyAnswer; -import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.DownloadAnswer; -import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; -import com.cloud.resource.ServerResource; - -/** - * a primary storage. - * - */ -public interface PrimaryStorageHeadResource extends ServerResource { - /** - * Downloads the template to the primary storage. - * @param cmd - * @return - */ - DownloadAnswer execute(PrimaryStorageDownloadCommand cmd); - - /** - * Creates volumes for the VM. - * @param cmd - * @return - */ - CreateAnswer execute(CreateCommand cmd); - - /** - * Destroys volumes for the VM. - * @param cmd - * @return - */ - DestroyAnswer execute(DestroyCommand cmd); -} diff --git a/core/src/com/cloud/vm/VirtualEnvironment.java b/core/src/com/cloud/vm/VirtualEnvironment.java deleted file mode 100644 index 79d4a59bbfc..00000000000 --- a/core/src/com/cloud/vm/VirtualEnvironment.java +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.vm; - -import java.util.List; - -/** - * - * be an information carrier within one thread only. - * - */ -public class VirtualEnvironment { - /** - * The actual machine - */ - public VirtualMachine machine; - - /** - * Disks to assign to the machine in order. - */ - public List disks; - - /** - * Networks to assign to the machine. - */ - public List networks; - - /** - * Boot options to assign to the machine. - */ - public String bootOptions; -} diff --git a/core/src/com/cloud/vm/VirtualNetwork.java b/core/src/com/cloud/vm/VirtualNetwork.java deleted file mode 100644 index ace3b80769f..00000000000 --- a/core/src/com/cloud/vm/VirtualNetwork.java +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.vm; - -import com.cloud.network.Networks.IsolationType; -import com.cloud.network.Networks.Mode; - -/** - * VirtualNetwork describes from a management level the - * machine. - */ -public class VirtualNetwork { - /** - * The gateway for this network. - */ - public String gateway; - - /** - * Netmask - */ - public String netmask; - - /** - * ip address. null if mode is DHCP. - */ - public String ip; - - /** - * Mac Address. 
- */ - public String mac; - - /** - * rate limit on this network. -1 if no limit. - */ - public long rate; - - /** - * tag for virtualization. - */ - public String tag; - - /** - * mode to acquire ip address. - */ - public Mode mode; - - /** - * Isolation method for networking. - */ - public IsolationType method; - - public boolean firewalled; - - public int[] openPorts; - - public int[] closedPorts; -} diff --git a/plugins/parent/pom.xml b/plugins/parent/pom.xml deleted file mode 100644 index 3a0bf3ce3cf..00000000000 --- a/plugins/parent/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - 4.0.0 - cloud-plugin-parent - Apache CloudStack Plugin POM - pom - - com.cloud - cloud-parent - 4.0.0-SNAPSHOT - ../../parent/pom.xml - - - - com.cloud - cloud-server - ${project.version} - - - - install - src - - diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index bb8f8f6abd6..d0a2305ecab 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -37,7 +37,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.AgentManager.OnError; @@ -69,12 +68,10 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.manager.Commands; import com.cloud.agent.manager.allocator.HostAllocator; import com.cloud.alert.AlertManager; -import com.cloud.capacity.CapacityManager; import com.cloud.cluster.ClusterManager; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; @@ -152,10 +149,7 @@ import com.cloud.utils.fsm.StateMachine2; import 
com.cloud.vm.ItWorkVO.Step; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.ConsoleProxyDao; -import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; -import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshot; @@ -194,12 +188,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Inject protected UserVmDao _userVmDao; @Inject - protected DomainRouterDao _routerDao; - @Inject - protected ConsoleProxyDao _consoleDao; - @Inject - protected SecondaryStorageVmDao _secondaryDao; - @Inject protected NicDao _nicsDao; @Inject protected AccountManager _accountMgr; @@ -214,12 +202,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Inject protected VolumeDao _volsDao; @Inject - protected ConsoleProxyManager _consoleProxyMgr; - @Inject protected ConfigurationManager _configMgr; @Inject - protected CapacityManager _capacityMgr; - @Inject protected HighAvailabilityManager _haMgr; @Inject protected HostPodDao _podDao; @@ -2591,7 +2575,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _networkModel.getNetworkRate(network.getId(), vm.getId()), _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); - + //1) Unplug the nic if (vm.getState() == State.Running) { NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); @@ -2608,11 +2592,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId()); } - + //2) Release the nic _networkMgr.releaseNic(vmProfile, nic); s_logger.debug("Successfully released nic " + nic 
+ "for vm " + vm); - + //3) Remove the nic _networkMgr.removeNic(vmProfile, nic); _nicsDao.expunge(nic.getId()); @@ -2647,7 +2631,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Could not get a nic with " + network); return false; } - + // don't delete default NIC on a user VM if (nic.isDefaultNic() && vm.getType() == VirtualMachine.Type.User ) { s_logger.warn("Failed to remove nic from " + vm + " in " + network + ", nic is default."); @@ -2661,15 +2645,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac //1) Unplug the nic if (vm.getState() == State.Running) { - NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); - s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); - boolean result = vmGuru.unplugNic(network, nicTO, vmTO, context, dest); - if (result) { - s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network ); - } else { - s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); - return false; - } + NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); + s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); + boolean result = vmGuru.unplugNic(network, nicTO, vmTO, context, dest); + if (result) { + s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network ); + } else { + s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); + return false; + } } else if (vm.getState() != State.Stopped) { s_logger.warn("Unable to remove vm " + vm + " from network " + network); throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", From 35cf56862f603309d1778c0f9be0ee3d7e92f3c7 Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Wed, 13 Feb 2013 16:37:45 -0700 Subject: [PATCH 010/486] Summary: KVM - Fix 
physical net parsing when traffic label points to tagged dev Detail: If your traffic label points to a bridge that is on a tagged interface rather than a real physical interface, cloudstack may not parse the physical interface correctly, bringing up tagged interfaces on the tagged interface. Signed-off-by: Marcus Sorensen 1360798665 -0700 --- .../cloud/hypervisor/kvm/resource/LibvirtComputingResource.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 552afb1e665..5d17fd0b9c6 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -858,7 +858,7 @@ ServerResource { private String getPif(String bridge) { String pif = matchPifFileInDirectory(bridge); - File vlanfile = new File("/proc/net/vlan" + pif); + File vlanfile = new File("/proc/net/vlan/" + pif); if (vlanfile.isFile()) { pif = Script.runSimpleBashScript("grep ^Device\\: /proc/net/vlan/" From f0086df0c79ac0b68a1ddce1797ae1a90b3ea995 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Wed, 13 Feb 2013 17:51:55 -0800 Subject: [PATCH 011/486] CLOUDSTACK-1134: [EC2 Query API] DescribeSnapshots, 'n' ListVolumes get fired on CS for displaying 'n' Snapshots taken from the same Volume For snapshots taken from the same volume re-use the response obtained by calling listVolumes --- .../bridge/service/EC2SoapServiceImpl.java | 5 +++- .../bridge/service/core/ec2/EC2Engine.java | 29 ++++++++++++++----- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java index 9fc581be86f..cebac0b159e 100644 --- 
a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java +++ b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java @@ -1905,7 +1905,10 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface { param3.setStartTime( cal ); param3.setOwnerId(ownerId); - param3.setVolumeSize( snap.getVolumeSize().toString()); + if ( snap.getVolumeSize() == null ) + param3.setVolumeSize("0"); + else + param3.setVolumeSize( snap.getVolumeSize().toString() ); param3.setDescription( snap.getName()); param3.setOwnerAlias( snap.getAccountName() ); diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java index a835d8a258b..281ecbd90bf 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java @@ -25,9 +25,12 @@ import java.security.SignatureException; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.UUID; import javax.inject.Inject; @@ -443,25 +446,35 @@ public class EC2Engine extends ManagerBase { */ public EC2DescribeSnapshotsResponse handleRequest( EC2DescribeSnapshots request ) { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); EC2SnapshotFilterSet sfs = request.getFilterSet(); EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet(); try { - // -> query to get the volume size for each snapshot EC2DescribeSnapshotsResponse response = listSnapshots( request.getSnapshotSet(), getResourceTags(tagKeyValueSet)); if (response == null) { return new EC2DescribeSnapshotsResponse(); } EC2Snapshot[] snapshots = response.getSnapshotSet(); - for (EC2Snapshot snap : snapshots) { - volumes = listVolumes(snap.getVolumeId(), null, volumes, null); - EC2Volume[] volSet = 
volumes.getVolumeSet(); - if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); - volumes.reset(); + // -> query to get the volume size for each snapshot + HashMap volumeIdSize = new HashMap(); + for( EC2Snapshot snap : snapshots ) { + Boolean duplicateVolume = false; + Long size = null; + if ( volumeIdSize.containsKey(snap.getVolumeId()) ) { + size = volumeIdSize.get(snap.getVolumeId()); + duplicateVolume = true; + break; + } + if ( !duplicateVolume ) { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + volumes = listVolumes(snap.getVolumeId(), null, volumes, null); + EC2Volume[] volumeSet = volumes.getVolumeSet(); + if (volumeSet.length > 0) size = volumeSet[0].getSize(); + volumeIdSize.put(snap.getVolumeId(), size); + } + snap.setVolumeSize(size); } - if ( null == sfs ) return response; else return sfs.evaluate( response ); From fe44e39dfd9e4d91e233858f32cb4510125ea6db Mon Sep 17 00:00:00 2001 From: Mice Xia Date: Thu, 14 Feb 2013 10:00:36 +0800 Subject: [PATCH 012/486] CS648 ckeck in missing VO file --- .../com/cloud/vm/snapshot/VMSnapshotVO.java | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 core/src/com/cloud/vm/snapshot/VMSnapshotVO.java diff --git a/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java b/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java new file mode 100644 index 00000000000..03d4945fda0 --- /dev/null +++ b/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -0,0 +1,224 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm.snapshot; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.TableGenerator; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "vm_snapshots") +public class VMSnapshotVO implements VMSnapshot { + @Id + @TableGenerator(name = "vm_snapshots_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshots_seq", allocationSize = 1) + @GeneratedValue(strategy = GenerationType.TABLE) + @Column(name = "id") + long id; + + @Column(name = "uuid") + String uuid = UUID.randomUUID().toString(); + + @Column(name = "name") + String name; + + @Column(name = "display_name") + String displayName; + + @Column(name = "description") + String description; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "account_id") + long accountId; + + @Column(name = "domain_id") + long domainId; + + @Column(name = "vm_snapshot_type") + @Enumerated(EnumType.STRING) + VMSnapshot.Type type; + + @Column(name = "state", updatable = true, nullable = false) + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = 
GenericDao.REMOVED_COLUMN) + Date removed; + + @Column(name = "current") + Boolean current; + + @Column(name = "parent") + Long parent; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + public Long getParent() { + return parent; + } + + public void setParent(Long parent) { + this.parent = parent; + } + + public VMSnapshotVO() { + + } + + public Date getRemoved() { + return removed; + } + + public VMSnapshotVO(Long accountId, Long domainId, Long vmId, + String description, String vmSnapshotName, String vsDisplayName, + Long serviceOfferingId, Type type, Boolean current) { + this.accountId = accountId; + this.domainId = domainId; + this.vmId = vmId; + this.state = State.Allocated; + this.description = description; + this.name = vmSnapshotName; + this.displayName = vsDisplayName; + this.type = type; + this.current = current; + } + + public String getDescription() { + return description; + } + + @Override + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + @Override + public long getId() { + return id; + } + + @Override + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + public Boolean 
getCurrent() { + return current; + } + + public void setCurrent(Boolean current) { + this.current = current; + } + + @Override + public long getUpdatedCount() { + return updatedCount; + } + + @Override + public void incrUpdatedCount() { + this.updatedCount++; + } + + @Override + public Date getUpdated() { + return updated; + } + + @Override + public Type getType() { + return type; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } +} From 8db8ee71d6fc73c1aefd17e82ea6e0c1ccc8adee Mon Sep 17 00:00:00 2001 From: radhikap Date: Wed, 13 Feb 2013 16:39:47 +0530 Subject: [PATCH 013/486] dev guide updates for 4.1 Signed-off-by: radhikap --- docs/en-US/added-API-commands-4-1.xml | 41 ++++++++++ docs/en-US/changed-apicommands-4.1.xml | 106 +++++++++++++++++++++++++ docs/en-US/reset-ssh-key-dev.xml | 27 +++++++ docs/en-US/vmx-settings-dev.xml | 40 ++++++++++ docs/en-US/whats-new.xml | 71 +++++++++-------- 5 files changed, 252 insertions(+), 33 deletions(-) create mode 100644 docs/en-US/added-API-commands-4-1.xml create mode 100644 docs/en-US/changed-apicommands-4.1.xml create mode 100644 docs/en-US/reset-ssh-key-dev.xml create mode 100644 docs/en-US/vmx-settings-dev.xml diff --git a/docs/en-US/added-API-commands-4-1.xml b/docs/en-US/added-API-commands-4-1.xml new file mode 100644 index 00000000000..f635e9dfdd8 --- /dev/null +++ b/docs/en-US/added-API-commands-4-1.xml @@ -0,0 +1,41 @@ + + +%BOOK_ENTITIES; +]> + +
+ Added API Commands in 4.1-incubating + + + createEgressFirewallRules (creates an egress firewall rule on the guest network.) + + + deleteEgressFirewallRules (deletes a egress firewall rule on the guest network.) + + + listEgressFirewallRules (lists the egress firewall rules configured for a guest + network.) + + + resetSSHKeyForVirtualMachine (Resets the SSHkey for virtual machine.) + + + addBaremetalHost (Adds a new host.) + + +
diff --git a/docs/en-US/changed-apicommands-4.1.xml b/docs/en-US/changed-apicommands-4.1.xml new file mode 100644 index 00000000000..42bd088afb3 --- /dev/null +++ b/docs/en-US/changed-apicommands-4.1.xml @@ -0,0 +1,106 @@ + + +%BOOK_ENTITIES; +]> + +
+ Changed API Commands in 4.1-incubating + + + + + + + API Commands + Description + + + + + + createNetworkOffering + listNetworkOfferings + listNetworks + + + The following request parameters is added: isPersistent. + This parameter determines if the network or network offering created or listed by + using this offering are persistent or not. + + + + + addF5LoadBalancer + configureNetscalerLoadBalancer + addNetscalerLoadBalancer + listF5LoadBalancers + configureF5LoadBalancer + listNetscalerLoadBalancers + + + The following response parameter is removed: inline. + + + + listFirewallRules + createFirewallRule + + The following request parameter is added: traffictype (optional). + + + + listUsageRecords + The following response parameter is added: virtualsize. + + + + + deleteIso + + + The following request parameter is added: forced (optional). + + + + + createStoragePool + + + The following request parameters are made mandatory: + + + podid + + + clusterid + + + + + + + listZones + + + The following request parameters is added: securitygroupenabled + + + + + +
diff --git a/docs/en-US/reset-ssh-key-dev.xml b/docs/en-US/reset-ssh-key-dev.xml new file mode 100644 index 00000000000..1a904e566ef --- /dev/null +++ b/docs/en-US/reset-ssh-key-dev.xml @@ -0,0 +1,27 @@ + + +%BOOK_ENTITIES; +]> + +
+ Resetting SSH Keys to Access VMs + Use the resetSSHKeyForVirtualMachine API to set or reset the SSH keypair assigned to a + virtual machine. With the addition of this feature, a lost or compromised SSH keypair can be + changed, and the user can access the VM by using the new keypair. Just create or register a new + keypair, then call resetSSHKeyForVirtualMachine. +
diff --git a/docs/en-US/vmx-settings-dev.xml b/docs/en-US/vmx-settings-dev.xml new file mode 100644 index 00000000000..a0fdf7f7825 --- /dev/null +++ b/docs/en-US/vmx-settings-dev.xml @@ -0,0 +1,40 @@ + + +%BOOK_ENTITIES; +]> + +
+ Additional VMX Settings + A VMX (.vmx) file is the primary configuration file for a virtual machine. When a new VM is + created, information on the operating system, disk sizes, and networking is stored in this file. + The VM actively writes to its .vmx file for all the configuration changes. The VMX file is + typically located in the directory where the VM is created. In Windows Vista / Windows 7 / + Windows Server 2008, the default location is C:\Users\<your_user_name>\My + Documents\Virtual Machines\<virtual_machine_name>.vmx. In Linux, vmware-cmd -l lists the + full path to all the registered VMX files. Any manual additions to the .vmx file from ESX/ESXi + are overwritten by the entries stored in the vCenter Server database. Therefore, before you edit + a .vmx file, first remove the VM from the vCenter server's inventory and register the VM again + after editing. + The CloudStack API that supports passing some of the VMX settings is registerTemplate. The + supported parameters are rootDiskController, nicAdapter, and keyboard. In addition to these + existing VMX parameters, you can now use the keyboard.typematicMinDelay parameter in the + registerTemplate API call. This parameter controls the amount of delay for the repeated key + strokes on remote consoles. For more information on keyboard.typematicMinDelay, see keyboard.typematicMinDelay. +
diff --git a/docs/en-US/whats-new.xml b/docs/en-US/whats-new.xml index 77b3ec3df22..29ae1f68903 100644 --- a/docs/en-US/whats-new.xml +++ b/docs/en-US/whats-new.xml @@ -3,40 +3,45 @@ %BOOK_ENTITIES; ]> - - - What's New in the API? - The following describes any new major features of each &PRODUCT; version as it applies to API usage. -
- What's New in the API for 4.0 - - -
-
- What's New in the API for 3.0 - - - - - - -
+ What's New in the API? + The following describes any new major features of each &PRODUCT; version as it applies to + API usage. +
+ What's New in the API for 4.1 + + + + +
+
+ What's New in the API for 4.0 + + +
+
+ What's New in the API for 3.0 + + + + + + +
From 26679e88def18d1bd5c57ab8091b9f0e0bbc28f8 Mon Sep 17 00:00:00 2001 From: Radhika PC Date: Thu, 14 Feb 2013 15:33:03 +0530 Subject: [PATCH 014/486] Persistent Networks documentation:Reviewed-By: Jessica Tomechak and Likitha Shetty --- docs/en-US/creating-network-offerings.xml | 346 +++++++++++++--------- docs/en-US/networks.xml | 3 +- docs/en-US/persistent-network.xml | 100 +++++++ 3 files changed, 306 insertions(+), 143 deletions(-) create mode 100644 docs/en-US/persistent-network.xml diff --git a/docs/en-US/creating-network-offerings.xml b/docs/en-US/creating-network-offerings.xml index 0269ce024cb..df392420937 100644 --- a/docs/en-US/creating-network-offerings.xml +++ b/docs/en-US/creating-network-offerings.xml @@ -22,146 +22,208 @@ under the License. -->
- Creating a New Network Offering - To create a network offering: - - Log in with admin privileges to the &PRODUCT; UI. - In the left navigation bar, click Service Offerings. - In Select Offering, choose Network Offering. - Click Add Network Offering. - In the dialog, make the following choices: - - Name. Any desired name for the network offering - Description. A short description of the offering that can be - displayed to users - Network Rate. Allowed data transfer rate in MB per - second - Guest Type. Choose whether the guest network is isolated or - shared. For a description of these terms, see - - Specify VLAN. (Isolated guest networks only) Indicate whether - a VLAN should be specified when this offering is used - Supported Services. Select one or more of the possible - network services. For some services, you must also choose the service - provider; for example, if you select Load Balancer, you can choose the - &PRODUCT; virtual router or any other load balancers that have been - configured in the cloud. Depending on which services you choose, additional - fields may appear in the rest of the dialog box.Based on the guest network type selected, you can see the following supported services: - - - - Supported Services - Description - Isolated - Shared - - - - - DHCP - For more information, see . - Supported - Supported - - - DNS - For more information, see . - Supported - Supported - - - Load Balancer - If you select Load Balancer, you can choose the &PRODUCT; virtual router or any other load - balancers that have been configured in the cloud. - Supported - Supported - - - Source NAT - If you select Source NAT, you can choose the &PRODUCT; virtual router or any other Source - NAT providers that have been configured in the - cloud. - Supported - Supported - - - Static NAT - If you select Static NAT, you can choose the &PRODUCT; virtual router or any other Static - NAT providers that have been configured in the - cloud. 
- Supported - Supported - - - Port Forwarding - If you select Port Forwarding, you can choose the &PRODUCT; virtual router or any other - Port Forwarding providers that have been configured in - the cloud. - Supported - Not Supported - - - VPN - For more information, see . - Supported - Not Supported - - - User Data - For more information, see . - Not Supported - Supported - - - Network ACL - For more information, see . - Supported - Not Supported - - - Security Groups - For more information, see . - Not Supported - Supported - - - - - - System Offering. If the service provider for any of the - services selected in Supported Services is a virtual router, the System - Offering field appears. Choose the system service offering that you want - virtual routers to use in this network. For example, if you selected Load - Balancer in Supported Services and selected a virtual router to provide load - balancing, the System Offering field appears so you can choose between the - &PRODUCT; default system service offering and any custom system service - offerings that have been defined by the &PRODUCT; root administrator. - For more information, see System Service Offerings. - Redundant router capability. Available - only when Virtual Router is selected as the Source NAT provider. Select this - option if you want to use two virtual routers in the network for - uninterrupted connection: one operating as the master virtual router and the - other as the backup. The master virtual router receives requests from and - sends responses to the user’s VM. The backup virtual router is activated - only when the master is down. After the failover, the backup becomes the - master virtual router. &PRODUCT; deploys the routers on different hosts - to ensure reliability if one host is down. - Conserve mode. Indicate whether to use conserve mode. In this - mode, network resources are allocated only when the first virtual machine - starts in the network. 
When the conservative mode is off, the public IP can - only be used for a single service. For example, a public IP used for a port - forwarding rule cannot be used for defining other services, such as SaticNAT - or load balancing. When the conserve mode is on, you can define more than - one service on the same public IP. - If StaticNAT is enabled, irrespective of the status of the conserve mode, no port forwarding - or load balancing rule can be created for the IP. However, you can add - the firewall rules by using the createFirewallRule command. - Tags. Network tag to specify which physical network to - use. - - Click Add. - - - + Creating a New Network Offering + To create a network offering: + + + Log in with admin privileges to the &PRODUCT; UI. + + + In the left navigation bar, click Service Offerings. + + + In Select Offering, choose Network Offering. + + + Click Add Network Offering. + + + In the dialog, make the following choices: + + + Name. Any desired name for the network + offering. + + + Description. A short description of the offering + that can be displayed to users. + + + Network Rate. Allowed data transfer rate in MB per + second. + + + Guest Type. Choose whether the guest network is + isolated or shared. + For a description of this term, see . + For a description of this term, see the Administration Guide. + + + + Persistent. Indicate whether the guest network is + persistent or not. The network that you can provision without having to deploy a VM on + it is termed persistent network. For more information, see . + + + Specify VLAN. (Isolated guest networks only) + Indicate whether a VLAN should be specified when this offering is used. + + + VPC. This option indicate whether the guest network + is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a private, isolated + part of &PRODUCT;. A VPC can have its own virtual network topology that resembles a + traditional physical network. For more information on VPCs, see . 
+ + + Supported Services. Select one or more of the + possible network services. For some services, you must also choose the service provider; + for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or + any other load balancers that have been configured in the cloud. Depending on which + services you choose, additional fields may appear in the rest of the dialog box. + Based on the guest network type selected, you can see the following supported + services: + + + + + Supported Services + Description + Isolated + Shared + + + + + DHCP + For more information, see . + Supported + Supported + + + DNS + For more information, see . + Supported + Supported + + + Load Balancer + If you select Load Balancer, you can choose the &PRODUCT; virtual + router or any other load balancers that have been configured in the + cloud. + Supported + Supported + + + Firewall + For more information, see . + For more information, see the Administration + Guide. + Supported + Supported + + + Source NAT + If you select Source NAT, you can choose the &PRODUCT; virtual router + or any other Source NAT providers that have been configured in the + cloud. + Supported + Supported + + + Static NAT + If you select Static NAT, you can choose the &PRODUCT; virtual router + or any other Static NAT providers that have been configured in the + cloud. + Supported + Supported + + + Port Forwarding + If you select Port Forwarding, you can choose the &PRODUCT; virtual + router or any other Port Forwarding providers that have been configured in the + cloud. + Supported + Not Supported + + + VPN + For more information, see . + Supported + Not Supported + + + User Data + For more information, see . + For more information, see the Administration + Guide. + Not Supported + Supported + + + Network ACL + For more information, see . + Supported + Not Supported + + + Security Groups + For more information, see . + Not Supported + Supported + + + + + + + System Offering. 
If the service provider for any of + the services selected in Supported Services is a virtual router, the System Offering + field appears. Choose the system service offering that you want virtual routers to use + in this network. For example, if you selected Load Balancer in Supported Services and + selected a virtual router to provide load balancing, the System Offering field appears + so you can choose between the &PRODUCT; default system service offering and any custom + system service offerings that have been defined by the &PRODUCT; root + administrator. + For more information, see . + For more information, see the Administration Guide. + + + Redundant router capability. Available only when + Virtual Router is selected as the Source NAT provider. Select this option if you want to + use two virtual routers in the network for uninterrupted connection: one operating as + the master virtual router and the other as the backup. The master virtual router + receives requests from and sends responses to the user’s VM. The backup virtual router + is activated only when the master is down. After the failover, the backup becomes the + master virtual router. &PRODUCT; deploys the routers on different hosts to ensure + reliability if one host is down. + + + Conserve mode. Indicate whether to use conserve + mode. In this mode, network resources are allocated only when the first virtual machine + starts in the network. When conservative mode is off, the public IP can only be used for + a single service. For example, a public IP used for a port forwarding rule cannot be + used for defining other services, such as SaticNAT or load balancing. When the conserve + mode is on, you can define more than one service on the same public IP. + + If StaticNAT is enabled, irrespective of the status of the conserve mode, no port + forwarding or load balancing rule can be created for the IP. However, you can add the + firewall rules by using the createFirewallRule command. + + + + Tags. 
Network tag to specify which physical network + to use. + + + + + Click Add. + +
diff --git a/docs/en-US/networks.xml b/docs/en-US/networks.xml index a7b9ea12466..830576902b1 100644 --- a/docs/en-US/networks.xml +++ b/docs/en-US/networks.xml @@ -45,4 +45,5 @@ - \ No newline at end of file + + diff --git a/docs/en-US/persistent-network.xml b/docs/en-US/persistent-network.xml new file mode 100644 index 00000000000..1ccc99c59a6 --- /dev/null +++ b/docs/en-US/persistent-network.xml @@ -0,0 +1,100 @@ + + +%BOOK_ENTITIES; +]> + + +
+ Persistent Networks + The network that you can provision without having to deploy any VMs on it is called a + persistent network. A persistent network can be part of a VPC or a non-VPC environment. + When you create other types of network, a network is only a database entry until the first + VM is created on that network. When the first VM is created, a VLAN ID is assigned and the + network is provisioned. Also, when the last VM is destroyed, the VLAN ID is released and the + network is no longer available. With the addition of persistent network, you will have the + ability to create a network in &PRODUCT; in which physical devices can be deployed without + having to run any VMs. Additionally, you can deploy physical devices on that network. + One of the advantages of having a persistent network is that you can create a VPC with a tier + consisting of only physical devices. For example, you might create a VPC for a three-tier + application, deploy VMs for Web and Application tier, and use physical machines for the + Database tier. Another use case is that if you are providing services by using physical + hardware, you can define the network as persistent and therefore even if all its VMs are + destroyed the services will not be discontinued. +
+ Persistent Network Considerations + + + Persistent network is designed for isolated networks. + + + All default network offerings are non-persistent. + + + A network offering cannot be edited because changing it affects the behavior of the + existing networks that were created using this network offering. + + + When you create a guest network, the network offering that you select defines the + network persistence. This in turn depends on whether persistent network is enabled in the + selected network offering. + + + An existing network can be made persistent by changing its network offering to an + offering that has the Persistent option enabled. While setting this property, even if the + network has no running VMs, the network is provisioned. + + + An existing network can be made non-persistent by changing its network offering to an + offering that has the Persistent option disabled. If the network has no running VMs, + during the next network garbage collection run the network is shut down. + + + When the last VM on a network is destroyed, the network garbage collector checks if + the network offering associated with the network is persistent, and shuts down the network + only if it is non-persistent. + + +
+
+ Creating a Persistent Guest Network + To create a persistent network, perform the following: + + + Create a network offering with the Persistent option enabled. + See . + See the Administration Guide. + + + Select Network from the left navigation pane. + + + Select the guest network that you want to offer this network service to. + + + Click the Edit button. + + + From the Network Offering drop-down, select the persistent network offering you have + just created. + + + Click OK. + + +
+
From ec8d204d3d962a6e1e036a98bbf345c8509827d9 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Thu, 14 Feb 2013 11:27:32 +0100 Subject: [PATCH 015/486] Docs: Fixed typo in nicira plugin guide --- docs/en-US/plugin-niciranvp-devicemanagement.xml | 4 ++-- docs/en-US/plugin-niciranvp-features.xml | 8 ++++---- docs/en-US/plugin-niciranvp-preparations.xml | 6 +++--- docs/en-US/plugin-niciranvp-uuidreferences.xml | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/en-US/plugin-niciranvp-devicemanagement.xml b/docs/en-US/plugin-niciranvp-devicemanagement.xml index 2423ce3925d..57b8eee9d7d 100644 --- a/docs/en-US/plugin-niciranvp-devicemanagement.xml +++ b/docs/en-US/plugin-niciranvp-devicemanagement.xml @@ -22,7 +22,7 @@ -->
Device-management - In CloudStack 4.0.x each Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the "addNiciraNVPDevice" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicra NVP Controller. + In &PRODUCT; 4.0.x each Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the "addNiciraNVPDevice" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicira NVP Controller. The plugin introduces a set of new API calls to manage the devices, see below or refer to the API reference. addNiciraNvpDevice @@ -44,4 +44,4 @@ listNiciraNVPDevices -
\ No newline at end of file + diff --git a/docs/en-US/plugin-niciranvp-features.xml b/docs/en-US/plugin-niciranvp-features.xml index b71e67f4199..c346bfb64e3 100644 --- a/docs/en-US/plugin-niciranvp-features.xml +++ b/docs/en-US/plugin-niciranvp-features.xml @@ -22,12 +22,12 @@ -->
Features of the Nicira NVP Plugin - In CloudStack release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when an tennant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller. + In &PRODUCT; release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when a tenant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller. The plugin has been tested with Nicira NVP versions 2.1.0, 2.2.0 and 2.2.1 - In CloudStack 4.0.0-incubating only the XenServer hypervisor is supported for use in + In &PRODUCT; 4.0.0-incubating only the XenServer hypervisor is supported for use in combination with Nicira NVP. - In CloudStack 4.1.0-incubating both KVM and XenServer hypervisors are + In &PRODUCT; 4.1.0-incubating both KVM and XenServer hypervisors are supported. - In CloudStack 4.0.0-incubating the UI components for this plugin are not complete, + In &PRODUCT; 4.0.0-incubating the UI components for this plugin are not complete, configuration is done by sending commands to the API.
diff --git a/docs/en-US/plugin-niciranvp-preparations.xml b/docs/en-US/plugin-niciranvp-preparations.xml index 86b795ccd0b..762c941fd13 100644 --- a/docs/en-US/plugin-niciranvp-preparations.xml +++ b/docs/en-US/plugin-niciranvp-preparations.xml @@ -23,7 +23,7 @@
Prerequisites Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that. - CloudStack needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type. + &PRODUCT; needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type. The Guest traffic type should be configured with the traffic label that matches the name of the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details on how to set this up in XenServer or KVM. @@ -33,6 +33,6 @@ The username to access the API The password to access the API The UUID of the Transport Zone that contains the hypervisors in this Zone - The UUID of the Physical Network that will used for the Guest networks + The UUID of the Physical Network that will be used for the Guest networks -
\ No newline at end of file + diff --git a/docs/en-US/plugin-niciranvp-uuidreferences.xml b/docs/en-US/plugin-niciranvp-uuidreferences.xml index c912971736b..cb5f1cae834 100644 --- a/docs/en-US/plugin-niciranvp-uuidreferences.xml +++ b/docs/en-US/plugin-niciranvp-uuidreferences.xml @@ -22,9 +22,9 @@ -->
UUID References - The plugin maintains several references in the CloudStack database to items created on the NVP Controller. - Every guest network this is created will have its broadcast type set to Lswitch and if the network is in state "Implemented", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller. + The plugin maintains several references in the &PRODUCT; database to items created on the NVP Controller. + Every guest network that is created will have its broadcast type set to Lswitch and if the network is in state "Implemented", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller. The Nics that are connected to one of the Logical Switches will have their Logical Switch Port UUID listed in the nicira_nvp_nic_map table All devices created on the NVP Controller will have a tag set to domain-account of the owner of the network, this string can be used to search for items in the NVP Controller. -
\ No newline at end of file + From afbc950652d5c0aa6cd5415a296a6581f04af5a5 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 14 Feb 2013 16:26:18 +0530 Subject: [PATCH 016/486] CLOUDSTACK-1272: Fix incorrect annotation for service offering id Fixes param annotation in: api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java Signed-off-by: Rohit Yadav --- .../cloudstack/api/command/admin/router/UpgradeRouterCmd.java | 4 ++-- .../api/command/admin/systemvm/UpgradeSystemVMCmd.java | 4 ++-- .../command/user/autoscale/CreateAutoScaleVmProfileCmd.java | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java index c2cde163eba..b049f66f648 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java @@ -22,8 +22,8 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.log4j.Logger; import com.cloud.network.router.VirtualRouter; @@ -42,7 +42,7 @@ public class UpgradeRouterCmd extends BaseCmd { required=true, description="The ID of the router") private Long id; - @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType = DiskOfferingResponse.class, + @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType = 
ServiceOfferingResponse.class, required=true, description="the service offering ID to apply to the domain router") private Long serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java index a70d927f020..e91d0053c64 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java @@ -23,7 +23,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; -import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.log4j.Logger; @@ -48,7 +48,7 @@ public class UpgradeSystemVMCmd extends BaseCmd { required=true, description="The ID of the system vm") private Long id; - @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=DiskOfferingResponse.class, + @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=ServiceOfferingResponse.class, required=true, description="the service offering ID to apply to the system vm") private Long serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index ecfd8df0ceb..87d4466e79a 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -26,7 +26,7 @@ import org.apache.cloudstack.api.BaseAsyncCreateCmd; import 
org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; -import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.ZoneResponse; @@ -56,7 +56,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { required = true, description = "availability zone for the auto deployed virtual machine") private Long zoneId; - @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, required = true, description = "the service offering of the auto deployed virtual machine") private Long serviceOfferingId; From 9e32fc4c834de84482caca77fb067b175a9c1deb Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 14 Feb 2013 16:27:57 +0530 Subject: [PATCH 017/486] maven: Fix stray characted in client/pom.xml introduced in 4572bc06 Signed-off-by: Rohit Yadav --- client/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/pom.xml b/client/pom.xml index 0c37df3a5f5..bd8283d7c82 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -392,14 +392,14 @@ org.jasypt jasypt - 1.9.0` + 1.9.0 false ${project.build.directory}/pythonlibs org.jasypt jasypt - 1.8` + 1.8 false ${project.build.directory}/pythonlibs From 4364cb97842466919cdd8d14098f5e34a63ef8a7 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 14 Feb 2013 17:18:22 +0530 Subject: [PATCH 018/486] rat: Fix license for file from recent ui-plugins merge Signed-off-by: Rohit Yadav --- ui/plugins/testPlugin/testPlugin.css | 20 +++++++++++++++++++- ui/scripts/ui-custom/plugins.js | 18 
+++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/ui/plugins/testPlugin/testPlugin.css b/ui/plugins/testPlugin/testPlugin.css index 19e12414d32..f26216fadb4 100644 --- a/ui/plugins/testPlugin/testPlugin.css +++ b/ui/plugins/testPlugin/testPlugin.css @@ -1,2 +1,20 @@ -/* Put your CSS here */ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. +*/ +/* Put your CSS here */ diff --git a/ui/scripts/ui-custom/plugins.js b/ui/scripts/ui-custom/plugins.js index 4e6fbb22759..aaf95319da1 100644 --- a/ui/scripts/ui-custom/plugins.js +++ b/ui/scripts/ui-custom/plugins.js @@ -1,3 +1,19 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. (function($, cloudStack) { var elems = { pluginItem: function(args) { @@ -90,4 +106,4 @@ }) }); }; -}(jQuery, cloudStack)); +}(jQuery, cloudStack)); From 298c5ee06b0774d0f4cf059dd21a249777b45251 Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Thu, 14 Feb 2013 09:44:03 -0700 Subject: [PATCH 019/486] Summary: Adjust systemvm.iso path for KVM, due to packaging changes Detail: Removing references to /usr/lib/cloud and /usr/lib64/cloud so that old systemvm.iso files aren't found by accident. systemvm.iso should exist in /usr/share/cloudstack-common/vms now. Signed-off-by: Marcus Sorensen 1360860243 -0700 --- .../hypervisor/kvm/resource/LibvirtComputingResource.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 5d17fd0b9c6..9472ea2f313 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -698,10 +698,7 @@ ServerResource { _sysvmISOPath = (String) params.get("systemvm.iso.path"); if (_sysvmISOPath == null) { - String[] isoPaths = { "/usr/lib64/cloud/agent/vms/systemvm.iso", - "/usr/lib/cloud/agent/vms/systemvm.iso", - "/usr/lib64/cloud/common/vms/systemvm.iso", - "/usr/lib/cloud/common/vms/systemvm.iso" }; + String[] isoPaths = {"/usr/share/cloudstack-common/vms/systemvm.iso"}; for 
(String isoPath : isoPaths) { if (_storage.exists(isoPath)) { _sysvmISOPath = isoPath; From cf7ac9d6c4f5f85c47a5f4d09d402f4f0f682ba1 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 14 Feb 2013 12:01:40 -0800 Subject: [PATCH 020/486] CLOUDSTACK-1277: populating transit password field to UserVmJoinVO in converting from UserVm to UserVmJoinVO. --- .../src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 6f5587f87ea..4a5ac33bea9 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -327,7 +327,15 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem } Set vmIdSet = userVmDataHash.keySet(); - return searchByIds(vmIdSet.toArray(new Long[vmIdSet.size()])); + List uvms = searchByIds(vmIdSet.toArray(new Long[vmIdSet.size()])); + // populate transit password field from UserVm + if ( uvms != null ){ + for (UserVmJoinVO uvm : uvms){ + UserVm v = userVmDataHash.get(uvm.getId()); + uvm.setPassword(v.getPassword()); + } + } + return uvms; } } From ca5c6d5d1496f39fae32b020e5d16a2eb06365fb Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 14 Feb 2013 16:19:17 -0800 Subject: [PATCH 021/486] CLOUDSTACK-1219, CLOUDSTACK-1220: Fix IPv6 error messages --- api/src/com/cloud/network/NetworkModel.java | 2 ++ .../cloud/network/Ipv6AddressManagerImpl.java | 4 +-- .../com/cloud/network/NetworkModelImpl.java | 32 +++++++++++++++++++ .../src/com/cloud/vm/UserVmManagerImpl.java | 17 ++-------- .../cloud/network/MockNetworkModelImpl.java | 6 ++++ .../com/cloud/vpc/MockNetworkModelImpl.java | 6 ++++ 6 files changed, 50 insertions(+), 17 deletions(-) diff --git a/api/src/com/cloud/network/NetworkModel.java b/api/src/com/cloud/network/NetworkModel.java index 783e5cc9c85..9731a61667d 100644 --- 
a/api/src/com/cloud/network/NetworkModel.java +++ b/api/src/com/cloud/network/NetworkModel.java @@ -255,4 +255,6 @@ public interface NetworkModel { boolean isIP6AddressAvailableInVlan(long vlanId); void checkIp6Parameters(String startIPv6, String endIPv6, String ip6Gateway, String ip6Cidr) throws InvalidParameterValueException; + + void checkRequestedIpAddresses(long networkId, String ip4, String ip6) throws InvalidParameterValueException; } \ No newline at end of file diff --git a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java index ecef5a225e9..a401f9ae396 100644 --- a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java @@ -80,7 +80,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa } List vlans = _vlanDao.listVlansByNetworkId(networkId); if (vlans == null) { - s_logger.debug("Cannot find related vlan or too many vlan attached to network " + networkId); + s_logger.debug("Cannot find related vlan attached to network " + networkId); return null; } String ip = null; @@ -109,7 +109,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa } } if (ip == null) { - throw new InsufficientAddressCapacityException("Cannot find a usable IP in the network " + network.getName() + " after network.ipv6.search.retry.max = " + _ipv6RetryMax + " times retry!", + throw new InsufficientAddressCapacityException("Cannot find a usable IP in the network " + network.getName() + " after " + _ipv6RetryMax + "(network.ipv6.search.retry.max) times retry!", DataCenter.class, network.getDataCenterId()); } } else { diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index ca7a900e39c..beebb871d8a 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -1923,4 +1923,36 
@@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { throw new InvalidParameterValueException("The cidr size of IPv6 network must be no less than 64 bits!"); } } + + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) throws InvalidParameterValueException { + if (ip4 != null) { + if (!NetUtils.isValidIp(ip4)) { + throw new InvalidParameterValueException("Invalid specified IPv4 address " + ip4); + } + //Other checks for ipv4 are done in assignPublicIpAddress() + } + if (ip6 != null) { + if (!NetUtils.isValidIpv6(ip6)) { + throw new InvalidParameterValueException("Invalid specified IPv6 address " + ip6); + } + if (_ipv6Dao.findByNetworkIdAndIp(networkId, ip6) != null) { + throw new InvalidParameterValueException("The requested IP is already taken!"); + } + List vlans = _vlanDao.listVlansByNetworkId(networkId); + if (vlans == null) { + throw new CloudRuntimeException("Cannot find related vlan attached to network " + networkId); + } + Vlan ipVlan = null; + for (Vlan vlan : vlans) { + if (NetUtils.isIp6InRange(ip6, vlan.getIp6Range())) { + ipVlan = vlan; + break; + } + } + if (ipVlan == null) { + throw new InvalidParameterValueException("Requested IPv6 is not in the predefined range!"); + } + } + } } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 19887ff9e25..df976099f1d 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -3299,7 +3299,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (requestedIpPair == null) { requestedIpPair = new IpAddresses(null, null); } else { - checkRequestedIpAddresses(requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); + _networkModel.checkRequestedIpAddresses(network.getId(), requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); } NicProfile profile = new 
NicProfile(requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); @@ -3308,7 +3308,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use defaultNetworkNumber++; // if user requested specific ip for default network, add it if (defaultIps.getIp4Address() != null || defaultIps.getIp6Address() != null) { - checkRequestedIpAddresses(defaultIps.getIp4Address(), defaultIps.getIp6Address()); + _networkModel.checkRequestedIpAddresses(network.getId(), defaultIps.getIp4Address(), defaultIps.getIp6Address()); profile = new NicProfile(defaultIps.getIp4Address(), defaultIps.getIp6Address()); } @@ -3486,19 +3486,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use return vm; } - private void checkRequestedIpAddresses(String ip4, String ip6) throws InvalidParameterValueException { - if (ip4 != null) { - if (!NetUtils.isValidIp(ip4)) { - throw new InvalidParameterValueException("Invalid specified IPv4 address " + ip4); - } - } - if (ip6 != null) { - if (!NetUtils.isValidIpv6(ip6)) { - throw new InvalidParameterValueException("Invalid specified IPv6 address " + ip6); - } - } - } - private void validateUserData(String userData) { byte[] decodedUserData = null; if (userData != null) { diff --git a/server/test/com/cloud/network/MockNetworkModelImpl.java b/server/test/com/cloud/network/MockNetworkModelImpl.java index a2bef63d539..d7ffa7da280 100644 --- a/server/test/com/cloud/network/MockNetworkModelImpl.java +++ b/server/test/com/cloud/network/MockNetworkModelImpl.java @@ -829,4 +829,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub } + + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) + throws InvalidParameterValueException { + // TODO Auto-generated method stub + } } diff --git a/server/test/com/cloud/vpc/MockNetworkModelImpl.java b/server/test/com/cloud/vpc/MockNetworkModelImpl.java index 
3fad33870bf..5ac87772529 100644 --- a/server/test/com/cloud/vpc/MockNetworkModelImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkModelImpl.java @@ -842,4 +842,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub } + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) + throws InvalidParameterValueException { + // TODO Auto-generated method stub + } + } From ddcad148e4d5463b7f53cc14374a6e1fd67de85d Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Thu, 14 Feb 2013 17:57:20 -0700 Subject: [PATCH 022/486] Summary: centos63/package.sh, slightly better version parsing Description: current package.sh fails if user hasn't downloaded all maven poms prior to running package.sh. Now we match for something that vaguely resembles a cloudstack version. Signed-off-by: Marcus Sorensen 1360889840 -0700 --- packaging/centos63/package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/centos63/package.sh b/packaging/centos63/package.sh index 5b1bab49b61..fa45210b2f8 100755 --- a/packaging/centos63/package.sh +++ b/packaging/centos63/package.sh @@ -21,7 +21,7 @@ RPMDIR=$CWD/../../dist/rpmbuild -VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep -v '^\['` +VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` if echo $VERSION | grep SNAPSHOT ; then REALVER=`echo $VERSION | cut -d '-' -f 1` DEFVER="-D_ver $REALVER" From bd4661e46746a5f597ce72e1b8c9ad3962cede90 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 14 Feb 2013 17:56:04 -0800 Subject: [PATCH 023/486] CLOUDSTACK-1137: force reconnect to a disconnected host throws error. 
--- .../cloud/agent/manager/AgentManagerImpl.java | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java index 2286dabfda1..3b5d433f1be 100755 --- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -230,7 +230,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - + final Map configs = _configDao.getConfiguration("AgentManager", params); _port = NumbersUtil.parseInt(configs.get("port"), 8250); final int workers = NumbersUtil.parseInt(configs.get("workers"), 5); @@ -778,7 +778,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (host != null) { agentStatusTransitTo(host, Event.AgentDisconnected, _nodeId); } - } + } } if (forRebalance) { @@ -895,7 +895,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } catch (NoTransitionException ne) { /* Agent may be currently in status of Down, Alert, Removed, namely there is no next status for some events. * Why this can happen? Ask God not me. I hate there was no piece of comment for code handling race condition. - * God knew what race condition the code dealt with! + * God knew what race condition the code dealt with! 
*/ } @@ -1046,6 +1046,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return false; } + if (host.getStatus() == Status.Disconnected) { + s_logger.info("Host is already disconnected, no work to be done"); + return true; + } + if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { s_logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); return false; @@ -1197,12 +1202,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } Response response = null; - response = new Response(request, answers[0], _nodeId, -1); + response = new Response(request, answers[0], _nodeId, -1); try { link.send(response.toBytes()); } catch (ClosedChannelException e) { s_logger.debug("Failed to send startupanswer: " + e.toString()); - } + } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1405,7 +1410,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else { throw new CloudRuntimeException("Unkonwn TapAgentsAction " + action); } - } + } return true; } @@ -1508,7 +1513,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl attache.setMaintenanceMode(true); // Now cancel all of the commands except for the active one. 
attache.cancelAllCommands(Status.Disconnected, false); - } + } } @Override From 2b3e23f6cf7b194c10dabea8504ab0826adf7e0d Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Feb 2013 12:02:26 +0530 Subject: [PATCH 024/486] LDAP front end UI development --- ui/scripts/globalSettings.js | 98 ++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/ui/scripts/globalSettings.js b/ui/scripts/globalSettings.js index e65a00b290c..9ede3135aa4 100644 --- a/ui/scripts/globalSettings.js +++ b/ui/scripts/globalSettings.js @@ -14,6 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. + (function(cloudStack) { cloudStack.sections['global-settings'] = { title: 'label.menu.global.settings', @@ -81,6 +82,103 @@ } } }, + + ldapConfiguration:{ + type:'select', + title:'LDAP Configuration', + listView:{ + id:'ldap', + label:'LDAP Configuration', + fields:{ + hostname: {label: 'Hostname'}, + queryfilter: {label: 'Query Filter'}, + searchbase: {label: 'Search Base'}, + port: {label: 'LDAP Port'}, + ssl: { + label: 'SSL' + + + } + + + }, + dataProvider:function(args){ + var data = {}; + listViewDataProvider(args, data); + $.ajax({ + url: createURL(''), + data: data, + success: function(json) { + // var items = json.listhypervisorcapabilitiesresponse.hypervisorCapabilities; + args.response.success({data:items}); + }, + error: function(data) { + args.response.error(parseXMLHttpResponse(data)); + } + }); + }, + + actions: { + add:{ + + label: 'Configure LDAP', + + messages: { + confirm: function(args) { + return 'Do you really want to configure LDAP ? 
'; + }, + notification: function(args) { + return 'LDAP configured'; + } + }, + + createForm: { + + title: 'Configure LDAP', + fields:{ + name:{label: 'Bind Username' , validation: {required:true} }, + password: {label: 'Bind Password', validation: {required: true },isPassword:true }, + hostname: {label:'Hostname' , validation:{required:true}}, + queryfilter: {label:'Query Filter' , validation: {required:true}}, + searchbase: {label:'SearchBase',validation:{required:true}}, + ssl: { + label:'SSL' , + isBoolean:true, + isChecked:false + // var $form = $(this).closest("form"); + + + }, + port: { label: 'Port' , defaultValue: '389' }, + truststore:{ label:'Trust Store' , isHidden:true }, + truststorepassword:{ label:'Trust Store Password' ,isHidden:true } + + } + + + }, + + + action:function(args) { + var $form = $(this).closest("form"); + if($form.find('.form-item [rel=port]').find('input[ type=checkbox]').is(":checked") ==true) { + $form.find('.form-item[rel=truststore]').attr("isHidden",false); + $form.find('.form-item[rel=truststorepassword]').attr("isHidden",false); + } + + + } + } + + } + + + + } + + + + }, hypervisorCapabilities: { type: 'select', title: 'label.hypervisor.capabilities', From bc493bd39b1cd4be2334b5d82ef3b590f551372e Mon Sep 17 00:00:00 2001 From: Koushik Das Date: Mon, 21 Jan 2013 16:40:43 +0530 Subject: [PATCH 025/486] CLOUDSTACK-672: Set VMware cluster max. limit based on HV version Max. number of hosts in a Vmware clueter is specific to the version of the HV. 
This limit is read from the hypervisor_capabilities table --- .../hypervisor/HypervisorCapabilities.java | 6 + .../hypervisor/HypervisorCapabilitiesVO.java | 12 ++ .../vmware/VmwareServerDiscoverer.java | 126 ++++++++---------- .../vmware/manager/VmwareManager.java | 5 - .../vmware/manager/VmwareManagerImpl.java | 23 ++-- .../src/com/cloud/configuration/Config.java | 1 - .../dao/HypervisorCapabilitiesDao.java | 2 + .../dao/HypervisorCapabilitiesDaoImpl.java | 87 +++--------- setup/db/create-schema.sql | 10 +- 9 files changed, 117 insertions(+), 155 deletions(-) diff --git a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java index d52c36b12f5..aff81b0018d 100644 --- a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java +++ b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java @@ -46,4 +46,10 @@ public interface HypervisorCapabilities extends Identity, InternalIdentity{ * @return the max. data volumes per VM supported by hypervisor */ Integer getMaxDataVolumesLimit(); + + /** + * @return the max. 
hosts per cluster supported by hypervisor + */ + Integer getMaxHostsPerCluster(); + } diff --git a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java index 56e8e0a734d..b525a2d05d5 100644 --- a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java +++ b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java @@ -59,6 +59,9 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities { @Column(name="max_data_volumes_limit") private Integer maxDataVolumesLimit; + @Column(name="max_hosts_per_cluster") + private Integer maxHostsPerCluster; + protected HypervisorCapabilitiesVO() { this.uuid = UUID.randomUUID().toString(); } @@ -157,6 +160,15 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities { this.maxDataVolumesLimit = maxDataVolumesLimit; } + @Override + public Integer getMaxHostsPerCluster() { + return maxHostsPerCluster; + } + + public void setMaxHostsPerCluster(Integer maxHostsPerCluster) { + this.maxHostsPerCluster = maxHostsPerCluster; + } + @Override public boolean equals(Object obj) { if (obj instanceof HypervisorCapabilitiesVO) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index ddbce661239..84c37473a4a 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -45,6 +45,7 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.mo.ClusterMO; import com.cloud.hypervisor.vmware.mo.HostMO; @@ -95,78 +96,69 @@ 
public class VmwareServerDiscoverer extends DiscovererBase implements CiscoNexusVSMDeviceDao _nexusDao; @Inject NetworkModel _netmgr; - + @Inject + HypervisorCapabilitiesDao _hvCapabilitiesDao; + public VmwareServerDiscoverer() { s_logger.info("VmwareServerDiscoverer is constructed"); } - - @Override - public Map> find(long dcId, - Long podId, Long clusterId, URI url, String username, - String password, List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) - s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId - + ", cluster: " + clusterId + ", uri host: " - + url.getHost()); + @Override + public Map> find(long dcId, Long podId, Long clusterId, URI url, + String username, String password, List hostTags) throws DiscoveryException { + + if(s_logger.isInfoEnabled()) + s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); + + if(podId == null) { + if(s_logger.isInfoEnabled()) + s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); + return null; + } + + ClusterVO cluster = _clusterDao.findById(clusterId); + if(cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) { + if(s_logger.isInfoEnabled()) + s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); + return null; + } + + List hosts = _resourceMgr.listAllHostsInCluster(clusterId); + if (hosts != null && hosts.size() > 0) { + int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); + if (hosts.size() > maxHostsPerCluster) { + String msg = "VMware cluster " + cluster.getName() + " is too big to add new host now. 
(current configured cluster size: " + maxHostsPerCluster + ")"; + s_logger.error(msg); + throw new DiscoveredWithErrorException(msg); + } + } - if (podId == null) { - if (s_logger.isInfoEnabled()) - s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); - return null; - } - - ClusterVO cluster = _clusterDao.findById(clusterId); - if (cluster == null - || cluster.getHypervisorType() != HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); - return null; - } - - List hosts = _resourceMgr.listAllHostsInCluster(clusterId); - if (hosts.size() >= _vmwareMgr.getMaxHostsPerCluster()) { - String msg = "VMware cluster " - + cluster.getName() - + " is too big to add new host now. (current configured cluster size: " - + _vmwareMgr.getMaxHostsPerCluster() + ")"; - s_logger.error(msg); - throw new DiscoveredWithErrorException(msg); - } - - String privateTrafficLabel = null; - String publicTrafficLabel = null; - String guestTrafficLabel = null; - Map vsmCredentials = null; - - privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, - HypervisorType.VMware); - if (privateTrafficLabel != null) { - s_logger.info("Detected private network label : " - + privateTrafficLabel); - } - - if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { - DataCenterVO zone = _dcDao.findById(dcId); - NetworkType zoneType = zone.getNetworkType(); - if (zoneType != NetworkType.Basic) { - publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, - HypervisorType.VMware); - if (publicTrafficLabel != null) { - s_logger.info("Detected public network label : " - + publicTrafficLabel); - } - } - // Get physical network label - guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, - HypervisorType.VMware); - if (guestTrafficLabel != null) { - s_logger.info("Detected guest network label : " - + guestTrafficLabel); - } - vsmCredentials = _vmwareMgr - 
.getNexusVSMCredentialsByClusterId(clusterId); - } + String privateTrafficLabel = null; + String publicTrafficLabel = null; + String guestTrafficLabel = null; + Map vsmCredentials = null; + + privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware); + if (privateTrafficLabel != null) { + s_logger.info("Detected private network label : " + privateTrafficLabel); + } + + if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { + DataCenterVO zone = _dcDao.findById(dcId); + NetworkType zoneType = zone.getNetworkType(); + if (zoneType != NetworkType.Basic) { + publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware); + if (publicTrafficLabel != null) { + s_logger.info("Detected public network label : " + publicTrafficLabel); + } + } + // Get physical network label + guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware); + if (guestTrafficLabel != null) { + s_logger.info("Detected guest network label : " + guestTrafficLabel); + } + vsmCredentials = _vmwareMgr.getNexusVSMCredentialsByClusterId(clusterId); + } VmwareContext context = null; try { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java index e1ca6ccac03..e219c1cc426 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java @@ -29,10 +29,6 @@ import com.vmware.vim25.ManagedObjectReference; public interface VmwareManager { public final String CONTEXT_STOCK_NAME = "vmwareMgr"; - // this limitation comes from the fact that we are using linked clone on shared VMFS storage, - // we need to limit the size of vCenter cluster, http://en.wikipedia.org/wiki/VMware_VMFS - public final int MAX_HOSTS_PER_CLUSTER = 8; - String composeWorkerName(); String 
getSystemVMIsoFileNameOnDatastore(); @@ -57,7 +53,6 @@ public interface VmwareManager { Pair getAddiionalVncPortRange(); - int getMaxHostsPerCluster(); int getRouterExtraPublicNics(); boolean beginExclusiveOperation(int timeOutSeconds); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 88e03f5a220..e6d57426db2 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -58,6 +58,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.vmware.VmwareCleanupMaid; import com.cloud.hypervisor.vmware.mo.DiskControllerType; import com.cloud.hypervisor.vmware.mo.HostFirewallSystemMO; @@ -91,6 +92,7 @@ import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.DomainRouterVO; import com.google.gson.Gson; import com.vmware.apputils.vim25.ServiceUtil; +import com.vmware.vim25.AboutInfo; import com.vmware.vim25.HostConnectSpec; import com.vmware.vim25.ManagedObjectReference; @@ -119,6 +121,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Inject ClusterVSMMapDao _vsmMapDao; @Inject ConfigurationDao _configDao; @Inject ConfigurationServer _configServer; + @Inject HypervisorCapabilitiesDao _hvCapabilitiesDao; String _mountParent; StorageLayer _storage; @@ -133,7 +136,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw String _recycleHungWorker = "false"; int _additionalPortRangeStart; int _additionalPortRangeSize; - int _maxHostsPerCluster; int _routerExtraPublicNics = 2; String _cpuOverprovisioningFactor = "1"; @@ -260,7 +262,6 @@ public 
class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2); - _maxHostsPerCluster = NumbersUtil.parseInt(_configDao.getValue(Config.VmwarePerClusterHostMax.key()), VmwareManager.MAX_HOSTS_PER_CLUSTER); _cpuOverprovisioningFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); if(_cpuOverprovisioningFactor == null || _cpuOverprovisioningFactor.isEmpty()) _cpuOverprovisioningFactor = "1"; @@ -400,10 +401,15 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getServiceUtil().getDynamicProperty(mor, "host"); assert(hosts != null); - if(hosts.length > _maxHostsPerCluster) { - String msg = "vCenter cluster size is too big (current configured cluster size: " + _maxHostsPerCluster + ")"; - s_logger.error(msg); - throw new DiscoveredWithErrorException(msg); + if (hosts.length > 0) { + AboutInfo about = (AboutInfo)(serviceContext.getServiceUtil().getDynamicProperty(hosts[0], "config.product")); + String version = about.getApiVersion(); + int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(HypervisorType.VMware, version); + if (hosts.length > maxHostsPerCluster) { + String msg = "vCenter cluster size is too big (current configured cluster size: " + maxHostsPerCluster + ")"; + s_logger.error(msg); + throw new DiscoveredWithErrorException(msg); + } } for(ManagedObjectReference morHost: hosts) { @@ -868,11 +874,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw return new Pair(_additionalPortRangeStart, _additionalPortRangeSize); } - @Override - public int getMaxHostsPerCluster() { - return this._maxHostsPerCluster; - } - @Override public int getRouterExtraPublicNics() { return this._routerExtraPublicNics; diff --git a/server/src/com/cloud/configuration/Config.java 
b/server/src/com/cloud/configuration/Config.java index b22bf4b76f0..abc6a09e4ef 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -258,7 +258,6 @@ public enum Config { VmwareAdditionalVncPortRangeStart("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.start", "50000", "Start port number of additional VNC port range", null), VmwareAdditionalVncPortRangeSize("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.size", "1000", "Start port number of additional VNC port range", null), //VmwareGuestNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.guest.nic.device.type", "E1000", "Ethernet card type used in guest VM, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null), - VmwarePerClusterHostMax("Advanced", ManagementServer.class, Integer.class, "vmware.percluster.host.max", "8", "maxmium hosts per vCenter cluster(do not let it grow over 8)", "1-8"), VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "false", "Specify whether or not to reserve CPU based on CPU overprovisioning factor", null), VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "false", "Specify whether or not to reserve memory based on memory overprovisioning factor", null), VmwareRootDiskControllerType("Advanced", ManagementServer.class, String.class, "vmware.root.disk.controller", "ide", "Specify the default disk controller for root volumes, valid values are scsi, ide", null), diff --git a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java index 8f4a47584b3..0fe0b535f78 100644 --- a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java +++ b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java @@ -31,4 +31,6 @@ public interface HypervisorCapabilitiesDao extends 
GenericDao HypervisorTypeSearch; protected final SearchBuilder HypervisorTypeAndVersionSearch; - protected final GenericSearchBuilder MaxGuestLimitByHypervisorSearch; - protected final GenericSearchBuilder MaxDataVolumesLimitByHypervisorSearch; private static final String DEFAULT_VERSION = "default"; @@ -52,18 +50,14 @@ public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase sc = MaxGuestLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", hypervisorVersion); - List limitList = customSearch(sc, null); - if(!limitList.isEmpty()){ - result = limitList.get(0); - }else{ - useDefault = true; - } - }else{ - useDefault = true; - } - if(useDefault){ - SearchCriteria sc = MaxGuestLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", DEFAULT_VERSION); - List limitList = customSearch(sc, null); - if(!limitList.isEmpty()){ - result = limitList.get(0); - } - } - if(result == null){ + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + Long limit = result.getMaxGuestsLimit(); + if (limit == null) return defaultLimit; - } - return result; + return limit; } @Override public Integer getMaxDataVolumesLimit(HypervisorType hypervisorType, String hypervisorVersion) { - Integer result = null; - boolean useDefault = false; - if (hypervisorVersion != null) { - SearchCriteria sc = MaxDataVolumesLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", hypervisorVersion); - List limitList = customSearch(sc, null); - if (!limitList.isEmpty()) { - result = limitList.get(0); - } else { - useDefault = true; - } - } else { - useDefault = true; - } - // If data is not available for a specific hypervisor version then use 'default' as the version - if (useDefault) { - SearchCriteria sc = MaxDataVolumesLimitByHypervisorSearch.create(); - 
sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", DEFAULT_VERSION); - List limitList = customSearch(sc, null); - if (!limitList.isEmpty()) { - result = limitList.get(0); - } - } - return result; + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + return result.getMaxDataVolumesLimit(); } -} \ No newline at end of file + + @Override + public Integer getMaxHostsPerCluster(HypervisorType hypervisorType, String hypervisorVersion) { + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + return result.getMaxHostsPerCluster(); + } +} diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 11ae26745e8..b4f992d4287 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -1650,6 +1650,7 @@ CREATE TABLE `cloud`.`hypervisor_capabilities` ( `max_guests_limit` bigint unsigned DEFAULT 50, `security_group_enabled` int(1) unsigned DEFAULT 1 COMMENT 'Is security group supported', `max_data_volumes_limit` int unsigned DEFAULT 6 COMMENT 'Max. data volumes per VM supported by hypervisor', + `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. 
hosts in cluster supported by hypervisor', PRIMARY KEY (`id`), CONSTRAINT `uc_hypervisor_capabilities__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; @@ -1661,10 +1662,11 @@ INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 SP2', 50, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0', 50, 1, 13); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0.2', 50, 1, 13); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', 'default', 128, 0); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.0', 128, 0); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.1', 128, 0); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '5.0', 128, 0); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', 'default', 128, 0, 32); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '4.0', 128, 0, 32); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, 
hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '4.1', 128, 0, 32); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.0', 128, 0, 32); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('KVM', 'default', 50, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('Ovm', 'default', 25, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('Ovm', '2.3', 25, 1); From ff9d62a8ed779597e1294937a79f2a47638215e1 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Feb 2013 17:00:59 +0530 Subject: [PATCH 026/486] LDAP UI front end complete --- ui/scripts/globalSettings.js | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/ui/scripts/globalSettings.js b/ui/scripts/globalSettings.js index 9ede3135aa4..e9461490b27 100644 --- a/ui/scripts/globalSettings.js +++ b/ui/scripts/globalSettings.js @@ -150,8 +150,8 @@ }, port: { label: 'Port' , defaultValue: '389' }, - truststore:{ label:'Trust Store' , isHidden:true }, - truststorepassword:{ label:'Trust Store Password' ,isHidden:true } + truststore:{ label:'Trust Store' , isHidden:true , dependsOn:'ssl' }, + truststorepassword:{ label:'Trust Store Password' ,isHidden:true , dependsOn:'ssl'} } @@ -160,11 +160,6 @@ action:function(args) { - var $form = $(this).closest("form"); - if($form.find('.form-item [rel=port]').find('input[ type=checkbox]').is(":checked") ==true) { - 
$form.find('.form-item[rel=truststore]').attr("isHidden",false); - $form.find('.form-item[rel=truststorepassword]').attr("isHidden",false); - } } From 3c764c0be759bc3f05eec77998bbba647f03b686 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Fri, 15 Feb 2013 16:46:01 +0530 Subject: [PATCH 027/486] CLOUDSTACK-1207. UpdateNetwork API fails with NPE if the network offering is not being updated and is non-persistent. --- server/src/com/cloud/network/NetworkServiceImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 70f0fa818bd..37b4903182b 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -1711,7 +1711,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // 4) if network has been upgraded from a non persistent ntwk offering to a persistent ntwk offering, // implement the network if its not already - if ( !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { + if ( networkOfferingChanged && !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { if( network.getState() == Network.State.Allocated) { try { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); From 7d61ee6e99ffa355516da11fc08d30193e109e39 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Fri, 15 Feb 2013 18:27:24 +0530 Subject: [PATCH 028/486] CLOUDSTACK-1295 : Added usage unit tests Fixed Component annontation for usage parsers Fixed mvn target to run usage removed UsageServerComponentConfig which is not required Added region_id to account table in cloud_usage db Conflicts: setup/db/db/schema-40to410.sql --- .../api/command/test/UsageCmdTest.java | 69 +++++++ setup/db/create-schema-premium.sql | 1 + setup/db/db/schema-40to410.sql | 2 + usage/pom.xml | 5 + 
usage/resources/usageApplicationContext.xml | 5 +- .../src/com/cloud/usage/UsageManagerImpl.java | 2 + .../usage/UsageServerComponentConfig.java | 180 ------------------ .../parser/NetworkOfferingUsageParser.java | 3 +- .../usage/parser/NetworkUsageParser.java | 2 + .../parser/PortForwardingUsageParser.java | 5 +- .../parser/SecurityGroupUsageParser.java | 3 +- .../usage/parser/StorageUsageParser.java | 3 +- .../usage/parser/VMInstanceUsageParser.java | 9 +- .../usage/parser/VPNUserUsageParser.java | 3 +- .../cloud/usage/parser/VolumeUsageParser.java | 3 +- .../com/cloud/usage/UsageManagerTest.java | 97 ++++++++++ .../usage/UsageManagerTestConfiguration.java | 94 +++++++++ .../resources/UsageManagerTestContext.xml | 42 ++++ 18 files changed, 335 insertions(+), 193 deletions(-) create mode 100644 api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java delete mode 100644 usage/src/com/cloud/usage/UsageServerComponentConfig.java create mode 100644 usage/test/com/cloud/usage/UsageManagerTest.java create mode 100644 usage/test/com/cloud/usage/UsageManagerTestConfiguration.java create mode 100644 usage/test/resources/UsageManagerTestContext.xml diff --git a/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java b/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java new file mode 100644 index 00000000000..1f218f47e2a --- /dev/null +++ b/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.test; + +import junit.framework.TestCase; +import org.apache.cloudstack.api.command.admin.usage.GetUsageRecordsCmd; +import org.apache.cloudstack.usage.Usage; +import org.apache.cloudstack.usage.UsageService; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class UsageCmdTest extends TestCase { + + private GetUsageRecordsCmd getUsageRecordsCmd; + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + @Before + public void setUp() { + + getUsageRecordsCmd = new GetUsageRecordsCmd() { + + }; + } + + @Test + public void testExecuteSuccess() { + UsageService usageService = Mockito.mock(UsageService.class); + getUsageRecordsCmd._usageService = usageService; + getUsageRecordsCmd.execute(); + } + + @Test + public void testExecuteEmptyResult() { + + UsageService usageService = Mockito.mock(UsageService.class); + + List usageRecords = new ArrayList(); + + Mockito.when(usageService.getUsageRecords(getUsageRecordsCmd)).thenReturn( + usageRecords); + + getUsageRecordsCmd._usageService = usageService; + getUsageRecordsCmd.execute(); + + } + +} diff --git a/setup/db/create-schema-premium.sql b/setup/db/create-schema-premium.sql index 2f86c0b9ab5..e30812ba68e 100644 --- a/setup/db/create-schema-premium.sql +++ b/setup/db/create-schema-premium.sql @@ -137,6 +137,7 @@ CREATE TABLE `cloud_usage`.`account` ( `cleanup_needed` tinyint(1) 
NOT NULL default '0', `network_domain` varchar(100) COMMENT 'Network domain name of the Vms of the account', `default_zone_id` bigint unsigned, + `region_id` int unsigned NOT NULL, CONSTRAINT `uc_account__uuid` UNIQUE (`uuid`), PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 7f0044127f6..3ea8bbdbb73 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -1323,3 +1323,5 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT' INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.cpus', '40', 'The default maximum number of cpu cores that can be used for a project'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.memory', '40960', 'The default maximum memory (in MB) that can be used for a project'); + +ALTER TABLE `cloud_usage`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; diff --git a/usage/pom.xml b/usage/pom.xml index bf2001b432c..4b408c4313b 100644 --- a/usage/pom.xml +++ b/usage/pom.xml @@ -42,6 +42,11 @@ install src test + + + test/resources + + resources diff --git a/usage/resources/usageApplicationContext.xml b/usage/resources/usageApplicationContext.xml index 32da93eecbc..0340038a06a 100644 --- a/usage/resources/usageApplicationContext.xml +++ b/usage/resources/usageApplicationContext.xml @@ -31,7 +31,9 @@ http://www.springframework.org/schema/context/spring-context-3.0.xsd"> - + + + + + + + + + + + + + + + + + + + + + + + + From 825c1c17a13ac1b9c1281c6278c4fc4603211834 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Fri, 15 Feb 2013 19:50:38 +0530 Subject: [PATCH 029/486] CLOUDSTACK-1066: Add building script to build appliance Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 27 +++++++++++++++++++ .../definitions/systemvmtemplate/cleanup.sh | 4 --- 
.../definitions/systemvmtemplate/zerodisk.sh | 2 -- 3 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 tools/appliance/build.sh diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh new file mode 100644 index 00000000000..cfd4e8b2349 --- /dev/null +++ b/tools/appliance/build.sh @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +set -x + +appliance="systemvmtemplate" + +# Initialize veewee and dependencies +bundle + +# Start building the appliance +veewee vbox build $appliance --nogui +veewee vbox halt $appliance diff --git a/tools/appliance/definitions/systemvmtemplate/cleanup.sh b/tools/appliance/definitions/systemvmtemplate/cleanup.sh index abecc56dcb5..6009aad8e78 100644 --- a/tools/appliance/definitions/systemvmtemplate/cleanup.sh +++ b/tools/appliance/definitions/systemvmtemplate/cleanup.sh @@ -17,7 +17,3 @@ rm /lib/udev/rules.d/75-persistent-net-generator.rules echo "Adding a 2 sec delay to the interface up, to make the dhclient happy" echo "pre-up sleep 2" >> /etc/network/interfaces - -# Clean up any copied iso or scripts -rm -v /root/*.iso -rm -v /root/*.sh diff --git a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh index 6ad4205033e..9fc9f6f8693 100644 --- a/tools/appliance/definitions/systemvmtemplate/zerodisk.sh +++ b/tools/appliance/definitions/systemvmtemplate/zerodisk.sh @@ -5,5 +5,3 @@ rm -f /root/* dd if=/dev/zero of=/EMPTY bs=1M rm -f /EMPTY -# Shutdown the appliance, now export it to required image format -shutdown -h now From 1dd40518a4710c03dfd45baabc3122d05e776851 Mon Sep 17 00:00:00 2001 From: Hugo Trippaers Date: Fri, 15 Feb 2013 15:48:22 +0100 Subject: [PATCH 030/486] Make sure initial log messages go somewhere for agent and usage --- packaging/centos63/cloud-agent.rc | 5 +++-- packaging/centos63/cloud-usage.rc | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/packaging/centos63/cloud-agent.rc b/packaging/centos63/cloud-agent.rc index acf81316479..19782b78ce1 100755 --- a/packaging/centos63/cloud-agent.rc +++ b/packaging/centos63/cloud-agent.rc @@ -31,7 +31,8 @@ whatami=cloudstack-agent SHORTNAME="$whatami" PIDFILE=/var/run/"$whatami".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloudstack/agent/agent.log +LOGDIR=/var/log/cloudstack/agent 
+LOGFILE=${LOGFIR}/agent.log PROGNAME="Cloud Agent" CLASS="com.cloud.agent.AgentShell" JSVC=`which jsvc 2>/dev/null`; @@ -67,7 +68,7 @@ export CLASSPATH="$SCP:$DCP:$ACP:$JCP:/etc/cloudstack/agent:/usr/share/cloudstac start() { echo -n $"Starting $PROGNAME: " if hostname --fqdn >/dev/null 2>&1 ; then - $JSVC -cp "$CLASSPATH" -pidfile "$PIDFILE" $CLASS + $JSVC -cp "$CLASSPATH" -outfile "${LOGDIR}/cloudstack-agent.out" -errfile "${LOGDIR}/cloudstack-agent.err" -pidfile "$PIDFILE" $CLASS RETVAL=$? echo else diff --git a/packaging/centos63/cloud-usage.rc b/packaging/centos63/cloud-usage.rc index 8bee5aeb6a0..dc97cd36f89 100755 --- a/packaging/centos63/cloud-usage.rc +++ b/packaging/centos63/cloud-usage.rc @@ -35,7 +35,8 @@ SHORTNAME="cloudstack-usage" PIDFILE=/var/run/"$SHORTNAME".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloudstack/usage/usage.log +LOGDIR=/var/log/cloudstack/usage +LOGFILE=${LOGDIR}/usage.log PROGNAME="CloudStack Usage Monitor" CLASS="com.cloud.usage.UsageServer" PROG="jsvc" @@ -79,7 +80,7 @@ start() { echo -n "Starting $PROGNAME" "$SHORTNAME" - if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" -errfile SYSLOG -Dpid=$$ $CLASS + if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -outfile "${LOGDIR}/cloudstack-usage.out" -errfile "${LOGDIR}/cloudstack-usage.err" -pidfile "$PIDFILE" -user "$USER" -Dpid=$$ $CLASS RETVAL=$? then rc=0 From 05437d0e975562c9aff25415ed0b9681b702b6af Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Fri, 15 Feb 2013 20:45:32 +0530 Subject: [PATCH 031/486] maven pom: put apache cloudstack tools together in umbrella pom minor pom refactor to put all the tools under one placeholder pom. Also adds a profile for marvin to run deployDataCenter via mvn options. 
$mvn -Pdeveloper,marvin -pl :cloud-marvin -Dmarvin.config= OR $cd tools/marvin $mvn -Pmarvin -pl :cloud-marvin -Dmarvin.config=path/to/config will deploy the datacenter using the marvin.config property's value Signed-off-by: Prasanna Santhanam --- developer/pom.xml | 117 ++---------------------- pom.xml | 117 +++++++++++++++++++++++- tools/apidoc/pom.xml | 12 +-- tools/cli/pom.xml | 4 +- tools/devcloud-kvm/pom.xml | 2 +- tools/devcloud/pom.xml | 2 +- tools/marvin/marvin/deployDataCenter.py | 8 +- tools/marvin/pom.xml | 68 +++++++++----- tools/pom.xml | 44 +++++++++ 9 files changed, 217 insertions(+), 157 deletions(-) create mode 100644 tools/pom.xml diff --git a/developer/pom.xml b/developer/pom.xml index 79b24665542..81bb3ffc208 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -27,7 +27,12 @@ 5.1.21 runtime
- + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + compile + install @@ -189,114 +194,6 @@ - - - simulator - - - deploydb-simulator - - - - - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - true - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - - - - process-resources - create-schema - - java - - - - - false - true - - org.apache.cloudstack - cloud-server - - com.cloud.upgrade.DatabaseCreator - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - ${basedir}/target/db/create-schema-simulator.sql - ${basedir}/target/db/templates.simulator.sql - - com.cloud.upgrade.DatabaseUpgradeChecker - --database=simulator - --rootpassword=${db.root.password} - - - - - catalina.home - ${project.parent.basedir}/utils - - - - - - - + diff --git a/pom.xml b/pom.xml index 820e9380cf1..eb7a79026ea 100644 --- a/pom.xml +++ b/pom.xml @@ -504,13 +504,12 @@ developer + + tools/devcloud/devcloud.cfg + developer - tools/apidoc - tools/devcloud - tools/devcloud-kvm - tools/marvin - tools/cli + tools @@ -524,5 +523,113 @@ vmware-base + + simulator + + + deploydb-simulator + + + + + + org.codehaus.mojo + properties-maven-plugin + 1.0-alpha-2 + + + initialize + + read-project-properties + + + + ${project.basedir}/utils/conf/db.properties + ${project.basedir}/utils/conf/db.properties.override + + true + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + 
+ + mysql + mysql-connector-java + ${cs.mysql.version} + + + commons-dbcp + commons-dbcp + ${cs.dbcp.version} + + + commons-pool + commons-pool + ${cs.pool.version} + + + org.jasypt + jasypt + ${cs.jasypt.version} + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-server + ${project.version} + + + + + process-resources + create-schema + + java + + + + + false + true + + org.apache.cloudstack + cloud-server + + com.cloud.upgrade.DatabaseCreator + + + ${project.basedir}/utils/conf/db.properties + ${project.basedir}/utils/conf/db.properties.override + + ${basedir}/target/db/create-schema-simulator.sql + ${basedir}/target/db/templates.simulator.sql + + com.cloud.upgrade.DatabaseUpgradeChecker + --database=simulator + --rootpassword=${db.root.password} + + + + + catalina.home + ${project.basedir}/utils + + + + + + + diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index 6b159ff54eb..7358b926642 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -12,22 +12,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 cloud-apidoc - Apache CloudStack apidoc Tools + Apache CloudStack apidocs pom org.apache.cloudstack cloudstack 4.1.0-SNAPSHOT - ../../pom.xml + ../pom.xml - - - org.apache.cloudstack - cloud-client-ui - ${project.version} - war - - ../../client/target/cloud-client-ui-4.1.0-SNAPSHOT/WEB-INF/ ${client.config.base}/lib diff --git a/tools/cli/pom.xml b/tools/cli/pom.xml index d99d6fb3aec..5f14d7c7d5a 100644 --- a/tools/cli/pom.xml +++ b/tools/cli/pom.xml @@ -20,13 +20,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> 4.0.0 cloud-cli - Apache CloudStack Developer Tools: cloudmonkey cli + Apache CloudStack cloudmonkey cli pom org.apache.cloudstack cloudstack 4.1.0-SNAPSHOT - ../../pom.xml + ../pom.xml diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index 
e90a257ccee..d4e12e48c41 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -18,7 +18,7 @@ org.apache.cloudstack cloudstack 4.1.0-SNAPSHOT - ../../pom.xml + ../pom.xml diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index cbf93a4918f..5257cb8d1bf 100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -18,7 +18,7 @@ org.apache.cloudstack cloudstack 4.1.0-SNAPSHOT - ../../pom.xml + ../pom.xml diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py index e4f7eace9bd..cec920c5ff0 100644 --- a/tools/marvin/marvin/deployDataCenter.py +++ b/tools/marvin/marvin/deployDataCenter.py @@ -19,16 +19,16 @@ import configGenerator import cloudstackException import cloudstackTestClient -import sys -import os import logging from cloudstackAPI import * +from os import path from optparse import OptionParser class deployDataCenters(): def __init__(self, cfgFile): - if not os.path.exists(cfgFile): + if not path.exists(cfgFile) \ + and not path.exists(path.abspath(cfgFile)): raise IOError("config file %s not found. 
please specify a valid config file"%cfgFile) self.configFile = cfgFile @@ -444,7 +444,7 @@ if __name__ == "__main__": parser = OptionParser() - parser.add_option("-i", "--intput", action="store", \ + parser.add_option("-i", "--input", action="store", \ default="./datacenterCfg", dest="input", help="the path \ where the json config file generated, by default is \ ./datacenterCfg") diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index 51c70cd92b8..f5561ca7722 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -12,15 +12,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 cloud-marvin - Apache CloudStack Developer Tools: marvin + Apache CloudStack marvin pom org.apache.cloudstack - cloudstack + cloud-tools 4.1.0-SNAPSHOT - ../../pom.xml + ../pom.xml - install @@ -29,14 +28,15 @@ 1.7 - generate-resource - generate-resources + clean + clean run - + + Deleting ${project.artifactId} API sources @@ -54,12 +54,13 @@ exec - marvin + ${basedir}/marvin python codegenerator.py -s ${basedir}/../apidoc/target/commands.xml + Generating ${project.artifactId} API classes} @@ -78,23 +79,42 @@ - - deploy - deploy - - exec - - - dist - pip - - install - Marvin-0.1.0.tar.gz - - - + - + + + + marvin + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + package + + exec + + + + + ${basedir}/marvin + python + + deployDataCenter.py + -i + ${user.dir}/${marvin.config} + + + + + + + diff --git a/tools/pom.xml b/tools/pom.xml new file mode 100644 index 00000000000..982306f2cce --- /dev/null +++ b/tools/pom.xml @@ -0,0 +1,44 @@ + + + + 4.0.0 + + Apache CloudStack Developer Tools + org.apache.cloudstack + cloud-tools + pom + + org.apache.cloudstack + cloudstack + 4.1.0-SNAPSHOT + ../pom.xml + + + install + + + apidoc + marvin + cli + devcloud + devcloud-kvm + + From 6279433f458263e845b3c6f03975e8392f49ccef Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Fri, 15 Feb 2013 10:22:56 -0500 Subject: [PATCH 
032/486] CLOUDSTACK-625: Correcting legal documentation for the require.js file that was added for the UI plugin feature. Signed-off-by: Chip Childers --- LICENSE | 25 + pom.xml | 1 + tools/whisker/LICENSE | 4960 +++++++++++++++++++- tools/whisker/descriptor-for-packaging.xml | 12 + tools/whisker/descriptor.xml | 12 + 5 files changed, 4919 insertions(+), 91 deletions(-) diff --git a/LICENSE b/LICENSE index eda958726f8..5e29e2b12bc 100644 --- a/LICENSE +++ b/LICENSE @@ -471,6 +471,31 @@ Within the ui/lib directory from George McGinley Smith jquery.easing.js + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from The Dojo Foundation http://dojofoundation.org/ + require.js from http://github.com/jrburke/requirejs + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2011, John Resig diff --git a/pom.xml b/pom.xml index eb7a79026ea..1ee889112ef 100644 --- a/pom.xml +++ b/pom.xml @@ -402,6 +402,7 @@ ui/lib/qunit/qunit.css ui/lib/qunit/qunit.js ui/lib/reset.css + ui/lib/require.js waf patches/systemvm/debian/systemvm.vmx patches/systemvm/debian/config/root/.ssh/authorized_keys diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index 025cb33136b..bada5182ce8 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -748,7 +748,7 @@ Within the deps/awsapi-lib directory The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. � + computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 @@ -1138,7 +1138,7 @@ Within the deps/awsapi-lib directory The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. � + computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 @@ -1526,7 +1526,7 @@ Within the deps/awsapi-lib directory The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. � + computer software" (as that term is defined at 48 C.F.R. 
¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 @@ -1867,9 +1867,7 @@ Within the deps/awsapi-lib directory slf4j-api-1.5.11.jar from https://github.com/qos-ch/slf4j slf4j-jdk14-1.5.11.jar from https://github.com/qos-ch/slf4j - licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) - MOZILLA PUBLIC LICENSE @@ -2346,71 +2344,70 @@ Within the deps/awsapi-lib directory from Shigeru Chiba http://www.csg.ci.i.u-tokyo.ac.jp/~chiba/javassist/ javassist-3.9.0.GA.jar from http://sourceforge.net/projects/jboss/files/Javassist/ - licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) - Copyright (c) 2007-2012 VMware, Inc. All Rights Reserved. - + Copyright (c) 2007-2012 VMware, Inc. All Rights Reserved. + MOZILLA PUBLIC LICENSE Version 1.1 - + --------------- - + 1. Definitions. - + 1.0.1. "Commercial Use" means distribution or otherwise making the Covered Code available to a third party. - + 1.1. "Contributor" means each entity that creates or contributes to the creation of Modifications. - + 1.2. "Contributor Version" means the combination of the Original Code, prior Modifications used by a Contributor, and the Modifications made by that particular Contributor. - + 1.3. "Covered Code" means the Original Code or Modifications or the combination of the Original Code and Modifications, in each case including portions thereof. - + 1.4. "Electronic Distribution Mechanism" means a mechanism generally accepted in the software development community for the electronic transfer of data. - + 1.5. "Executable" means Covered Code in any form other than Source Code. - + 1.6. "Initial Developer" means the individual or entity identified as the Initial Developer in the Source Code notice required by Exhibit A. - + 1.7. 
"Larger Work" means a work which combines Covered Code or portions thereof with code not governed by the terms of this License. - + 1.8. "License" means this document. - + 1.8.1. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - + 1.9. "Modifications" means any addition to or deletion from the substance or structure of either the Original Code or any previous Modifications. When Covered Code is released as a series of files, a Modification is: A. Any addition to or deletion from the contents of a file containing Original Code or previous Modifications. - + B. Any new file that contains any part of the Original Code or previous Modifications. - + 1.10. "Original Code" means Source Code of computer software code which is described in the Source Code notice required by Exhibit A as Original Code, and which, at the time of its release under this License is not already Covered Code governed by this License. - + 1.10.1. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - + 1.11. "Source Code" means the preferred form of the Covered Code for making modifications to it, including all modules it contains, plus any associated interface definition files, scripts used to control @@ -2420,7 +2417,7 @@ Within the deps/awsapi-lib directory Source Code can be in a compressed or archival form, provided the appropriate decompression or de-archiving software is widely available for no charge. - + 1.12. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License or a future version of this License issued under Section 6.1. 
@@ -2431,9 +2428,9 @@ Within the deps/awsapi-lib directory contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - + 2. Source Code License. - + 2.1. The Initial Developer Grant. The Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property @@ -2443,33 +2440,33 @@ Within the deps/awsapi-lib directory modify, display, perform, sublicense and distribute the Original Code (or portions thereof) with or without Modifications, and/or as part of a Larger Work; and - + (b) under Patents Claims infringed by the making, using or selling of Original Code, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Code (or portions thereof). - + (c) the licenses granted in this Section 2.1(a) and (b) are effective on the date Initial Developer first distributes Original Code under the terms of this License. - + (d) Notwithstanding Section 2.1(b) above, no patent license is granted: 1) for code that You delete from the Original Code; 2) separate from the Original Code; or 3) for infringements caused by: i) the modification of the Original Code or ii) the combination of the Original Code with other software or devices. - + 2.2. Contributor Grant. 
Subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license - + (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor, to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof) either on an unmodified basis, with other Modifications, as Covered Code and/or as part of a Larger Work; and - + (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions @@ -2478,11 +2475,11 @@ Within the deps/awsapi-lib directory Contributor (or portions thereof); and 2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - + (c) the licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first makes Commercial Use of the Covered Code. - + (d) Notwithstanding Section 2.2(b) above, no patent license is granted: 1) for any code that Contributor has deleted from the Contributor Version; 2) separate from the Contributor Version; @@ -2492,9 +2489,9 @@ Within the deps/awsapi-lib directory Contributor Version) or other devices; or 4) under Patent Claims infringed by Covered Code in the absence of Modifications made by that Contributor. - + 3. Distribution Obligations. - + 3.1. Application of License. The Modifications which You create or to which You contribute are governed by the terms of this License, including without limitation @@ -2507,7 +2504,7 @@ Within the deps/awsapi-lib directory License or the recipients' rights hereunder. However, You may include an additional document offering the additional rights described in Section 3.5. - + 3.2. Availability of Source Code. 
Any Modification which You create or to which You contribute must be made available in Source Code form under the terms of this License @@ -2520,7 +2517,7 @@ Within the deps/awsapi-lib directory has been made available to such recipients. You are responsible for ensuring that the Source Code version remains available even if the Electronic Distribution Mechanism is maintained by a third party. - + 3.3. Description of Modifications. You must cause all Covered Code to which You contribute to contain a file documenting the changes You made to create that Covered Code and @@ -2530,7 +2527,7 @@ Within the deps/awsapi-lib directory Initial Developer in (a) the Source Code, and (b) in any notice in an Executable version or related documentation in which You describe the origin or ownership of the Covered Code. - + 3.4. Intellectual Property Matters (a) Third Party Claims. If Contributor has knowledge that a license under a third party's @@ -2546,20 +2543,20 @@ Within the deps/awsapi-lib directory (such as notifying appropriate mailing lists or newsgroups) reasonably calculated to inform those who received the Covered Code that new knowledge has been obtained. - + (b) Contributor APIs. If Contributor's Modifications include an application programming interface and Contributor has knowledge of patent licenses which are reasonably necessary to implement that API, Contributor must also include this information in the LEGAL file. - + (c) Representations. Contributor represents that, except as disclosed pursuant to Section 3.4(a) above, Contributor believes that Contributor's Modifications are Contributor's original creation(s) and/or Contributor has sufficient rights to grant the rights conveyed by this License. - + 3.5. Required Notices. You must duplicate the notice in Exhibit A in each file of the Source Code. 
If it is not possible to put such notice in a particular Source @@ -2579,7 +2576,7 @@ Within the deps/awsapi-lib directory Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - + 3.6. Distribution of Executable Versions. You may distribute Covered Code in Executable form only if the requirements of Section 3.1-3.5 have been met for that Covered Code, @@ -2602,15 +2599,15 @@ Within the deps/awsapi-lib directory Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - + 3.7. Larger Works. You may create a Larger Work by combining Covered Code with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Code. - + 4. Inability to Comply Due to Statute or Regulation. - + If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Code due to statute, judicial order, or regulation then You must: (a) comply with @@ -2621,19 +2618,19 @@ Within the deps/awsapi-lib directory extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. - + 5. Application of this License. - + This License applies to code to which the Initial Developer has attached the notice in Exhibit A and to related Covered Code. - + 6. Versions of the License. - + 6.1. New Versions. Netscape Communications Corporation ("Netscape") may publish revised and/or new versions of the License from time to time. Each version will be given a distinguishing version number. - + 6.2. Effect of New Versions. 
Once Covered Code has been published under a particular version of the License, You may always continue to use it under the terms of that @@ -2641,7 +2638,7 @@ Within the deps/awsapi-lib directory of any subsequent version of the License published by Netscape. No one other than Netscape has the right to modify the terms applicable to Covered Code created under this License. - + 6.3. Derivative Works. If You create or use a modified version of this License (which you may only do in order to apply it to code which is not already Covered Code @@ -2655,9 +2652,9 @@ Within the deps/awsapi-lib directory Developer, Original Code or Contributor in the notice described in Exhibit A shall not of themselves be deemed to be modifications of this License.) - + 7. DISCLAIMER OF WARRANTY. - + COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF @@ -2668,9 +2665,9 @@ Within the deps/awsapi-lib directory COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - + 8. TERMINATION. - + 8.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. All @@ -2678,12 +2675,12 @@ Within the deps/awsapi-lib directory survive any termination of this License. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - + 8.2. 
If You initiate litigation by asserting a patent infringement claim (excluding declatory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You file such action is referred to as "Participant") alleging that: - + (a) such Participant's Contributor Version directly or indirectly infringes any patent, then any and all rights granted by such Participant to You under Sections 2.1 and/or 2.2 of this License @@ -2698,14 +2695,14 @@ Within the deps/awsapi-lib directory is not withdrawn, the rights granted by Participant to You under Sections 2.1 and/or 2.2 automatically terminate at the expiration of the 60 day notice period specified above. - + (b) any software, hardware, or device, other than such Participant's Contributor Version, directly or indirectly infringes any patent, then any rights granted to You by such Participant under Sections 2.1(b) and 2.2(b) are revoked effective as of the date You first made, used, sold, distributed, or had made, Modifications made by that Participant. - + 8.3. If You assert a patent infringement claim against Participant alleging that such Participant's Contributor Version directly or indirectly infringes any patent where such claim is resolved (such as @@ -2714,14 +2711,14 @@ Within the deps/awsapi-lib directory granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. - + 8.4. In the event of termination under Sections 8.1 or 8.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or any distributor hereunder prior to termination shall survive termination. - + 9. LIMITATION OF LIABILITY. 
- + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, @@ -2736,9 +2733,9 @@ Within the deps/awsapi-lib directory PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - + 10. U.S. GOVERNMENT END USERS. - + The Covered Code is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" and "commercial computer software documentation," as such @@ -2746,9 +2743,9 @@ Within the deps/awsapi-lib directory C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Code with only those rights set forth herein. - + 11. MISCELLANEOUS. - + This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent @@ -2767,44 +2764,44 @@ Within the deps/awsapi-lib directory Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. - + 12. RESPONSIBILITY FOR CLAIMS. - + As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - + 13. MULTIPLE-LICENSED CODE. - + Initial Developer may designate portions of the Covered Code as "Multiple-Licensed". 
"Multiple-Licensed" means that the Initial Developer permits you to utilize portions of the Covered Code under Your choice of the NPL or the alternative licenses, if any, specified by the Initial Developer in the file described in Exhibit A. - + EXHIBIT A -Mozilla Public License. - + ``The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ - + Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. - + The Original Code is RabbitMQ. - + The Initial Developer of the Original Code is VMware, Ltd.. - Portions created by VMware, Ltd. are Copyright (C) + Portions created by VMware, Ltd. are Copyright (C) 2007-2012 VMware, Inc.. All Rights Reserved. - + Contributor(s): . - + Alternatively, the contents of this file may be used under the terms of the GNU General Public License Version 2 license (the "[GPL] License"), in which case the provisions of [GPL] License are applicable instead of those @@ -2815,17 +2812,16 @@ Within the deps/awsapi-lib directory other provisions required by the [GPL] License. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the [GPL] License." - + [NOTE: The text of this Exhibit A may differ slightly from the text of the notices in the Source Code files of the Original Code. You should use the text of this Exhibit A rather than the text found in the Original Code Source Code for Your Modifications.] 
- - - from VMware, Inc http://www.vmware.com/ + + + from VMware, Inc http://www.vmware.com/ rabbitmq-client.jar from http://www.rabbitmq.com/java-client.html - Within the patches/systemvm/debian/config/etc directory placed in the public domain by Adiscon GmbH http://www.adiscon.com/ @@ -3462,7 +3458,7 @@ Within the target/jar directory The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. � + computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 @@ -3851,7 +3847,7 @@ Within the target/jar directory The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. � + computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 
227.7202-1 @@ -4377,7 +4373,7 @@ Within the target/jar directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (C) 2008 T�th Istv�n + Copyright (C) 2008 Tóth István 2008-2012 Daniel Veillard 2009-2011 Bryan Kearney @@ -4480,6 +4476,31 @@ Within the ui/lib directory from George McGinley Smith jquery.easing.js + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from The Dojo Foundation http://dojofoundation.org/ + require.js from http://github.com/jrburke/requirejs + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2011, John Resig @@ -4508,7 +4529,7 @@ Within the ui/lib directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2006 - 2011 J�rn Zaefferer + Copyright (c) 2006 - 2011 Jörn Zaefferer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -4703,7 +4724,4764 @@ Within the ui/lib/jquery-ui directory Within the ui/lib/qunit directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2012 John Resig, J�rn Zaefferer + Copyright (c) 2012 John Resig, Jörn Zaefferer + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from Jorn Zaefferer + qunit.css from http://docs.jquery.com/QUnit + qunit.js from http://docs.jquery.com/QUnit + +Within the utils/src/com/cloud/utils/db directory + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2004 Clinton Begin + from Clinton Begin http://code.google.com/p/mybatis/ + ScriptRunner.java from http://code.google.com/p/mybatis/ + +Copyright (c) 2013 The Apache Software Foundation + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + +This distribution contains third party resources. +Within the . directory + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2005-2010 Thomas Nagy + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from Thomas Nagy http://code.google.com/p/waf/ + waf + +Within the awsapi directory + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2005-2010 Thomas Nagy + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. 
Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from Thomas Nagy http://code.google.com/p/waf/ + waf + +Within the console-proxy/js directory + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2009, John Resig + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from John Resig + jquery.js + +Within the deps directory + licensed under the BSD (2-clause) for XenServerJava http://www.opensource.org/licenses/BSD-2-Clause (as follows) + + Copyright (c) Citrix Systems, Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1) Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + from Citrix Systems, Inc http://www.citrix.com/ + XenServerJava from http://community.citrix.com/cdn/xs/sdks/ + +Within the deps/awsapi-lib directory + licensed under the ANTLR 2 License http://www.antlr2.org/license.html (as follows) + + + ANTLR 2 License + + We reserve no legal rights to the ANTLR--it is fully in the public domain. An + individual or company may do whatever they wish with source code distributed + with ANTLR or the code generated by ANTLR, including the incorporation of ANTLR, + or its output, into commerical software. We encourage users to develop software + with ANTLR. However, we do ask that credit is given to us for developing ANTLR. + By "credit", we mean that if you use ANTLR or incorporate any source code into + one of your programs (commercial product, research project, or otherwise) that + you acknowledge this fact somewhere in the documentation, research report, + etc... If you like ANTLR and have developed a nice tool with the output, please + mention that you developed it using ANTLR. In addition, we ask that the headers + remain intact in our source code. As long as these guidelines are kept, we + expect to continue enhancing this system and expect to make other tools + available as they are completed. 
+ + from ANTLR Translator Generator Project http://www.antlr2.org/ + antlr-2.7.6.jar from http://repo1.maven.org/maven2/antlr/antlr/2.7.6/antlr-2.7.6-sources.jar + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2004-2008 The Apache Software Foundation + from The Apache Software Foundation http://www.apache.org/ + XmlSchema-1.4.3.jar + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2004-2012 The Apache Software Foundation + from The Apache Software Foundation http://www.apache.org/ + apache-log4j-extras-1.0.jar from http://logging.apache.org/log4j/companions/extras/ + axiom-api-1.2.8.jar from http://ws.apache.org/axiom/source-repository.html + axiom-impl-1.2.8.jar from http://ws.apache.org/axiom/source-repository.html + axis2-1.5.1.jar from http://axis.apache.org/axis/ + axis2-adb-1.5.1.jar from http://axis.apache.org/axis/ + axis2-ant-plugin-1.5.1.jar from http://axis.apache.org/axis/ + axis2-codegen-1.4.1.jar from http://axis.apache.org/axis/ + axis2-jaxbri-1.5.1.jar from http://axis.apache.org/axis/ + axis2-jaxws-1.5.1.jar from http://axis.apache.org/axis/ + axis2-jibx-1.5.1.jar from http://axis.apache.org/axis/ + axis2-json-1.5.1.jar from http://axis.apache.org/axis/ + axis2-kernel-1.5.1.jar from http://axis.apache.org/axis/ + axis2-transport-http-1.5.1.jar from http://axis.apache.org/axis/ + axis2-transport-local-1.5.1.jar from http://axis.apache.org/axis/ + axis2-webapp-1.5.1.war from http://axis.apache.org/axis/ + commons-codec-1.4.jar from http://commons.apache.org/codec/ + commons-collections-3.1.jar from http://commons.apache.org/collections/ + commons-fileupload-1.2.jar from http://commons.apache.org/fileupload/ + commons-httpclient-3.1.jar from http://hc.apache.org/httpclient-3.x/ + commons-io-1.4.jar from http://commons.apache.org/io/ + commons-logging-1.1.1.jar from http://commons.apache.org/logging/ + 
httpcore-4.0.jar from http://hc.apache.org/httpcomponents-core-ga/ + log4j-1.2.15.jar from http://logging.apache.org/log4j/ + neethi-2.0.4.jar from http://svn.apache.org/viewvc/webservices/commons/tags/neethi/2.0.4/ + rampart-lib from http://axis.apache.org/axis2/java/rampart/download/1.5/download.cgi + woden-api-1.0M8.jar from http://svn.apache.org/viewvc/webservices/woden/tags/1.0M8_20080423/ + woden-impl-dom-1.0M8.jar from http://svn.apache.org/viewvc/webservices/woden/tags/1.0M8_20080423/ + wss4j-1.5.8.jar from http://ws.apache.org/wss4j/source-repository.html + xercesImpl.jar from http://xerces.apache.org/xerces2-j/source-repository.html + xml-apis.jar from http://repo1.maven.org/maven2/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04-sources.jar + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2009 Google Inc. + from Google Inc. http://google.com + cloud-gson.jar from http://code.google.com/p/google-gson/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + + from Json.simple Project http://code.google.com/p/json-simple/ + json_simple-1.1.jar from http://code.google.com/p/json-simple/source/checkout + + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2002-2011 Atsuhiko Yamanaka, JCraft,Inc. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. 
Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from JCraft http://www.jcraft.com/ + jsch-0.1.42.jar from http://www.jcraft.com/jsch/ + + licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) + + Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. + + COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + + 1. Definitions. + + 1.1. "Contributor" means each individual or entity that + creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the + Original Software, prior Modifications used by a + Contributor (if any), and the Modifications made by that + particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or + (b) Modifications, or (c) the combination of files + containing Original Software with files containing + Modifications, in each case including portions thereof. + + 1.4. 
"Executable" means the Covered Software in any form + other than Source Code. + + 1.5. "Initial Developer" means the individual or entity + that first makes Original Software available under this + License. + + 1.6. "Larger Work" means a work which combines Covered + Software or portions thereof with code not governed by the + terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the + maximum extent possible, whether at the time of the initial + grant or subsequently acquired, any and all of the rights + conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable + form of any of the following: + + A. Any file that results from an addition to, + deletion from or modification of the contents of a + file containing Original Software or previous + Modifications; + + B. Any new file that contains any part of the + Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and + Executable form of computer software code that is + originally released under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned + or hereafter acquired, including without limitation, + method, process, and apparatus claims, in any patent + Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer + software code in which modifications are made and (b) + associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal + entity exercising rights under, and complying with all of + the terms of, this License. For legal entities, "You" + includes any entity which controls, is controlled by, or is + under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or + indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (b) ownership + of more than fifty percent (50%) of the outstanding shares + or beneficial ownership of such entity. + + 2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the + Initial Developer hereby grants You a world-wide, + royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Initial Developer, + to use, reproduce, modify, display, perform, + sublicense and distribute the Original Software (or + portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using or selling of Original Software, to make, have + made, use, practice, sell, and offer for sale, and/or + otherwise dispose of the Original Software (or + portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) + are effective on the date Initial Developer first + distributes or otherwise makes the Original Software + available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent + license is granted: (1) for code that You delete from + the Original Software, or (2) for infringements + caused by: (i) the modification of the Original + Software, or (ii) the combination of the Original + Software with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Contributor to + use, reproduce, modify, display, perform, sublicense + and distribute the Modifications created by such + Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as + Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using, or selling of Modifications made by that + Contributor either alone and/or in combination with + its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, + have made, and/or otherwise dispose of: (1) + Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications + made by that Contributor with its Contributor Version + (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and + 2.2(b) are effective on the date Contributor first + distributes or otherwise makes the Modifications + available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent + license is granted: (1) for any code that Contributor + has deleted from the Contributor Version; (2) for + infringements caused by: (i) third party + modifications of Contributor Version, or (ii) the + combination of Modifications made by that Contributor + with other software (except as part of the + Contributor Version) or other devices; or (3) under + Patent Claims infringed by Covered Software in the + absence of Modifications made by that Contributor. + + 3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in + Source Code form and that Source Code form must be + distributed only under the terms of this License. You must + include a copy of this License with every copy of the + Source Code form of the Covered Software You distribute or + otherwise make available. You must inform recipients of any + such Covered Software in Executable form as to how they can + obtain such Covered Software in Source Code form in a + reasonable manner on or through a medium customarily used + for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You + contribute are governed by the terms of this License. You + represent that You believe Your Modifications are Your + original creation(s) and/or You have sufficient rights to + grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications + that identifies You as the Contributor of the Modification. + You may not remove or alter any copyright, patent or + trademark notices contained within the Covered Software, or + any notices of licensing or any descriptive text giving + attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered + Software in Source Code form that alters or restricts the + applicable version of this License or the recipients' + rights hereunder. You may choose to offer, and to charge a + fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on + behalf of the Initial Developer or any Contributor. 
You + must make it absolutely clear that any such warranty, + support, indemnity or liability obligation is offered by + You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of + warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered + Software under the terms of this License or under the terms + of a license of Your choice, which may contain terms + different from this License, provided that You are in + compliance with the terms of this License and that the + license for the Executable form does not attempt to limit + or alter the recipient's rights in the Source Code form + from the rights set forth in this License. If You + distribute the Covered Software in Executable form under a + different license, You must make it absolutely clear that + any terms which differ from this License are offered by You + alone, not by the Initial Developer or Contributor. You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms + You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software + with other code not governed by the terms of this License + and distribute the Larger Work as a single product. In such + a case, You must make sure the requirements of this License + are fulfilled for the Covered Software. + + 4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and + may publish revised and/or new versions of this License + from time to time. Each version will be given a + distinguishing version number. Except as provided in + Section 4.3, no one other than the license steward has the + right to modify this License. + + 4.2. 
Effect of New Versions. + + You may always continue to use, distribute or otherwise + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. If the Initial Developer includes a + notice in the Original Software prohibiting it from being + distributed or otherwise made available under any + subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to + use, distribute or otherwise make the Covered Software + available under the terms of any subsequent version of the + License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a + new license for Your Original Software, You may create and + use a modified version of this License if You: (a) rename + the license and remove any references to the name of the + license steward (except to note that the license differs + from this License); and (b) otherwise make it clear that + the license contains terms which differ from this License. + + 5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF + ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. 
+ + 6. TERMINATION. + + 6.1. This License and the rights granted hereunder will + terminate automatically if You fail to comply with terms + herein and fail to cure such breach within 30 days of + becoming aware of the breach. Provisions which, by their + nature, must remain in effect beyond the termination of + this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or + a Contributor (the Initial Developer or Contributor against + whom You assert such claim is referred to as "Participant") + alleging that the Participant Software (meaning the + Contributor Version where the Participant is a Contributor + or the Original Software where the Participant is the + Initial Developer) directly or indirectly infringes any + patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial + Developer (if the Initial Developer is not the Participant) + and all Contributors under Sections 2.1 and/or 2.2 of this + License shall, upon 60 days notice from Participant + terminate prospectively and automatically at the expiration + of such 60 day notice period, unless if within such 60 day + period You withdraw Your claim with respect to the + Participant Software against such Participant either + unilaterally or pursuant to a written agreement with + Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 + above, all end user licenses that have been validly granted + by You or any distributor hereunder prior to termination + (excluding licenses granted to You by any distributor) + shall survive termination. + + 7. LIMITATION OF LIABILITY. 
+ + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + + 8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. + 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 + through 227.7202-4 (June 1995), all U.S. Government End Users + acquire Covered Software with only those rights set forth herein. + This U.S. Government Rights clause is in lieu of, and supersedes, + any other FAR, DFAR, or other clause or provision that addresses + Government rights in computer software under this License. + + 9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the + extent necessary to make it enforceable. 
This License shall be + governed by the law of the jurisdiction specified in a notice + contained within the Original Software (except to the extent + applicable law, if any, provides otherwise), excluding such + jurisdiction's conflict-of-law provisions. Any litigation + relating to this License shall be subject to the jurisdiction of + the courts located in the jurisdiction and venue specified in a + notice contained within the Original Software, with the losing + party responsible for costs, including, without limitation, court + costs and reasonable attorneys' fees and expenses. The + application of the United Nations Convention on Contracts for the + International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall + be construed against the drafter shall not apply to this License. + You agree that You alone are responsible for compliance with the + United States export administration regulations (and the export + control laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + + 10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. 
+ + from Oracle and/or its affiliates http://oracle.com + jaxb-api-2.1.jar from http://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.1/jaxb-api-2.1-sources.jar + jaxb-impl-2.1.7.jar from http://repo1.maven.org/maven2/com/sun/xml/bind/jaxb-impl/2.1.7/jaxb-impl-2.1.7-sources.jar + jaxb-xjc-2.1.7.jar from http://repo1.maven.org/maven2/com/sun/xml/bind/jaxb-xjc/2.1.7/jaxb-xjc-2.1.7-sources.jar + + licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) + + Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved. + + COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + + 1. Definitions. + + 1.1. "Contributor" means each individual or entity that + creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the + Original Software, prior Modifications used by a + Contributor (if any), and the Modifications made by that + particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or + (b) Modifications, or (c) the combination of files + containing Original Software with files containing + Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form + other than Source Code. + + 1.5. "Initial Developer" means the individual or entity + that first makes Original Software available under this + License. + + 1.6. "Larger Work" means a work which combines Covered + Software or portions thereof with code not governed by the + terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the + maximum extent possible, whether at the time of the initial + grant or subsequently acquired, any and all of the rights + conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable + form of any of the following: + + A. 
Any file that results from an addition to, + deletion from or modification of the contents of a + file containing Original Software or previous + Modifications; + + B. Any new file that contains any part of the + Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and + Executable form of computer software code that is + originally released under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned + or hereafter acquired, including without limitation, + method, process, and apparatus claims, in any patent + Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer + software code in which modifications are made and (b) + associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal + entity exercising rights under, and complying with all of + the terms of, this License. For legal entities, "You" + includes any entity which controls, is controlled by, or is + under common control with You. For purposes of this + definition, "control" means (a) the power, direct or + indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (b) ownership + of more than fifty percent (50%) of the outstanding shares + or beneficial ownership of such entity. + + 2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the + Initial Developer hereby grants You a world-wide, + royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Initial Developer, + to use, reproduce, modify, display, perform, + sublicense and distribute the Original Software (or + portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using or selling of Original Software, to make, have + made, use, practice, sell, and offer for sale, and/or + otherwise dispose of the Original Software (or + portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) + are effective on the date Initial Developer first + distributes or otherwise makes the Original Software + available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent + license is granted: (1) for code that You delete from + the Original Software, or (2) for infringements + caused by: (i) the modification of the Original + Software, or (ii) the combination of the Original + Software with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Contributor to + use, reproduce, modify, display, perform, sublicense + and distribute the Modifications created by such + Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as + Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using, or selling of Modifications made by that + Contributor either alone and/or in combination with + its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, + have made, and/or otherwise dispose of: (1) + Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications + made by that Contributor with its Contributor Version + (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and + 2.2(b) are effective on the date Contributor first + distributes or otherwise makes the Modifications + available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent + license is granted: (1) for any code that Contributor + has deleted from the Contributor Version; (2) for + infringements caused by: (i) third party + modifications of Contributor Version, or (ii) the + combination of Modifications made by that Contributor + with other software (except as part of the + Contributor Version) or other devices; or (3) under + Patent Claims infringed by Covered Software in the + absence of Modifications made by that Contributor. + + 3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in + Source Code form and that Source Code form must be + distributed only under the terms of this License. You must + include a copy of this License with every copy of the + Source Code form of the Covered Software You distribute or + otherwise make available. You must inform recipients of any + such Covered Software in Executable form as to how they can + obtain such Covered Software in Source Code form in a + reasonable manner on or through a medium customarily used + for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You + contribute are governed by the terms of this License. You + represent that You believe Your Modifications are Your + original creation(s) and/or You have sufficient rights to + grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications + that identifies You as the Contributor of the Modification. + You may not remove or alter any copyright, patent or + trademark notices contained within the Covered Software, or + any notices of licensing or any descriptive text giving + attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered + Software in Source Code form that alters or restricts the + applicable version of this License or the recipients' + rights hereunder. You may choose to offer, and to charge a + fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on + behalf of the Initial Developer or any Contributor. 
You + must make it absolutely clear that any such warranty, + support, indemnity or liability obligation is offered by + You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of + warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered + Software under the terms of this License or under the terms + of a license of Your choice, which may contain terms + different from this License, provided that You are in + compliance with the terms of this License and that the + license for the Executable form does not attempt to limit + or alter the recipient's rights in the Source Code form + from the rights set forth in this License. If You + distribute the Covered Software in Executable form under a + different license, You must make it absolutely clear that + any terms which differ from this License are offered by You + alone, not by the Initial Developer or Contributor. You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms + You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software + with other code not governed by the terms of this License + and distribute the Larger Work as a single product. In such + a case, You must make sure the requirements of this License + are fulfilled for the Covered Software. + + 4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and + may publish revised and/or new versions of this License + from time to time. Each version will be given a + distinguishing version number. Except as provided in + Section 4.3, no one other than the license steward has the + right to modify this License. + + 4.2. 
Effect of New Versions. + + You may always continue to use, distribute or otherwise + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. If the Initial Developer includes a + notice in the Original Software prohibiting it from being + distributed or otherwise made available under any + subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to + use, distribute or otherwise make the Covered Software + available under the terms of any subsequent version of the + License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a + new license for Your Original Software, You may create and + use a modified version of this License if You: (a) rename + the license and remove any references to the name of the + license steward (except to note that the license differs + from this License); and (b) otherwise make it clear that + the license contains terms which differ from this License. + + 5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF + ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. 
+ + 6. TERMINATION. + + 6.1. This License and the rights granted hereunder will + terminate automatically if You fail to comply with terms + herein and fail to cure such breach within 30 days of + becoming aware of the breach. Provisions which, by their + nature, must remain in effect beyond the termination of + this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or + a Contributor (the Initial Developer or Contributor against + whom You assert such claim is referred to as "Participant") + alleging that the Participant Software (meaning the + Contributor Version where the Participant is a Contributor + or the Original Software where the Participant is the + Initial Developer) directly or indirectly infringes any + patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial + Developer (if the Initial Developer is not the Participant) + and all Contributors under Sections 2.1 and/or 2.2 of this + License shall, upon 60 days notice from Participant + terminate prospectively and automatically at the expiration + of such 60 day notice period, unless if within such 60 day + period You withdraw Your claim with respect to the + Participant Software against such Participant either + unilaterally or pursuant to a written agreement with + Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 + above, all end user licenses that have been validly granted + by You or any distributor hereunder prior to termination + (excluding licenses granted to You by any distributor) + shall survive termination. + + 7. LIMITATION OF LIABILITY. 
+ + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + + 8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. + 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 + through 227.7202-4 (June 1995), all U.S. Government End Users + acquire Covered Software with only those rights set forth herein. + This U.S. Government Rights clause is in lieu of, and supersedes, + any other FAR, DFAR, or other clause or provision that addresses + Government rights in computer software under this License. + + 9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the + extent necessary to make it enforceable. 
This License shall be + governed by the law of the jurisdiction specified in a notice + contained within the Original Software (except to the extent + applicable law, if any, provides otherwise), excluding such + jurisdiction's conflict-of-law provisions. Any litigation + relating to this License shall be subject to the jurisdiction of + the courts located in the jurisdiction and venue specified in a + notice contained within the Original Software, with the losing + party responsible for costs, including, without limitation, court + costs and reasonable attorneys' fees and expenses. The + application of the United Nations Convention on Contracts for the + International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall + be construed against the drafter shall not apply to this License. + You agree that You alone are responsible for compliance with the + United States export administration regulations (and the export + control laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + + 10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. + + from Project GlassFish http://glassfish.java.net/ + jta-1.1.jar from http://repo1.maven.org/maven2/javax/transaction/jta/1.1/jta-1.1-sources.jar + + licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) + + Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. 
+ + COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + + 1. Definitions. + + 1.1. "Contributor" means each individual or entity that + creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the + Original Software, prior Modifications used by a + Contributor (if any), and the Modifications made by that + particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or + (b) Modifications, or (c) the combination of files + containing Original Software with files containing + Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form + other than Source Code. + + 1.5. "Initial Developer" means the individual or entity + that first makes Original Software available under this + License. + + 1.6. "Larger Work" means a work which combines Covered + Software or portions thereof with code not governed by the + terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the + maximum extent possible, whether at the time of the initial + grant or subsequently acquired, any and all of the rights + conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable + form of any of the following: + + A. Any file that results from an addition to, + deletion from or modification of the contents of a + file containing Original Software or previous + Modifications; + + B. Any new file that contains any part of the + Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and + Executable form of computer software code that is + originally released under this License. + + 1.11. 
"Patent Claims" means any patent claim(s), now owned + or hereafter acquired, including without limitation, + method, process, and apparatus claims, in any patent + Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer + software code in which modifications are made and (b) + associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal + entity exercising rights under, and complying with all of + the terms of, this License. For legal entities, "You" + includes any entity which controls, is controlled by, or is + under common control with You. For purposes of this + definition, "control" means (a) the power, direct or + indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (b) ownership + of more than fifty percent (50%) of the outstanding shares + or beneficial ownership of such entity. + + 2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the + Initial Developer hereby grants You a world-wide, + royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Initial Developer, + to use, reproduce, modify, display, perform, + sublicense and distribute the Original Software (or + portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using or selling of Original Software, to make, have + made, use, practice, sell, and offer for sale, and/or + otherwise dispose of the Original Software (or + portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) + are effective on the date Initial Developer first + distributes or otherwise makes the Original Software + available to a third party under the terms of this + License. 
+ + (d) Notwithstanding Section 2.1(b) above, no patent + license is granted: (1) for code that You delete from + the Original Software, or (2) for infringements + caused by: (i) the modification of the Original + Software, or (ii) the combination of the Original + Software with other software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Contributor to + use, reproduce, modify, display, perform, sublicense + and distribute the Modifications created by such + Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as + Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using, or selling of Modifications made by that + Contributor either alone and/or in combination with + its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, + have made, and/or otherwise dispose of: (1) + Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications + made by that Contributor with its Contributor Version + (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and + 2.2(b) are effective on the date Contributor first + distributes or otherwise makes the Modifications + available to a third party. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent + license is granted: (1) for any code that Contributor + has deleted from the Contributor Version; (2) for + infringements caused by: (i) third party + modifications of Contributor Version, or (ii) the + combination of Modifications made by that Contributor + with other software (except as part of the + Contributor Version) or other devices; or (3) under + Patent Claims infringed by Covered Software in the + absence of Modifications made by that Contributor. + + 3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in + Source Code form and that Source Code form must be + distributed only under the terms of this License. You must + include a copy of this License with every copy of the + Source Code form of the Covered Software You distribute or + otherwise make available. You must inform recipients of any + such Covered Software in Executable form as to how they can + obtain such Covered Software in Source Code form in a + reasonable manner on or through a medium customarily used + for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You + contribute are governed by the terms of this License. You + represent that You believe Your Modifications are Your + original creation(s) and/or You have sufficient rights to + grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications + that identifies You as the Contributor of the Modification. + You may not remove or alter any copyright, patent or + trademark notices contained within the Covered Software, or + any notices of licensing or any descriptive text giving + attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. 
+ + You may not offer or impose any terms on any Covered + Software in Source Code form that alters or restricts the + applicable version of this License or the recipients' + rights hereunder. You may choose to offer, and to charge a + fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on + behalf of the Initial Developer or any Contributor. You + must make it absolutely clear that any such warranty, + support, indemnity or liability obligation is offered by + You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of + warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered + Software under the terms of this License or under the terms + of a license of Your choice, which may contain terms + different from this License, provided that You are in + compliance with the terms of this License and that the + license for the Executable form does not attempt to limit + or alter the recipient's rights in the Source Code form + from the rights set forth in this License. If You + distribute the Covered Software in Executable form under a + different license, You must make it absolutely clear that + any terms which differ from this License are offered by You + alone, not by the Initial Developer or Contributor. You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms + You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software + with other code not governed by the terms of this License + and distribute the Larger Work as a single product. 
In such + a case, You must make sure the requirements of this License + are fulfilled for the Covered Software. + + 4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and + may publish revised and/or new versions of this License + from time to time. Each version will be given a + distinguishing version number. Except as provided in + Section 4.3, no one other than the license steward has the + right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. If the Initial Developer includes a + notice in the Original Software prohibiting it from being + distributed or otherwise made available under any + subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to + use, distribute or otherwise make the Covered Software + available under the terms of any subsequent version of the + License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a + new license for Your Original Software, You may create and + use a modified version of this License if You: (a) rename + the license and remove any references to the name of the + license steward (except to note that the license differs + from this License); and (b) otherwise make it clear that + the license contains terms which differ from this License. + + 5. DISCLAIMER OF WARRANTY. 
+ + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF + ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. + + 6. TERMINATION. + + 6.1. This License and the rights granted hereunder will + terminate automatically if You fail to comply with terms + herein and fail to cure such breach within 30 days of + becoming aware of the breach. Provisions which, by their + nature, must remain in effect beyond the termination of + this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or + a Contributor (the Initial Developer or Contributor against + whom You assert such claim is referred to as "Participant") + alleging that the Participant Software (meaning the + Contributor Version where the Participant is a Contributor + or the Original Software where the Participant is the + Initial Developer) directly or indirectly infringes any + patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial + Developer (if the Initial Developer is not the Participant) + and all Contributors under Sections 2.1 and/or 2.2 of this + License shall, upon 60 days notice from Participant + terminate prospectively and automatically at the expiration + of such 60 day notice period, unless if within such 60 day + period You withdraw Your claim with respect to the + Participant Software against such Participant either + unilaterally or pursuant to a written agreement with + Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 + above, all end user licenses that have been validly granted + by You or any distributor hereunder prior to termination + (excluding licenses granted to You by any distributor) + shall survive termination. + + 7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + + 8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. + 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 + through 227.7202-4 (June 1995), all U.S. Government End Users + acquire Covered Software with only those rights set forth herein. + This U.S. Government Rights clause is in lieu of, and supersedes, + any other FAR, DFAR, or other clause or provision that addresses + Government rights in computer software under this License. + + 9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the + extent necessary to make it enforceable. This License shall be + governed by the law of the jurisdiction specified in a notice + contained within the Original Software (except to the extent + applicable law, if any, provides otherwise), excluding such + jurisdiction's conflict-of-law provisions. Any litigation + relating to this License shall be subject to the jurisdiction of + the courts located in the jurisdiction and venue specified in a + notice contained within the Original Software, with the losing + party responsible for costs, including, without limitation, court + costs and reasonable attorneys' fees and expenses. 
The
+       application of the United Nations Convention on Contracts for the
+       International Sale of Goods is expressly excluded. Any law or
+       regulation which provides that the language of a contract shall
+       be construed against the drafter shall not apply to this License.
+       You agree that You alone are responsible for compliance with the
+       United States export administration regulations (and the export
+       control laws and regulation of any other countries) when You use,
+       distribute or otherwise make available any Covered Software.
+
+    10. RESPONSIBILITY FOR CLAIMS.
+
+       As between Initial Developer and the Contributors, each party is
+       responsible for claims and damages arising, directly or
+       indirectly, out of its utilization of rights under this License
+       and You agree to work with Initial Developer and Contributors to
+       distribute such responsibility on an equitable basis. Nothing
+       herein is intended or shall be deemed to constitute any admission
+       of liability.
+
+  from Oracle and/or its affiliates http://oracle.com
+    mail-1.4.jar from http://kenai.com/projects/javamail
+
+  licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows)
+
+
+    Common Public License Version 1.0 (CPL)
+
+    THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC
+    LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
+    CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+    1. DEFINITIONS
+
+    "Contribution" means:
+
+    a) in the case of the initial Contributor, the initial code and documentation
+    distributed under this Agreement, and
+
+    b) in the case of each subsequent Contributor:
+
+    i) changes to the Program, and
+
+    ii) additions to the Program;
+
+    where such changes and/or additions to the Program originate from and are
+    distributed by that particular Contributor.
A Contribution 'originates' from a
+    Contributor if it was added to the Program by such Contributor itself or anyone
+    acting on such Contributor's behalf. Contributions do not include additions to
+    the Program which: (i) are separate modules of software distributed in
+    conjunction with the Program under their own license agreement, and (ii) are not
+    derivative works of the Program.
+
+    "Contributor" means any person or entity that distributes the Program.
+
+    "Licensed Patents" mean patent claims licensable by a Contributor which are
+    necessarily infringed by the use or sale of its Contribution alone or when
+    combined with the Program.
+
+    "Program" means the Contributions distributed in accordance with this Agreement.
+
+    "Recipient" means anyone who receives the Program under this Agreement, including
+    all Contributors.
+
+    2. GRANT OF RIGHTS
+
+    a) Subject to the terms of this Agreement, each Contributor hereby grants
+    Recipient a non-exclusive, worldwide, royalty-free copyright license to
+    reproduce, prepare derivative works of, publicly display, publicly perform,
+    distribute and sublicense the Contribution of such Contributor, if any, and such
+    derivative works, in source code and object code form.
+
+    b) Subject to the terms of this Agreement, each Contributor hereby grants
+    Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed
+    Patents to make, use, sell, offer to sell, import and otherwise transfer the
+    Contribution of such Contributor, if any, in source code and object code form.
+    This patent license shall apply to the combination of the Contribution and the
+    Program if, at the time the Contribution is added by the Contributor, such
+    addition of the Contribution causes such combination to be covered by the
+    Licensed Patents. The patent license shall not apply to any other combinations
+    which include the Contribution. No hardware per se is licensed hereunder.
+ + c) Recipient understands that although each Contributor grants the licenses to + its Contributions set forth herein, no assurances are provided by any + Contributor that the Program does not infringe the patent or other intellectual + property rights of any other entity. Each Contributor disclaims any liability to + Recipient for claims brought by any other entity based on infringement of + intellectual property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby assumes sole + responsibility to secure any other intellectual property rights needed, if any. + For example, if a third party patent license is required to allow Recipient to + distribute the Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has sufficient copyright + rights in its Contribution, if any, to grant the copyright license set forth in + this Agreement. + + 3. 
REQUIREMENTS + + A Contributor may choose to distribute the Program in object code form under its + own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties and + conditions, express and implied, including warranties or conditions of title and + non-infringement, and implied warranties or conditions of merchantability and + fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability for + damages, including direct, indirect, special, incidental and consequential + damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are offered by + that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such Contributor, + and informs licensees how to obtain it in a reasonable manner on or through a + medium customarily used for software exchange. + + When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + + b) a copy of this Agreement must be included with each copy of the Program. + + Contributors may not remove or alter any copyright notices contained within the + Program. + + Each Contributor must identify itself as the originator of its Contribution, if + any, in a manner that reasonably allows subsequent Recipients to identify the + originator of the Contribution. + + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities with + respect to end users, business partners and the like. While this license is + intended to facilitate the commercial use of the Program, the Contributor who + includes the Program in a commercial product offering should do so in a manner + which does not create potential liability for other Contributors. 
Therefore, if + a Contributor includes the Program in a commercial product offering, such + Contributor ("Commercial Contributor") hereby agrees to defend and indemnify + every other Contributor ("Indemnified Contributor") against any losses, damages + and costs (collectively "Losses") arising from claims, lawsuits and other legal + actions brought by a third party against the Indemnified Contributor to the + extent caused by the acts or omissions of such Commercial Contributor in + connection with its distribution of the Program in a commercial product + offering. The obligations in this section do not apply to any claims or Losses + relating to any actual or alleged intellectual property infringement. In order + to qualify, an Indemnified Contributor must: a) promptly notify the Commercial + Contributor in writing of such claim, and b) allow the Commercial Contributor to + control, and cooperate with the Commercial Contributor in, the defense and any + related settlement negotiations. The Indemnified Contributor may participate in + any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial product + offering, Product X. That Contributor is then a Commercial Contributor. If that + Commercial Contributor then makes performance claims, or offers warranties + related to Product X, those performance claims and warranties are such + Commercial Contributor's responsibility alone. Under this section, the + Commercial Contributor would have to defend claims against the other + Contributors related to those performance claims and warranties, and if a court + requires any other Contributor to pay any damages as a result, the Commercial + Contributor must pay those damages. + + 5. 
NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR + IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, + NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each + Recipient is solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its exercise of + rights under this Agreement, including but not limited to the risks and costs of + program errors, compliance with applicable laws, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY + CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST + PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS + GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 7. GENERAL + + If any provision of this Agreement is invalid or unenforceable under applicable + law, it shall not affect the validity or enforceability of the remainder of the + terms of this Agreement, and without further action by the parties hereto, such + provision shall be reformed to the minimum extent necessary to make such + provision valid and enforceable. 
+ + If Recipient institutes patent litigation against a Contributor with respect to + a patent applicable to software (including a cross-claim or counterclaim in a + lawsuit), then any patent licenses granted by that Contributor to such Recipient + under this Agreement shall terminate as of the date such litigation is filed. In + addition, if Recipient institutes patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that the Program + itself (excluding combinations of the Program with other software or hardware) + infringes such Recipient's patent(s), then such Recipient's rights granted under + Section 2(b) shall terminate as of the date such litigation is filed. + + All Recipient's rights under this Agreement shall terminate if it fails to + comply with any of the material terms or conditions of this Agreement and does + not cure such failure in a reasonable period of time after becoming aware of + such noncompliance. If all Recipient's rights under this Agreement terminate, + Recipient agrees to cease use and distribution of the Program as soon as + reasonably practicable. However, Recipient's obligations under this Agreement + and any licenses granted by Recipient relating to the Program shall continue and + survive. + + Everyone is permitted to copy and distribute copies of this Agreement, but in + order to avoid inconsistency the Agreement is copyrighted and may only be + modified in the following manner. The Agreement Steward reserves the right to + publish new versions (including revisions) of this Agreement from time to time. + No one other than the Agreement Steward has the right to modify this Agreement. + IBM is the initial Agreement Steward. IBM may assign the responsibility to serve + as the Agreement Steward to a suitable separate entity. Each new version of the + Agreement will be given a distinguishing version number. 
The Program (including + Contributions) may always be distributed subject to the version of the Agreement + under which it was received. In addition, after a new version of the Agreement + is published, Contributor may elect to distribute the Program (including its + Contributions) under the new version. Except as expressly stated in Sections + 2(a) and 2(b) above, Recipient receives no rights or licenses to the + intellectual property of any Contributor under this Agreement, whether + expressly, by implication, estoppel or otherwise. All rights in the Program not + expressly granted under this Agreement are reserved. + + This Agreement is governed by the laws of the State of New York and the + intellectual property laws of the United States of America. No party to this + Agreement will bring a legal action under this Agreement more than one year + after the cause of action arose. Each party waives its rights to a jury trial in + any resulting litigation. + + from JUnit Project http://www.junit.org/ + junit-4.8.1.jar from http://kentbeck.github.com/junit/ + + licensed under the Dom4J License http://dom4j.cvs.sourceforge.net/viewvc/dom4j/dom4j/LICENSE.txt (as follows) + + + Copyright 2001-2005 (C) MetaStuff, Ltd. All Rights Reserved. + + Redistribution and use of this software and associated documentation + ("Software"), with or without modification, are permitted provided + that the following conditions are met: + + 1. Redistributions of source code must retain copyright + statements and notices. Redistributions must also contain a + copy of this document. + + 2. Redistributions in binary form must reproduce the + above copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + + 3. The name "DOM4J" must not be used to endorse or promote + products derived from this Software without prior written + permission of MetaStuff, Ltd. 
For written permission, + please contact dom4j-info@metastuff.com. + + 4. Products derived from this Software may not be called "DOM4J" + nor may "DOM4J" appear in their names without prior written + permission of MetaStuff, Ltd. DOM4J is a registered + trademark of MetaStuff, Ltd. + + 5. Due credit should be given to the DOM4J Project - + http://www.dom4j.org + + THIS SOFTWARE IS PROVIDED BY METASTUFF, LTD. AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT + NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + METASTUFF, LTD. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. + + from DOM4J Project http://dom4j.sourceforge.net/ + dom4j-1.6.1.jar from http://dom4j.sourceforge.net/source-repository.html + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2004-2011 QOS.ch + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from QOS.ch http://www.qos.ch/ + slf4j-api-1.5.11.jar from https://github.com/qos-ch/slf4j + slf4j-jdk14-1.5.11.jar from https://github.com/qos-ch/slf4j + + licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) + + + MOZILLA PUBLIC LICENSE + Version 1.1 + + --------------- + + 1. Definitions. + + 1.0.1. "Commercial Use" means distribution or otherwise making the + Covered Code available to a third party. + + 1.1. "Contributor" means each entity that creates or contributes to + the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Code, prior Modifications used by a Contributor, and the Modifications + made by that particular Contributor. + + 1.3. "Covered Code" means the Original Code or Modifications or the + combination of the Original Code and Modifications, in each case + including portions thereof. + + 1.4. "Electronic Distribution Mechanism" means a mechanism generally + accepted in the software development community for the electronic + transfer of data. + + 1.5. "Executable" means Covered Code in any form other than Source + Code. + + 1.6. "Initial Developer" means the individual or entity identified + as the Initial Developer in the Source Code notice required by Exhibit + A. + + 1.7. "Larger Work" means a work which combines Covered Code or + portions thereof with code not governed by the terms of this License. + + 1.8. "License" means this document. + + 1.8.1. 
"Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means any addition to or deletion from the + substance or structure of either the Original Code or any previous + Modifications. When Covered Code is released as a series of files, a + Modification is: + A. Any addition to or deletion from the contents of a file + containing Original Code or previous Modifications. + + B. Any new file that contains any part of the Original Code or + previous Modifications. + + 1.10. "Original Code" means Source Code of computer software code + which is described in the Source Code notice required by Exhibit A as + Original Code, and which, at the time of its release under this + License is not already Covered Code governed by this License. + + 1.10.1. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.11. "Source Code" means the preferred form of the Covered Code for + making modifications to it, including all modules it contains, plus + any associated interface definition files, scripts used to control + compilation and installation of an Executable, or source code + differential comparisons against either the Original Code or another + well known, available Covered Code of the Contributor's choice. The + Source Code can be in a compressed or archival form, provided the + appropriate decompression or de-archiving software is widely available + for no charge. + + 1.12. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, this + License or a future version of this License issued under Section 6.1. 
+ For legal entities, "You" includes any entity which controls, is + controlled by, or is under common control with You. For purposes of + this definition, "control" means (a) the power, direct or indirect, + to cause the direction or management of such entity, whether by + contract or otherwise, or (b) ownership of more than fifty percent + (50%) of the outstanding shares or beneficial ownership of such + entity. + + 2. Source Code License. + + 2.1. The Initial Developer Grant. + The Initial Developer hereby grants You a world-wide, royalty-free, + non-exclusive license, subject to third party intellectual property + claims: + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Code (or portions thereof) with or without Modifications, and/or + as part of a Larger Work; and + + (b) under Patents Claims infringed by the making, using or + selling of Original Code, to make, have made, use, practice, + sell, and offer for sale, and/or otherwise dispose of the + Original Code (or portions thereof). + + (c) the licenses granted in this Section 2.1(a) and (b) are + effective on the date Initial Developer first distributes + Original Code under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: 1) for code that You delete from the Original Code; 2) + separate from the Original Code; or 3) for infringements caused + by: i) the modification of the Original Code or ii) the + combination of the Original Code with other software or devices. + + 2.2. Contributor Grant. 
+ Subject to third party intellectual property claims, each Contributor + hereby grants You a world-wide, royalty-free, non-exclusive license + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor, to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof) either on an + unmodified basis, with other Modifications, as Covered Code + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or + selling of Modifications made by that Contributor either alone + and/or in combination with its Contributor Version (or portions + of such combination), to make, use, sell, offer for sale, have + made, and/or otherwise dispose of: 1) Modifications made by that + Contributor (or portions thereof); and 2) the combination of + Modifications made by that Contributor with its Contributor + Version (or portions of such combination). + + (c) the licenses granted in Sections 2.2(a) and 2.2(b) are + effective on the date Contributor first makes Commercial Use of + the Covered Code. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: 1) for any code that Contributor has deleted from the + Contributor Version; 2) separate from the Contributor Version; + 3) for infringements caused by: i) third party modifications of + Contributor Version or ii) the combination of Modifications made + by that Contributor with other software (except as part of the + Contributor Version) or other devices; or 4) under Patent Claims + infringed by Covered Code in the absence of Modifications made by + that Contributor. + + 3. Distribution Obligations. + + 3.1. Application of License. + The Modifications which You create or to which You contribute are + governed by the terms of this License, including without limitation + Section 2.2. 
The Source Code version of Covered Code may be + distributed only under the terms of this License or a future version + of this License released under Section 6.1, and You must include a + copy of this License with every copy of the Source Code You + distribute. You may not offer or impose any terms on any Source Code + version that alters or restricts the applicable version of this + License or the recipients' rights hereunder. However, You may include + an additional document offering the additional rights described in + Section 3.5. + + 3.2. Availability of Source Code. + Any Modification which You create or to which You contribute must be + made available in Source Code form under the terms of this License + either on the same media as an Executable version or via an accepted + Electronic Distribution Mechanism to anyone to whom you made an + Executable version available; and if made available via Electronic + Distribution Mechanism, must remain available for at least twelve (12) + months after the date it initially became available, or at least six + (6) months after a subsequent version of that particular Modification + has been made available to such recipients. You are responsible for + ensuring that the Source Code version remains available even if the + Electronic Distribution Mechanism is maintained by a third party. + + 3.3. Description of Modifications. + You must cause all Covered Code to which You contribute to contain a + file documenting the changes You made to create that Covered Code and + the date of any change. You must include a prominent statement that + the Modification is derived, directly or indirectly, from Original + Code provided by the Initial Developer and including the name of the + Initial Developer in (a) the Source Code, and (b) in any notice in an + Executable version or related documentation in which You describe the + origin or ownership of the Covered Code. + + 3.4. Intellectual Property Matters + (a) Third Party Claims. 
+ If Contributor has knowledge that a license under a third party's + intellectual property rights is required to exercise the rights + granted by such Contributor under Sections 2.1 or 2.2, + Contributor must include a text file with the Source Code + distribution titled "LEGAL" which describes the claim and the + party making the claim in sufficient detail that a recipient will + know whom to contact. If Contributor obtains such knowledge after + the Modification is made available as described in Section 3.2, + Contributor shall promptly modify the LEGAL file in all copies + Contributor makes available thereafter and shall take other steps + (such as notifying appropriate mailing lists or newsgroups) + reasonably calculated to inform those who received the Covered + Code that new knowledge has been obtained. + + (b) Contributor APIs. + If Contributor's Modifications include an application programming + interface and Contributor has knowledge of patent licenses which + are reasonably necessary to implement that API, Contributor must + also include this information in the LEGAL file. + + (c) Representations. + Contributor represents that, except as disclosed pursuant to + Section 3.4(a) above, Contributor believes that Contributor's + Modifications are Contributor's original creation(s) and/or + Contributor has sufficient rights to grant the rights conveyed by + this License. + + 3.5. Required Notices. + You must duplicate the notice in Exhibit A in each file of the Source + Code. If it is not possible to put such notice in a particular Source + Code file due to its structure, then You must include such notice in a + location (such as a relevant directory) where a user would be likely + to look for such a notice. If You created one or more Modification(s) + You may add your name as a Contributor to the notice described in + Exhibit A. 
You must also duplicate this License in any documentation + for the Source Code where You describe recipients' rights or ownership + rights relating to Covered Code. You may choose to offer, and to + charge a fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Code. However, You + may do so only on Your own behalf, and not on behalf of the Initial + Developer or any Contributor. You must make it absolutely clear than + any such warranty, support, indemnity or liability obligation is + offered by You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred by the + Initial Developer or such Contributor as a result of warranty, + support, indemnity or liability terms You offer. + + 3.6. Distribution of Executable Versions. + You may distribute Covered Code in Executable form only if the + requirements of Section 3.1-3.5 have been met for that Covered Code, + and if You include a notice stating that the Source Code version of + the Covered Code is available under the terms of this License, + including a description of how and where You have fulfilled the + obligations of Section 3.2. The notice must be conspicuously included + in any notice in an Executable version, related documentation or + collateral in which You describe recipients' rights relating to the + Covered Code. You may distribute the Executable version of Covered + Code or ownership rights under a license of Your choice, which may + contain terms different from this License, provided that You are in + compliance with the terms of this License and that the license for the + Executable version does not attempt to limit or alter the recipient's + rights in the Source Code version from the rights set forth in this + License. 
If You distribute the Executable version under a different + license You must make it absolutely clear that any terms which differ + from this License are offered by You alone, not by the Initial + Developer or any Contributor. You hereby agree to indemnify the + Initial Developer and every Contributor for any liability incurred by + the Initial Developer or such Contributor as a result of any such + terms You offer. + + 3.7. Larger Works. + You may create a Larger Work by combining Covered Code with other code + not governed by the terms of this License and distribute the Larger + Work as a single product. In such a case, You must make sure the + requirements of this License are fulfilled for the Covered Code. + + 4. Inability to Comply Due to Statute or Regulation. + + If it is impossible for You to comply with any of the terms of this + License with respect to some or all of the Covered Code due to + statute, judicial order, or regulation then You must: (a) comply with + the terms of this License to the maximum extent possible; and (b) + describe the limitations and the code they affect. Such description + must be included in the LEGAL file described in Section 3.4 and must + be included with all distributions of the Source Code. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + + 5. Application of this License. + + This License applies to code to which the Initial Developer has + attached the notice in Exhibit A and to related Covered Code. + + 6. Versions of the License. + + 6.1. New Versions. + Netscape Communications Corporation ("Netscape") may publish revised + and/or new versions of the License from time to time. Each version + will be given a distinguishing version number. + + 6.2. Effect of New Versions. 
+ Once Covered Code has been published under a particular version of the + License, You may always continue to use it under the terms of that + version. You may also choose to use such Covered Code under the terms + of any subsequent version of the License published by Netscape. No one + other than Netscape has the right to modify the terms applicable to + Covered Code created under this License. + + 6.3. Derivative Works. + If You create or use a modified version of this License (which you may + only do in order to apply it to code which is not already Covered Code + governed by this License), You must (a) rename Your license so that + the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", + "MPL", "NPL" or any confusingly similar phrase do not appear in your + license (except to note that your license differs from this License) + and (b) otherwise make it clear that Your version of the license + contains terms which differ from the Mozilla Public License and + Netscape Public License. (Filling in the name of the Initial + Developer, Original Code or Contributor in the notice described in + Exhibit A shall not of themselves be deemed to be modifications of + this License.) + + 7. DISCLAIMER OF WARRANTY. + + COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF + DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. + THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE + IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, + YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE + COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER + OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + + 8. TERMINATION. + + 8.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to cure + such breach within 30 days of becoming aware of the breach. All + sublicenses to the Covered Code which are properly granted shall + survive any termination of this License. Provisions which, by their + nature, must remain in effect beyond the termination of this License + shall survive. + + 8.2. If You initiate litigation by asserting a patent infringement + claim (excluding declatory judgment actions) against Initial Developer + or a Contributor (the Initial Developer or Contributor against whom + You file such action is referred to as "Participant") alleging that: + + (a) such Participant's Contributor Version directly or indirectly + infringes any patent, then any and all rights granted by such + Participant to You under Sections 2.1 and/or 2.2 of this License + shall, upon 60 days notice from Participant terminate prospectively, + unless if within 60 days after receipt of notice You either: (i) + agree in writing to pay Participant a mutually agreeable reasonable + royalty for Your past and future use of Modifications made by such + Participant, or (ii) withdraw Your litigation claim with respect to + the Contributor Version against such Participant. If within 60 days + of notice, a reasonable royalty and payment arrangement are not + mutually agreed upon in writing by the parties or the litigation claim + is not withdrawn, the rights granted by Participant to You under + Sections 2.1 and/or 2.2 automatically terminate at the expiration of + the 60 day notice period specified above. 
+ + (b) any software, hardware, or device, other than such Participant's + Contributor Version, directly or indirectly infringes any patent, then + any rights granted to You by such Participant under Sections 2.1(b) + and 2.2(b) are revoked effective as of the date You first made, used, + sold, distributed, or had made, Modifications made by that + Participant. + + 8.3. If You assert a patent infringement claim against Participant + alleging that such Participant's Contributor Version directly or + indirectly infringes any patent where such claim is resolved (such as + by license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 8.4. In the event of termination under Sections 8.1 or 8.2 above, + all end user license agreements (excluding distributors and resellers) + which have been validly granted by You or any distributor hereunder + prior to termination shall survive termination. + + 9. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL + DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, + OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR + ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY + CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, + WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY + RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW + PROHIBITS SUCH LIMITATION. 
SOME JURISDICTIONS DO NOT ALLOW THE + EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO + THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + + 10. U.S. GOVERNMENT END USERS. + + The Covered Code is a "commercial item," as that term is defined in + 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" and "commercial computer software documentation," as such + terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 + C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), + all U.S. Government End Users acquire Covered Code with only those + rights set forth herein. + + 11. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + California law provisions (except to the extent applicable law, if + any, provides otherwise), excluding its conflict-of-law provisions. + With respect to disputes in which at least one party is a citizen of, + or an entity chartered or registered to do business in the United + States of America, any litigation relating to this License shall be + subject to the jurisdiction of the Federal Courts of the Northern + District of California, with venue lying in Santa Clara County, + California, with the losing party responsible for costs, including + without limitation, court costs and reasonable attorneys' fees and + expenses. The application of the United Nations Convention on + Contracts for the International Sale of Goods is expressly excluded. + Any law or regulation which provides that the language of a contract + shall be construed against the drafter shall not apply to this + License. + + 12. RESPONSIBILITY FOR CLAIMS. 
+ + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. + + 13. MULTIPLE-LICENSED CODE. + + Initial Developer may designate portions of the Covered Code as + "Multiple-Licensed". "Multiple-Licensed" means that the Initial + Developer permits you to utilize portions of the Covered Code under + Your choice of the NPL or the alternative licenses, if any, specified + by the Initial Developer in the file described in Exhibit A. + + EXHIBIT A -Mozilla Public License. + + ``The contents of this file are subject to the Mozilla Public License + Version 1.1 (the "License"); you may not use this file except in + compliance with the License. You may obtain a copy of the License at + http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the + License for the specific language governing rights and limitations + under the License. + + The Original Code is Javassist. + + The Initial Developer of the Original Code is Shigeru Chiba. + Portions created by Shigeru Chiba are Copyright (C) + 1999-2008 Shigeru Chiba. All Rights Reserved. + + Contributor(s): . + + Alternatively, the contents of this file may be used under the terms + of the GNU Lesser General Public License Version 2.1 or later license (the "[LGPL] License"), in which case the + provisions of [LGPL] License are applicable instead of those + above. 
If you wish to allow use of your version of this file only + under the terms of the [LGPL] License and not to allow others to use + your version of this file under the MPL, indicate your decision by + deleting the provisions above and replace them with the notice and + other provisions required by the [LGPL] License. If you do not delete + the provisions above, a recipient may use your version of this file + under either the MPL or the [LGPL] License." + + [NOTE: The text of this Exhibit A may differ slightly from the text of + the notices in the Source Code files of the Original Code. You should + use the text of this Exhibit A rather than the text found in the + Original Code Source Code for Your Modifications.] + + + from Shigeru Chiba http://www.csg.ci.i.u-tokyo.ac.jp/~chiba/javassist/ + javassist-3.9.0.GA.jar from http://sourceforge.net/projects/jboss/files/Javassist/ + + licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) + + Copyright (c) 2007-2012 VMware, Inc. All Rights Reserved. + + MOZILLA PUBLIC LICENSE + Version 1.1 + + --------------- + + 1. Definitions. + + 1.0.1. "Commercial Use" means distribution or otherwise making the + Covered Code available to a third party. + + 1.1. "Contributor" means each entity that creates or contributes to + the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Code, prior Modifications used by a Contributor, and the Modifications + made by that particular Contributor. + + 1.3. "Covered Code" means the Original Code or Modifications or the + combination of the Original Code and Modifications, in each case + including portions thereof. + + 1.4. "Electronic Distribution Mechanism" means a mechanism generally + accepted in the software development community for the electronic + transfer of data. + + 1.5. "Executable" means Covered Code in any form other than Source + Code. + + 1.6. 
"Initial Developer" means the individual or entity identified + as the Initial Developer in the Source Code notice required by Exhibit + A. + + 1.7. "Larger Work" means a work which combines Covered Code or + portions thereof with code not governed by the terms of this License. + + 1.8. "License" means this document. + + 1.8.1. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means any addition to or deletion from the + substance or structure of either the Original Code or any previous + Modifications. When Covered Code is released as a series of files, a + Modification is: + A. Any addition to or deletion from the contents of a file + containing Original Code or previous Modifications. + + B. Any new file that contains any part of the Original Code or + previous Modifications. + + 1.10. "Original Code" means Source Code of computer software code + which is described in the Source Code notice required by Exhibit A as + Original Code, and which, at the time of its release under this + License is not already Covered Code governed by this License. + + 1.10.1. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.11. "Source Code" means the preferred form of the Covered Code for + making modifications to it, including all modules it contains, plus + any associated interface definition files, scripts used to control + compilation and installation of an Executable, or source code + differential comparisons against either the Original Code or another + well known, available Covered Code of the Contributor's choice. 
The + Source Code can be in a compressed or archival form, provided the + appropriate decompression or de-archiving software is widely available + for no charge. + + 1.12. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, this + License or a future version of this License issued under Section 6.1. + For legal entities, "You" includes any entity which controls, is + controlled by, or is under common control with You. For purposes of + this definition, "control" means (a) the power, direct or indirect, + to cause the direction or management of such entity, whether by + contract or otherwise, or (b) ownership of more than fifty percent + (50%) of the outstanding shares or beneficial ownership of such + entity. + + 2. Source Code License. + + 2.1. The Initial Developer Grant. + The Initial Developer hereby grants You a world-wide, royalty-free, + non-exclusive license, subject to third party intellectual property + claims: + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Code (or portions thereof) with or without Modifications, and/or + as part of a Larger Work; and + + (b) under Patents Claims infringed by the making, using or + selling of Original Code, to make, have made, use, practice, + sell, and offer for sale, and/or otherwise dispose of the + Original Code (or portions thereof). + + (c) the licenses granted in this Section 2.1(a) and (b) are + effective on the date Initial Developer first distributes + Original Code under the terms of this License. 
+ + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: 1) for code that You delete from the Original Code; 2) + separate from the Original Code; or 3) for infringements caused + by: i) the modification of the Original Code or ii) the + combination of the Original Code with other software or devices. + + 2.2. Contributor Grant. + Subject to third party intellectual property claims, each Contributor + hereby grants You a world-wide, royalty-free, non-exclusive license + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor, to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof) either on an + unmodified basis, with other Modifications, as Covered Code + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or + selling of Modifications made by that Contributor either alone + and/or in combination with its Contributor Version (or portions + of such combination), to make, use, sell, offer for sale, have + made, and/or otherwise dispose of: 1) Modifications made by that + Contributor (or portions thereof); and 2) the combination of + Modifications made by that Contributor with its Contributor + Version (or portions of such combination). + + (c) the licenses granted in Sections 2.2(a) and 2.2(b) are + effective on the date Contributor first makes Commercial Use of + the Covered Code. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: 1) for any code that Contributor has deleted from the + Contributor Version; 2) separate from the Contributor Version; + 3) for infringements caused by: i) third party modifications of + Contributor Version or ii) the combination of Modifications made + by that Contributor with other software (except as part of the + Contributor Version) or other devices; or 4) under Patent Claims + infringed by Covered Code in the absence of Modifications made by + that Contributor. + + 3. Distribution Obligations. + + 3.1. Application of License. + The Modifications which You create or to which You contribute are + governed by the terms of this License, including without limitation + Section 2.2. The Source Code version of Covered Code may be + distributed only under the terms of this License or a future version + of this License released under Section 6.1, and You must include a + copy of this License with every copy of the Source Code You + distribute. You may not offer or impose any terms on any Source Code + version that alters or restricts the applicable version of this + License or the recipients' rights hereunder. However, You may include + an additional document offering the additional rights described in + Section 3.5. + + 3.2. Availability of Source Code. + Any Modification which You create or to which You contribute must be + made available in Source Code form under the terms of this License + either on the same media as an Executable version or via an accepted + Electronic Distribution Mechanism to anyone to whom you made an + Executable version available; and if made available via Electronic + Distribution Mechanism, must remain available for at least twelve (12) + months after the date it initially became available, or at least six + (6) months after a subsequent version of that particular Modification + has been made available to such recipients. 
You are responsible for + ensuring that the Source Code version remains available even if the + Electronic Distribution Mechanism is maintained by a third party. + + 3.3. Description of Modifications. + You must cause all Covered Code to which You contribute to contain a + file documenting the changes You made to create that Covered Code and + the date of any change. You must include a prominent statement that + the Modification is derived, directly or indirectly, from Original + Code provided by the Initial Developer and including the name of the + Initial Developer in (a) the Source Code, and (b) in any notice in an + Executable version or related documentation in which You describe the + origin or ownership of the Covered Code. + + 3.4. Intellectual Property Matters + (a) Third Party Claims. + If Contributor has knowledge that a license under a third party's + intellectual property rights is required to exercise the rights + granted by such Contributor under Sections 2.1 or 2.2, + Contributor must include a text file with the Source Code + distribution titled "LEGAL" which describes the claim and the + party making the claim in sufficient detail that a recipient will + know whom to contact. If Contributor obtains such knowledge after + the Modification is made available as described in Section 3.2, + Contributor shall promptly modify the LEGAL file in all copies + Contributor makes available thereafter and shall take other steps + (such as notifying appropriate mailing lists or newsgroups) + reasonably calculated to inform those who received the Covered + Code that new knowledge has been obtained. + + (b) Contributor APIs. + If Contributor's Modifications include an application programming + interface and Contributor has knowledge of patent licenses which + are reasonably necessary to implement that API, Contributor must + also include this information in the LEGAL file. + + (c) Representations. 
+ Contributor represents that, except as disclosed pursuant to + Section 3.4(a) above, Contributor believes that Contributor's + Modifications are Contributor's original creation(s) and/or + Contributor has sufficient rights to grant the rights conveyed by + this License. + + 3.5. Required Notices. + You must duplicate the notice in Exhibit A in each file of the Source + Code. If it is not possible to put such notice in a particular Source + Code file due to its structure, then You must include such notice in a + location (such as a relevant directory) where a user would be likely + to look for such a notice. If You created one or more Modification(s) + You may add your name as a Contributor to the notice described in + Exhibit A. You must also duplicate this License in any documentation + for the Source Code where You describe recipients' rights or ownership + rights relating to Covered Code. You may choose to offer, and to + charge a fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Code. However, You + may do so only on Your own behalf, and not on behalf of the Initial + Developer or any Contributor. You must make it absolutely clear than + any such warranty, support, indemnity or liability obligation is + offered by You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred by the + Initial Developer or such Contributor as a result of warranty, + support, indemnity or liability terms You offer. + + 3.6. Distribution of Executable Versions. + You may distribute Covered Code in Executable form only if the + requirements of Section 3.1-3.5 have been met for that Covered Code, + and if You include a notice stating that the Source Code version of + the Covered Code is available under the terms of this License, + including a description of how and where You have fulfilled the + obligations of Section 3.2. 
The notice must be conspicuously included + in any notice in an Executable version, related documentation or + collateral in which You describe recipients' rights relating to the + Covered Code. You may distribute the Executable version of Covered + Code or ownership rights under a license of Your choice, which may + contain terms different from this License, provided that You are in + compliance with the terms of this License and that the license for the + Executable version does not attempt to limit or alter the recipient's + rights in the Source Code version from the rights set forth in this + License. If You distribute the Executable version under a different + license You must make it absolutely clear that any terms which differ + from this License are offered by You alone, not by the Initial + Developer or any Contributor. You hereby agree to indemnify the + Initial Developer and every Contributor for any liability incurred by + the Initial Developer or such Contributor as a result of any such + terms You offer. + + 3.7. Larger Works. + You may create a Larger Work by combining Covered Code with other code + not governed by the terms of this License and distribute the Larger + Work as a single product. In such a case, You must make sure the + requirements of this License are fulfilled for the Covered Code. + + 4. Inability to Comply Due to Statute or Regulation. + + If it is impossible for You to comply with any of the terms of this + License with respect to some or all of the Covered Code due to + statute, judicial order, or regulation then You must: (a) comply with + the terms of this License to the maximum extent possible; and (b) + describe the limitations and the code they affect. Such description + must be included in the LEGAL file described in Section 3.4 and must + be included with all distributions of the Source Code. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + + 5. Application of this License. + + This License applies to code to which the Initial Developer has + attached the notice in Exhibit A and to related Covered Code. + + 6. Versions of the License. + + 6.1. New Versions. + Netscape Communications Corporation ("Netscape") may publish revised + and/or new versions of the License from time to time. Each version + will be given a distinguishing version number. + + 6.2. Effect of New Versions. + Once Covered Code has been published under a particular version of the + License, You may always continue to use it under the terms of that + version. You may also choose to use such Covered Code under the terms + of any subsequent version of the License published by Netscape. No one + other than Netscape has the right to modify the terms applicable to + Covered Code created under this License. + + 6.3. Derivative Works. + If You create or use a modified version of this License (which you may + only do in order to apply it to code which is not already Covered Code + governed by this License), You must (a) rename Your license so that + the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", + "MPL", "NPL" or any confusingly similar phrase do not appear in your + license (except to note that your license differs from this License) + and (b) otherwise make it clear that Your version of the license + contains terms which differ from the Mozilla Public License and + Netscape Public License. (Filling in the name of the Initial + Developer, Original Code or Contributor in the notice described in + Exhibit A shall not of themselves be deemed to be modifications of + this License.) + + 7. DISCLAIMER OF WARRANTY. 
+ + COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF + DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. + THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE + IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, + YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE + COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER + OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + + 8. TERMINATION. + + 8.1. This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to cure + such breach within 30 days of becoming aware of the breach. All + sublicenses to the Covered Code which are properly granted shall + survive any termination of this License. Provisions which, by their + nature, must remain in effect beyond the termination of this License + shall survive. + + 8.2. 
If You initiate litigation by asserting a patent infringement + claim (excluding declatory judgment actions) against Initial Developer + or a Contributor (the Initial Developer or Contributor against whom + You file such action is referred to as "Participant") alleging that: + + (a) such Participant's Contributor Version directly or indirectly + infringes any patent, then any and all rights granted by such + Participant to You under Sections 2.1 and/or 2.2 of this License + shall, upon 60 days notice from Participant terminate prospectively, + unless if within 60 days after receipt of notice You either: (i) + agree in writing to pay Participant a mutually agreeable reasonable + royalty for Your past and future use of Modifications made by such + Participant, or (ii) withdraw Your litigation claim with respect to + the Contributor Version against such Participant. If within 60 days + of notice, a reasonable royalty and payment arrangement are not + mutually agreed upon in writing by the parties or the litigation claim + is not withdrawn, the rights granted by Participant to You under + Sections 2.1 and/or 2.2 automatically terminate at the expiration of + the 60 day notice period specified above. + + (b) any software, hardware, or device, other than such Participant's + Contributor Version, directly or indirectly infringes any patent, then + any rights granted to You by such Participant under Sections 2.1(b) + and 2.2(b) are revoked effective as of the date You first made, used, + sold, distributed, or had made, Modifications made by that + Participant. + + 8.3. 
If You assert a patent infringement claim against Participant + alleging that such Participant's Contributor Version directly or + indirectly infringes any patent where such claim is resolved (such as + by license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 8.4. In the event of termination under Sections 8.1 or 8.2 above, + all end user license agreements (excluding distributors and resellers) + which have been validly granted by You or any distributor hereunder + prior to termination shall survive termination. + + 9. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL + DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, + OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR + ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY + CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, + WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY + RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW + PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE + EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO + THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + + 10. U.S. GOVERNMENT END USERS. + + The Covered Code is a "commercial item," as that term is defined in + 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial computer + software" and "commercial computer software documentation," as such + terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 + C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), + all U.S. Government End Users acquire Covered Code with only those + rights set forth herein. + + 11. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + California law provisions (except to the extent applicable law, if + any, provides otherwise), excluding its conflict-of-law provisions. + With respect to disputes in which at least one party is a citizen of, + or an entity chartered or registered to do business in the United + States of America, any litigation relating to this License shall be + subject to the jurisdiction of the Federal Courts of the Northern + District of California, with venue lying in Santa Clara County, + California, with the losing party responsible for costs, including + without limitation, court costs and reasonable attorneys' fees and + expenses. The application of the United Nations Convention on + Contracts for the International Sale of Goods is expressly excluded. + Any law or regulation which provides that the language of a contract + shall be construed against the drafter shall not apply to this + License. + + 12. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. + + 13. 
MULTIPLE-LICENSED CODE. + + Initial Developer may designate portions of the Covered Code as + "Multiple-Licensed". "Multiple-Licensed" means that the Initial + Developer permits you to utilize portions of the Covered Code under + Your choice of the NPL or the alternative licenses, if any, specified + by the Initial Developer in the file described in Exhibit A. + + EXHIBIT A -Mozilla Public License. + + ``The contents of this file are subject to the Mozilla Public License + Version 1.1 (the "License"); you may not use this file except in + compliance with the License. You may obtain a copy of the License at + http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the + License for the specific language governing rights and limitations + under the License. + + The Original Code is RabbitMQ. + + The Initial Developer of the Original Code is VMware, Ltd.. + Portions created by VMware, Ltd. are Copyright (C) + 2007-2012 VMware, Inc.. All Rights Reserved. + + Contributor(s): . + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License Version 2 license (the "[GPL] License"), in which case the + provisions of [GPL] License are applicable instead of those + above. If you wish to allow use of your version of this file only + under the terms of the [GPL] License and not to allow others to use + your version of this file under the MPL, indicate your decision by + deleting the provisions above and replace them with the notice and + other provisions required by the [GPL] License. If you do not delete + the provisions above, a recipient may use your version of this file + under either the MPL or the [GPL] License." + + [NOTE: The text of this Exhibit A may differ slightly from the text of + the notices in the Source Code files of the Original Code. 
You should + use the text of this Exhibit A rather than the text found in the + Original Code Source Code for Your Modifications.] + + + from VMware, Inc http://www.vmware.com/ + rabbitmq-client.jar from http://www.rabbitmq.com/java-client.html + +Within the patches/systemvm/debian/config/etc directory + placed in the public domain + by Adiscon GmbH http://www.adiscon.com/ + rsyslog.conf + by Simon Kelley + dnsmasq.conf + vpcdnsmasq.conf + +Within the patches/systemvm/debian/config/etc/apache2 directory + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2012 The Apache Software Foundation + from The Apache Software Foundation http://www.apache.org/ + httpd.conf + ports.conf + sites-available/default + sites-available/default-ssl + vhostexample.conf + +Within the patches/systemvm/debian/config/etc/ssh/ directory + licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) + + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list + of conditions and the following disclaimer. Redistributions in binary form must + reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the + distribution. + + Neither the name of the author nor the names of contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + from OpenSSH Project http://www.openssh.org/ + sshd_config + +Within the patches/systemvm/debian/config/root/redundant_router directory + placed in the public domain + by The netfilter.org project http://www.netfilter.org/ + conntrackd.conf.templ + +Within the scripts/storage/secondary directory + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2010-2011 OpenStack, LLC. + from OpenStack, LLC http://www.openstack.org + swift + +Within the scripts/vm/hypervisor/xenserver directory + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2010-2011 OpenStack, LLC. + from OpenStack, LLC http://www.openstack.org + swift + +Within the target/jar directory + placed in the public domain + by Distributed Computing Laboratory at Emory University http://creativecommons.org/licenses/publicdomain/ + cloud-backport-util-concurrent-3.0.jar from http://backport-jsr166.sourceforge.net/ + + licensed under the Apache License, Version 1.1 http://www.apache.org/licenses/LICENSE-1.1 (as follows) + + Copyright (c) 2012 The Apache Software Foundation + + /* ==================================================================== + * The Apache Software License, Version 1.1 + * + * Copyright (c) 2000 The Apache Software Foundation. All rights + * reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, + * if any, must include the following acknowledgment: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowledgment may appear in the software itself, + * if and wherever such third-party acknowledgments normally appear. + * + * 4. The names "Apache" and "Apache Software Foundation" must + * not be used to endorse or promote products derived from this + * software without prior written permission. For written + * permission, please contact apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache", + * nor may "Apache" appear in their name, without prior written + * permission of the Apache Software Foundation. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + */ + + from The Apache Software Foundation http://www.apache.org/ + cloud-commons-discovery.jar from http://commons.apache.org/discovery/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2012 The Apache Software Foundation + from The Apache Software Foundation http://www.apache.org/ + cloud-axis.jar from http://axis.apache.org/axis/ + cloud-cglib.jar from http://cglib.sourceforge.net/ + cloud-commons-codec-1.5.jar from http://commons.apache.org/codec/ + cloud-commons-collections-3.2.1.jar from http://commons.apache.org/collections/ + cloud-commons-configuration-1.8.jar from http://commons.apache.org/configuration/ + cloud-commons-dbcp-1.4.jar from http://commons.apache.org/dbcp/ + cloud-commons-httpclient-3.1.jar from http://hc.apache.org/httpclient-3.x/ + cloud-commons-lang-2.6.jar from http://commons.apache.org/lang/ + cloud-commons-logging-1.1.1.jar from http://commons.apache.org/logging/ + cloud-commons-pool-1.5.6.jar from http://commons.apache.org/pool/ + cloud-log4j-extras.jar from http://logging.apache.org/log4j/companions/extras/ + cloud-log4j.jar from 
http://logging.apache.org/log4j/ + cloud-ws-commons-util-1.0.2.jar from http://ws.apache.org/commons/util/ + cloud-xmlrpc-client-3.1.3.jar from http://ws.apache.org/xmlrpc/client.html + cloud-xmlrpc-common-3.1.3.jar from http://ws.apache.org/xmlrpc/xmlrpc-common/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2007-2010, The JASYPT team (http://www.jasypt.org) + from The JASYPT team http://www.jasypt.org + cloud-jasypt-1.9.jar from http://www.jasypt.org + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2003-2007 Luck Consulting Pty Ltd + from Luck Consulting Pty Ltd http://gregluck.com/blog/about/ + cloud-ehcache.jar from http://ehcache.org/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2009 Google Inc. + from Google Inc. http://google.com + cloud-google-gson-1.7.1.jar from http://code.google.com/p/google-gson/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + + from Jetty Committers http://jetty.codehaus.org/jetty/ + jetty-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26-sources.jar + jetty-util-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar + + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2009, Caringo, Inc. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from Caringo, Inc. http://www.caringo.com/ + CAStorSDK.jar from http://www.castor.org/download.html + + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2002-2011 Atsuhiko Yamanaka, JCraft,Inc. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. 
Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from JCraft http://www.jcraft.com/ + cloud-jsch-0.1.42.jar from http://www.jcraft.com/jsch/ + + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) + + Copyright (c) 2007-2008 Trilead AG (http://www.trilead.com) + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. + + from Trilead AG http://www.trilead.com + cloud-trilead-ssh2-build213.jar from http://sourceforge.net/projects/orion-ssh2/ + + licensed under the Bouncy Castle adaptation of MIT X11 License http://www.bouncycastle.org/licence.html (as follows) + + + Please note: our license is an adaptation of the MIT X11 License and should be + read as such. + + LICENSE Copyright (c) 2000 - 2011 The Legion Of The Bouncy Castle + (http://www.bouncycastle.org) + + Permission is hereby granted, free of charge, to any person obtaining a copy of + this software and associated documentation files (the "Software"), to deal in + the Software without restriction, including without limitation the rights to + use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from The Legion Of The Bouncy Castle http://www.bouncycastle.org + cloud-bcprov-jdk16-1.45.jar from http://repo1.maven.org/maven2/org/bouncycastle/bcprov-jdk16/1.45/bcprov-jdk16-1.45-sources.jar + + licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) + + Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved. + + COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + + 1. Definitions. + + 1.1. "Contributor" means each individual or entity that + creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the + Original Software, prior Modifications used by a + Contributor (if any), and the Modifications made by that + particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or + (b) Modifications, or (c) the combination of files + containing Original Software with files containing + Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form + other than Source Code. + + 1.5. "Initial Developer" means the individual or entity + that first makes Original Software available under this + License. + + 1.6. "Larger Work" means a work which combines Covered + Software or portions thereof with code not governed by the + terms of this License. + + 1.7. "License" means this document. + + 1.8. 
"Licensable" means having the right to grant, to the + maximum extent possible, whether at the time of the initial + grant or subsequently acquired, any and all of the rights + conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable + form of any of the following: + + A. Any file that results from an addition to, + deletion from or modification of the contents of a + file containing Original Software or previous + Modifications; + + B. Any new file that contains any part of the + Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and + Executable form of computer software code that is + originally released under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned + or hereafter acquired, including without limitation, + method, process, and apparatus claims, in any patent + Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer + software code in which modifications are made and (b) + associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal + entity exercising rights under, and complying with all of + the terms of, this License. For legal entities, "You" + includes any entity which controls, is controlled by, or is + under common control with You. For purposes of this + definition, "control" means (a) the power, direct or + indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (b) ownership + of more than fifty percent (50%) of the outstanding shares + or beneficial ownership of such entity. + + 2. License Grants. + + 2.1. The Initial Developer Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the + Initial Developer hereby grants You a world-wide, + royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Initial Developer, + to use, reproduce, modify, display, perform, + sublicense and distribute the Original Software (or + portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using or selling of Original Software, to make, have + made, use, practice, sell, and offer for sale, and/or + otherwise dispose of the Original Software (or + portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) + are effective on the date Initial Developer first + distributes or otherwise makes the Original Software + available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent + license is granted: (1) for code that You delete from + the Original Software, or (2) for infringements + caused by: (i) the modification of the Original + Software, or (ii) the combination of the Original + Software with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Contributor to + use, reproduce, modify, display, perform, sublicense + and distribute the Modifications created by such + Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as + Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using, or selling of Modifications made by that + Contributor either alone and/or in combination with + its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, + have made, and/or otherwise dispose of: (1) + Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications + made by that Contributor with its Contributor Version + (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and + 2.2(b) are effective on the date Contributor first + distributes or otherwise makes the Modifications + available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent + license is granted: (1) for any code that Contributor + has deleted from the Contributor Version; (2) for + infringements caused by: (i) third party + modifications of Contributor Version, or (ii) the + combination of Modifications made by that Contributor + with other software (except as part of the + Contributor Version) or other devices; or (3) under + Patent Claims infringed by Covered Software in the + absence of Modifications made by that Contributor. + + 3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in + Source Code form and that Source Code form must be + distributed only under the terms of this License. You must + include a copy of this License with every copy of the + Source Code form of the Covered Software You distribute or + otherwise make available. You must inform recipients of any + such Covered Software in Executable form as to how they can + obtain such Covered Software in Source Code form in a + reasonable manner on or through a medium customarily used + for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You + contribute are governed by the terms of this License. You + represent that You believe Your Modifications are Your + original creation(s) and/or You have sufficient rights to + grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications + that identifies You as the Contributor of the Modification. + You may not remove or alter any copyright, patent or + trademark notices contained within the Covered Software, or + any notices of licensing or any descriptive text giving + attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered + Software in Source Code form that alters or restricts the + applicable version of this License or the recipients' + rights hereunder. You may choose to offer, and to charge a + fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on + behalf of the Initial Developer or any Contributor. 
You + must make it absolutely clear that any such warranty, + support, indemnity or liability obligation is offered by + You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of + warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered + Software under the terms of this License or under the terms + of a license of Your choice, which may contain terms + different from this License, provided that You are in + compliance with the terms of this License and that the + license for the Executable form does not attempt to limit + or alter the recipient's rights in the Source Code form + from the rights set forth in this License. If You + distribute the Covered Software in Executable form under a + different license, You must make it absolutely clear that + any terms which differ from this License are offered by You + alone, not by the Initial Developer or Contributor. You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms + You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software + with other code not governed by the terms of this License + and distribute the Larger Work as a single product. In such + a case, You must make sure the requirements of this License + are fulfilled for the Covered Software. + + 4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and + may publish revised and/or new versions of this License + from time to time. Each version will be given a + distinguishing version number. Except as provided in + Section 4.3, no one other than the license steward has the + right to modify this License. + + 4.2. 
Effect of New Versions. + + You may always continue to use, distribute or otherwise + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. If the Initial Developer includes a + notice in the Original Software prohibiting it from being + distributed or otherwise made available under any + subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to + use, distribute or otherwise make the Covered Software + available under the terms of any subsequent version of the + License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a + new license for Your Original Software, You may create and + use a modified version of this License if You: (a) rename + the license and remove any references to the name of the + license steward (except to note that the license differs + from this License); and (b) otherwise make it clear that + the license contains terms which differ from this License. + + 5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF + ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. 
+ + 6. TERMINATION. + + 6.1. This License and the rights granted hereunder will + terminate automatically if You fail to comply with terms + herein and fail to cure such breach within 30 days of + becoming aware of the breach. Provisions which, by their + nature, must remain in effect beyond the termination of + this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or + a Contributor (the Initial Developer or Contributor against + whom You assert such claim is referred to as "Participant") + alleging that the Participant Software (meaning the + Contributor Version where the Participant is a Contributor + or the Original Software where the Participant is the + Initial Developer) directly or indirectly infringes any + patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial + Developer (if the Initial Developer is not the Participant) + and all Contributors under Sections 2.1 and/or 2.2 of this + License shall, upon 60 days notice from Participant + terminate prospectively and automatically at the expiration + of such 60 day notice period, unless if within such 60 day + period You withdraw Your claim with respect to the + Participant Software against such Participant either + unilaterally or pursuant to a written agreement with + Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 + above, all end user licenses that have been validly granted + by You or any distributor hereunder prior to termination + (excluding licenses granted to You by any distributor) + shall survive termination. + + 7. LIMITATION OF LIABILITY. 
+ + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + + 8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. + 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 + through 227.7202-4 (June 1995), all U.S. Government End Users + acquire Covered Software with only those rights set forth herein. + This U.S. Government Rights clause is in lieu of, and supersedes, + any other FAR, DFAR, or other clause or provision that addresses + Government rights in computer software under this License. + + 9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the + extent necessary to make it enforceable. 
This License shall be + governed by the law of the jurisdiction specified in a notice + contained within the Original Software (except to the extent + applicable law, if any, provides otherwise), excluding such + jurisdiction's conflict-of-law provisions. Any litigation + relating to this License shall be subject to the jurisdiction of + the courts located in the jurisdiction and venue specified in a + notice contained within the Original Software, with the losing + party responsible for costs, including, without limitation, court + costs and reasonable attorneys' fees and expenses. The + application of the United Nations Convention on Contracts for the + International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall + be construed against the drafter shall not apply to this License. + You agree that You alone are responsible for compliance with the + United States export administration regulations (and the export + control laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + + 10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. + + from Project GlassFish http://glassfish.java.net/ + cloud-ejb-api-3.0.jar from http://repo1.maven.org/maven2/javax/ejb/ejb-api/3.0/ejb-api-3.0-sources.jar + cloud-jstl-1.2.jar from http://jstl.java.net/ + + licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) + + Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. 
+ + COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + + 1. Definitions. + + 1.1. "Contributor" means each individual or entity that + creates or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the + Original Software, prior Modifications used by a + Contributor (if any), and the Modifications made by that + particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or + (b) Modifications, or (c) the combination of files + containing Original Software with files containing + Modifications, in each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form + other than Source Code. + + 1.5. "Initial Developer" means the individual or entity + that first makes Original Software available under this + License. + + 1.6. "Larger Work" means a work which combines Covered + Software or portions thereof with code not governed by the + terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the + maximum extent possible, whether at the time of the initial + grant or subsequently acquired, any and all of the rights + conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable + form of any of the following: + + A. Any file that results from an addition to, + deletion from or modification of the contents of a + file containing Original Software or previous + Modifications; + + B. Any new file that contains any part of the + Original Software or previous Modification; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and + Executable form of computer software code that is + originally released under this License. + + 1.11. 
"Patent Claims" means any patent claim(s), now owned + or hereafter acquired, including without limitation, + method, process, and apparatus claims, in any patent + Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer + software code in which modifications are made and (b) + associated documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal + entity exercising rights under, and complying with all of + the terms of, this License. For legal entities, "You" + includes any entity which controls, is controlled by, or is + under common control with You. For purposes of this + definition, "control" means (a) the power, direct or + indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (b) ownership + of more than fifty percent (50%) of the outstanding shares + or beneficial ownership of such entity. + + 2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the + Initial Developer hereby grants You a world-wide, + royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Initial Developer, + to use, reproduce, modify, display, perform, + sublicense and distribute the Original Software (or + portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using or selling of Original Software, to make, have + made, use, practice, sell, and offer for sale, and/or + otherwise dispose of the Original Software (or + portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) + are effective on the date Initial Developer first + distributes or otherwise makes the Original Software + available to a third party under the terms of this + License. 
+ + (d) Notwithstanding Section 2.1(b) above, no patent + license is granted: (1) for code that You delete from + the Original Software, or (2) for infringements + caused by: (i) the modification of the Original + Software, or (ii) the combination of the Original + Software with other software or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than + patent or trademark) Licensable by Contributor to + use, reproduce, modify, display, perform, sublicense + and distribute the Modifications created by such + Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as + Covered Software and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, + using, or selling of Modifications made by that + Contributor either alone and/or in combination with + its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, + have made, and/or otherwise dispose of: (1) + Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications + made by that Contributor with its Contributor Version + (or portions of such combination). + + (c) The licenses granted in Sections 2.2(a) and + 2.2(b) are effective on the date Contributor first + distributes or otherwise makes the Modifications + available to a third party. 
+ + (d) Notwithstanding Section 2.2(b) above, no patent + license is granted: (1) for any code that Contributor + has deleted from the Contributor Version; (2) for + infringements caused by: (i) third party + modifications of Contributor Version, or (ii) the + combination of Modifications made by that Contributor + with other software (except as part of the + Contributor Version) or other devices; or (3) under + Patent Claims infringed by Covered Software in the + absence of Modifications made by that Contributor. + + 3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in + Source Code form and that Source Code form must be + distributed only under the terms of this License. You must + include a copy of this License with every copy of the + Source Code form of the Covered Software You distribute or + otherwise make available. You must inform recipients of any + such Covered Software in Executable form as to how they can + obtain such Covered Software in Source Code form in a + reasonable manner on or through a medium customarily used + for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You + contribute are governed by the terms of this License. You + represent that You believe Your Modifications are Your + original creation(s) and/or You have sufficient rights to + grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications + that identifies You as the Contributor of the Modification. + You may not remove or alter any copyright, patent or + trademark notices contained within the Covered Software, or + any notices of licensing or any descriptive text giving + attribution to any Contributor or the Initial Developer. + + 3.4. Application of Additional Terms. 
+ + You may not offer or impose any terms on any Covered + Software in Source Code form that alters or restricts the + applicable version of this License or the recipients' + rights hereunder. You may choose to offer, and to charge a + fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Software. + However, you may do so only on Your own behalf, and not on + behalf of the Initial Developer or any Contributor. You + must make it absolutely clear that any such warranty, + support, indemnity or liability obligation is offered by + You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of + warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered + Software under the terms of this License or under the terms + of a license of Your choice, which may contain terms + different from this License, provided that You are in + compliance with the terms of this License and that the + license for the Executable form does not attempt to limit + or alter the recipient's rights in the Source Code form + from the rights set forth in this License. If You + distribute the Covered Software in Executable form under a + different license, You must make it absolutely clear that + any terms which differ from this License are offered by You + alone, not by the Initial Developer or Contributor. You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms + You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software + with other code not governed by the terms of this License + and distribute the Larger Work as a single product. 
In such + a case, You must make sure the requirements of this License + are fulfilled for the Covered Software. + + 4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. is the initial license steward and + may publish revised and/or new versions of this License + from time to time. Each version will be given a + distinguishing version number. Except as provided in + Section 4.3, no one other than the license steward has the + right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. If the Initial Developer includes a + notice in the Original Software prohibiting it from being + distributed or otherwise made available under any + subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the + version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to + use, distribute or otherwise make the Covered Software + available under the terms of any subsequent version of the + License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a + new license for Your Original Software, You may create and + use a modified version of this License if You: (a) rename + the license and remove any references to the name of the + license steward (except to note that the license differs + from this License); and (b) otherwise make it clear that + the license contains terms which differ from this License. + + 5. DISCLAIMER OF WARRANTY. 
+ + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF + ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. + + 6. TERMINATION. + + 6.1. This License and the rights granted hereunder will + terminate automatically if You fail to comply with terms + herein and fail to cure such breach within 30 days of + becoming aware of the breach. Provisions which, by their + nature, must remain in effect beyond the termination of + this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or + a Contributor (the Initial Developer or Contributor against + whom You assert such claim is referred to as "Participant") + alleging that the Participant Software (meaning the + Contributor Version where the Participant is a Contributor + or the Original Software where the Participant is the + Initial Developer) directly or indirectly infringes any + patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial + Developer (if the Initial Developer is not the Participant) + and all Contributors under Sections 2.1 and/or 2.2 of this + License shall, upon 60 days notice from Participant + terminate prospectively and automatically at the expiration + of such 60 day notice period, unless if within such 60 day + period You withdraw Your claim with respect to the + Participant Software against such Participant either + unilaterally or pursuant to a written agreement with + Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 + above, all end user licenses that have been validly granted + by You or any distributor hereunder prior to termination + (excluding licenses granted to You by any distributor) + shall survive termination. + + 7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + + 8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. + 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 + through 227.7202-4 (June 1995), all U.S. Government End Users + acquire Covered Software with only those rights set forth herein. + This U.S. Government Rights clause is in lieu of, and supersedes, + any other FAR, DFAR, or other clause or provision that addresses + Government rights in computer software under this License. + + 9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the + extent necessary to make it enforceable. This License shall be + governed by the law of the jurisdiction specified in a notice + contained within the Original Software (except to the extent + applicable law, if any, provides otherwise), excluding such + jurisdiction's conflict-of-law provisions. Any litigation + relating to this License shall be subject to the jurisdiction of + the courts located in the jurisdiction and venue specified in a + notice contained within the Original Software, with the losing + party responsible for costs, including, without limitation, court + costs and reasonable attorneys' fees and expenses. 
The + application of the United Nations Convention on Contracts for the + International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall + be construed against the drafter shall not apply to this License. + You agree that You alone are responsible for compliance with the + United States export administration regulations (and the export + control laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + + 10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. + + from Oracle and/or its affiliates http://oracle.com + cloud-email.jar from http://kenai.com/projects/javamail + + licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows) + + Copyright (c) IBM Corp 2006 + + Common Public License Version 1.0 (CPL) + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC + LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM + CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + + 1. DEFINITIONS + + "Contribution means: + + a) in the case of the initial Contributor, the initial code and documentation + distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + + i) changes to the Program, and + + ii) additions to the Program; + + where such changes and/or additions to the Program originate from and are + distributed by that particular Contributor. 
A Contribution 'originates' from a + Contributor if it was added to the Program by such Contributor itself or anyone + acting on such Contributor's behalf. Contributions do not include additions to + the Program which: (i) are separate modules of software distributed in + conjunction with the Program under their own license agreement, and (ii) are not + derivative works of the Program. + + "Contributor" means any person or entity that distributes the Program. + + "Licensed Patents" mean patent claims licensable by a Contributor which are + necessarily infringed by the use or sale of its Contribution alone or when + combined with the Program. + + "Program" means the Contributions distributed in accordance with this Agreement. + + "Recipient" means anyone who receives the Program under this Agreement, including + all Contributors. + + 2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free copyright license to + reproduce, prepare derivative works of, publicly display, publicly perform, + distribute and sublicense the Contribution of such Contributor, if any, and such + derivative works, in source code and object code form. + + b) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed + Patents to make, use, sell, offer to sell, import and otherwise transfer the + Contribution of such Contributor, if any, in source code and object code form. + This patent license shall apply to the combination of the Contribution and the + Program if, at the time the Contribution is added by the Contributor, such + addition of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other combinations + which include the Contribution. No hardware per se is licensed hereunder. 
+ + c) Recipient understands that although each Contributor grants the licenses to + its Contributions set forth herein, no assurances are provided by any + Contributor that the Program does not infringe the patent or other intellectual + property rights of any other entity. Each Contributor disclaims any liability to + Recipient for claims brought by any other entity based on infringement of + intellectual property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby assumes sole + responsibility to secure any other intellectual property rights needed, if any. + For example, if a third party patent license is required to allow Recipient to + distribute the Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has sufficient copyright + rights in its Contribution, if any, to grant the copyright license set forth in + this Agreement. + + 3. 
REQUIREMENTS + + A Contributor may choose to distribute the Program in object code form under its + own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties and + conditions, express and implied, including warranties or conditions of title and + non-infringement, and implied warranties or conditions of merchantability and + fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability for + damages, including direct, indirect, special, incidental and consequential + damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are offered by + that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such Contributor, + and informs licensees how to obtain it in a reasonable manner on or through a + medium customarily used for software exchange. + + When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + + b) a copy of this Agreement must be included with each copy of the Program. + + Contributors may not remove or alter any copyright notices contained within the + Program. + + Each Contributor must identify itself as the originator of its Contribution, if + any, in a manner that reasonably allows subsequent Recipients to identify the + originator of the Contribution. + + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities with + respect to end users, business partners and the like. While this license is + intended to facilitate the commercial use of the Program, the Contributor who + includes the Program in a commercial product offering should do so in a manner + which does not create potential liability for other Contributors. 
Therefore, if + a Contributor includes the Program in a commercial product offering, such + Contributor ("Commercial Contributor") hereby agrees to defend and indemnify + every other Contributor ("Indemnified Contributor") against any losses, damages + and costs (collectively "Losses") arising from claims, lawsuits and other legal + actions brought by a third party against the Indemnified Contributor to the + extent caused by the acts or omissions of such Commercial Contributor in + connection with its distribution of the Program in a commercial product + offering. The obligations in this section do not apply to any claims or Losses + relating to any actual or alleged intellectual property infringement. In order + to qualify, an Indemnified Contributor must: a) promptly notify the Commercial + Contributor in writing of such claim, and b) allow the Commercial Contributor to + control, and cooperate with the Commercial Contributor in, the defense and any + related settlement negotiations. The Indemnified Contributor may participate in + any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial product + offering, Product X. That Contributor is then a Commercial Contributor. If that + Commercial Contributor then makes performance claims, or offers warranties + related to Product X, those performance claims and warranties are such + Commercial Contributor's responsibility alone. Under this section, the + Commercial Contributor would have to defend claims against the other + Contributors related to those performance claims and warranties, and if a court + requires any other Contributor to pay any damages as a result, the Commercial + Contributor must pay those damages. + + 5. 
NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR + IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, + NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each + Recipient is solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its exercise of + rights under this Agreement, including but not limited to the risks and costs of + program errors, compliance with applicable laws, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY + CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST + PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS + GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 7. GENERAL + + If any provision of this Agreement is invalid or unenforceable under applicable + law, it shall not affect the validity or enforceability of the remainder of the + terms of this Agreement, and without further action by the parties hereto, such + provision shall be reformed to the minimum extent necessary to make such + provision valid and enforceable. 
+ + If Recipient institutes patent litigation against a Contributor with respect to + a patent applicable to software (including a cross-claim or counterclaim in a + lawsuit), then any patent licenses granted by that Contributor to such Recipient + under this Agreement shall terminate as of the date such litigation is filed. In + addition, if Recipient institutes patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that the Program + itself (excluding combinations of the Program with other software or hardware) + infringes such Recipient's patent(s), then such Recipient's rights granted under + Section 2(b) shall terminate as of the date such litigation is filed. + + All Recipient's rights under this Agreement shall terminate if it fails to + comply with any of the material terms or conditions of this Agreement and does + not cure such failure in a reasonable period of time after becoming aware of + such noncompliance. If all Recipient's rights under this Agreement terminate, + Recipient agrees to cease use and distribution of the Program as soon as + reasonably practicable. However, Recipient's obligations under this Agreement + and any licenses granted by Recipient relating to the Program shall continue and + survive. + + Everyone is permitted to copy and distribute copies of this Agreement, but in + order to avoid inconsistency the Agreement is copyrighted and may only be + modified in the following manner. The Agreement Steward reserves the right to + publish new versions (including revisions) of this Agreement from time to time. + No one other than the Agreement Steward has the right to modify this Agreement. + IBM is the initial Agreement Steward. IBM may assign the responsibility to serve + as the Agreement Steward to a suitable separate entity. Each new version of the + Agreement will be given a distinguishing version number. 
The Program (including + Contributions) may always be distributed subject to the version of the Agreement + under which it was received. In addition, after a new version of the Agreement + is published, Contributor may elect to distribute the Program (including its + Contributions) under the new version. Except as expressly stated in Sections + 2(a) and 2(b) above, Recipient receives no rights or licenses to the + intellectual property of any Contributor under this Agreement, whether + expressly, by implication, estoppel or otherwise. All rights in the Program not + expressly granted under this Agreement are reserved. + + This Agreement is governed by the laws of the State of New York and the + intellectual property laws of the United States of America. No party to this + Agreement will bring a legal action under this Agreement more than one year + after the cause of action arose. Each party waives its rights to a jury trial in + any resulting litigation. + + from IBM Corp http://www.ibm.com/ + cloud-wsdl4j-1.6.2.jar from http://sourceforge.net/projects/wsdl4j/ + cloud-wsdl4j.jar from http://sourceforge.net/projects/wsdl4j/ + + licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows) + + + Common Public License Version 1.0 (CPL) + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC + LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM + CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + + 1. DEFINITIONS + + "Contribution means: + + a) in the case of the initial Contributor, the initial code and documentation + distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + + i) changes to the Program, and + + ii) additions to the Program; + + where such changes and/or additions to the Program originate from and are + distributed by that particular Contributor. 
A Contribution 'originates' from a + Contributor if it was added to the Program by such Contributor itself or anyone + acting on such Contributor's behalf. Contributions do not include additions to + the Program which: (i) are separate modules of software distributed in + conjunction with the Program under their own license agreement, and (ii) are not + derivative works of the Program. + + "Contributor" means any person or entity that distributes the Program. + + "Licensed Patents" mean patent claims licensable by a Contributor which are + necessarily infringed by the use or sale of its Contribution alone or when + combined with the Program. + + "Program" means the Contributions distributed in accordance with this Agreement. + + "Recipient" means anyone who receives the Program under this Agreement, including + all Contributors. + + 2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free copyright license to + reproduce, prepare derivative works of, publicly display, publicly perform, + distribute and sublicense the Contribution of such Contributor, if any, and such + derivative works, in source code and object code form. + + b) Subject to the terms of this Agreement, each Contributor hereby grants + Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed + Patents to make, use, sell, offer to sell, import and otherwise transfer the + Contribution of such Contributor, if any, in source code and object code form. + This patent license shall apply to the combination of the Contribution and the + Program if, at the time the Contribution is added by the Contributor, such + addition of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other combinations + which include the Contribution. No hardware per se is licensed hereunder. 
+ + c) Recipient understands that although each Contributor grants the licenses to + its Contributions set forth herein, no assurances are provided by any + Contributor that the Program does not infringe the patent or other intellectual + property rights of any other entity. Each Contributor disclaims any liability to + Recipient for claims brought by any other entity based on infringement of + intellectual property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby assumes sole + responsibility to secure any other intellectual property rights needed, if any. + For example, if a third party patent license is required to allow Recipient to + distribute the Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has sufficient copyright + rights in its Contribution, if any, to grant the copyright license set forth in + this Agreement. + + 3. 
REQUIREMENTS + + A Contributor may choose to distribute the Program in object code form under its + own license agreement, provided that: + + a) it complies with the terms and conditions of this Agreement; and + + b) its license agreement: + + i) effectively disclaims on behalf of all Contributors all warranties and + conditions, express and implied, including warranties or conditions of title and + non-infringement, and implied warranties or conditions of merchantability and + fitness for a particular purpose; + + ii) effectively excludes on behalf of all Contributors all liability for + damages, including direct, indirect, special, incidental and consequential + damages, such as lost profits; + + iii) states that any provisions which differ from this Agreement are offered by + that Contributor alone and not by any other party; and + + iv) states that source code for the Program is available from such Contributor, + and informs licensees how to obtain it in a reasonable manner on or through a + medium customarily used for software exchange. + + When the Program is made available in source code form: + + a) it must be made available under this Agreement; and + + b) a copy of this Agreement must be included with each copy of the Program. + + Contributors may not remove or alter any copyright notices contained within the + Program. + + Each Contributor must identify itself as the originator of its Contribution, if + any, in a manner that reasonably allows subsequent Recipients to identify the + originator of the Contribution. + + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities with + respect to end users, business partners and the like. While this license is + intended to facilitate the commercial use of the Program, the Contributor who + includes the Program in a commercial product offering should do so in a manner + which does not create potential liability for other Contributors. 
Therefore, if + a Contributor includes the Program in a commercial product offering, such + Contributor ("Commercial Contributor") hereby agrees to defend and indemnify + every other Contributor ("Indemnified Contributor") against any losses, damages + and costs (collectively "Losses") arising from claims, lawsuits and other legal + actions brought by a third party against the Indemnified Contributor to the + extent caused by the acts or omissions of such Commercial Contributor in + connection with its distribution of the Program in a commercial product + offering. The obligations in this section do not apply to any claims or Losses + relating to any actual or alleged intellectual property infringement. In order + to qualify, an Indemnified Contributor must: a) promptly notify the Commercial + Contributor in writing of such claim, and b) allow the Commercial Contributor to + control, and cooperate with the Commercial Contributor in, the defense and any + related settlement negotiations. The Indemnified Contributor may participate in + any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial product + offering, Product X. That Contributor is then a Commercial Contributor. If that + Commercial Contributor then makes performance claims, or offers warranties + related to Product X, those performance claims and warranties are such + Commercial Contributor's responsibility alone. Under this section, the + Commercial Contributor would have to defend claims against the other + Contributors related to those performance claims and warranties, and if a court + requires any other Contributor to pay any damages as a result, the Commercial + Contributor must pay those damages. + + 5. 
NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR + IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, + NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each + Recipient is solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its exercise of + rights under this Agreement, including but not limited to the risks and costs of + program errors, compliance with applicable laws, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY + CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST + PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS + GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 7. GENERAL + + If any provision of this Agreement is invalid or unenforceable under applicable + law, it shall not affect the validity or enforceability of the remainder of the + terms of this Agreement, and without further action by the parties hereto, such + provision shall be reformed to the minimum extent necessary to make such + provision valid and enforceable. 
+ + If Recipient institutes patent litigation against a Contributor with respect to + a patent applicable to software (including a cross-claim or counterclaim in a + lawsuit), then any patent licenses granted by that Contributor to such Recipient + under this Agreement shall terminate as of the date such litigation is filed. In + addition, if Recipient institutes patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that the Program + itself (excluding combinations of the Program with other software or hardware) + infringes such Recipient's patent(s), then such Recipient's rights granted under + Section 2(b) shall terminate as of the date such litigation is filed. + + All Recipient's rights under this Agreement shall terminate if it fails to + comply with any of the material terms or conditions of this Agreement and does + not cure such failure in a reasonable period of time after becoming aware of + such noncompliance. If all Recipient's rights under this Agreement terminate, + Recipient agrees to cease use and distribution of the Program as soon as + reasonably practicable. However, Recipient's obligations under this Agreement + and any licenses granted by Recipient relating to the Program shall continue and + survive. + + Everyone is permitted to copy and distribute copies of this Agreement, but in + order to avoid inconsistency the Agreement is copyrighted and may only be + modified in the following manner. The Agreement Steward reserves the right to + publish new versions (including revisions) of this Agreement from time to time. + No one other than the Agreement Steward has the right to modify this Agreement. + IBM is the initial Agreement Steward. IBM may assign the responsibility to serve + as the Agreement Steward to a suitable separate entity. Each new version of the + Agreement will be given a distinguishing version number. 
The Program (including + Contributions) may always be distributed subject to the version of the Agreement + under which it was received. In addition, after a new version of the Agreement + is published, Contributor may elect to distribute the Program (including its + Contributions) under the new version. Except as expressly stated in Sections + 2(a) and 2(b) above, Recipient receives no rights or licenses to the + intellectual property of any Contributor under this Agreement, whether + expressly, by implication, estoppel or otherwise. All rights in the Program not + expressly granted under this Agreement are reserved. + + This Agreement is governed by the laws of the State of New York and the + intellectual property laws of the United States of America. No party to this + Agreement will bring a legal action under this Agreement more than one year + after the cause of action arose. Each party waives its rights to a jury trial in + any resulting litigation. + + from JUnit Project http://www.junit.org/ + cloud-junit.jar from http://kentbeck.github.com/junit/ + + licensed under the Eclipse Distribution License Version 1.0 http://www.eclipse.org/org/documents/edl-v10.php (as follows) + + Copyright (c) 2012 The Eclipse Foundation. + + Eclipse Distribution License Version 1.0 + + Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list + of conditions and the following disclaimer. Redistributions in binary form must + reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the + distribution. Neither the name of the Eclipse Foundation, Inc. 
nor the names of + its contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + from The Eclipse Foundation http://www.eclipse.org + cloud-javax.persistence-2.0.0.jar from http://wiki.eclipse.org/EclipseLink/Release/2.0.0 + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (C) 2008 Tóth István + 2008-2012 Daniel Veillard + 2009-2011 Bryan Kearney + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from The libvirt project http://libvirt.org/ + libvirt-java-0.4.9 + + licensed under the XStream BSD Style License https://fisheye.codehaus.org/browse/xstream/trunk/LICENSE.txt?hb=true (as follows) + + + (BSD Style License) + + Copyright (c) 2003-2006, Joe Walnes Copyright (c) 2006-2011, XStream Committers + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list + of conditions and the following disclaimer. Redistributions in binary form must + reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the + distribution. + + Neither the name of XStream nor the names of its contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + from XStream Committers http://xstream.codehaus.org/ + cloud-xstream-1.3.1.jar from http://xstream.codehaus.org/repository.html + +Within the ui/lib directory + placed in the public domain + by Eric Meyer http://meyerweb.com/eric/ + reset.css from http://meyerweb.com/eric/tools/css/reset/ + + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright (c) 2006 Google Inc. + from Google Inc. http://google.com + excanvas.js from http://code.google.com/p/explorercanvas/ + + licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) + + Copyright (c) 2008 George McGinley Smith + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list + of conditions and the following disclaimer. Redistributions in binary form must + reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the + distribution. + + Neither the name of the author nor the names of contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + from George McGinley Smith + jquery.easing.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from The Dojo Foundation http://dojofoundation.org/ + require.js from http://github.com/jrburke/requirejs + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2011, John Resig + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from John Resig + jquery.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2006 - 2011 Jörn Zaefferer + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from Jorn Zaefferer + jquery.validate.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2010, Sebastian Tschan + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from Sebastian Tschan https://blueimp.net + jquery.md5.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2006 Klaus Hartl (stilbuero.de) + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from Klaus Hartl http://stilbuero.de + jquery.cookies.js + +Within the ui/lib/flot directory + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Released under the MIT license by IOLA, December 2007. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from IOLA http://www.iola.dk/ + jquery.flot.crosshair.js + jquery.flot.fillbetween.js + jquery.flot.image.js + jquery.flot.js + jquery.flot.navigate.js + jquery.flot.resize.js + jquery.flot.selection.js + jquery.flot.stack.js + jquery.flot.symbol.js + jquery.flot.threshold.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Created by Brian Medendorp, June 2009 + Updated November 2009 with contributions from: btburnett3, Anthony Aragues and Xavi Ivars + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from Brian Medendorp + jquery.pie.js + + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from Ole Laursen + jquery.colorhelpers.js + +Within the ui/lib/jquery-ui directory + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + from jQuery UI Developers http://jqueryui.com/about + css/jquery-ui.css + index.html + js/jquery-ui.js + +Within the ui/lib/qunit directory + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + Copyright (c) 2012 John Resig, Jörn Zaefferer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index 29a8284686b..3db74183089 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -2427,6 +2427,10 @@ Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). id='vmware.com' name='VMware, Inc' url='http://www.vmware.com/' /> + Copyright (c) 2013 The Apache Software Foundation @@ -2508,6 +2512,14 @@ Copyright (c) 2009, John Resig + + +Copyright (c) 2010-2012, The Dojo Foundation All Rights Reserved. + + + + + Copyright (c) 2006 Google Inc. diff --git a/tools/whisker/descriptor.xml b/tools/whisker/descriptor.xml index 8e39586501d..0b3e508a124 100644 --- a/tools/whisker/descriptor.xml +++ b/tools/whisker/descriptor.xml @@ -2427,6 +2427,10 @@ Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). 
id='person:patrick.debois' name='Patrick Debois' url='http://www.jedi.be/blog/' /> + Copyright (c) 2013 The Apache Software Foundation @@ -2508,6 +2512,14 @@ Copyright (c) 2009, John Resig + + +Copyright (c) 2010-2012, The Dojo Foundation All Rights Reserved. + + + + + Copyright (c) 2006 Google Inc. From d8537a4446b65c8f172e057976511f2750dffcb8 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Feb 2013 21:12:22 +0530 Subject: [PATCH 033/486] Server side integration for LDAP Configuration --- ui/scripts/globalSettings.js | 45 +++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/ui/scripts/globalSettings.js b/ui/scripts/globalSettings.js index e9461490b27..5f7fb742b2d 100644 --- a/ui/scripts/globalSettings.js +++ b/ui/scripts/globalSettings.js @@ -106,10 +106,10 @@ var data = {}; listViewDataProvider(args, data); $.ajax({ - url: createURL(''), + url: createURL(''), //Need a list LDAP configuration API call which needs to be implemented data: data, success: function(json) { - // var items = json.listhypervisorcapabilitiesresponse.hypervisorCapabilities; + // var items = json.listldapconfigresponse; args.response.success({data:items}); }, error: function(data) { @@ -145,13 +145,11 @@ label:'SSL' , isBoolean:true, isChecked:false - // var $form = $(this).closest("form"); - }, port: { label: 'Port' , defaultValue: '389' }, - truststore:{ label:'Trust Store' , isHidden:true , dependsOn:'ssl' }, - truststorepassword:{ label:'Trust Store Password' ,isHidden:true , dependsOn:'ssl'} + truststore:{ label:'Trust Store' , isHidden:true , dependsOn:'ssl',validation:{required:true} }, + truststorepassword:{ label:'Trust Store Password' ,isHidden:true , dependsOn:'ssl', validation:{required:true}} } @@ -160,6 +158,41 @@ action:function(args) { + var array = []; + array.push("&binddn=" + todb(args.data.name)); + array.push("&bindpass=" + todb(args.data.password)); + array.push("&hostname=" + todb(args.data.hostname)); + 
array.push("&searchbase=" +todb(args.data.searchbase)); + array.push("&queryfilter=" +todb(args.data.queryfilter)); + array.push("&port=" +todb(args.data.port)); + + if(args.$form.find('.form-item[rel=ssl]').find('input[type=checkbox]').is(':Checked')== true) { + + array.push("&ssl=true"); + if(args.data.truststore != "") + array.push("&truststore=" +todb(args.data.truststore)); + + if(args.data.truststorepassword !="") + array.push("&truststorepass=" +todb(args.data.truststorepassword)); + + } + + else + array.push("&ssl=false"); + + $.ajax({ + url: createURL("ldapConfig" + array.join("")), + dataType: "json", + async: true, + success: function(json) { + var items = json.ldapconfigresponse; + args.response.success({ + data: items + }); + + } + + }); } From 2beb66fd32bd8b176a7b4eff2d0a34aa4d278045 Mon Sep 17 00:00:00 2001 From: Radhika PC Date: Fri, 15 Feb 2013 21:31:30 +0530 Subject: [PATCH 034/486] Egress Firewall Rules Documentation --- docs/en-US/creating-network-offerings.xml | 2 +- docs/en-US/egress-firewall-rule.xml | 98 +++++++++++++++++ docs/en-US/firewall-rules.xml | 119 +++++++++++++-------- docs/en-US/images/egress-firewall-rule.png | Bin 0 -> 10413 bytes docs/en-US/ip-forwarding-firewalling.xml | 44 ++++---- 5 files changed, 195 insertions(+), 68 deletions(-) create mode 100644 docs/en-US/egress-firewall-rule.xml create mode 100644 docs/en-US/images/egress-firewall-rule.png diff --git a/docs/en-US/creating-network-offerings.xml b/docs/en-US/creating-network-offerings.xml index df392420937..1f79fb166ce 100644 --- a/docs/en-US/creating-network-offerings.xml +++ b/docs/en-US/creating-network-offerings.xml @@ -117,7 +117,7 @@ Firewall - For more information, see For more information, see . For more information, see the Administration Guide. 
diff --git a/docs/en-US/egress-firewall-rule.xml b/docs/en-US/egress-firewall-rule.xml new file mode 100644 index 00000000000..ef0e25efd03 --- /dev/null +++ b/docs/en-US/egress-firewall-rule.xml @@ -0,0 +1,98 @@ + + +%BOOK_ENTITIES; +]> + +
+ Creating Egress Firewall Rules in an Advanced Zone + + The egress firewall rules are supported only on virtual routers. + + + The egress traffic originates from a private network to a public network, such as the + Internet. By default, the egress traffic is blocked, so no outgoing traffic is allowed from a + guest network to the Internet. However, you can control the egress traffic in an Advanced zone + by creating egress firewall rules. When an egress firewall rule is applied, the traffic specific + to the rule is allowed and the remaining traffic is blocked. When all the firewall rules are + removed the default policy, Block, is applied. + Consider the following scenarios to apply egress firewall rules: + + + Allow the egress traffic from specified source CIDR. The Source CIDR is part of guest + network CIDR. + + + Allow the egress traffic with destination protocol TCP,UDP,ICMP, or ALL. + + + Allow the egress traffic with destination protocol and port range. The port range is + specified for TCP, UDP or for ICMP type and code. + + + To configure an egress firewall rule: + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + In Select view, choose Guest networks, then click the Guest network you want. + + + To add an egress rule, click the Egress rules tab and fill out the following fields to + specify what type of traffic is allowed to be sent out of VM instances in this guest + network: + + + + + + egress-firewall-rule.png: adding an egress firewall rule + + + + + CIDR: (Add by CIDR only) To send traffic only to + the IP addresses within a particular address block, enter a CIDR or a comma-separated + list of CIDRs. The CIDR is the base IP address of the destination. For example, + 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0. + + + Protocol: The networking protocol that VMs uses to + send outgoing traffic. 
The TCP and UDP protocols are typically used for data exchange + and end-user communications. The ICMP protocol is typically used to send error messages + or network monitoring data. + + + Start Port, End Port: (TCP, UDP only) A range of + listening ports that are the destination for the outgoing traffic. If you are opening a + single port, use the same number in both fields. + + + ICMP Type, ICMP Code: (ICMP only) The type of + message and error code that are sent. + + + + + Click Add. + + +
diff --git a/docs/en-US/firewall-rules.xml b/docs/en-US/firewall-rules.xml index 01d072bbcc4..837a4c6f9d0 100644 --- a/docs/en-US/firewall-rules.xml +++ b/docs/en-US/firewall-rules.xml @@ -3,53 +3,80 @@ %BOOK_ENTITIES; ]> -
- Firewall Rules - By default, all incoming traffic to the public IP address is rejected by the firewall. To allow external traffic, you can open firewall ports by specifying firewall rules. You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses. - You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See . - Firewall rules can be created using the Firewall tab in the Management Server UI. This tab is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to "true." - To create a firewall rule: - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation, choose Network. - Click the name of the network where you want to work with. - Click View IP Addresses. - Click the IP address you want to work with. - - Click the Configuration tab and fill in the following values. - - Source CIDR. (Optional) To accept only traffic from IP - addresses within a particular address block, enter a CIDR or a - comma-separated list of CIDRs. Example: 192.168.0.0/22. Leave empty to allow - all CIDRs. - Protocol. The communication protocol in use on the opened - port(s). - Start Port and End Port. The port(s) you want to open on the - firewall. If you are opening a single port, use the same number in both - fields - ICMP Type and ICMP Code. Used only if Protocol is set to - ICMP. Provide the type and code required by the ICMP protocol to fill out - the ICMP header. Refer to ICMP documentation for more details if you are not - sure what to enter - - Click Add. - + Firewall Rules + By default, all incoming traffic to the public IP address is rejected by the firewall. 
To + allow external traffic, you can open firewall ports by specifying firewall rules. You can + optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to + allow only incoming requests from certain IP addresses. + You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is + used, outside access is instead controlled through the use of security groups. See . + In an advanced zone, you can also create egress firewall rules by using the virtual router. + For more information, see . + Firewall rules can be created using the Firewall tab in the Management Server UI. This tab + is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the + &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to + "true." + To create a firewall rule: + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + Click the name of the network where you want to work with. + + + Click View IP Addresses. + + + Click the IP address you want to work with. + + + Click the Configuration tab and fill in the following values. + + + Source CIDR. (Optional) To accept only traffic from + IP addresses within a particular address block, enter a CIDR or a comma-separated list + of CIDRs. Example: 192.168.0.0/22. Leave empty to allow all CIDRs. + + + Protocol. The communication protocol in use on the + opened port(s). + + + Start Port and End Port. The port(s) you want to + open on the firewall. If you are opening a single port, use the same number in both + fields + + + ICMP Type and ICMP Code. Used only if Protocol is + set to ICMP. Provide the type and code required by the ICMP protocol to fill out the + ICMP header. Refer to ICMP documentation for more details if you are not sure what to + enter + + + + + Click Add. + +
diff --git a/docs/en-US/images/egress-firewall-rule.png b/docs/en-US/images/egress-firewall-rule.png new file mode 100644 index 0000000000000000000000000000000000000000..fa1d8ecd0bda33c2dd324dc7b60cd14e98118026 GIT binary patch literal 10413 zcmdUVXIPWl(sn=u1f)ps2-2c}O7Be!AT5%BbOk8}k={EAQ0%PhWfpSAkZaN z;CnkMA@J!XS9l2gBXE5PQ2~|ovMmD-M7GM>${U~&rwsElUw=Q5OxKZ`zv>}uE3)5%8(%Al?@NuTO>2JKxAA41`P+|KW;u?E zjIydW2EqZAH*vQvsW4<3-zO~;8ois8B@_?hxjyR0WXBUseeGLBPG0uW@ulmx-FW0g zs~l}$*4A(X>-z?_)@o1XytkA*ZD3E^x6dX!Eg(AYg8A*kv|)@Wt1QWih-#DC1ko=2ZU>0TDUD?EXw9 zAib3YLL$nDyygG3O$#*pUsJh0J|dLDe7QEa`!{j?3rHC8zgTFoyz&dp@Zi;m>K}^1 z!$?FxK$D!p;cwhlm~P2O&%~Dgj?divCFUX6u$IoHgPrkrr0LWD%-=xexbLNg$xgu5X=v7zopv9uj9q%dXGv=Ph}R2kJIk^Hd*s!(t!{Qh5xA2 z{q=oa))4z)bv2gsANhMpb6jCohNZ~99-SV4Y{n%5Hu!!q?38T(>;PhPwt6eZi~et6 z0is|ks`zg`Ws9mHNMb(fjeXY`ClBG@Z?i-R3)pc>JGXol0F_B$ddt%cmGNvVodM4a zEce*xk>GQh;F}k!SF*jB^v*w~HrWbSTA}>%@5w>b>g#B%zUbN^^X-kNI?rr)q-hBs>xV`jw9I4j?$kBwCaz8r24MfA^z-`oct9 z_+CvywYAKw`ocg+GGG&@mWxf4vaIj|0Sin|wMv9ob0MU$Ib?!TpFWk;RDCt55`bIdR)w8uF@*sy2{2`3}=fZ^`GMlRI*f z#+$wxH^6e7w&|TGij}9Ol5EAF=GH_uE$UA7x(&Jt=$qx(hkmsaHm*MBKq9(H2cOR@8bfPXPq}eEU11BR?^{~)*?ZDj(=BSzT4{ib)@I+)@ji|| zMT7|Y0A~%Gjdjh+w;!bClxNM{|amutwLi4pkX$cG4l^5;g}E<_PJ>` z_um>&-$Y|)Kl80Q9w9N`-(XO0LU$tylYBW=V;xgqcMfZPJk}rBo>P-M6TRg)dtl zX33aXFhBQ=5`I*GNR$FwbqKN$118?$hj)~Y`8zy&;g_4LL^2Ahz*0?TAE$ahuQu=x zk?0g~Hn=ygZ-u55$M;eAnIH=TyRAq@MigRN&v7pm7N*fEF*&&9%(1u2eHi}Sw8@Z> zlOHwvN^tkwDw~E6kQG|P>^=yZVVlD@KJaei7v8$37yxmo9#!H!IH-(hP!>}-A3y4P zz26-|Z>xo%iuLSjU@{UyVt+VChvPx&b|zqQy92np0C@RKqZ_IZ!~!zfRk9e77+HfaZ;YK`Uy=G^6!?t}i5|*gx_PWG@W92xQVP6H za?2R+CY?DA5wU%oe<&^en`s zn5oT4Xy)itjrFJ~C+FA750l*%s6UoCn3RC#4$O@dq#5xh)S5axRC#3W?pQxjZuB(z z;q-@(QAC~I-_RgU+64PA)Zz9tBby>){kGPbsJdSK+AeSO5;Z7+l9!~Li-IJyWYoGc z`-&!g#d9PWJ|(b5O5kEH+8;DXG;e(8<+O48?s1I7j?w5Mdf^8b^>r?3DtdOppuz09 z$ILIMANPyb$4ZDP7g#2JgXXTVhOu8)>a{0k(M!uRy%~PnG^QpXa3Eaa<}n-Y7Ap}q z(T&CI52_$NnFO|5p|{y6>THq+!ubT?J>}9EW(m@Jq>+*T^*se7k&H1+`vyS!{^kEd 
z+ig%n)O9)8zcA_6_Fgf>k52X2aBscYZO7Zae`;_65vOU@)1j`M6=_Fq zg09098#TgpHN~celiac@$MZ1c5sP|BMY5-x_5|aX&^N|%2H9E#Hm``U?k*ZJ8TbBh zr_n2+51ykyFeyq>P<(5*>CX*Ta6Eup>xJMxJYW+G5y<>eiI|^!Fwl$DmM$W(xb_WF zbf4};gS9J6zwGDEj#||Cia&~c(!*o5e7CvbwydIyo%K}kR>2wX&&mCG5ibt=(Oz0n zQF6m6~2Rx6GpPx0#@JPnY_7#o!xb1C-OS>y2Cg<-={rrUJe!ZOXzzM5G z$UT!1uHh7_^dLbV#r0zOv;WOwj~Bo5L_MR~>p$8yrZt<^F3yaP&xghzxu!P#Cex9x zTnsT)>nf6{s@+ZrykYv4HYG4>d!aR&g!l7@sV&5Pb;#VL_vsNt=-Dmrg&IlZ@EwJp z7NL1ElVrZ4eFrQ(vxYwycy0CUPKwUdgpEEf9~L8!=c%(`aZFVI{t2Zs3}QiNwkBf}iP9U`Hfk zNW=HZt{uz{n(7|6CK@5>i29lE7~(&`|-9qVu`PxL1Wl)ZEaoJ2Ku!ia|_PCy8?tr>2r#n?Cw-sc=3!<`qii3g5lj3W+5tuy@Q)r1fcYvojB=n|sJ- zLv=V+$HJIkPDOa?A-J0jcklRHy2+k{eobVWQ&N<`JbGAAS^$Z_wLoUjH?~7xt7xmV zn}+DyT%)GxApcy^yQ}0#H#8e|Hc&mk$I0Zp`7D0aXg4`Y3Y_}%8YGvF{@t4}3h}3% z>}sE6JvZ)I2ur?M`Dn>e8^wezRpgCXSkX0bhLs=D##Rfon*Y8*$@2DXhv&eO`xAC z$JwLCwabUICOYzSvbXOHPxf_ta75LD^0j!mxRKA&hzut_;X)GlLG&9<{+zLKVXBJa zysDq|S0*T246q~^bY@zhZqm8A{}k=ufn&y7+hVNvyjY_tSkfVe1in5{zKBG|AA4$_ zFAt#LEHh|1#ksy8W{P~9?{*i^X?Kp)ao-v0Gp(Tp(^)BBi~m~J=;4|o^PRP&9@kqE zi9}(dpN#6z#5b(!zbG@I)XjcLD8TRBpCZ%X+V!czb8|AWxpi);-V?IF=S`)KZWZlU zb-(c}_Z6WYS>5AQ6pKT%M)F)A>hN&CUA-WzA>w=;!;BxRwfkU~sM(U|kUdoN``k25yh-4WnIESUkF)BtB_VN_?}J&bRN1ci^+GN`r%h zd}eRd(4jvSTzoG?E|tUIduYBVn3+oR?$X}+&$}Y@MWLkujpOR7<2jk8Kab{?NTu84R%WOdA{|-LQF8ZOWP!L0wHpHvUQqyh&-Sn-o_#;`Kmrz)c^b z7orv&J@U1^{qT~#`K$b;ko+`6#aT!C4wq}vI-Mm=Tzj7J0vcPEzGHCKn`9kORRE`` z)lmO9Z@5boBvYb^6+AS=Py{4$&WN-cUAht(X=O}LSqWyi>i^bexD;_;)K7CU^3A)! 
z#AhaQ=)9dPcLg+6-{rC;RFe6y1d-hph;B6V0Ry~hp$vcR-LTQ~I!VZiP%9x6f`pKQ zy@>>at`!;Re2_>BulMkYght~0(pZ4lQI}Myu|NDY@r2b49`53PTS^#5WD8H!Zss$K z*e0d{0Hl(SWz=pdT@01y1TXtbF!${N4%le`4%1LRP@<%kPG#Be=9J=?Ng1p3?F3Mm&LQ+-zYyX+^X=vPmC+bvh{q@n$(7an) zA*gA1!*r(xmNP`Gk`Auipu5kMR!00EiXnr1i8Z7l1#~7&C7=ytOsY&JYNpN2cB+~5 zNDOHJH@tz|DCOM<#my`+WqoT%P5-5@t+?Ti!YLMocLP zm}73YC+ANQTb%qXn+jlrSmzPn>f~aB$LCn3Nqptc|Fm|t8#o~bAA2Y`Cv5hXQd3e^ z0^ovUUK1*8Z`@nm`LazK6R2AeMerG#`p$vjDK!!UA-t>2N~=Glt8n=AnQLxKy_;TL z6l#lu_6b7fyj@agk=+?qy(3X(E`>RH%Z;NK>f{Rl4x{;2^6}YNN?OKmHk@c8;pU+7 zG9SJuXU36V&{g@a0~*%IfCR@S`RacUp=g8d-e;j7s=OmEx+;y~gp)P+A`vzWz~y&E zdoVp|+Zc&(GAnR4kz5)|ANRLdKk%>TPe*#$Vj#Yu#%Az^ckTQW!#|y1jTDKKRlxpi ziRJ>LBGzdV7$m|f7dFlgfuF}}8l2wMU2Z4S?)Dafz}FL*3NeXWRGp>4R$C2;o;qh# zwdk)?*GAoPgIr*l%MMbQl6I~kO*}Pt9?eK;kC+3D9T%=yAbUI=hK&n5FX{=HG(F!} zY<^T=D(71fAm?)f#WxLH!;hHL{56JLnyetWF`?8I^ou2c*cyGNdmLV$;MFYq@3t4L zndJ`0o&Mr3(FJG&1><( zY#IB{CTcxZDKl6Q&No(*@P*KSbIKbNa8*}Bo01ay7RFb!El#Xmd}M}p(58A>U=dpJ zU)f)Y(iGf2eI54M6D&EoEF#1z?N{+IwGL{0XT`e+`{660iM!#3~&=#&@`ZeuZ6`Oe}c3bAdjcxiHq zkup6Eb(G~yt_2@*W`plV_to7eze@^9bnh2m71d^iLw3D#6L!y9<^xM%*wes0!(0)+ zf*mY;+VywmD%HC>vx4#UWc&5n^>0wM_(&AC?qVMsACKIWXVRHpq3)N}t8c=QE2d9r z_37m!Q7ax&3>M9J5sdv(WHGH5HiO!Fsh@YO>DX1e)s;OsLi}Kg4eal5-zEnql@E|qM(@zf=(#-QBzW(hy_u1;9?P3lBkX`;lMolWi z^6_TZik!VHX89n34lyok}Pw zTrf|lz%$xN9UdH5ie;R1I9+ZPk#ni7EPvQ(R`OHLFwfc%js=kUEq<+gut=!5wN0CVDa_LUD;5*FL-x_PT&xSMHeqCW9swcFQ!| zV>!Xi;i;caUDy86@5TUm4R2P1Q$9Y<^WW*nO+-)&beB)VvJ}xeb)yHmt1g-M%*xLN zu`#4e65#sc%h1;H?!ABEFa&K z75d;J;x6QB6tZMS`YE?0=0V2p+{i=Pa<~pdc=+93iv~5MZRha4U&}<@Rs2poJjqVx1 zqzVC-2GMlwLKm(eiHFKxfyPP^|ErX386>94qzxLTpe%{&RCy9Z3Z&X<{@Dei*OXWb zyk^iv`?Li!W}R(O(xuC2jD6trVSc|1S7AUH9o745gi*{yytY@e;$Ybw|0cL1A|hV0 z^d8+Hd^0G{6_ylmMpzh7@bw-EIls-TP_3nMZh!+p0*G~EraJwm3WH7$5D<1vbC;#T zo391g=nDfV0m-sdkgRT+5umaDY`TQjKh)EZm|fq~HNQX$FPS)L49x07Xc(<%r!t>g zsx*dDEC^LTsq03)8`5~=cR&lj-(4vNt?f|{{yU-Njcip$_dpIgWDGzUw3ZKU2Vx*l z54;*vLa)=qH4@Z66$Bbn=OylDOB73HC)5UtakHtv&~%yi^whhx2}wVHt6AM)aahtL 
zOCC7XY)rD=Ix}xZvSP{E`MHo-YeJ;w)O`PtCs4v>0IH}a-19!N?A3I@sQd0vATUY{ zjIw^X$9j{#Lnv%^g8ve}KANHn)T0Ap?Xn?RqYt$SYQZvH)(Mu_>OMI~R~R6?5~gs`OiBley^b@c zrJ{ryNX(}eMxPqY5jrt<)h{mP(eA32L<7Fyt@Hw@*Vbk)sKjtnm(k%O22M5Y6(!|L zIArV=hnAiINr+lO1S6i_k^;Uz%Iq3;i?s41onT^kCna#ds7WhP044ja zkD)=cZ^ZybQN|5w0z5hZJXX&->Tdq6K92W;4}noLLT_2{A@;rpshd)Z^MmUXn;;gE z;ER4|C6Uahm-0@q%!!6C_*{Lfaq7306ReH@^|d;ex1csB(G8i#`>ZkvZPx7pK!Whh zgH~P37vE%E?_AP3Y~OJvxh$`J+yQP+18VMT611eo9XEfW-2}3EC4qAGcx=5+=|DIo zF95XAFz#0=d(N2<`XkuR%HoX!2#bPf>oGcPEI%l`NqaO&L?>xu7CnfJ-AK^Y;sl&l ze*w)1bv8rM1E-ZUjjT660f0d4`hZShXvxxY_EfqD)P_O#z>0>2|?Lpg=evaK3U1~ zh|aM+3zA0EAI^u)V*1AkD|JuJ6~$fWh}a<~PWBH`yl4N9Gq?cG zVgNY*%_4qAH2-mtPeO`D=cOha6BKy zGWE-JzviQ=i2(D#fMhlwGF}_Xe=w43ybYjWy1rh@)(%~k z#-M|KY8s1*(TQdTeKn(fB6Squ87mRsdD1>|zI^_CvfN?gC|`4KNVI}AW31-+sHASD zf>qO6{cziiG`LK8<{2S`w>f&Gc6T%}bU(tu04FU@PnoY>uU`yKz;a3-9Q(5(?qgYl zR)~A+cQo1H$#g}t^v1H7vHb&_LcCJbtge5E&tIw}nXtz*55yGZmHh(W2B9EzjQ0>5 z^uhNaVYhwg(=sfo-%B9br1$#HPu5FMP6tI!za^hfAs1n?l8zIf<{X>15=(_olN~1l z|B6$Ucl6U`(Yp=dHMrjZvwP9CFT?5k+h9ojNgth{X=WB85Cabz5JdOWY}V;+K1O0< z0_S)quNljL`y{&~K8Y&GlQ|7N-&K`!>hQ*NuKZ?B+BTB$*$M<6|iLaZ$gh+$ke^N?=jj{`cTHcQj zxJ--8V=M~EbT{m_5W6xs|Hyi%q5C_)6@c>o)LK9x$A_`|))9tQ&PXx}OWRwKz*pNk40hcGz1z_jB1^SdS0bB#G}jJLR8`DE@iw?oo1- zRX1I}`EWaW1KmL1lEOd4noR1>2G40bvb0={n3A9R3Ru=$VeRvQH8cySeCg>z11g%V zrZK0UMGxHfR;e7N!lW}+a6tdW+jgpibHCdaHaR@JpP69BSNL>#lV9ZYxyOc22BE}? 
zahNBpTEYJ8W9SmM*+E=zcDxy>Vmxl=xxwiy(+X8XOcg?s2T&+xiPrH-98%NZM5PZ^ z2pMF}C~(h&Q_&JYNx}@W2ec(X{iQ!(?@bGLC z%4g>{e^dR;6YopEkbx84yT+qv*t2Kf8|XN3bllI$3t zftV2DZ{+JWMmb{uim1QO6)7MEOnI6IVtrq8w7_tWV*KbcXUJ>J7X==h?3VS}WbOx+8PP-wm9tb^S`ao$5mF z!Xo2~sao2NgLT<$m%kQ-9I@DM?2vi8Uokg4UtEzHc=_i4?~;NA!ht)ck#11sh=#Ag zvYyLFiZ-X)OKE2JS<|wB&3;B!z7}@!9yI5#(ff}UDgNUA<+{??YL0xue-t96Ff#`i zp#{G%@Bi?^(G~O@&*bQz?s`yAFcJ9ui}~s4FbR`dQ{gIEMtNa+_c3B~l7IT`Afh!s qAf=TJsO0$5V5cQ*m&Ke@<(Sm0m75LF0zWYXX{hPkD^YnI^8Wy_n`BD> literal 0 HcmV?d00001 diff --git a/docs/en-US/ip-forwarding-firewalling.xml b/docs/en-US/ip-forwarding-firewalling.xml index c154b078da3..54e18b7cfbc 100644 --- a/docs/en-US/ip-forwarding-firewalling.xml +++ b/docs/en-US/ip-forwarding-firewalling.xml @@ -3,28 +3,30 @@ %BOOK_ENTITIES; ]> -
- IP Forwarding and Firewalling - By default, all incoming traffic to the public IP address is rejected. All outgoing traffic from the guests is translated via NAT to the public IP address and is allowed. - To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For example, you can use a firewall rule to open a range of ports on the public IP address, such as 33 through 44. Then use port forwarding rules to direct traffic from individual ports within that range to specific ports on user VMs. For example, one port forwarding rule could route incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. - - + IP Forwarding and Firewalling + By default, all incoming traffic to the public IP address is rejected. All outgoing traffic + from the guests is translated via NAT to the public IP address and is allowed. + To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For + example, you can use a firewall rule to open a range of ports on the public IP address, such as + 33 through 44. Then use port forwarding rules to direct traffic from individual ports within + that range to specific ports on user VMs. For example, one port forwarding rule could route + incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. + +
From b54db07d20893a437bb327f08d891553aa98aef8 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Fri, 15 Feb 2013 22:35:45 +0530 Subject: [PATCH 035/486] added db.properties to usage test resources --- usage/test/resources/db.properties | 70 ++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 usage/test/resources/db.properties diff --git a/usage/test/resources/db.properties b/usage/test/resources/db.properties new file mode 100644 index 00000000000..18bf54c2b61 --- /dev/null +++ b/usage/test/resources/db.properties @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ + +# management server clustering parameters, change cluster.node.IP to the machine IP address +# in which the management server(Tomcat) is running +cluster.node.IP=127.0.0.1 +cluster.servlet.port=9090 + +# CloudStack database settings +db.cloud.username=cloud +db.cloud.password=cloud +db.root.password= +db.cloud.host=localhost +db.cloud.port=3306 +db.cloud.name=cloud + +# CloudStack database tuning parameters +db.cloud.maxActive=250 +db.cloud.maxIdle=30 +db.cloud.maxWait=10000 +db.cloud.autoReconnect=true +db.cloud.validationQuery=SELECT 1 +db.cloud.testOnBorrow=true +db.cloud.testWhileIdle=true +db.cloud.timeBetweenEvictionRunsMillis=40000 +db.cloud.minEvictableIdleTimeMillis=240000 +db.cloud.poolPreparedStatements=false +db.cloud.url.params=prepStmtCacheSize=517&cachePrepStmts=true&prepStmtCacheSqlLimit=4096 + +# usage database settings +db.usage.username=cloud +db.usage.password=cloud +db.usage.host=localhost +db.usage.port=3306 +db.usage.name=cloud_usage + +# usage database tuning parameters +db.usage.maxActive=100 +db.usage.maxIdle=30 +db.usage.maxWait=10000 +db.usage.autoReconnect=true + +# awsapi database settings +db.awsapi.name=cloudbridge + +# Simulator database settings +db.simulator.username=cloud +db.simulator.password=cloud +db.simulator.host=localhost +db.simulator.port=3306 +db.simulator.name=simulator +db.simulator.maxActive=250 +db.simulator.maxIdle=30 +db.simulator.maxWait=10000 +db.simulator.autoReconnect=true From 8e88a4295f1659af2253002b060c249538d7dc31 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 15 Feb 2013 23:11:27 +0530 Subject: [PATCH 036/486] Reset a VM on reboot compute offering --- ui/scripts/configuration.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ui/scripts/configuration.js b/ui/scripts/configuration.js index c85a98acf9e..1e44ecfd688 100644 --- a/ui/scripts/configuration.js +++ b/ui/scripts/configuration.js @@ -136,6 +136,14 @@ isChecked: true, docID: 'helpComputeOfferingPublic' }, + + 
isVolatile:{ + label:'isVolatile', + isBoolean:true, + isChecked:false + + }, + domainId: { label: 'label.domain', docID: 'helpComputeOfferingDomain', @@ -196,6 +204,10 @@ $.extend(data, { limitcpuuse: (args.data.cpuCap == "on") }); + + $.extend(data, { + isvolatile: (args.data.isVolatile == "on") + }); if(args.$form.find('.form-item[rel=domainId]').css("display") != "none") { $.extend(data, { From 5147fb16acbb2085820a72911d033cea0e5dfe11 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Fri, 15 Feb 2013 10:40:39 -0800 Subject: [PATCH 037/486] CLOUDSTACK-1253: Remove @author tag. --- .../com/cloud/exception/RequestLimitException.java | 1 - api/src/org/apache/cloudstack/api/ApiErrorCode.java | 1 - .../org/apache/cloudstack/query/QueryService.java | 1 - .../cloudstack/ratelimit/ApiRateLimitService.java | 1 - .../cloudstack/ratelimit/EhcacheLimitStore.java | 1 - .../org/apache/cloudstack/ratelimit/LimitStore.java | 1 - .../org/apache/cloudstack/ratelimit/StoreEntry.java | 1 - .../apache/cloudstack/ratelimit/StoreEntryImpl.java | 1 - .../cloudstack/ratelimit/integration/APITest.java | 2 -- .../ratelimit/integration/LoginResponse.java | 2 -- .../integration/RateLimitIntegrationTest.java | 3 --- .../src/com/cloud/api/query/ViewResponseHelper.java | 1 - .../com/cloud/api/query/vo/ControlledViewEntity.java | 1 - server/src/com/cloud/api/query/vo/HostJoinVO.java | 1 - .../com/cloud/api/query/vo/StoragePoolJoinVO.java | 1 - server/test/com/cloud/api/APITest.java | 2 -- server/test/com/cloud/api/ListPerfTest.java | 2 -- server/test/com/cloud/api/LoginResponse.java | 2 -- utils/test/com/cloud/utils/db/TransactionTest.java | 12 +++++------- 19 files changed, 5 insertions(+), 32 deletions(-) diff --git a/api/src/com/cloud/exception/RequestLimitException.java b/api/src/com/cloud/exception/RequestLimitException.java index 0142f8e8726..ebaac802649 100644 --- a/api/src/com/cloud/exception/RequestLimitException.java +++ b/api/src/com/cloud/exception/RequestLimitException.java @@ -21,7 
+21,6 @@ import com.cloud.utils.exception.CloudRuntimeException; /** * Exception thrown if number of requests is over api rate limit set. - * @author minc * */ public class RequestLimitException extends CloudRuntimeException { diff --git a/api/src/org/apache/cloudstack/api/ApiErrorCode.java b/api/src/org/apache/cloudstack/api/ApiErrorCode.java index ee28fa05878..69bd0284cef 100644 --- a/api/src/org/apache/cloudstack/api/ApiErrorCode.java +++ b/api/src/org/apache/cloudstack/api/ApiErrorCode.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.api; /** * Enum class for various API error code used in CloudStack - * @author minc * */ public enum ApiErrorCode { diff --git a/api/src/org/apache/cloudstack/query/QueryService.java b/api/src/org/apache/cloudstack/query/QueryService.java index bfe7b855c81..c3f86aabb7f 100644 --- a/api/src/org/apache/cloudstack/query/QueryService.java +++ b/api/src/org/apache/cloudstack/query/QueryService.java @@ -58,7 +58,6 @@ import com.cloud.exception.PermissionDeniedException; /** * Service used for list api query. 
- * @author minc * */ public interface QueryService { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java index c5b715019b6..a135556a502 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java @@ -22,7 +22,6 @@ import com.cloud.utils.component.PluggableService; /** * Provide API rate limit service - * @author minc * */ public interface ApiRateLimitService extends PluggableService{ diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java index 659cf81b0e6..ee7c528bd07 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java @@ -23,7 +23,6 @@ import net.sf.ehcache.constructs.blocking.LockTimeoutException; /** * A Limit store implementation using Ehcache. - * @author minc * */ public class EhcacheLimitStore implements LimitStore { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java index a5e086b3029..373d9652ee9 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java @@ -20,7 +20,6 @@ import com.cloud.user.Account; /** * Interface to define how an api limit store should work. 
- * @author minc * */ public interface LimitStore { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java index 76e8a2d9281..05a7029dcb0 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.ratelimit; /** * Interface for each entry in LimitStore. - * @author minc * */ public interface StoreEntry { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java index e8143e52370..9f10fe68a41 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java @@ -20,7 +20,6 @@ import java.util.concurrent.atomic.AtomicInteger; /** * Implementation of limit store entry. 
- * @author minc * */ public class StoreEntryImpl implements StoreEntry { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java index 7701b1515b0..e75e852f0b7 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java @@ -37,8 +37,6 @@ import com.google.gson.Gson; /** * Base class for API Test * - * @author Min Chen - * */ public abstract class APITest { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java index 719f39c0a5e..61a178033af 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java @@ -24,8 +24,6 @@ import com.google.gson.annotations.SerializedName; /** * Login Response object * - * @author Min Chen - * */ public class LoginResponse extends BaseResponse { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java index 72d354c6c77..f9352333d12 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java @@ -34,9 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; /** * Test fixture to do integration rate limit test. * Currently we commented out this test suite since it requires a real MS and Db running. 
- * - * @author Min Chen - * */ public class RateLimitIntegrationTest extends APITest { diff --git a/server/src/com/cloud/api/query/ViewResponseHelper.java b/server/src/com/cloud/api/query/ViewResponseHelper.java index 55d84bb5af4..9e612b07d1b 100644 --- a/server/src/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/com/cloud/api/query/ViewResponseHelper.java @@ -67,7 +67,6 @@ import com.cloud.user.UserContext; /** * Helper class to generate response from DB view VO objects. - * @author minc * */ public class ViewResponseHelper { diff --git a/server/src/com/cloud/api/query/vo/ControlledViewEntity.java b/server/src/com/cloud/api/query/vo/ControlledViewEntity.java index 12557504807..014abfaa3c0 100644 --- a/server/src/com/cloud/api/query/vo/ControlledViewEntity.java +++ b/server/src/com/cloud/api/query/vo/ControlledViewEntity.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * This is the interface for all VO classes representing DB views created for previous ControlledEntity. * - * @author minc * */ public interface ControlledViewEntity extends ControlledEntity, InternalIdentity, Identity { diff --git a/server/src/com/cloud/api/query/vo/HostJoinVO.java b/server/src/com/cloud/api/query/vo/HostJoinVO.java index a3796b97eba..0b8f6721325 100644 --- a/server/src/com/cloud/api/query/vo/HostJoinVO.java +++ b/server/src/com/cloud/api/query/vo/HostJoinVO.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * Host DB view. - * @author minc * */ @Entity diff --git a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java index fd837bd5d88..89e79e5eea5 100644 --- a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * Storage Pool DB view. 
- * @author minc * */ @Entity diff --git a/server/test/com/cloud/api/APITest.java b/server/test/com/cloud/api/APITest.java index 0b040abc3f5..63e08719f4b 100644 --- a/server/test/com/cloud/api/APITest.java +++ b/server/test/com/cloud/api/APITest.java @@ -36,8 +36,6 @@ import com.google.gson.Gson; /** * Base class for API Test * - * @author Min Chen - * */ public abstract class APITest { diff --git a/server/test/com/cloud/api/ListPerfTest.java b/server/test/com/cloud/api/ListPerfTest.java index b8cb97eb8f0..8437ca42770 100644 --- a/server/test/com/cloud/api/ListPerfTest.java +++ b/server/test/com/cloud/api/ListPerfTest.java @@ -33,8 +33,6 @@ import com.cloud.utils.exception.CloudRuntimeException; * Test fixture to do performance test for list command * Currently we commented out this test suite since it requires a real MS and Db running. * - * @author Min Chen - * */ public class ListPerfTest extends APITest { diff --git a/server/test/com/cloud/api/LoginResponse.java b/server/test/com/cloud/api/LoginResponse.java index 097ae42c999..0f58374f187 100644 --- a/server/test/com/cloud/api/LoginResponse.java +++ b/server/test/com/cloud/api/LoginResponse.java @@ -24,8 +24,6 @@ import com.google.gson.annotations.SerializedName; /** * Login Response object * - * @author Min Chen - * */ public class LoginResponse extends BaseResponse { diff --git a/utils/test/com/cloud/utils/db/TransactionTest.java b/utils/test/com/cloud/utils/db/TransactionTest.java index b952be2c28b..101a533f836 100644 --- a/utils/test/com/cloud/utils/db/TransactionTest.java +++ b/utils/test/com/cloud/utils/db/TransactionTest.java @@ -30,18 +30,16 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; /** - * A test fixture to test APIs or bugs found for Transaction class. This test fixture will do one time setup before + * A test fixture to test APIs or bugs found for Transaction class. 
This test fixture will do one time setup before * all its testcases to set up a test db table, and then tear down these test db artifacts after all testcases are run. - * - * @author Min Chen - * + * */ public class TransactionTest { @BeforeClass public static void oneTimeSetup() { Connection conn = null; - PreparedStatement pstmt = null; + PreparedStatement pstmt = null; try { conn = Transaction.getStandaloneConnection(); @@ -75,7 +73,7 @@ public class TransactionTest { * When a transaction is set to use user-managed db connection, for each following db statement, we should see * that the same db connection is reused rather than acquiring a new one each time in typical transaction model. */ - public void testUserManagedConnection() { + public void testUserManagedConnection() { DbTestDao testDao = ComponentContext.inject(DbTestDao.class); Transaction txn = Transaction.open("SingleConnectionThread"); Connection conn = null; @@ -97,7 +95,7 @@ public class TransactionTest { } catch (SQLException e) { Assert.fail(e.getMessage()); } finally { - txn.transitToAutoManagedConnection(Transaction.CLOUD_DB); + txn.transitToAutoManagedConnection(Transaction.CLOUD_DB); txn.close(); if (conn != null) { From 4d634980e27617b302de795fe62e06b7d9408cb3 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Sat, 16 Feb 2013 09:08:42 +0530 Subject: [PATCH 038/486] exclude usage unit test since it requires db connection --- usage/pom.xml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/usage/pom.xml b/usage/pom.xml index 4b408c4313b..f7d9dd18c9e 100644 --- a/usage/pom.xml +++ b/usage/pom.xml @@ -53,6 +53,15 @@ + + org.apache.maven.plugins + maven-surefire-plugin + + + com/cloud/usage/UsageManagerTest.java + + + org.apache.maven.plugins maven-dependency-plugin From a621048869fcfaf73e8a0cab75e83e86b7e5e589 Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Fri, 15 Feb 2013 20:37:18 -0800 Subject: [PATCH 039/486] CLOUDSTACK-1307 Noticed NPE when we put host in maintenance mode in 
clustered management setup Changes: - Use of class variable of type GenericSearchBuilder causes issues when multiple threads invoke this method to findCapacity - Changing the scope of the variable to be local to these methods. --- server/src/com/cloud/capacity/dao/CapacityDaoImpl.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java index baaf39164cd..c33bfafc3af 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -62,7 +62,6 @@ public class CapacityDaoImpl extends GenericDaoBase implements private final SearchBuilder _hostIdTypeSearch; private final SearchBuilder _hostOrPoolIdSearch; - protected GenericSearchBuilder SummedCapacitySearch; private final SearchBuilder _allFieldsSearch; @Inject protected StoragePoolDao _storagePoolDao; @@ -309,7 +308,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId()); SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumReserved", Func.SUM, SummedCapacitySearch.entity().getReservedCapacity()); @@ -528,7 +527,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findByClusterPodZone(Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("sumUsed", Func.SUM, 
SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity()); SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType()); @@ -563,7 +562,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity()); SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType()); From 87b668b71b34c93e9ba85d4708a1c04f4020f6bf Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Mon, 11 Feb 2013 16:53:12 +0530 Subject: [PATCH 040/486] CLOUDSTACK-863: Fix Non-printable characters in api call Non-printable characters results in empty pages for all users loading the corrupted object in the web interface. It also results in the API call results getting truncated with an error when it encounters the non-printable characters. Every decoded parameter value is checked for control character using OWASP's ESAPI library. 
Signed-off-by: Rohit Yadav --- server/src/com/cloud/api/ApiServer.java | 6 ++++++ utils/pom.xml | 5 +++++ utils/src/com/cloud/utils/StringUtils.java | 5 +++++ 3 files changed, 16 insertions(+) diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index d99d188b5d5..be02f5e48f5 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -326,6 +326,12 @@ public class ApiServer implements HttpRequestHandler { continue; } String[] value = (String[]) params.get(key); + // fail if parameter value contains ASCII control (non-printable) characters + String newValue = StringUtils.stripControlCharacters(value[0]); + if ( !newValue.equals(value[0]) ) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Received value " + value[0] + " for parameter " + + key + " is invalid, contains illegal ASCII non-printable characters"); + } paramMap.put(key, value[0]); } diff --git a/utils/pom.xml b/utils/pom.xml index 937fad35c0f..e4fd2b0f7e6 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -157,6 +157,11 @@ reflections ${cs.reflections.version} + + org.owasp.esapi + esapi + 2.0.1 + install diff --git a/utils/src/com/cloud/utils/StringUtils.java b/utils/src/com/cloud/utils/StringUtils.java index 8f0a503abef..14ff4b1ae94 100644 --- a/utils/src/com/cloud/utils/StringUtils.java +++ b/utils/src/com/cloud/utils/StringUtils.java @@ -23,6 +23,8 @@ import java.util.Iterator; import java.util.List; import java.util.regex.Pattern; +import org.owasp.esapi.StringUtilities; + // StringUtils exists in Apache Commons Lang, but rather than import the entire JAR to our system, for now // just implement the method needed public class StringUtils { @@ -150,6 +152,9 @@ public class StringUtils { return cleanResult; } + public static String stripControlCharacters(String s) { + return StringUtilities.stripControls(s); + } public static int formatForOutput(String text, int start, int columns, char separator) { if (start 
>= text.length()) { From 46ab973143eeb2461038cf62e89d5e43d71bf45b Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 12:28:00 +0530 Subject: [PATCH 041/486] db: Fix Upgrade40to41 and add cleaning path schema-40to410-cleanup.sql Signed-off-by: Rohit Yadav --- .../com/cloud/upgrade/dao/Upgrade40to41.java | 37 +++---------------- setup/db/db/schema-40to410-cleanup.sql | 21 +++++++++++ 2 files changed, 27 insertions(+), 31 deletions(-) create mode 100644 setup/db/db/schema-40to410-cleanup.sql diff --git a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java index d3a8cd5a9d3..2ebe9800756 100644 --- a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java @@ -32,47 +32,24 @@ import java.util.UUID; import org.apache.log4j.Logger; -/** - * @author htrippaers - * - */ public class Upgrade40to41 implements DbUpgrade { final static Logger s_logger = Logger.getLogger(Upgrade40to41.class); - /** - * - */ - public Upgrade40to41() { - // TODO Auto-generated constructor stub - } - - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getUpgradableVersionRange() - */ @Override public String[] getUpgradableVersionRange() { return new String[] { "4.0.0", "4.1.0" }; } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getUpgradedVersion() - */ @Override public String getUpgradedVersion() { return "4.1.0"; } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#supportsRollingUpgrade() - */ @Override public boolean supportsRollingUpgrade() { return false; } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getPrepareScripts() - */ @Override public File[] getPrepareScripts() { String script = Script.findScript("", "db/schema-40to410.sql"); @@ -83,21 +60,20 @@ public class Upgrade40to41 implements DbUpgrade { return new File[] { new File(script) }; } - /* (non-Javadoc) - * @see 
com.cloud.upgrade.dao.DbUpgrade#performDataMigration(java.sql.Connection) - */ @Override public void performDataMigration(Connection conn) { upgradeEIPNetworkOfferings(conn); upgradeEgressFirewallRules(conn); } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getCleanupScripts() - */ @Override public File[] getCleanupScripts() { - return new File[0]; + String script = Script.findScript("", "db/schema-40to410-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-302to40-cleanup.sql"); + } + + return new File[] { new File(script) }; } private void upgradeEIPNetworkOfferings(Connection conn) { @@ -133,7 +109,6 @@ public class Upgrade40to41 implements DbUpgrade { } } - private void upgradeEgressFirewallRules(Connection conn) { PreparedStatement pstmt = null; ResultSet rs = null; diff --git a/setup/db/db/schema-40to410-cleanup.sql b/setup/db/db/schema-40to410-cleanup.sql new file mode 100644 index 00000000000..411b568de4a --- /dev/null +++ b/setup/db/db/schema-40to410-cleanup.sql @@ -0,0 +1,21 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +--; +-- Schema cleanup from 4.0.0 to 4.1.0; +--; + From f2ae6dcda9b93f77f1d9ac262e86e742ce19b0f1 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 12:44:45 +0530 Subject: [PATCH 042/486] db: Remove and merge 4.1-new-db-schema.sql to schema-40to410.sql Signed-off-by: Rohit Yadav --- developer/pom.xml | 4 - setup/bindir/cloud-setup-databases.in | 2 +- setup/db/4.1-new-db-schema.sql | 142 -------------------------- setup/db/db/schema-40to410.sql | 125 +++++++++++++++++++++++ 4 files changed, 126 insertions(+), 147 deletions(-) delete mode 100644 setup/db/4.1-new-db-schema.sql diff --git a/developer/pom.xml b/developer/pom.xml index 81bb3ffc208..ae474022c25 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -159,8 +159,6 @@ ${basedir}/target/db/create-schema-premium.sql ${basedir}/target/db/create-schema-view.sql - - ${basedir}/target/db/4.1-new-db-schema.sql ${basedir}/target/db/templates.sql @@ -193,7 +191,5 @@ - - diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in index 8330f35e659..c47d334a2d0 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -211,7 +211,7 @@ for full help ""), ) - for f in ["create-database","create-schema", "create-database-premium","create-schema-premium", "create-schema-view", "4.1-new-db-schema"]: + for f in ["create-database","create-schema", "create-database-premium","create-schema-premium", "create-schema-view"]: p = os.path.join(self.dbFilesPath,"%s.sql"%f) if not os.path.exists(p): continue text = file(p).read() diff --git a/setup/db/4.1-new-db-schema.sql b/setup/db/4.1-new-db-schema.sql deleted file mode 100644 index d60eca2f890..00000000000 --- a/setup/db/4.1-new-db-schema.sql +++ /dev/null @@ -1,142 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one --- or more contributor license agreements. See the NOTICE file --- distributed with this work for additional information --- regarding copyright ownership. 
The ASF licenses this file --- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliance --- with the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, --- software distributed under the License is distributed on an --- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --- KIND, either express or implied. See the License for the --- specific language governing permissions and limitations --- under the License. - -use cloud; - -alter table vm_template add image_data_store_id bigint unsigned; -alter table vm_template add size bigint unsigned; -alter table vm_template add state varchar(255); -alter table vm_template add update_count bigint unsigned; -alter table vm_template add updated datetime; -alter table storage_pool add storage_provider_id bigint unsigned; -alter table storage_pool add scope varchar(255); -alter table storage_pool modify id bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL; -alter table template_spool_ref add state varchar(255); -alter table template_spool_ref add update_count bigint unsigned; -alter table volumes add disk_type varchar(255); -alter table volumes drop foreign key `fk_volumes__account_id`; -alter table vm_instance add column disk_offering_id bigint unsigned; -alter table vm_instance add column cpu int(10) unsigned; -alter table vm_instance add column ram bigint unsigned; -alter table vm_instance add column owner varchar(255); -alter table vm_instance add column speed int(10) unsigned; -alter table vm_instance add column host_name varchar(255); -alter table vm_instance add column display_name varchar(255); - -alter table data_center add column owner varchar(255); -alter table data_center add column created datetime COMMENT 'date created'; -alter table data_center add column lastUpdated datetime COMMENT 'last updated'; -alter table data_center add column 
engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -alter table host_pod_ref add column owner varchar(255); -alter table host_pod_ref add column created datetime COMMENT 'date created'; -alter table host_pod_ref add column lastUpdated datetime COMMENT 'last updated'; -alter table host_pod_ref add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -alter table host add column owner varchar(255); -alter table host add column lastUpdated datetime COMMENT 'last updated'; -alter table host add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; - - -alter table cluster add column owner varchar(255); -alter table cluster add column created datetime COMMENT 'date created'; -alter table cluster add column lastUpdated datetime COMMENT 'last updated'; -alter table cluster add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -CREATE TABLE `cloud`.`object_datastore_ref` ( - `id` bigint unsigned NOT NULL auto_increment, - `datastore_id` bigint unsigned NOT NULL, - `datastore_role` varchar(255) NOT NULL, - `object_id` bigint unsigned NOT NULL, - `object_type` varchar(255) NOT NULL, - `created` DATETIME NOT NULL, - `last_updated` DATETIME, - `job_id` varchar(255), - `download_pct` int(10) unsigned, - `download_state` varchar(255), - `error_str` varchar(255), - `local_path` varchar(255), - `install_path` varchar(255), - `size` bigint unsigned COMMENT 'the size of the template on the pool', - `state` varchar(255) NOT NULL, - `update_count` bigint unsigned NOT NULL, - `updated` DATETIME, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`data_store_provider` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', - `uuid` varchar(255) NOT NULL COMMENT 'uuid of 
primary data store provider', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`image_data_store` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of data store', - `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', - `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', - `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', - `scope` varchar(255) COMMENT 'scope of data store', - `uuid` varchar(255) COMMENT 'uuid of data store', - PRIMARY KEY(`id`), - CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`vm_compute_tags` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `compute_tag` varchar(255) NOT NULL COMMENT 'name of tag', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`vm_root_disk_tags` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `root_disk_tag` varchar(255) NOT NULL COMMENT 'name of tag', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`vm_network_map` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `network_id` bigint unsigned NOT NULL COMMENT 'network id', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`vm_reservation` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40) NOT NULL COMMENT 'reservation id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `data_center_id` bigint unsigned NOT NULL COMMENT 'zone id', - `pod_id` bigint unsigned NOT NULL COMMENT 'pod id', - `cluster_id` bigint unsigned NOT NULL COMMENT 
'cluster id', - `host_id` bigint unsigned NOT NULL COMMENT 'host id', - `created` datetime COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - CONSTRAINT `uc_vm_reservation__uuid` UNIQUE (`uuid`), - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`volume_reservation` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_reservation_id` bigint unsigned NOT NULL COMMENT 'id of the vm reservation', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `volume_id` bigint unsigned NOT NULL COMMENT 'volume id', - `pool_id` bigint unsigned NOT NULL COMMENT 'pool assigned to the volume', - CONSTRAINT `fk_vm_pool_reservation__vm_reservation_id` FOREIGN KEY (`vm_reservation_id`) REFERENCES `vm_reservation`(`id`) ON DELETE CASCADE, - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 3ea8bbdbb73..9a9441df50c 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -19,6 +19,131 @@ -- Schema upgrade from 4.0.0 to 4.1.0; --; +use cloud; + +alter table vm_template add image_data_store_id bigint unsigned; +alter table vm_template add size bigint unsigned; +alter table vm_template add state varchar(255); +alter table vm_template add update_count bigint unsigned; +alter table vm_template add updated datetime; +alter table storage_pool add storage_provider_id bigint unsigned; +alter table storage_pool add scope varchar(255); +alter table storage_pool modify id bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL; +alter table template_spool_ref add state varchar(255); +alter table template_spool_ref add update_count bigint unsigned; +alter table volumes add disk_type varchar(255); +alter table volumes drop foreign key `fk_volumes__account_id`; +alter table vm_instance add column disk_offering_id bigint unsigned; +alter table vm_instance add column cpu int(10) unsigned; +alter table vm_instance add 
column ram bigint unsigned; +alter table vm_instance add column owner varchar(255); +alter table vm_instance add column speed int(10) unsigned; +alter table vm_instance add column host_name varchar(255); +alter table vm_instance add column display_name varchar(255); + +alter table data_center add column owner varchar(255); +alter table data_center add column created datetime COMMENT 'date created'; +alter table data_center add column lastUpdated datetime COMMENT 'last updated'; +alter table data_center add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; +alter table host_pod_ref add column owner varchar(255); +alter table host_pod_ref add column created datetime COMMENT 'date created'; +alter table host_pod_ref add column lastUpdated datetime COMMENT 'last updated'; +alter table host_pod_ref add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; +alter table host add column owner varchar(255); +alter table host add column lastUpdated datetime COMMENT 'last updated'; +alter table host add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; + +alter table cluster add column owner varchar(255); +alter table cluster add column created datetime COMMENT 'date created'; +alter table cluster add column lastUpdated datetime COMMENT 'last updated'; +alter table cluster add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; +CREATE TABLE `cloud`.`object_datastore_ref` ( + `id` bigint unsigned NOT NULL auto_increment, + `datastore_id` bigint unsigned NOT NULL, + `datastore_role` varchar(255) NOT NULL, + `object_id` bigint unsigned NOT NULL, + `object_type` varchar(255) NOT NULL, + `created` DATETIME NOT NULL, + `last_updated` DATETIME, + `job_id` varchar(255), + `download_pct` int(10) unsigned, + `download_state` varchar(255), + `error_str` varchar(255), + `local_path` varchar(255), 
+ `install_path` varchar(255), + `size` bigint unsigned COMMENT 'the size of the template on the pool', + `state` varchar(255) NOT NULL, + `update_count` bigint unsigned NOT NULL, + `updated` DATETIME, + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`data_store_provider` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', + `uuid` varchar(255) NOT NULL COMMENT 'uuid of primary data store provider', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`image_data_store` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of data store', + `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', + `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', + `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', + `scope` varchar(255) COMMENT 'scope of data store', + `uuid` varchar(255) COMMENT 'uuid of data store', + PRIMARY KEY(`id`), + CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`vm_compute_tags` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `compute_tag` varchar(255) NOT NULL COMMENT 'name of tag', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`vm_root_disk_tags` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `root_disk_tag` varchar(255) NOT NULL COMMENT 'name of tag', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE `cloud`.`vm_network_map` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_id` bigint unsigned NOT NULL 
COMMENT 'vm id', + `network_id` bigint unsigned NOT NULL COMMENT 'network id', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE `cloud`.`vm_reservation` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) NOT NULL COMMENT 'reservation id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone id', + `pod_id` bigint unsigned NOT NULL COMMENT 'pod id', + `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id', + `host_id` bigint unsigned NOT NULL COMMENT 'host id', + `created` datetime COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + CONSTRAINT `uc_vm_reservation__uuid` UNIQUE (`uuid`), + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`volume_reservation` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_reservation_id` bigint unsigned NOT NULL COMMENT 'id of the vm reservation', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `volume_id` bigint unsigned NOT NULL COMMENT 'volume id', + `pool_id` bigint unsigned NOT NULL COMMENT 'pool assigned to the volume', + CONSTRAINT `fk_vm_pool_reservation__vm_reservation_id` FOREIGN KEY (`vm_reservation_id`) REFERENCES `vm_reservation`(`id`) ON DELETE CASCADE, + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + CREATE TABLE `cloud`.`s3` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `uuid` varchar(40), From 44c8a33cf2c944715a08efc41db955b1fe095bf1 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 12:47:45 +0530 Subject: [PATCH 043/486] db: Fix commas which should be dots in create-schema, fix debug msg Signed-off-by: Rohit Yadav --- server/src/com/cloud/upgrade/dao/Upgrade40to41.java | 2 +- setup/db/create-schema.sql | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java 
b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java index 2ebe9800756..e7fea237700 100644 --- a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java @@ -70,7 +70,7 @@ public class Upgrade40to41 implements DbUpgrade { public File[] getCleanupScripts() { String script = Script.findScript("", "db/schema-40to410-cleanup.sql"); if (script == null) { - throw new CloudRuntimeException("Unable to find db/schema-302to40-cleanup.sql"); + throw new CloudRuntimeException("Unable to find db/schema-40to410-cleanup.sql"); } return new File[] { new File(script) }; diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index b4f992d4287..369a075cf6f 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -145,12 +145,12 @@ DROP TABLE IF EXISTS `cloud`.`region`; DROP TABLE IF EXISTS `cloud`.`s2s_customer_gateway`; DROP TABLE IF EXISTS `cloud`.`s2s_vpn_gateway`; DROP TABLE IF EXISTS `cloud`.`s2s_vpn_connection`; -DROP TABLE IF EXISTS `cloud`,`external_nicira_nvp_devices`; -DROP TABLE IF EXISTS `cloud`,`nicira_nvp_nic_map`; -DROP TABLE IF EXISTS `cloud`,`s3`; -DROP TABLE IF EXISTS `cloud`,`template_s3_ref`; -DROP TABLE IF EXISTS `cloud`,`nicira_nvp_router_map`; -DROP TABLE IF EXISTS `cloud`,`external_bigswitch_vns_devices`; +DROP TABLE IF EXISTS `cloud`.`external_nicira_nvp_devices`; +DROP TABLE IF EXISTS `cloud`.`nicira_nvp_nic_map`; +DROP TABLE IF EXISTS `cloud`.`s3`; +DROP TABLE IF EXISTS `cloud`.`template_s3_ref`; +DROP TABLE IF EXISTS `cloud`.`nicira_nvp_router_map`; +DROP TABLE IF EXISTS `cloud`.`external_bigswitch_vns_devices`; DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroup_policy_map`; DROP TABLE IF EXISTS `cloud`.`autoscale_policy_condition_map`; DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroups`; From 0e354473f799fd3a387747c8fdb85d65ecac8fea Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 13:28:12 +0530 Subject: [PATCH 044/486] db: Add stubs for Upgrade410to420, 
schema-410to420.sql and schema-410to420-cleanup.sql Signed-off-by: Rohit Yadav --- .../cloud/upgrade/dao/Upgrade410to420.java | 73 +++++++++++++++++++ setup/db/db/schema-410to420-cleanup.sql | 21 ++++++ setup/db/db/schema-410to420.sql | 21 ++++++ 3 files changed, 115 insertions(+) create mode 100644 server/src/com/cloud/upgrade/dao/Upgrade410to420.java create mode 100644 setup/db/db/schema-410to420-cleanup.sql create mode 100644 setup/db/db/schema-410to420.sql diff --git a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java new file mode 100644 index 00000000000..a43727caaa5 --- /dev/null +++ b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.UUID; + +import org.apache.log4j.Logger; + +public class Upgrade410to420 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade410to420.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "4.1.0", "4.2.0" }; + } + + @Override + public String getUpgradedVersion() { + return "4.2.0"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-410to420.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-410to420.sql"); + } + + return new File[] { new File(script) }; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public File[] getCleanupScripts() { + String script = Script.findScript("", "db/schema-410to420-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-410to420-cleanup.sql"); + } + + return new File[] { new File(script) }; + } +} diff --git a/setup/db/db/schema-410to420-cleanup.sql b/setup/db/db/schema-410to420-cleanup.sql new file mode 100644 index 00000000000..51970b21b89 --- /dev/null +++ b/setup/db/db/schema-410to420-cleanup.sql @@ -0,0 +1,21 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema cleanup from 4.1.0 to 4.2.0; +--; + diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql new file mode 100644 index 00000000000..d1f90be449c --- /dev/null +++ b/setup/db/db/schema-410to420.sql @@ -0,0 +1,21 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +--; +-- Schema upgrade from 4.1.0 to 4.2.0; +--; + From 295b327281182c2767be57f8c3b75b8126452ea2 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 13:28:43 +0530 Subject: [PATCH 045/486] client: In pom.xml fix jetty:run to include utilities/scripts/db/ in its classpath Signed-off-by: Rohit Yadav --- client/pom.xml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/pom.xml b/client/pom.xml index bd8283d7c82..38d0c4ee943 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -229,9 +229,12 @@ -XX:MaxPermSize=512m -Xmx2g - /client ${project.build.directory}/${project.build.finalName}/WEB-INF/web.xml ${project.build.directory}/${project.build.finalName} + + /client + ${project.build.directory}/utilities/scripts/db/;${project.build.directory}/utilities/scripts/db/db/ + From 8094e933b08d830822dbdc711e5caad3c067b5d7 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 17:09:37 +0530 Subject: [PATCH 046/486] ApiServer: Get rid of finding system account and user at init() time Signed-off-by: Rohit Yadav --- server/src/com/cloud/api/ApiServer.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index be02f5e48f5..0ef84c29ef2 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -157,8 +157,6 @@ public class ApiServer implements HttpRequestHandler { @Inject List _pluggableServices; @Inject List _apiAccessCheckers; - private Account _systemAccount = null; - private User _systemUser = null; @Inject private RegionManager _regionMgr = null; private static int _workerCount = 0; @@ -182,9 +180,6 @@ public class ApiServer implements HttpRequestHandler { } public void init() { - _systemAccount = _accountMgr.getSystemAccount(); - _systemUser = _accountMgr.getSystemUser(); - Integer apiPort = null; // api port, null by default SearchCriteria sc = _configDao.createSearchCriteria(); 
sc.addAnd("name", SearchCriteria.Op.EQ, "integration.api.port"); @@ -278,7 +273,7 @@ public class ApiServer implements HttpRequestHandler { try { // always trust commands from API port, user context will always be UID_SYSTEM/ACCOUNT_ID_SYSTEM - UserContext.registerContext(_systemUser.getId(), _systemAccount, null, true); + UserContext.registerContext(_accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount(), null, true); sb.insert(0, "(userId=" + User.UID_SYSTEM + " accountId=" + Account.ACCOUNT_ID_SYSTEM + " sessionId=" + null + ") "); String responseText = handleRequest(parameterMap, responseType, sb); sb.append(" 200 " + ((responseText == null) ? 0 : responseText.length())); From c63dbb88042d7eabea2664c2b608c51792fc9f18 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 17:12:26 +0530 Subject: [PATCH 047/486] db: Refactor new change in create-schema/premium to schema40-410.sql - Move changes since 4.0 to schema upgrade path (schema40-410.sql) - Comment out some table names where we're trying to copy uuid from id, they don't exists - We don't run above step for tables which are newly created for 410 and don't exist in 4.0 for example autoscale related ones, code is commented and not removed - Drop indexes which are removed before dropping the column - Comment out insertion, as for default region we're inserting the same in code, in ConfigurationServerImpl:createDefaultRegion(), fix same in premium Testing; Deployed fresh 4.0 database, compiled and ran mgmt server. It did a smooth rolling upgrade from 4.0 to 4.1.0 with no database exceptions or any other db error. 
TODO: - 4.2.0 relates changes like ipv6 should go into its schema-410to420.sql Signed-off-by: Rohit Yadav --- setup/db/create-schema-premium.sql | 1 - setup/db/create-schema.sql | 277 +---------------------- setup/db/db/schema-40to410.sql | 348 +++++++++++++++++++++++------ 3 files changed, 288 insertions(+), 338 deletions(-) diff --git a/setup/db/create-schema-premium.sql b/setup/db/create-schema-premium.sql index e30812ba68e..2f86c0b9ab5 100644 --- a/setup/db/create-schema-premium.sql +++ b/setup/db/create-schema-premium.sql @@ -137,7 +137,6 @@ CREATE TABLE `cloud_usage`.`account` ( `cleanup_needed` tinyint(1) NOT NULL default '0', `network_domain` varchar(100) COMMENT 'Network domain name of the Vms of the account', `default_zone_id` bigint unsigned, - `region_id` int unsigned NOT NULL, CONSTRAINT `uc_account__uuid` UNIQUE (`uuid`), PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 369a075cf6f..3570dfeaad0 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -207,7 +207,7 @@ CREATE TABLE `cloud`.`version` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT INTO `version` (`version`, `updated`, `step`) VALUES('4.1.0', now(), 'Complete'); +INSERT INTO `version` (`version`, `updated`, `step`) VALUES('4.0.0', now(), 'Complete'); CREATE TABLE `cloud`.`op_it_work` ( `id` char(40) COMMENT 'reservation id', @@ -352,8 +352,6 @@ CREATE TABLE `cloud`.`network_offerings` ( `elastic_ip_service` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides elastic ip service', `elastic_lb_service` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides elastic lb service', `specify_ip_ranges` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides an ability to define ip ranges', - `inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is this network offering LB provider is in inline mode', - 
`is_persistent` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides an ability to create persistent networks', PRIMARY KEY (`id`), INDEX `i_network_offerings__system_only`(`system_only`), INDEX `i_network_offerings__removed`(`removed`), @@ -528,14 +526,12 @@ CREATE TABLE `cloud`.`snapshots` ( `removed` datetime COMMENT 'Date removed. not null if removed', `backup_snap_id` varchar(255) COMMENT 'Back up uuid of the snapshot', `swift_id` bigint unsigned COMMENT 'which swift', - `s3_id` bigint unsigned COMMENT 'S3 to which this snapshot will be stored', `sechost_id` bigint unsigned COMMENT 'secondary storage host id', `prev_snap_id` bigint unsigned COMMENT 'Id of the most recent snapshot', `hypervisor_type` varchar(32) NOT NULL COMMENT 'hypervisor that the snapshot was taken under', `version` varchar(32) COMMENT 'snapshot version', PRIMARY KEY (`id`), CONSTRAINT `uc_snapshots__uuid` UNIQUE (`uuid`), - CONSTRAINT `fk_snapshots__s3_id` FOREIGN KEY `fk_snapshots__s3_id` (`s3_id`) REFERENCES `s3` (`id`), INDEX `i_snapshots__removed`(`removed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -711,7 +707,7 @@ CREATE TABLE `cloud`.`op_dc_vnet_alloc` ( PRIMARY KEY (`id`), UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id__account_id`(`vnet`, `data_center_id`, `account_id`), INDEX `i_op_dc_vnet_alloc__dc_taken`(`data_center_id`, `taken`), - UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`), + UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `data_center_id`), CONSTRAINT `fk_op_dc_vnet_alloc__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_op_dc_vnet_alloc__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -794,10 +790,12 @@ CREATE TABLE `cloud`.`load_balancer_stickiness_policies` ( CREATE TABLE `cloud`.`inline_load_balancer_nic_map` ( 
`id` bigint unsigned NOT NULL auto_increment, + `load_balancer_id` bigint unsigned NOT NULL, `public_ip_address` char(40) NOT NULL, `nic_id` bigint unsigned NULL COMMENT 'nic id', PRIMARY KEY (`id`), UNIQUE KEY (`nic_id`), + CONSTRAINT `fk_inline_load_balancer_nic_map__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_inline_load_balancer_nic_map__nic_id` FOREIGN KEY(`nic_id`) REFERENCES `nics`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -950,7 +948,6 @@ CREATE TABLE `cloud`.`user` ( `timezone` varchar(30) default NULL, `registration_token` varchar(255) default NULL, `is_registered` tinyint NOT NULL DEFAULT 0 COMMENT '1: yes, 0: no', - `region_id` int unsigned NOT NULL, `incorrect_login_attempts` integer unsigned NOT NULL DEFAULT 0, PRIMARY KEY (`id`), INDEX `i_user__removed`(`removed`), @@ -1079,7 +1076,6 @@ CREATE TABLE `cloud`.`vm_instance` ( `uuid` varchar(40), `instance_name` varchar(255) NOT NULL COMMENT 'name of the vm instance running on the hosts', `state` varchar(32) NOT NULL, - `desired_state` varchar(32) NULL, `vm_template_id` bigint unsigned, `guest_os_id` bigint unsigned NOT NULL, `private_mac_address` varchar(17), @@ -1169,7 +1165,6 @@ CREATE TABLE `cloud`.`upload` ( `id` bigint unsigned NOT NULL auto_increment, `host_id` bigint unsigned NOT NULL, `type_id` bigint unsigned NOT NULL, - `uuid` varchar(40), `type` varchar(255), `mode` varchar(255), `created` DATETIME NOT NULL, @@ -1302,7 +1297,6 @@ CREATE TABLE `cloud`.`domain` ( `state` char(32) NOT NULL default 'Active' COMMENT 'state of the domain', `network_domain` varchar(255), `type` varchar(255) NOT NULL DEFAULT 'Normal' COMMENT 'type of the domain - can be Normal or Project', - `region_id` int unsigned NOT NULL, PRIMARY KEY (`id`), UNIQUE (parent, name, removed), INDEX `i_domain__path`(`path`), @@ -1321,7 +1315,6 @@ CREATE TABLE `cloud`.`account` ( `cleanup_needed` tinyint(1) NOT NULL default '0', 
`network_domain` varchar(255), `default_zone_id` bigint unsigned, - `region_id` int unsigned NOT NULL, PRIMARY KEY (`id`), INDEX i_account__removed(`removed`), CONSTRAINT `fk_account__default_zone_id` FOREIGN KEY `fk_account__default_zone_id`(`default_zone_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, @@ -1384,7 +1377,6 @@ CREATE TABLE `cloud`.`alert` ( `last_sent` DATETIME NULL COMMENT 'Last time the alert was sent', `resolved` DATETIME NULL COMMENT 'when the alert status was resolved (available memory no longer at critical level, etc.)', PRIMARY KEY (`id`), - INDEX `last_sent` (`last_sent` DESC), CONSTRAINT `uc_alert__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1395,7 +1387,7 @@ CREATE TABLE `cloud`.`async_job` ( `session_key` varchar(64) COMMENT 'all async-job manage to apply session based security enforcement', `instance_type` varchar(64) COMMENT 'instance_type and instance_id work together to allow attaching an instance object to a job', `instance_id` bigint unsigned, - `job_cmd` varchar(255) NOT NULL COMMENT 'command name', + `job_cmd` varchar(64) NOT NULL COMMENT 'command name', `job_cmd_originator` varchar(64) COMMENT 'command originator', `job_cmd_info` text COMMENT 'command parameter info', `job_cmd_ver` int(1) COMMENT 'command version', @@ -1428,15 +1420,16 @@ CREATE TABLE `cloud`.`sync_queue` ( `id` bigint unsigned NOT NULL auto_increment, `sync_objtype` varchar(64) NOT NULL, `sync_objid` bigint unsigned NOT NULL, + `queue_proc_msid` bigint, `queue_proc_number` bigint COMMENT 'process number, increase 1 for each iteration', + `queue_proc_time` datetime COMMENT 'last time to process the queue', `created` datetime COMMENT 'date created', `last_updated` datetime COMMENT 'date created', - `queue_size` smallint DEFAULT 0 COMMENT 'number of items being processed by the queue', - `queue_size_limit` smallint DEFAULT 1 COMMENT 'max number of items the queue can process concurrently', PRIMARY KEY (`id`), UNIQUE 
`i_sync_queue__objtype__objid`(`sync_objtype`, `sync_objid`), INDEX `i_sync_queue__created`(`created`), - INDEX `i_sync_queue__last_updated`(`last_updated`) + INDEX `i_sync_queue__last_updated`(`last_updated`), + INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`stack_maid` ( @@ -1457,15 +1450,13 @@ CREATE TABLE `cloud`.`sync_queue_item` ( `content_id` bigint, `queue_proc_msid` bigint COMMENT 'owner msid when the queue item is being processed', `queue_proc_number` bigint COMMENT 'used to distinguish raw items and items being in process', - `queue_proc_time` datetime COMMENT 'when processing started for the item', `created` datetime COMMENT 'time created', PRIMARY KEY (`id`), CONSTRAINT `fk_sync_queue_item__queue_id` FOREIGN KEY `fk_sync_queue_item__queue_id` (`queue_id`) REFERENCES `sync_queue` (`id`) ON DELETE CASCADE, INDEX `i_sync_queue_item__queue_id`(`queue_id`), INDEX `i_sync_queue_item__created`(`created`), INDEX `i_sync_queue_item__queue_proc_number`(`queue_proc_number`), - INDEX `i_sync_queue_item__queue_proc_msid`(`queue_proc_msid`), - INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`) + INDEX `i_sync_queue_item__queue_proc_msid`(`queue_proc_msid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`disk_offering` ( @@ -1909,37 +1900,6 @@ CREATE TABLE `cloud`.`swift` ( CONSTRAINT `uc_swift__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE `cloud`.`s3` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `access_key` varchar(20) NOT NULL COMMENT ' The S3 access key', - `secret_key` varchar(40) NOT NULL COMMENT ' The S3 secret key', - `end_point` varchar(1024) COMMENT ' The S3 host', - `bucket` varchar(63) NOT NULL COMMENT ' The S3 host', - `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS', - `connection_timeout` integer COMMENT ' The amount of time to wait (in 
milliseconds) when initially establishing a connection before giving up and timing out.', - `max_error_retry` integer COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).', - `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is closed.', - `created` datetime COMMENT 'date the s3 first signed on', - PRIMARY KEY (`id`), - CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`template_s3_ref` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `s3_id` bigint unsigned NOT NULL COMMENT ' Associated S3 instance id', - `template_id` bigint unsigned NOT NULL COMMENT ' Associated template id', - `created` DATETIME NOT NULL COMMENT ' The creation timestamp', - `size` bigint unsigned COMMENT ' The size of the object', - `physical_size` bigint unsigned DEFAULT 0 COMMENT ' The physical size of the object', - PRIMARY KEY (`id`), - CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`), - CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_template_s3_ref__template_id` FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`), - INDEX `i_template_s3_ref__s3_id`(`s3_id`), - INDEX `i_template_s3_ref__template_id`(`template_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE `cloud`.`op_host_transfer` ( `id` bigint unsigned UNIQUE NOT NULL COMMENT 'Id of the host', `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered from', @@ -2130,6 +2090,7 @@ CREATE TABLE `cloud`.`external_load_balancer_devices` ( `device_state` varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'state (enabled/disabled/shutdown) of the device', `allocation_state` varchar(32) 
NOT NULL DEFAULT 'Free' COMMENT 'Allocation state (Free/Shared/Dedicated/Provider) of the device', `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance is provisioned for dedicated use only', + `is_inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer will be used in in-line configuration with firewall', `is_managed` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer appliance is provisioned and its life cycle is managed by by cloudstack', `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external load balancer device', `parent_host_id` bigint unsigned COMMENT 'if the load balancer appliance is cloudstack managed, then host id on which this appliance is provisioned', @@ -2306,16 +2267,6 @@ CREATE TABLE `cloud`.`netscaler_pod_ref` ( CONSTRAINT `fk_ns_pod_ref__device_id` FOREIGN KEY (`external_load_balancer_device_id`) REFERENCES `external_load_balancer_devices`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`region` ( - `id` int unsigned NOT NULL UNIQUE, - `name` varchar(255) NOT NULL UNIQUE, - `end_point` varchar(255) NOT NULL, - `api_key` varchar(255), - `secret_key` varchar(255), - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE `cloud`.`vpc` ( `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', `uuid` varchar(40) NOT NULL, @@ -2477,211 +2428,5 @@ CREATE TABLE `cloud`.`nicira_nvp_nic_map` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE `cloud`.`nicira_nvp_router_map` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `logicalrouter_uuid` varchar(255) NOT NULL UNIQUE COMMENT 'nicira uuid of logical router', - `network_id` bigint unsigned NOT NULL UNIQUE COMMENT 'cloudstack id of the network', - PRIMARY KEY (`id`), - CONSTRAINT `fk_nicira_nvp_router_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - 
-CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(255) UNIQUE, - `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', - `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', - `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', - PRIMARY KEY (`id`), - CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`counter` ( - `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `source` varchar(255) NOT NULL COMMENT 'source e.g. 
netscaler, snmp', - `name` varchar(255) NOT NULL COMMENT 'Counter name', - `value` varchar(255) NOT NULL COMMENT 'Value in case of source=snmp', - `removed` datetime COMMENT 'date removed if not null', - `created` datetime NOT NULL COMMENT 'date created', - PRIMARY KEY (`id`), - CONSTRAINT `uc_counter__uuid` UNIQUE (`uuid`), - INDEX `i_counter__removed`(`removed`), - INDEX `i_counter__source`(`source`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`conditions` ( - `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `counter_id` bigint unsigned NOT NULL COMMENT 'Counter Id', - `threshold` bigint unsigned NOT NULL COMMENT 'threshold value for the given counter', - `relational_operator` char(2) COMMENT 'relational operator to be used upon the counter and condition', - `domain_id` bigint unsigned NOT NULL COMMENT 'domain the Condition belongs to', - `account_id` bigint unsigned NOT NULL COMMENT 'owner of this Condition', - `removed` datetime COMMENT 'date removed if not null', - `created` datetime NOT NULL COMMENT 'date created', - PRIMARY KEY (`id`), - CONSTRAINT `fk_conditions__counter_id` FOREIGN KEY `fk_condition__counter_id`(`counter_id`) REFERENCES `counter`(`id`), - CONSTRAINT `fk_conditions__account_id` FOREIGN KEY `fk_condition__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_conditions__domain_id` FOREIGN KEY `fk_condition__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_conditions__uuid` UNIQUE (`uuid`), - INDEX `i_conditions__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmprofiles` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `zone_id` bigint unsigned NOT NULL, - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `autoscale_user_id` bigint unsigned NOT NULL, - `service_offering_id` bigint unsigned NOT NULL, - 
`template_id` bigint unsigned NOT NULL, - `other_deploy_params` varchar(1024) COMMENT 'other deployment parameters that is in addition to zoneid,serviceofferingid,domainid', - `destroy_vm_grace_period` int unsigned COMMENT 'the time allowed for existing connections to get closed before a vm is destroyed', - `counter_params` varchar(1024) COMMENT 'the parameters for the counter to be used to get metric information from VMs', - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmprofiles__domain_id` FOREIGN KEY `fk_autoscale_vmprofiles__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmprofiles__account_id` FOREIGN KEY `fk_autoscale_vmprofiles__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmprofiles__autoscale_user_id` FOREIGN KEY `fk_autoscale_vmprofiles__autoscale_user_id` (`autoscale_user_id`) REFERENCES `user`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_autoscale_vmprofiles__uuid` UNIQUE (`uuid`), - INDEX `i_autoscale_vmprofiles__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_policies` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `duration` int unsigned NOT NULL, - `quiet_time` int unsigned NOT NULL, - `action` varchar(15), - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_policies__domain_id` FOREIGN KEY `fk_autoscale_policies__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_policies__account_id` FOREIGN KEY `fk_autoscale_policies__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_autoscale_policies__uuid` 
UNIQUE (`uuid`), - INDEX `i_autoscale_policies__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmgroups` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `zone_id` bigint unsigned NOT NULL, - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `load_balancer_id` bigint unsigned NOT NULL, - `min_members` int unsigned DEFAULT 1, - `max_members` int unsigned NOT NULL, - `member_port` int unsigned NOT NULL, - `interval` int unsigned NOT NULL, - `profile_id` bigint unsigned NOT NULL, - `state` varchar(255) NOT NULL COMMENT 'enabled or disabled, a vmgroup is disabled to stop autoscaling activity', - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmgroup__autoscale_vmprofile_id` FOREIGN KEY(`profile_id`) REFERENCES `autoscale_vmprofiles`(`id`), - CONSTRAINT `fk_autoscale_vmgroup__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__domain_id` FOREIGN KEY `fk_autoscale_vmgroups__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__account_id` FOREIGN KEY `fk_autoscale_vmgroups__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__zone_id` FOREIGN KEY `fk_autoscale_vmgroups__zone_id`(`zone_id`) REFERENCES `data_center`(`id`), - CONSTRAINT `uc_autoscale_vmgroups__uuid` UNIQUE (`uuid`), - INDEX `i_autoscale_vmgroups__removed`(`removed`), - INDEX `i_autoscale_vmgroups__load_balancer_id`(`load_balancer_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_policy_condition_map` ( - `id` bigint unsigned NOT NULL auto_increment, - `policy_id` bigint unsigned NOT NULL, - `condition_id` bigint unsigned NOT NULL, - PRIMARY KEY (`id`), - CONSTRAINT 
`fk_autoscale_policy_condition_map__policy_id` FOREIGN KEY `fk_autoscale_policy_condition_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_policy_condition_map__condition_id` FOREIGN KEY `fk_autoscale_policy_condition_map__condition_id` (`condition_id`) REFERENCES `conditions` (`id`), - INDEX `i_autoscale_policy_condition_map__policy_id`(`policy_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmgroup_policy_map` ( - `id` bigint unsigned NOT NULL auto_increment, - `vmgroup_id` bigint unsigned NOT NULL, - `policy_id` bigint unsigned NOT NULL, - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmgroup_policy_map__vmgroup_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__vmgroup_id` (`vmgroup_id`) REFERENCES `autoscale_vmgroups` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroup_policy_map__policy_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`), - INDEX `i_autoscale_vmgroup_policy_map__vmgroup_id`(`vmgroup_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`region_sync` ( - `id` bigint unsigned NOT NULL auto_increment, - `region_id` int unsigned NOT NULL, - `api` varchar(1024) NOT NULL, - `created` datetime NOT NULL COMMENT 'date created', - `processed` tinyint NOT NULL default '0', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, 
value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now()); -CREATE TABLE `cloud`.`vm_snapshots` ( - `id` bigint(20) unsigned NOT NULL auto_increment COMMENT 'Primary Key', - `uuid` varchar(40) NOT NULL, - `name` varchar(255) NOT NULL, - `display_name` varchar(255) default NULL, - `description` varchar(255) default NULL, - `vm_id` bigint(20) unsigned NOT NULL, - `account_id` bigint(20) unsigned NOT NULL, - `domain_id` bigint(20) unsigned NOT NULL, - `vm_snapshot_type` varchar(32) default NULL, - `state` varchar(32) NOT NULL, - `parent` bigint unsigned default NULL, - `current` int(1) unsigned default NULL, - `update_count` bigint unsigned NOT NULL DEFAULT 0, - `updated` datetime default NULL, - `created` datetime default NULL, - `removed` datetime default NULL, - PRIMARY KEY (`id`), - CONSTRAINT UNIQUE KEY `uc_vm_snapshots_uuid` (`uuid`), - INDEX `vm_snapshots_name` (`name`), - INDEX `vm_snapshots_vm_id` (`vm_id`), - INDEX `vm_snapshots_account_id` (`account_id`), - INDEX `vm_snapshots_display_name` (`display_name`), - INDEX `vm_snapshots_removed` (`removed`), - INDEX `vm_snapshots_parent` (`parent`), - CONSTRAINT `fk_vm_snapshots_vm_id__vm_instance_id` FOREIGN KEY `fk_vm_snapshots_vm_id__vm_instance_id` (`vm_id`) REFERENCES `vm_instance` (`id`), - CONSTRAINT `fk_vm_snapshots_account_id__account_id` FOREIGN KEY `fk_vm_snapshots_account_id__account_id` (`account_id`) REFERENCES `account` (`id`), - CONSTRAINT `fk_vm_snapshots_domain_id__domain_id` FOREIGN KEY `fk_vm_snapshots_domain_id__domain_id` (`domain_id`) REFERENCES `domain` (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`user_ipv6_address` ( - `id` bigint unsigned NOT NULL UNIQUE auto_increment, - `uuid` varchar(40), - `account_id` bigint unsigned NULL, - `domain_id` bigint unsigned NULL, - `ip_address` char(50) NOT NULL, - `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', - `vlan_id` bigint unsigned NOT NULL, - `state` 
char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', - `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', - `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', - `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', - `physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', - `created` datetime NULL COMMENT 'Date this ip was allocated to someone', - PRIMARY KEY (`id`), - UNIQUE (`ip_address`, `source_network_id`), - CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), - CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), - CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - SET foreign_key_checks = 1; diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 9a9441df50c..9e639c623a2 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -40,6 +40,7 @@ alter table vm_instance add column owner varchar(255); alter table vm_instance add column speed int(10) unsigned; alter table vm_instance add column host_name varchar(255); alter table vm_instance add column display_name varchar(255); +alter table vm_instance add column `desired_state` varchar(32) NULL; alter table data_center add column owner varchar(255); alter table 
data_center add column created datetime COMMENT 'date created'; @@ -171,7 +172,7 @@ CREATE TABLE `cloud`.`template_s3_ref` ( CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`), CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_template_s3_ref__template_id` FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`), - INDEX `i_template_s3_ref__swift_id`(`s3_id`), + INDEX `i_template_s3_ref__s3_id`(`s3_id`), INDEX `i_template_s3_ref__template_id`(`template_id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -189,11 +190,19 @@ ALTER TABLE `cloud`.`external_load_balancer_devices` DROP COLUMN `is_inline`; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network','DEFAULT','NetworkManager','network.dhcp.nondefaultnetwork.setgateway.guestos','Windows','The guest OS\'s name start with this fields would result in DHCP server response gateway information even when the network it\'s on is not default network. 
Names are separated by comma.'); -ALTER TABLE `sync_queue` ADD `queue_size` SMALLINT NOT NULL DEFAULT '0' COMMENT 'number of items being processed by the queue'; +ALTER TABLE `cloud`.`sync_queue` ADD `queue_size` SMALLINT NOT NULL DEFAULT '0' COMMENT 'number of items being processed by the queue'; -ALTER TABLE `sync_queue` ADD `queue_size_limit` SMALLINT NOT NULL DEFAULT '1' COMMENT 'max number of items the queue can process concurrently'; +ALTER TABLE `cloud`.`sync_queue` ADD `queue_size_limit` SMALLINT NOT NULL DEFAULT '1' COMMENT 'max number of items the queue can process concurrently'; -ALTER TABLE `sync_queue_item` ADD `queue_proc_time` DATETIME NOT NULL COMMENT 'when processing started for the item' AFTER `queue_proc_number`; +ALTER TABLE `cloud`.`sync_queue` DROP INDEX `i_sync_queue__queue_proc_time`; + +ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_time`; + +ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_msid`; + +ALTER TABLE `cloud`.`sync_queue_item` ADD `queue_proc_time` DATETIME NOT NULL COMMENT 'when processing started for the item' AFTER `queue_proc_number`; + +ALTER TABLE `cloud`.`sync_queue_item` ADD INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`); ALTER TABLE `cloud`.`inline_load_balancer_nic_map` DROP FOREIGN KEY fk_inline_load_balancer_nic_map__load_balancer_id; @@ -253,7 +262,7 @@ UPDATE `cloud`.`swift` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`upload` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`user` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`user_ip_address` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`user_vm_temp` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`user_vm_temp` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`virtual_router_providers` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`virtual_supervisor_module` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`vlan` set uuid=id WHERE uuid is NULL; @@ -264,23 +273,258 @@ UPDATE `cloud`.`vpc_gateways` set uuid=id WHERE uuid is NULL; 
UPDATE `cloud`.`vpc_offerings` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`vpn_users` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`volumes` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_vmgroups` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_vmprofiles` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_policies` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`counter` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`conditions` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_vmgroups` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_vmprofiles` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_policies` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`counter` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`conditions` set uuid=id WHERE uuid is NULL; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'detail.batch.query.size', '2000', 'Default entity detail batch query size for listing'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.interval', '1', 'Time interval (in seconds) to reset API count'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.max', '25', 'Max allowed number of APIs within fixed interval'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.cachesize', '50000', 'Account based API count cache size'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'direct.agent.pool.size', '500', 'Default size for DirectAgentPool'); + +ALTER TABLE `cloud`.`op_dc_vnet_alloc` DROP INDEX i_op_dc_vnet_alloc__vnet__data_center_id; + +ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`); + +CREATE TABLE 
`cloud`.`region` ( + `id` int unsigned NOT NULL UNIQUE, + `name` varchar(255) NOT NULL UNIQUE, + `end_point` varchar(255) NOT NULL, + `api_key` varchar(255), + `secret_key` varchar(255), + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`region_sync` ( + `id` bigint unsigned NOT NULL auto_increment, + `region_id` int unsigned NOT NULL, + `api` varchar(1024) NOT NULL, + `created` datetime NOT NULL COMMENT 'date created', + `processed` tinyint NOT NULL default '0', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api','',''); +ALTER TABLE `cloud`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; +ALTER TABLE `cloud`.`user` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; +ALTER TABLE `cloud`.`domain` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.memory', '40960', 'The default maximum memory (in MB) that can be used for an account'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.cpus', '40', 'The default maximum number of cpu cores that can be used for a project'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.memory', '40960', 'The default maximum memory (in MB) that can be used for a project'); + +ALTER TABLE `cloud_usage`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; + +CREATE TABLE `cloud`.`nicira_nvp_router_map` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `logicalrouter_uuid` varchar(255) 
NOT NULL UNIQUE COMMENT 'nicira uuid of logical router', + `network_id` bigint unsigned NOT NULL UNIQUE COMMENT 'cloudstack id of the network', + PRIMARY KEY (`id`), + CONSTRAINT `fk_nicira_nvp_router_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(255) UNIQUE, + `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', + `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', + `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', + PRIMARY KEY (`id`), + CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`counter` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40), + `source` varchar(255) NOT NULL COMMENT 'source e.g. 
netscaler, snmp', + `name` varchar(255) NOT NULL COMMENT 'Counter name', + `value` varchar(255) NOT NULL COMMENT 'Value in case of source=snmp', + `removed` datetime COMMENT 'date removed if not null', + `created` datetime NOT NULL COMMENT 'date created', + PRIMARY KEY (`id`), + CONSTRAINT `uc_counter__uuid` UNIQUE (`uuid`), + INDEX `i_counter__removed`(`removed`), + INDEX `i_counter__source`(`source`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`conditions` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40), + `counter_id` bigint unsigned NOT NULL COMMENT 'Counter Id', + `threshold` bigint unsigned NOT NULL COMMENT 'threshold value for the given counter', + `relational_operator` char(2) COMMENT 'relational operator to be used upon the counter and condition', + `domain_id` bigint unsigned NOT NULL COMMENT 'domain the Condition belongs to', + `account_id` bigint unsigned NOT NULL COMMENT 'owner of this Condition', + `removed` datetime COMMENT 'date removed if not null', + `created` datetime NOT NULL COMMENT 'date created', + PRIMARY KEY (`id`), + CONSTRAINT `fk_conditions__counter_id` FOREIGN KEY `fk_condition__counter_id`(`counter_id`) REFERENCES `counter`(`id`), + CONSTRAINT `fk_conditions__account_id` FOREIGN KEY `fk_condition__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_conditions__domain_id` FOREIGN KEY `fk_condition__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_conditions__uuid` UNIQUE (`uuid`), + INDEX `i_conditions__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmprofiles` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `zone_id` bigint unsigned NOT NULL, + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `autoscale_user_id` bigint unsigned NOT NULL, + `service_offering_id` bigint unsigned NOT NULL, + 
`template_id` bigint unsigned NOT NULL, + `other_deploy_params` varchar(1024) COMMENT 'other deployment parameters that is in addition to zoneid,serviceofferingid,domainid', + `destroy_vm_grace_period` int unsigned COMMENT 'the time allowed for existing connections to get closed before a vm is destroyed', + `counter_params` varchar(1024) COMMENT 'the parameters for the counter to be used to get metric information from VMs', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmprofiles__domain_id` FOREIGN KEY `fk_autoscale_vmprofiles__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmprofiles__account_id` FOREIGN KEY `fk_autoscale_vmprofiles__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmprofiles__autoscale_user_id` FOREIGN KEY `fk_autoscale_vmprofiles__autoscale_user_id` (`autoscale_user_id`) REFERENCES `user`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_autoscale_vmprofiles__uuid` UNIQUE (`uuid`), + INDEX `i_autoscale_vmprofiles__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_policies` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `duration` int unsigned NOT NULL, + `quiet_time` int unsigned NOT NULL, + `action` varchar(15), + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_policies__domain_id` FOREIGN KEY `fk_autoscale_policies__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_policies__account_id` FOREIGN KEY `fk_autoscale_policies__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_autoscale_policies__uuid` 
UNIQUE (`uuid`), + INDEX `i_autoscale_policies__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmgroups` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `zone_id` bigint unsigned NOT NULL, + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `load_balancer_id` bigint unsigned NOT NULL, + `min_members` int unsigned DEFAULT 1, + `max_members` int unsigned NOT NULL, + `member_port` int unsigned NOT NULL, + `interval` int unsigned NOT NULL, + `profile_id` bigint unsigned NOT NULL, + `state` varchar(255) NOT NULL COMMENT 'enabled or disabled, a vmgroup is disabled to stop autoscaling activity', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmgroup__autoscale_vmprofile_id` FOREIGN KEY(`profile_id`) REFERENCES `autoscale_vmprofiles`(`id`), + CONSTRAINT `fk_autoscale_vmgroup__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__domain_id` FOREIGN KEY `fk_autoscale_vmgroups__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__account_id` FOREIGN KEY `fk_autoscale_vmgroups__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__zone_id` FOREIGN KEY `fk_autoscale_vmgroups__zone_id`(`zone_id`) REFERENCES `data_center`(`id`), + CONSTRAINT `uc_autoscale_vmgroups__uuid` UNIQUE (`uuid`), + INDEX `i_autoscale_vmgroups__removed`(`removed`), + INDEX `i_autoscale_vmgroups__load_balancer_id`(`load_balancer_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_policy_condition_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `policy_id` bigint unsigned NOT NULL, + `condition_id` bigint unsigned NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT 
`fk_autoscale_policy_condition_map__policy_id` FOREIGN KEY `fk_autoscale_policy_condition_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_policy_condition_map__condition_id` FOREIGN KEY `fk_autoscale_policy_condition_map__condition_id` (`condition_id`) REFERENCES `conditions` (`id`), + INDEX `i_autoscale_policy_condition_map__policy_id`(`policy_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmgroup_policy_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `vmgroup_id` bigint unsigned NOT NULL, + `policy_id` bigint unsigned NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmgroup_policy_map__vmgroup_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__vmgroup_id` (`vmgroup_id`) REFERENCES `autoscale_vmgroups` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroup_policy_map__policy_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`), + INDEX `i_autoscale_vmgroup_policy_map__vmgroup_id`(`vmgroup_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now()); +CREATE TABLE `cloud`.`vm_snapshots` ( + `id` bigint(20) unsigned NOT NULL auto_increment COMMENT 'Primary Key', + `uuid` varchar(40) NOT NULL, + `name` varchar(255) NOT NULL, + `display_name` varchar(255) default 
NULL, + `description` varchar(255) default NULL, + `vm_id` bigint(20) unsigned NOT NULL, + `account_id` bigint(20) unsigned NOT NULL, + `domain_id` bigint(20) unsigned NOT NULL, + `vm_snapshot_type` varchar(32) default NULL, + `state` varchar(32) NOT NULL, + `parent` bigint unsigned default NULL, + `current` int(1) unsigned default NULL, + `update_count` bigint unsigned NOT NULL DEFAULT 0, + `updated` datetime default NULL, + `created` datetime default NULL, + `removed` datetime default NULL, + PRIMARY KEY (`id`), + CONSTRAINT UNIQUE KEY `uc_vm_snapshots_uuid` (`uuid`), + INDEX `vm_snapshots_name` (`name`), + INDEX `vm_snapshots_vm_id` (`vm_id`), + INDEX `vm_snapshots_account_id` (`account_id`), + INDEX `vm_snapshots_display_name` (`display_name`), + INDEX `vm_snapshots_removed` (`removed`), + INDEX `vm_snapshots_parent` (`parent`), + CONSTRAINT `fk_vm_snapshots_vm_id__vm_instance_id` FOREIGN KEY `fk_vm_snapshots_vm_id__vm_instance_id` (`vm_id`) REFERENCES `vm_instance` (`id`), + CONSTRAINT `fk_vm_snapshots_account_id__account_id` FOREIGN KEY `fk_vm_snapshots_account_id__account_id` (`account_id`) REFERENCES `account` (`id`), + CONSTRAINT `fk_vm_snapshots_domain_id__domain_id` FOREIGN KEY `fk_vm_snapshots_domain_id__domain_id` (`domain_id`) REFERENCES `domain` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`user_ipv6_address` ( + `id` bigint unsigned NOT NULL UNIQUE auto_increment, + `uuid` varchar(40), + `account_id` bigint unsigned NULL, + `domain_id` bigint unsigned NULL, + `ip_address` char(50) NOT NULL, + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', + `vlan_id` bigint unsigned NOT NULL, + `state` char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', + `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', + `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', + `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', + 
`physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', + `created` datetime NULL COMMENT 'Date this ip was allocated to someone', + PRIMARY KEY (`id`), + UNIQUE (`ip_address`, `source_network_id`), + CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), + CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), + CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + -- DB views for list api DROP VIEW IF EXISTS `cloud`.`user_vm_view`; CREATE VIEW `cloud`.`user_vm_view` AS - select + select vm_instance.id id, vm_instance.name name, user_vm.display_name display_name, @@ -445,7 +689,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS DROP VIEW IF EXISTS `cloud`.`domain_router_view`; CREATE VIEW `cloud`.`domain_router_view` AS - select + select vm_instance.id id, vm_instance.name name, account.id account_id, @@ -544,7 +788,7 @@ CREATE VIEW `cloud`.`domain_router_view` AS DROP VIEW IF EXISTS `cloud`.`security_group_view`; CREATE VIEW `cloud`.`security_group_view` AS - select + select security_group.id id, security_group.name name, security_group.description description, @@ -603,7 +847,7 @@ CREATE VIEW `cloud`.`security_group_view` AS DROP VIEW IF EXISTS `cloud`.`resource_tag_view`; CREATE VIEW `cloud`.`resource_tag_view` AS - select + select resource_tags.id, resource_tags.uuid, 
resource_tags.key, @@ -635,7 +879,7 @@ CREATE VIEW `cloud`.`resource_tag_view` AS DROP VIEW IF EXISTS `cloud`.`event_view`; CREATE VIEW `cloud`.`event_view` AS - select + select event.id, event.uuid, event.type, @@ -674,7 +918,7 @@ CREATE VIEW `cloud`.`event_view` AS DROP VIEW IF EXISTS `cloud`.`instance_group_view`; CREATE VIEW `cloud`.`instance_group_view` AS - select + select instance_group.id, instance_group.uuid, instance_group.name, @@ -702,7 +946,7 @@ CREATE VIEW `cloud`.`instance_group_view` AS DROP VIEW IF EXISTS `cloud`.`user_view`; CREATE VIEW `cloud`.`user_view` AS - select + select user.id, user.uuid, user.username, @@ -745,7 +989,7 @@ CREATE VIEW `cloud`.`user_view` AS DROP VIEW IF EXISTS `cloud`.`project_view`; CREATE VIEW `cloud`.`project_view` AS - select + select projects.id, projects.uuid, projects.name, @@ -786,7 +1030,7 @@ CREATE VIEW `cloud`.`project_view` AS DROP VIEW IF EXISTS `cloud`.`project_account_view`; CREATE VIEW `cloud`.`project_account_view` AS - select + select project_account.id, account.id account_id, account.uuid account_uuid, @@ -811,7 +1055,7 @@ CREATE VIEW `cloud`.`project_account_view` AS DROP VIEW IF EXISTS `cloud`.`project_invitation_view`; CREATE VIEW `cloud`.`project_invitation_view` AS - select + select project_invitations.id, project_invitations.uuid, project_invitations.email, @@ -839,7 +1083,7 @@ CREATE VIEW `cloud`.`project_invitation_view` AS DROP VIEW IF EXISTS `cloud`.`host_view`; CREATE VIEW `cloud`.`host_view` AS - select + select host.id, host.uuid, host.name, @@ -909,7 +1153,7 @@ CREATE VIEW `cloud`.`host_view` AS DROP VIEW IF EXISTS `cloud`.`volume_view`; CREATE VIEW `cloud`.`volume_view` AS - select + select volumes.id, volumes.uuid, volumes.name, @@ -1010,7 +1254,7 @@ CREATE VIEW `cloud`.`volume_view` AS DROP VIEW IF EXISTS `cloud`.`account_netstats_view`; CREATE VIEW `cloud`.`account_netstats_view` AS - SELECT + SELECT account_id, sum(net_bytes_received) + sum(current_bytes_received) as bytesReceived, 
sum(net_bytes_sent) + sum(current_bytes_sent) as bytesSent @@ -1021,7 +1265,7 @@ CREATE VIEW `cloud`.`account_netstats_view` AS DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; CREATE VIEW `cloud`.`account_vmstats_view` AS - SELECT + SELECT account_id, state, count(*) as vmcount from `cloud`.`vm_instance` @@ -1029,7 +1273,7 @@ CREATE VIEW `cloud`.`account_vmstats_view` AS DROP VIEW IF EXISTS `cloud`.`free_ip_view`; CREATE VIEW `cloud`.`free_ip_view` AS - select + select count(user_ip_address.id) free_ip from `cloud`.`user_ip_address` @@ -1041,7 +1285,7 @@ CREATE VIEW `cloud`.`free_ip_view` AS DROP VIEW IF EXISTS `cloud`.`account_view`; CREATE VIEW `cloud`.`account_view` AS - select + select account.id, account.uuid, account.account_name, @@ -1168,7 +1412,7 @@ CREATE VIEW `cloud`.`account_view` AS DROP VIEW IF EXISTS `cloud`.`async_job_view`; CREATE VIEW `cloud`.`async_job_view` AS - select + select account.id account_id, account.uuid account_uuid, account.account_name account_name, @@ -1277,7 +1521,7 @@ CREATE VIEW `cloud`.`async_job_view` AS DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; CREATE VIEW `cloud`.`storage_pool_view` AS - select + select storage_pool.id, storage_pool.uuid, storage_pool.name, @@ -1326,7 +1570,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; CREATE VIEW `cloud`.`disk_offering_view` AS - select + select disk_offering.id, disk_offering.uuid, disk_offering.name, @@ -1351,7 +1595,7 @@ CREATE VIEW `cloud`.`disk_offering_view` AS DROP VIEW IF EXISTS `cloud`.`service_offering_view`; CREATE VIEW `cloud`.`service_offering_view` AS - select + select service_offering.id, disk_offering.uuid, disk_offering.name, @@ -1382,10 +1626,10 @@ CREATE VIEW `cloud`.`service_offering_view` AS `cloud`.`disk_offering` ON service_offering.id = disk_offering.id left join `cloud`.`domain` ON disk_offering.domain_id = domain.id; - + DROP VIEW IF EXISTS `cloud`.`data_center_view`; CREATE VIEW 
`cloud`.`data_center_view` AS - select + select data_center.id, data_center.uuid, data_center.name, @@ -1410,43 +1654,5 @@ CREATE VIEW `cloud`.`data_center_view` AS from `cloud`.`data_center` left join - `cloud`.`domain` ON data_center.domain_id = domain.id; - -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'direct.agent.pool.size', '500', 'Default size for DirectAgentPool'); + `cloud`.`domain` ON data_center.domain_id = domain.id; -ALTER TABLE `cloud`.`op_dc_vnet_alloc` DROP INDEX i_op_dc_vnet_alloc__vnet__data_center_id; - -ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`); - -CREATE TABLE `cloud`.`region` ( - `id` int unsigned NOT NULL UNIQUE, - `name` varchar(255) NOT NULL UNIQUE, - `end_point` varchar(255) NOT NULL, - `api_key` varchar(255), - `secret_key` varchar(255), - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`region_sync` ( - `id` bigint unsigned NOT NULL auto_increment, - `region_id` int unsigned NOT NULL, - `api` varchar(1024) NOT NULL, - `created` datetime NOT NULL COMMENT 'date created', - `processed` tinyint NOT NULL default '0', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api','',''); -ALTER TABLE `cloud`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; -ALTER TABLE `cloud`.`user` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; -ALTER TABLE `cloud`.`domain` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; - -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); - -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 
'max.account.memory', '40960', 'The default maximum memory (in MB) that can be used for an account'); - -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.cpus', '40', 'The default maximum number of cpu cores that can be used for a project'); - -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.memory', '40960', 'The default maximum memory (in MB) that can be used for a project'); - -ALTER TABLE `cloud_usage`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; From 16e81130cca78d2a10ff47856e374d92fa4f3ecc Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sat, 16 Feb 2013 17:18:42 +0530 Subject: [PATCH 048/486] db: Fix developer's deploydb and cloud-setup-databases for rolling update - Fix developer prefill to use 4.0's schema - Fix developer/pom.xml and cloud-setup-databases to not run create-schema-view, the upgrade path is configured to do a rolling update and set it up Signed-off-by: Rohit Yadav --- developer/developer-prefill.sql | 20 ++++++++++---------- developer/pom.xml | 2 -- setup/bindir/cloud-setup-databases.in | 4 ++-- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/developer/developer-prefill.sql b/developer/developer-prefill.sql index 8713d731645..6300d35df64 100644 --- a/developer/developer-prefill.sql +++ b/developer/developer-prefill.sql @@ -18,25 +18,25 @@ -- Add a default ROOT domain use cloud; -INSERT INTO `cloud`.`domain` (id, uuid, name, parent, path, owner, region_id) VALUES - (1, UUID(), 'ROOT', NULL, '/', 2, 1); +INSERT INTO `cloud`.`domain` (id, uuid, name, parent, path, owner) VALUES + (1, UUID(), 'ROOT', NULL, '/', 2); -- Add system and admin accounts -INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state, region_id) VALUES - (1, UUID(), 'system', 1, 1, 'enabled', 1); +INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state) VALUES + 
(1, UUID(), 'system', 1, 1, 'enabled'); -INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state, region_id) VALUES - (2, UUID(), 'admin', 1, 1, 'enabled', 1); +INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state) VALUES + (2, UUID(), 'admin', 1, 1, 'enabled'); -- Add system user INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, - lastname, email, state, created, region_id) VALUES (1, UUID(), 'system', RAND(), - '1', 'system', 'cloud', NULL, 'enabled', NOW(), 1); + lastname, email, state, created) VALUES (1, UUID(), 'system', RAND(), + '1', 'system', 'cloud', NULL, 'enabled', NOW()); -- Add system user with encrypted password=password INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, - lastname, email, state, created, region_id) VALUES (2, UUID(), 'admin', '5f4dcc3b5aa765d61d8327deb882cf99', - '2', 'Admin', 'User', 'admin@mailprovider.com', 'enabled', NOW(), 1); + lastname, email, state, created) VALUES (2, UUID(), 'admin', '5f4dcc3b5aa765d61d8327deb882cf99', + '2', 'Admin', 'User', 'admin@mailprovider.com', 'enabled', NOW()); -- Add configurations INSERT INTO `cloud`.`configuration` (category, instance, component, name, value) diff --git a/developer/pom.xml b/developer/pom.xml index ae474022c25..b448e1bf0fd 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -157,8 +157,6 @@ ${basedir}/target/db/create-schema.sql ${basedir}/target/db/create-schema-premium.sql - - ${basedir}/target/db/create-schema-view.sql ${basedir}/target/db/templates.sql diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in index c47d334a2d0..52a23d6f0fc 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -211,7 +211,7 @@ for full help ""), ) - for f in ["create-database","create-schema", "create-database-premium","create-schema-premium", "create-schema-view"]: + for f in 
["create-database","create-schema", "create-database-premium","create-schema-premium"]: p = os.path.join(self.dbFilesPath,"%s.sql"%f) if not os.path.exists(p): continue text = file(p).read() @@ -358,7 +358,7 @@ for example: if not os.path.exists(dbf): self.errorAndExit("Cannot find %s"%dbf) - coreSchemas = ['create-database.sql', 'create-schema.sql', 'create-schema-view.sql', 'templates.sql'] + coreSchemas = ['create-database.sql', 'create-schema.sql', 'templates.sql'] if not self.serversetup: coreSchemas.append('server-setup.sql') From 215621daa88883dde4f09d55fde49e0bf0cd75f5 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Sat, 16 Feb 2013 22:01:43 +0530 Subject: [PATCH 049/486] CPU&RAM overcommit ratio uI integration code --- ui/scripts/system.js | 57 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index e51bf90ba81..d76aa3e6a2e 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -7440,6 +7440,18 @@ validation: { required: true } }, + cpuovercommit:{ + label: 'CPU overcommit ratio', + defaultValue:'1' + + }, + + ramovercommit:{ + label: 'RAM overcommit ratio', + defaultValue:'1' + + }, + //hypervisor==VMWare begins here vCenterHost: { label: 'label.vcenter.host', @@ -7497,6 +7509,13 @@ array1.push("&podId=" + args.data.podId); var clusterName = args.data.name; + + if(args.data.cpuovercommit != "") + array1.push("&cpuovercommitratio=" + todb(args.data.cpuovercommit)); + + if(args.data.ramovercommit != "") + array1.push("&ramovercommitratio=" + todb(args.data.ramovercommit)); + if(args.data.hypervisor == "VMware") { array1.push("&username=" + todb(args.data.vCenterUsername)); array1.push("&password=" + todb(args.data.vCenterPassword)); @@ -7572,6 +7591,38 @@ }, actions: { + + edit: { + label: 'label.edit', + action: function(args) { + var array1 = []; + + if (args.data.cpuovercommitratio != "" && args.data.cpuovercommitratio > 0) + 
array1.push("&cpuovercommitratio=" + args.data.cpuovercommitratio); + + if (args.data.ramovercommitratio != "" && args.data.ramovercommitratio > 0) + array1.push("&ramovercommitratio=" + args.data.ramovercommitratio); + + $.ajax({ + + url: createURL("updateCluster&id=" + args.context.clusters[0].id + array1.join("")), + dataType: "json", + async: true, + success: function(json) { + var item = json.updateclusterresponse.cluster; + args.context.clusters[0].cpuovercommitratio = item.cpuovercommitratio; + args.context.clusters[0].ramovercommitratio = item.ramovercommitratio; + addExtraPropertiesToClusterObject(item); + args.response.success({ + actionFilter: clusterActionfilter, + data:item + }); + + } + }); + } + }, + enable: { label: 'label.action.enable.cluster', messages: { @@ -7741,6 +7792,8 @@ podname: { label: 'label.pod' }, hypervisortype: { label: 'label.hypervisor' }, clustertype: { label: 'label.cluster.type' }, + cpuovercommitratio:{ label: 'CPU overcommit Ratio', isEditable:true}, + ramovercommitratio:{ label: 'RAM overcommit Ratio', isEditable:true}, //allocationstate: { label: 'label.allocation.state' }, //managedstate: { label: 'Managed State' }, state: { label: 'label.state' } @@ -10407,10 +10460,14 @@ if(jsonObj.state == "Enabled") {//managed, allocation enabled allowedActions.push("unmanage"); allowedActions.push("disable"); + allowedActions.push("edit"); + } else if(jsonObj.state == "Disabled") { //managed, allocation disabled allowedActions.push("unmanage"); allowedActions.push("enable"); + allowedActions.push("edit"); + } else { //Unmanaged, PrepareUnmanaged , PrepareUnmanagedError allowedActions.push("manage"); From 31d6f03308aa10a170d5a37a3d6f8d572cf9e7c7 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Sun, 17 Feb 2013 23:26:00 +0530 Subject: [PATCH 050/486] db: Remove create-schema-view.sql, views are created using schema-40to410.sql - Remove create-schema-view.sql, views are created when mgmt server does rolling upgrade from 4.0.0 to 4.1.0 - 
Fix reference and usage of the sql file in scripts Signed-off-by: Rohit Yadav --- setup/db/create-schema-view.sql | 1152 ------------------------------- setup/db/deploy-db-dev.sh | 11 - setup/db/deploy-db-simulator.sh | 11 - 3 files changed, 1174 deletions(-) delete mode 100644 setup/db/create-schema-view.sql diff --git a/setup/db/create-schema-view.sql b/setup/db/create-schema-view.sql deleted file mode 100644 index 265779dccdc..00000000000 --- a/setup/db/create-schema-view.sql +++ /dev/null @@ -1,1152 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one --- or more contributor license agreements. See the NOTICE file --- distributed with this work for additional information --- regarding copyright ownership. The ASF licenses this file --- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliance --- with the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, --- software distributed under the License is distributed on an --- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --- KIND, either express or implied. See the License for the --- specific language governing permissions and limitations --- under the License. 
- --- DB views for list api - -DROP VIEW IF EXISTS `cloud`.`user_vm_view`; -CREATE VIEW `cloud`.`user_vm_view` AS - select - vm_instance.id id, - vm_instance.name name, - user_vm.display_name display_name, - user_vm.user_data user_data, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - instance_group.id instance_group_id, - instance_group.uuid instance_group_uuid, - instance_group.name instance_group_name, - vm_instance.uuid uuid, - vm_instance.last_host_id last_host_id, - vm_instance.vm_type type, - vm_instance.vnc_password vnc_password, - vm_instance.limit_cpu_use limit_cpu_use, - vm_instance.created created, - vm_instance.state state, - vm_instance.removed removed, - vm_instance.ha_enabled ha_enabled, - vm_instance.hypervisor_type hypervisor_type, - vm_instance.instance_name instance_name, - vm_instance.guest_os_id guest_os_id, - guest_os.uuid guest_os_uuid, - vm_instance.pod_id pod_id, - host_pod_ref.uuid pod_uuid, - vm_instance.private_ip_address private_ip_address, - vm_instance.private_mac_address private_mac_address, - vm_instance.vm_type vm_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - data_center.is_security_group_enabled security_group_enabled, - host.id host_id, - host.uuid host_uuid, - host.name host_name, - vm_template.id template_id, - vm_template.uuid template_uuid, - vm_template.name template_name, - vm_template.display_text template_display_text, - vm_template.enable_password password_enabled, - iso.id iso_id, - iso.uuid iso_uuid, - iso.name iso_name, - iso.display_text iso_display_text, - service_offering.id service_offering_id, - disk_offering.uuid service_offering_uuid, - service_offering.cpu cpu, - 
service_offering.speed speed, - service_offering.ram_size ram_size, - disk_offering.name service_offering_name, - storage_pool.id pool_id, - storage_pool.uuid pool_uuid, - storage_pool.pool_type pool_type, - volumes.id volume_id, - volumes.uuid volume_uuid, - volumes.device_id volume_device_id, - volumes.volume_type volume_type, - security_group.id security_group_id, - security_group.uuid security_group_uuid, - security_group.name security_group_name, - security_group.description security_group_description, - nics.id nic_id, - nics.uuid nic_uuid, - nics.network_id network_id, - nics.ip4_address ip_address, - nics.default_nic is_default_nic, - nics.gateway gateway, - nics.netmask netmask, - nics.mac_address mac_address, - nics.broadcast_uri broadcast_uri, - nics.isolation_uri isolation_uri, - vpc.id vpc_id, - vpc.uuid vpc_uuid, - networks.uuid network_uuid, - networks.traffic_type traffic_type, - networks.guest_type guest_type, - user_ip_address.id public_ip_id, - user_ip_address.uuid public_ip_uuid, - user_ip_address.public_ip_address public_ip_address, - ssh_keypairs.keypair_name keypair_name, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`user_vm` - inner join - `cloud`.`vm_instance` ON vm_instance.id = user_vm.id - and vm_instance.removed is NULL - inner join - `cloud`.`account` ON vm_instance.account_id = account.id - inner join - `cloud`.`domain` ON vm_instance.domain_id = domain.id - left join - `cloud`.`guest_os` ON vm_instance.guest_os_id = guest_os.id - left join - 
`cloud`.`host_pod_ref` ON vm_instance.pod_id = host_pod_ref.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`instance_group_vm_map` ON vm_instance.id = instance_group_vm_map.instance_id - left join - `cloud`.`instance_group` ON instance_group_vm_map.group_id = instance_group.id - left join - `cloud`.`data_center` ON vm_instance.data_center_id = data_center.id - left join - `cloud`.`host` ON vm_instance.host_id = host.id - left join - `cloud`.`vm_template` ON vm_instance.vm_template_id = vm_template.id - left join - `cloud`.`vm_template` iso ON iso.id = user_vm.iso_id - left join - `cloud`.`service_offering` ON vm_instance.service_offering_id = service_offering.id - left join - `cloud`.`disk_offering` ON vm_instance.service_offering_id = disk_offering.id - left join - `cloud`.`volumes` ON vm_instance.id = volumes.instance_id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`security_group_vm_map` ON vm_instance.id = security_group_vm_map.instance_id - left join - `cloud`.`security_group` ON security_group_vm_map.security_group_id = security_group.id - left join - `cloud`.`nics` ON vm_instance.id = nics.instance_id - left join - `cloud`.`networks` ON nics.network_id = networks.id - left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id - left join - `cloud`.`user_ip_address` ON user_ip_address.vm_id = vm_instance.id - left join - `cloud`.`user_vm_details` ON user_vm_details.vm_id = vm_instance.id - and user_vm_details.name = 'SSH.PublicKey' - left join - `cloud`.`ssh_keypairs` ON ssh_keypairs.public_key = user_vm_details.value - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = vm_instance.id - and resource_tags.resource_type = 'UserVm' - left join - `cloud`.`async_job` ON async_job.instance_id = vm_instance.id - and async_job.instance_type = 'VirtualMachine' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`domain_router_view`; -CREATE 
VIEW `cloud`.`domain_router_view` AS - select - vm_instance.id id, - vm_instance.name name, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - vm_instance.uuid uuid, - vm_instance.created created, - vm_instance.state state, - vm_instance.removed removed, - vm_instance.pod_id pod_id, - vm_instance.instance_name instance_name, - host_pod_ref.uuid pod_uuid, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - data_center.dns1 dns1, - data_center.dns2 dns2, - host.id host_id, - host.uuid host_uuid, - host.name host_name, - vm_template.id template_id, - vm_template.uuid template_uuid, - service_offering.id service_offering_id, - disk_offering.uuid service_offering_uuid, - disk_offering.name service_offering_name, - nics.id nic_id, - nics.uuid nic_uuid, - nics.network_id network_id, - nics.ip4_address ip_address, - nics.default_nic is_default_nic, - nics.gateway gateway, - nics.netmask netmask, - nics.mac_address mac_address, - nics.broadcast_uri broadcast_uri, - nics.isolation_uri isolation_uri, - vpc.id vpc_id, - vpc.uuid vpc_uuid, - networks.uuid network_uuid, - networks.name network_name, - networks.network_domain network_domain, - networks.traffic_type traffic_type, - networks.guest_type guest_type, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id, - domain_router.template_version template_version, - domain_router.scripts_version scripts_version, - domain_router.is_redundant_router is_redundant_router, - domain_router.redundant_state redundant_state, - domain_router.stop_pending stop_pending - from - `cloud`.`domain_router` - inner join - `cloud`.`vm_instance` ON vm_instance.id = 
domain_router.id - inner join - `cloud`.`account` ON vm_instance.account_id = account.id - inner join - `cloud`.`domain` ON vm_instance.domain_id = domain.id - left join - `cloud`.`host_pod_ref` ON vm_instance.pod_id = host_pod_ref.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`data_center` ON vm_instance.data_center_id = data_center.id - left join - `cloud`.`host` ON vm_instance.host_id = host.id - left join - `cloud`.`vm_template` ON vm_instance.vm_template_id = vm_template.id - left join - `cloud`.`service_offering` ON vm_instance.service_offering_id = service_offering.id - left join - `cloud`.`disk_offering` ON vm_instance.service_offering_id = disk_offering.id - left join - `cloud`.`volumes` ON vm_instance.id = volumes.instance_id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`nics` ON vm_instance.id = nics.instance_id - left join - `cloud`.`networks` ON nics.network_id = networks.id - left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id - left join - `cloud`.`async_job` ON async_job.instance_id = vm_instance.id - and async_job.instance_type = 'DomainRouter' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`security_group_view`; -CREATE VIEW `cloud`.`security_group_view` AS - select - security_group.id id, - security_group.name name, - security_group.description description, - security_group.uuid uuid, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - security_group_rule.id rule_id, - security_group_rule.uuid rule_uuid, - security_group_rule.type rule_type, - security_group_rule.start_port rule_start_port, - security_group_rule.end_port rule_end_port, - security_group_rule.protocol 
rule_protocol, - security_group_rule.allowed_network_id rule_allowed_network_id, - security_group_rule.allowed_ip_cidr rule_allowed_ip_cidr, - security_group_rule.create_status rule_create_status, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`security_group` - left join - `cloud`.`security_group_rule` ON security_group.id = security_group_rule.security_group_id - inner join - `cloud`.`account` ON security_group.account_id = account.id - inner join - `cloud`.`domain` ON security_group.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = security_group.account_id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = security_group.id - and resource_tags.resource_type = 'SecurityGroup' - left join - `cloud`.`async_job` ON async_job.instance_id = security_group.id - and async_job.instance_type = 'SecurityGroup' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`resource_tag_view`; -CREATE VIEW `cloud`.`resource_tag_view` AS - select - resource_tags.id, - resource_tags.uuid, - resource_tags.key, - resource_tags.value, - resource_tags.resource_id, - resource_tags.resource_uuid, - resource_tags.resource_type, - resource_tags.customer, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name 
- from - `cloud`.`resource_tags` - inner join - `cloud`.`account` ON resource_tags.account_id = account.id - inner join - `cloud`.`domain` ON resource_tags.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = resource_tags.account_id; - - -DROP VIEW IF EXISTS `cloud`.`event_view`; -CREATE VIEW `cloud`.`event_view` AS - select - event.id, - event.uuid, - event.type, - event.state, - event.description, - event.created, - event.level, - event.parameters, - event.start_id, - eve.uuid start_uuid, - event.user_id, - user.username user_name, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name - from - `cloud`.`event` - inner join - `cloud`.`account` ON event.account_id = account.id - inner join - `cloud`.`domain` ON event.domain_id = domain.id - inner join - `cloud`.`user` ON event.user_id = user.id - left join - `cloud`.`projects` ON projects.project_account_id = event.account_id - left join - `cloud`.`event` eve ON event.start_id = eve.id; - -DROP VIEW IF EXISTS `cloud`.`instance_group_view`; -CREATE VIEW `cloud`.`instance_group_view` AS - select - instance_group.id, - instance_group.uuid, - instance_group.name, - instance_group.removed, - instance_group.created, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name - from - `cloud`.`instance_group` - inner join - `cloud`.`account` ON instance_group.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`projects` ON 
projects.project_account_id = instance_group.account_id; - -DROP VIEW IF EXISTS `cloud`.`user_view`; -CREATE VIEW `cloud`.`user_view` AS - select - user.id, - user.uuid, - user.username, - user.password, - user.firstname, - user.lastname, - user.email, - user.state, - user.api_key, - user.secret_key, - user.created, - user.removed, - user.timezone, - user.registration_token, - user.is_registered, - user.incorrect_login_attempts, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`user` - inner join - `cloud`.`account` ON user.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`async_job` ON async_job.instance_id = user.id - and async_job.instance_type = 'User' - and async_job.job_status = 0; - - -DROP VIEW IF EXISTS `cloud`.`project_view`; -CREATE VIEW `cloud`.`project_view` AS - select - projects.id, - projects.uuid, - projects.name, - projects.display_text, - projects.state, - projects.removed, - projects.created, - account.account_name owner, - pacct.account_id, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer - from - `cloud`.`projects` - inner join - `cloud`.`domain` ON projects.domain_id = domain.id - inner join - `cloud`.`project_account` ON projects.id = 
project_account.project_id - and project_account.account_role = 'Admin' - inner join - `cloud`.`account` ON account.id = project_account.account_id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = projects.id - and resource_tags.resource_type = 'Project' - left join - `cloud`.`project_account` pacct ON projects.id = pacct.project_id; - -DROP VIEW IF EXISTS `cloud`.`project_account_view`; -CREATE VIEW `cloud`.`project_account_view` AS - select - project_account.id, - account.id account_id, - account.uuid account_uuid, - account.account_name, - account.type account_type, - project_account.account_role, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`project_account` - inner join - `cloud`.`account` ON project_account.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - inner join - `cloud`.`projects` ON projects.id = project_account.project_id; - -DROP VIEW IF EXISTS `cloud`.`project_invitation_view`; -CREATE VIEW `cloud`.`project_invitation_view` AS - select - project_invitations.id, - project_invitations.uuid, - project_invitations.email, - project_invitations.created, - project_invitations.state, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - account.id account_id, - account.uuid account_uuid, - account.account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`project_invitations` - left join - `cloud`.`account` ON project_invitations.account_id = account.id - left join - `cloud`.`domain` ON project_invitations.domain_id = domain.id - left join - `cloud`.`projects` ON projects.id = project_invitations.project_id; - -DROP VIEW IF EXISTS `cloud`.`host_view`; -CREATE VIEW `cloud`.`host_view` AS - select - 
host.id, - host.uuid, - host.name, - host.status, - host.disconnected, - host.type, - host.private_ip_address, - host.version, - host.hypervisor_type, - host.hypervisor_version, - host.capabilities, - host.last_ping, - host.created, - host.removed, - host.resource_state, - host.mgmt_server_id, - host.cpus, - host.speed, - host.ram, - cluster.id cluster_id, - cluster.uuid cluster_uuid, - cluster.name cluster_name, - cluster.cluster_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - host_pod_ref.id pod_id, - host_pod_ref.uuid pod_uuid, - host_pod_ref.name pod_name, - host_tags.tag, - guest_os_category.id guest_os_category_id, - guest_os_category.uuid guest_os_category_uuid, - guest_os_category.name guest_os_category_name, - mem_caps.used_capacity memory_used_capacity, - mem_caps.reserved_capacity memory_reserved_capacity, - cpu_caps.used_capacity cpu_used_capacity, - cpu_caps.reserved_capacity cpu_reserved_capacity, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`host` - left join - `cloud`.`cluster` ON host.cluster_id = cluster.id - left join - `cloud`.`data_center` ON host.data_center_id = data_center.id - left join - `cloud`.`host_pod_ref` ON host.pod_id = host_pod_ref.id - left join - `cloud`.`host_details` ON host.id = host_details.id - and host_details.name = 'guest.os.category.id' - left join - `cloud`.`guest_os_category` ON guest_os_category.id = CONVERT( host_details.value , UNSIGNED) - left join - `cloud`.`host_tags` ON host_tags.host_id = host.id - left join - `cloud`.`op_host_capacity` mem_caps ON host.id = mem_caps.host_id - and mem_caps.capacity_type = 0 - left join - `cloud`.`op_host_capacity` cpu_caps ON host.id = cpu_caps.host_id - and cpu_caps.capacity_type = 1 - left join - `cloud`.`async_job` ON async_job.instance_id = host.id - and async_job.instance_type = 'Host' - and async_job.job_status = 
0; - -DROP VIEW IF EXISTS `cloud`.`volume_view`; -CREATE VIEW `cloud`.`volume_view` AS - select - volumes.id, - volumes.uuid, - volumes.name, - volumes.device_id, - volumes.volume_type, - volumes.size, - volumes.created, - volumes.state, - volumes.attached, - volumes.removed, - volumes.pod_id, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - vm_instance.id vm_id, - vm_instance.uuid vm_uuid, - vm_instance.name vm_name, - vm_instance.state vm_state, - vm_instance.vm_type, - user_vm.display_name vm_display_name, - volume_host_ref.size volume_host_size, - volume_host_ref.created volume_host_created, - volume_host_ref.format, - volume_host_ref.download_pct, - volume_host_ref.download_state, - volume_host_ref.error_str, - disk_offering.id disk_offering_id, - disk_offering.uuid disk_offering_uuid, - disk_offering.name disk_offering_name, - disk_offering.display_text disk_offering_display_text, - disk_offering.use_local_storage, - disk_offering.system_use, - storage_pool.id pool_id, - storage_pool.uuid pool_uuid, - storage_pool.name pool_name, - cluster.hypervisor_type, - vm_template.id template_id, - vm_template.uuid template_uuid, - vm_template.extractable, - vm_template.type template_type, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid 
job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`volumes` - inner join - `cloud`.`account` ON volumes.account_id = account.id - inner join - `cloud`.`domain` ON volumes.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`data_center` ON volumes.data_center_id = data_center.id - left join - `cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id - left join - `cloud`.`user_vm` ON user_vm.id = vm_instance.id - left join - `cloud`.`volume_host_ref` ON volumes.id = volume_host_ref.volume_id - and volumes.data_center_id = volume_host_ref.zone_id - left join - `cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id - left join - `cloud`.`vm_template` ON volumes.template_id = vm_template.id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id - and resource_tags.resource_type = 'Volume' - left join - `cloud`.`async_job` ON async_job.instance_id = volumes.id - and async_job.instance_type = 'Volume' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`account_netstats_view`; -CREATE VIEW `cloud`.`account_netstats_view` AS - SELECT - account_id, - sum(net_bytes_received) + sum(current_bytes_received) as bytesReceived, - sum(net_bytes_sent) + sum(current_bytes_sent) as bytesSent - FROM - `cloud`.`user_statistics` - group by account_id; - - -DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; -CREATE VIEW `cloud`.`account_vmstats_view` AS - SELECT - account_id, state, count(*) as vmcount - from - `cloud`.`vm_instance` - group by account_id , state; - -DROP VIEW IF EXISTS `cloud`.`free_ip_view`; -CREATE VIEW `cloud`.`free_ip_view` AS - select - count(user_ip_address.id) free_ip - from - `cloud`.`user_ip_address` - inner join - `cloud`.`vlan` ON vlan.id = 
user_ip_address.vlan_db_id - and vlan.vlan_type = 'VirtualNetwork' - where - state = 'Free'; - -DROP VIEW IF EXISTS `cloud`.`account_view`; -CREATE VIEW `cloud`.`account_view` AS - select - account.id, - account.uuid, - account.account_name, - account.type, - account.state, - account.removed, - account.cleanup_needed, - account.network_domain, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - account_netstats_view.bytesReceived, - account_netstats_view.bytesSent, - vmlimit.max vmLimit, - vmcount.count vmTotal, - runningvm.vmcount runningVms, - stoppedvm.vmcount stoppedVms, - iplimit.max ipLimit, - ipcount.count ipTotal, - free_ip_view.free_ip ipFree, - volumelimit.max volumeLimit, - volumecount.count volumeTotal, - snapshotlimit.max snapshotLimit, - snapshotcount.count snapshotTotal, - templatelimit.max templateLimit, - templatecount.count templateTotal, - vpclimit.max vpcLimit, - vpccount.count vpcTotal, - projectlimit.max projectLimit, - projectcount.count projectTotal, - networklimit.max networkLimit, - networkcount.count networkTotal, - cpulimit.max cpuLimit, - cpucount.count cpuTotal, - memorylimit.max memoryLimit, - memorycount.count memoryTotal, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`free_ip_view`, - `cloud`.`account` - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`data_center` ON account.default_zone_id = data_center.id - left join - `cloud`.`account_netstats_view` ON account.id = account_netstats_view.account_id - left join - `cloud`.`resource_limit` vmlimit ON account.id = vmlimit.account_id - and vmlimit.type = 'user_vm' - left join - `cloud`.`resource_count` vmcount ON account.id = vmcount.account_id - and vmcount.type = 'user_vm' - left join - 
`cloud`.`account_vmstats_view` runningvm ON account.id = runningvm.account_id - and runningvm.state = 'Running' - left join - `cloud`.`account_vmstats_view` stoppedvm ON account.id = stoppedvm.account_id - and stoppedvm.state = 'Stopped' - left join - `cloud`.`resource_limit` iplimit ON account.id = iplimit.account_id - and iplimit.type = 'public_ip' - left join - `cloud`.`resource_count` ipcount ON account.id = ipcount.account_id - and ipcount.type = 'public_ip' - left join - `cloud`.`resource_limit` volumelimit ON account.id = volumelimit.account_id - and volumelimit.type = 'volume' - left join - `cloud`.`resource_count` volumecount ON account.id = volumecount.account_id - and volumecount.type = 'volume' - left join - `cloud`.`resource_limit` snapshotlimit ON account.id = snapshotlimit.account_id - and snapshotlimit.type = 'snapshot' - left join - `cloud`.`resource_count` snapshotcount ON account.id = snapshotcount.account_id - and snapshotcount.type = 'snapshot' - left join - `cloud`.`resource_limit` templatelimit ON account.id = templatelimit.account_id - and templatelimit.type = 'template' - left join - `cloud`.`resource_count` templatecount ON account.id = templatecount.account_id - and templatecount.type = 'template' - left join - `cloud`.`resource_limit` vpclimit ON account.id = vpclimit.account_id - and vpclimit.type = 'vpc' - left join - `cloud`.`resource_count` vpccount ON account.id = vpccount.account_id - and vpccount.type = 'vpc' - left join - `cloud`.`resource_limit` projectlimit ON account.id = projectlimit.account_id - and projectlimit.type = 'project' - left join - `cloud`.`resource_count` projectcount ON account.id = projectcount.account_id - and projectcount.type = 'project' - left join - `cloud`.`resource_limit` networklimit ON account.id = networklimit.account_id - and networklimit.type = 'network' - left join - `cloud`.`resource_count` networkcount ON account.id = networkcount.account_id - and networkcount.type = 'network' - left join - 
`cloud`.`resource_limit` cpulimit ON account.id = cpulimit.account_id - and cpulimit.type = 'cpu' - left join - `cloud`.`resource_count` cpucount ON account.id = cpucount.account_id - and cpucount.type = 'cpu' - left join - `cloud`.`resource_limit` memorylimit ON account.id = memorylimit.account_id - and memorylimit.type = 'memory' - left join - `cloud`.`resource_count` memorycount ON account.id = memorycount.account_id - and memorycount.type = 'memory' - left join - `cloud`.`async_job` ON async_job.instance_id = account.id - and async_job.instance_type = 'Account' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`async_job_view`; -CREATE VIEW `cloud`.`async_job_view` AS - select - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - user.id user_id, - user.uuid user_uuid, - async_job.id, - async_job.uuid, - async_job.job_cmd, - async_job.job_status, - async_job.job_process_status, - async_job.job_result_code, - async_job.job_result, - async_job.created, - async_job.removed, - async_job.instance_type, - async_job.instance_id, - CASE - WHEN async_job.instance_type = 'Volume' THEN volumes.uuid - WHEN - async_job.instance_type = 'Template' - or async_job.instance_type = 'Iso' - THEN - vm_template.uuid - WHEN - async_job.instance_type = 'VirtualMachine' - or async_job.instance_type = 'ConsoleProxy' - or async_job.instance_type = 'SystemVm' - or async_job.instance_type = 'DomainRouter' - THEN - vm_instance.uuid - WHEN async_job.instance_type = 'Snapshot' THEN snapshots.uuid - WHEN async_job.instance_type = 'Host' THEN host.uuid - WHEN async_job.instance_type = 'StoragePool' THEN storage_pool.uuid - WHEN async_job.instance_type = 'IpAddress' THEN user_ip_address.uuid - WHEN async_job.instance_type = 'SecurityGroup' THEN security_group.uuid - WHEN async_job.instance_type = 'PhysicalNetwork' 
THEN physical_network.uuid - WHEN async_job.instance_type = 'TrafficType' THEN physical_network_traffic_types.uuid - WHEN async_job.instance_type = 'PhysicalNetworkServiceProvider' THEN physical_network_service_providers.uuid - WHEN async_job.instance_type = 'FirewallRule' THEN firewall_rules.uuid - WHEN async_job.instance_type = 'Account' THEN acct.uuid - WHEN async_job.instance_type = 'User' THEN us.uuid - WHEN async_job.instance_type = 'StaticRoute' THEN static_routes.uuid - WHEN async_job.instance_type = 'PrivateGateway' THEN vpc_gateways.uuid - WHEN async_job.instance_type = 'Counter' THEN counter.uuid - WHEN async_job.instance_type = 'Condition' THEN conditions.uuid - WHEN async_job.instance_type = 'AutoScalePolicy' THEN autoscale_policies.uuid - WHEN async_job.instance_type = 'AutoScaleVmProfile' THEN autoscale_vmprofiles.uuid - WHEN async_job.instance_type = 'AutoScaleVmGroup' THEN autoscale_vmgroups.uuid - ELSE null - END instance_uuid - from - `cloud`.`async_job` - left join - `cloud`.`account` ON async_job.account_id = account.id - left join - `cloud`.`domain` ON domain.id = account.domain_id - left join - `cloud`.`user` ON async_job.user_id = user.id - left join - `cloud`.`volumes` ON async_job.instance_id = volumes.id - left join - `cloud`.`vm_template` ON async_job.instance_id = vm_template.id - left join - `cloud`.`vm_instance` ON async_job.instance_id = vm_instance.id - left join - `cloud`.`snapshots` ON async_job.instance_id = snapshots.id - left join - `cloud`.`host` ON async_job.instance_id = host.id - left join - `cloud`.`storage_pool` ON async_job.instance_id = storage_pool.id - left join - `cloud`.`user_ip_address` ON async_job.instance_id = user_ip_address.id - left join - `cloud`.`security_group` ON async_job.instance_id = security_group.id - left join - `cloud`.`physical_network` ON async_job.instance_id = physical_network.id - left join - `cloud`.`physical_network_traffic_types` ON async_job.instance_id = physical_network_traffic_types.id 
- left join - `cloud`.`physical_network_service_providers` ON async_job.instance_id = physical_network_service_providers.id - left join - `cloud`.`firewall_rules` ON async_job.instance_id = firewall_rules.id - left join - `cloud`.`account` acct ON async_job.instance_id = acct.id - left join - `cloud`.`user` us ON async_job.instance_id = us.id - left join - `cloud`.`static_routes` ON async_job.instance_id = static_routes.id - left join - `cloud`.`vpc_gateways` ON async_job.instance_id = vpc_gateways.id - left join - `cloud`.`counter` ON async_job.instance_id = counter.id - left join - `cloud`.`conditions` ON async_job.instance_id = conditions.id - left join - `cloud`.`autoscale_policies` ON async_job.instance_id = autoscale_policies.id - left join - `cloud`.`autoscale_vmprofiles` ON async_job.instance_id = autoscale_vmprofiles.id - left join - `cloud`.`autoscale_vmgroups` ON async_job.instance_id = autoscale_vmgroups.id; - -DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; -CREATE VIEW `cloud`.`storage_pool_view` AS - select - storage_pool.id, - storage_pool.uuid, - storage_pool.name, - storage_pool.status, - storage_pool.path, - storage_pool.pool_type, - storage_pool.host_address, - storage_pool.created, - storage_pool.removed, - storage_pool.capacity_bytes, - cluster.id cluster_id, - cluster.uuid cluster_uuid, - cluster.name cluster_name, - cluster.cluster_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - host_pod_ref.id pod_id, - host_pod_ref.uuid pod_uuid, - host_pod_ref.name pod_name, - storage_pool_details.name tag, - op_host_capacity.used_capacity disk_used_capacity, - op_host_capacity.reserved_capacity disk_reserved_capacity, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`storage_pool` - left join - `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id - left join - `cloud`.`data_center` ON 
storage_pool.data_center_id = data_center.id - left join - `cloud`.`host_pod_ref` ON storage_pool.pod_id = host_pod_ref.id - left join - `cloud`.`storage_pool_details` ON storage_pool_details.pool_id = storage_pool.id - and storage_pool_details.value = 'true' - left join - `cloud`.`op_host_capacity` ON storage_pool.id = op_host_capacity.host_id - and op_host_capacity.capacity_type = 3 - left join - `cloud`.`async_job` ON async_job.instance_id = storage_pool.id - and async_job.instance_type = 'StoragePool' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; -CREATE VIEW `cloud`.`disk_offering_view` AS - select - disk_offering.id, - disk_offering.uuid, - disk_offering.name, - disk_offering.display_text, - disk_offering.disk_size, - disk_offering.created, - disk_offering.tags, - disk_offering.customized, - disk_offering.removed, - disk_offering.use_local_storage, - disk_offering.system_use, - disk_offering.sort_key, - disk_offering.type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`disk_offering` - left join - `cloud`.`domain` ON disk_offering.domain_id = domain.id; - -DROP VIEW IF EXISTS `cloud`.`service_offering_view`; -CREATE VIEW `cloud`.`service_offering_view` AS - select - service_offering.id, - disk_offering.uuid, - disk_offering.name, - disk_offering.display_text, - disk_offering.created, - disk_offering.tags, - disk_offering.removed, - disk_offering.use_local_storage, - disk_offering.system_use, - service_offering.cpu, - service_offering.speed, - service_offering.ram_size, - service_offering.nw_rate, - service_offering.mc_rate, - service_offering.ha_enabled, - service_offering.limit_cpu_use, - service_offering.host_tag, - service_offering.default_use, - service_offering.vm_type, - service_offering.sort_key, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`service_offering` - inner 
join - `cloud`.`disk_offering` ON service_offering.id = disk_offering.id - left join - `cloud`.`domain` ON disk_offering.domain_id = domain.id; - -DROP VIEW IF EXISTS `cloud`.`data_center_view`; -CREATE VIEW `cloud`.`data_center_view` AS - select - data_center.id, - data_center.uuid, - data_center.name, - data_center.is_security_group_enabled, - data_center.is_local_storage_enabled, - data_center.description, - data_center.dns1, - data_center.dns2, - data_center.internal_dns1, - data_center.internal_dns2, - data_center.guest_network_cidr, - data_center.domain, - data_center.networktype, - data_center.allocation_state, - data_center.zone_token, - data_center.dhcp_provider, - data_center.removed, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`data_center` - left join - `cloud`.`domain` ON data_center.domain_id = domain.id; \ No newline at end of file diff --git a/setup/db/deploy-db-dev.sh b/setup/db/deploy-db-dev.sh index a40e278b002..29ec4db6050 100755 --- a/setup/db/deploy-db-dev.sh +++ b/setup/db/deploy-db-dev.sh @@ -55,11 +55,6 @@ if [ ! -f create-index-fk.sql ]; then exit 6; fi -if [ ! -f create-schema-view.sql ]; then - printf "Error: Unable to find create-schema-view.sql\n" - exit 7 -fi - PATHSEP=':' if [[ $OSTYPE == "cygwin" ]] ; then export CATALINA_HOME=`cygpath -m $CATALINA_HOME` @@ -105,12 +100,6 @@ if [ $? -ne 0 ]; then exit 11 fi -mysql --user=cloud --password=cloud cloud < create-schema-view.sql -if [ $? -ne 0 ]; then - printf "Error: Cannot execute create-schema-view.sql\n" - exit 11 -fi - CP=./ CP=${CP}$PATHSEP$CATALINA_HOME/conf diff --git a/setup/db/deploy-db-simulator.sh b/setup/db/deploy-db-simulator.sh index c918df43009..20e12d37794 100644 --- a/setup/db/deploy-db-simulator.sh +++ b/setup/db/deploy-db-simulator.sh @@ -55,11 +55,6 @@ if [ ! -f create-index-fk.sql ]; then exit 6; fi -if [ ! 
-f create-schema-view.sql ]; then - printf "Error: Unable to find create-schema-view.sql\n" - exit 7 -fi - PATHSEP=':' if [[ $OSTYPE == "cygwin" ]] ; then @@ -109,12 +104,6 @@ if [ $? -ne 0 ]; then exit 11 fi -mysql --user=cloud --password=cloud cloud < create-schema-view.sql -if [ $? -ne 0 ]; then - printf "Error: Cannot execute create-schema-view.sql\n" - exit 11 -fi - mysql --user=cloud --password=cloud cloud < create-schema-simulator.sql if [ $? -ne 0 ]; then printf "Error: Cannot execute create-schema-simulator.sql\n" From a4510efc24719a38d6a5964b6a8ce8547638583b Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Mon, 18 Feb 2013 10:26:49 +0530 Subject: [PATCH 051/486] maven: parent pom of tools/* projects is tools/pom.xml Fixes broken builds for tools Signed-off-by: Prasanna Santhanam --- tools/apidoc/pom.xml | 2 +- tools/cli/pom.xml | 2 +- tools/devcloud-kvm/pom.xml | 2 +- tools/devcloud/pom.xml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index 7358b926642..d75286b81b0 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -16,7 +16,7 @@ pom org.apache.cloudstack - cloudstack + cloud-tools 4.1.0-SNAPSHOT ../pom.xml diff --git a/tools/cli/pom.xml b/tools/cli/pom.xml index 5f14d7c7d5a..a145d814c38 100644 --- a/tools/cli/pom.xml +++ b/tools/cli/pom.xml @@ -24,7 +24,7 @@ pom org.apache.cloudstack - cloudstack + cloud-tools 4.1.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index d4e12e48c41..713b12d87bd 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -16,7 +16,7 @@ pom org.apache.cloudstack - cloudstack + cloud-tools 4.1.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index 5257cb8d1bf..b3fd79bb0bb 100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -16,7 +16,7 @@ pom org.apache.cloudstack - cloudstack + cloud-tools 4.1.0-SNAPSHOT ../pom.xml From 
ed757f24aea31a3befacc0f6ee0646c8e4e02b19 Mon Sep 17 00:00:00 2001 From: Koushik Das Date: Mon, 18 Feb 2013 15:25:09 +0530 Subject: [PATCH 052/486] As part of the upgrade changes, some db changes got removed from create-schema.sql. Moved the changes to the corresponding upgrade sql. --- setup/db/db/schema-410to420.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index d1f90be449c..8bd9bfd353e 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -19,3 +19,7 @@ -- Schema upgrade from 4.1.0 to 4.2.0; --; +ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. hosts in cluster supported by hypervisor'; +UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware'; +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); +DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; From 2327831f7f2177894397dfc7265ffb4d8b77872a Mon Sep 17 00:00:00 2001 From: Radhika PC Date: Mon, 18 Feb 2013 15:07:05 +0530 Subject: [PATCH 053/486] cloudstack-991 Signed-off-by: Radhika PC --- docs/en-US/console-proxy.xml | 6 +-- docs/en-US/working-with-system-vm.xml | 56 +++++++++++++++------------ 2 files changed, 34 insertions(+), 28 deletions(-) diff --git a/docs/en-US/console-proxy.xml b/docs/en-US/console-proxy.xml index 3dd7b9fd692..697ee2e2146 100644 --- a/docs/en-US/console-proxy.xml +++ b/docs/en-US/console-proxy.xml @@ -24,11 +24,11 @@ console view via the web UI. It connects the user’s browser to the VNC port made available via the hypervisor for the console of the guest. Both the administrator and end user web UIs offer a console connection. - Clicking on a console icon brings up a new window. 
The AJAX code downloaded into that window + Clicking a console icon brings up a new window. The AJAX code downloaded into that window refers to the public IP address of a console proxy VM. There is exactly one public IP address allocated per console proxy VM. The AJAX application connects to this IP. The console proxy then - proxies the connection to the VNC port for the requested VM on the Host hosting the guest. - . + proxies the connection to the VNC port for the requested VM on the Host hosting the + guest. The hypervisors will have many ports assigned to VNC usage so that multiple VNC sessions can occur simultaneously. diff --git a/docs/en-US/working-with-system-vm.xml b/docs/en-US/working-with-system-vm.xml index 97459f947bf..70f7dd1aa4e 100644 --- a/docs/en-US/working-with-system-vm.xml +++ b/docs/en-US/working-with-system-vm.xml @@ -1,33 +1,39 @@ - %BOOK_ENTITIES; ]> - - - Working with System Virtual Machines - &PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based on scale and immediate needs. However, the administrator should be aware of them and their roles to assist in debugging issues. - - - - - + Working with System Virtual Machines + &PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In + general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based + on scale and immediate needs. However, the administrator should be aware of them and their roles + to assist in debugging issues. + + You can configure the system.vm.random.password parameter to create a random system VM + password to ensure higher security. If you reset the value for system.vm.random.password to + true and restart the Management Server, a random password is generated and stored encrypted in + the database. 
You can view the decrypted password under the system.vm.password global + parameter on the &PRODUCT; UI or by calling the listConfigurations API. + + + + + + From ab63a433ecbf60e18ad6cbcb0353c61fa432bcdc Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 18 Feb 2013 18:37:34 +0530 Subject: [PATCH 054/486] systemvmtemplate: Fix definitions and partition map - Fix definitions, create vhd (virtual disk) format (reusable for hyperv) - Fix partition maps in preseed, create /boot, /home, /var, /usr, /opt etc. Signed-off-by: Rohit Yadav --- .../systemvmtemplate/definition.rb | 2 +- .../definitions/systemvmtemplate/preseed.cfg | 64 +++++++++++++++---- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/tools/appliance/definitions/systemvmtemplate/definition.rb b/tools/appliance/definitions/systemvmtemplate/definition.rb index a839182bbd9..8fd85306018 100644 --- a/tools/appliance/definitions/systemvmtemplate/definition.rb +++ b/tools/appliance/definitions/systemvmtemplate/definition.rb @@ -1,7 +1,7 @@ Veewee::Definition.declare({ :cpu_count => '1', :memory_size=> '256', - :disk_size => '2048', :disk_format => 'VMDK', :hostiocache => 'off', + :disk_size => '2048', :disk_format => 'VHD', :hostiocache => 'off', :os_type_id => 'Debian', :iso_file => "debian-wheezy-DI-b4-i386-netinst.iso", :iso_src => "http://cdimage.debian.org/cdimage/wheezy_di_beta4/i386/iso-cd/debian-wheezy-DI-b4-i386-netinst.iso", diff --git a/tools/appliance/definitions/systemvmtemplate/preseed.cfg b/tools/appliance/definitions/systemvmtemplate/preseed.cfg index 204d573a9d0..b4d28955473 100644 --- a/tools/appliance/definitions/systemvmtemplate/preseed.cfg +++ b/tools/appliance/definitions/systemvmtemplate/preseed.cfg @@ -99,29 +99,28 @@ d-i clock-setup/ntp boolean true d-i partman-auto/disk string /dev/sda # In addition, you'll need to specify the method to use. 
# The presently available methods are: "regular", "lvm" and "crypto" -d-i partman-auto/method string lvm +d-i partman-auto/method string regular # If one of the disks that are going to be automatically partitioned # contains an old LVM configuration, the user will normally receive a # warning. This can be preseeded away... -d-i partman-lvm/device_remove_lvm boolean true +#d-i partman-lvm/device_remove_lvm boolean true # The same applies to pre-existing software RAID array: -d-i partman-md/device_remove_md boolean true +#d-i partman-md/device_remove_md boolean true # And the same goes for the confirmation to write the lvm partitions. -d-i partman-lvm/confirm boolean true -d-i partman-lvm/confirm_nooverwrite boolean true +#d-i partman-lvm/confirm boolean true +#d-i partman-lvm/confirm_nooverwrite boolean true - -d-i partman/choose_partition select finish -d-i partman-auto-lvm/guided_size string max +#d-i partman/choose_partition select finish +#d-i partman-auto-lvm/guided_size string max # You can choose one of the three predefined partitioning recipes: # - atomic: all files in one partition # - home: separate /home partition # - multi: separate /home, /usr, /var, and /tmp partitions -d-i partman-auto/choose_recipe select multi -d-i partman/default_filesystem string ext3 +d-i partman-auto/choose_recipe select atomic +#d-i partman/default_filesystem string ext3 # Or provide a recipe of your own... # The recipe format is documented in the file devel/partman-auto-recipe.txt. @@ -129,6 +128,48 @@ d-i partman/default_filesystem string ext3 # just point at it. #d-i partman-auto/expert_recipe_file string /hd-media/recipe +d-i partman-auto/expert_recipe string \ + boot-root :: \ + 40 50 100 ext4 \ + $primary{ } $bootable{ } \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /boot } \ + . \ + 400 40 500 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ / } \ + . 
\ + 60 100 200 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /home } \ + . \ + 500 30 1000 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /usr } \ + . \ + 400 40 500 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /opt } \ + . \ + 500 60 1000 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /var } \ + . \ + 100 70 400 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ /tmp } \ + . \ + 64 512 300% linux-swap \ + method{ swap } format{ } \ + . + # If not, you can put an entire recipe into the preconfiguration file in one # (logical) line. This example creates a small /boot partition, suitable # swap, and uses the rest of the space for the root partition: @@ -154,11 +195,12 @@ d-i partman/default_filesystem string ext3 # This makes partman automatically partition without confirmation, provided # that you told it what to do using one of the methods above. +#d-i partman-partitioning/confirm_write_new_label boolean true d-i partman/confirm_write_new_label boolean true +d-i partman/choose_partition select finish d-i partman/confirm boolean true d-i partman/confirm_nooverwrite boolean true - ### Base system installation # Select the initramfs generator used to generate the initrd for 2.6 kernels. 
#d-i base-installer/kernel/linux/initramfs-generators string yaird From a4ed061dbfa5daec0208e2c3a84273c1e3f84a52 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 18 Feb 2013 18:51:17 +0530 Subject: [PATCH 055/486] systemvmtemplate: Fix build.sh to export ova, vhd, qcow2 and vhd-hyperv Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index cfd4e8b2349..ef3e384b597 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -18,6 +18,9 @@ set -x appliance="systemvmtemplate" +build_date=`date +%Y-%m-%d` +branch="master" +rootdir=$PWD # Initialize veewee and dependencies bundle @@ -25,3 +28,34 @@ bundle # Start building the appliance veewee vbox build $appliance --nogui veewee vbox halt $appliance + +# Get appliance uuids +machine_uuid=`vboxmanage showvminfo $appliance | grep UUID | head -1 | awk '{print $2}'` +hdd_uuid=`vboxmanage showvminfo $appliance | grep vhd | head -1 | awk '{print $8}' | cut -d ')' -f 1` + +# Start exporting +rm -fr dist +mkdir dist +cd dist + +# Export for VMWare vSphere +vboxmanage export $machine_uuid --output $appliance-$build_date-$branch-vmware.ova + +# Export for HyperV +vboxmanage clonehd $hdd_uuid $appliance-$build_date-$branch-hyperv.vhd --format VHD +bzip2 $appliance-$build_date-$branch-hyperv.vhd +rm $appliance-$build_date-$branch-hyperv.vhd + +# Export for KVM +vboxmanage clonehd $hdd_uuid raw.img --format RAW +qemu-img convert -f raw -O qcow2 raw.img $appliance-$build_date-$branch-kvm.qcow2 +bzip2 $appliance-$build_date-$branch-kvm.qcow2 +rm $appliance-$build_date-$branch-kvm.qcow2 + +# Export for Xen +# This will be an overwrite convert so, do it at the end +vhd-util convert -s 0 -t 1 -i raw.img -o $appliance-$build_date-$branch-xen.vhd +bzip2 $appliance-$build_date-$branch-hyperv.vhd +rm $appliance-$build_date-$branch-hyperv.vhd + +cd $rootdir From 
cc2a3c5498f18cc99ac2dc23554d440138aabb7a Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Mon, 18 Feb 2013 20:29:19 +0530 Subject: [PATCH 056/486] CLOUDSTACK-651: Removed 7 nics limitation for VR on XenServer # of nics can be configured using global param xen.nics.max. Param defaults to 7 Added new param to upgrade schema. --- .../xen/discoverer/XcpServerDiscoverer.java | 1 + .../xen/resource/CitrixResourceBase.java | 45 ++++++++----------- .../src/com/cloud/configuration/Config.java | 4 +- .../com/cloud/resource/DiscovererBase.java | 1 + setup/db/db/schema-410to420.sql | 1 + 5 files changed, 23 insertions(+), 29 deletions(-) diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index 65a97a8de31..a0540637e95 100755 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -315,6 +315,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L params.put("wait", Integer.toString(_wait)); details.put("wait", Integer.toString(_wait)); params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); + params.put(Config.XenMaxNics.toString().toLowerCase(), _configDao.getValue(Config.XenMaxNics.toString())); params.put(Config.InstanceName.toString().toLowerCase(), _instance); details.put(Config.InstanceName.toString().toLowerCase(), _instance); try { diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 22f4ba9cb80..e2e2d6696a9 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ 
-331,6 +331,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected boolean _isOvs = false; protected List _tmpDom0Vif = new ArrayList(); protected XenServerStorageResource storageResource; + protected int _maxNics = 7; public enum SRType { NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT, FILE; @@ -3842,22 +3843,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe throw new CloudRuntimeException("Could not find an available slot in VM with name to attach a new disk."); } - - protected String getUnusedVIFNum(Connection conn, VM vm) { - String vmName = ""; - try { - vmName = vm.getNameLabel(conn); - Set allowedVIFDevices = vm.getAllowedVIFDevices(conn); - if (allowedVIFDevices.size() > 0) { - return allowedVIFDevices.iterator().next(); - } - } catch (Exception e) { - String msg = "getUnusedVIFNum failed due to " + e.toString(); - s_logger.warn(msg, e); - } - throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName + " to plug a VIF"); - } - protected String callHostPlugin(Connection conn, String plugin, String cmd, String... 
params) { Map args = new HashMap(); String msg; @@ -3990,21 +3975,25 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected String getLowestAvailableVIFDeviceNum(Connection conn, VM vm) { try { - Set availableDeviceNums = vm.getAllowedVIFDevices(conn); - Iterator deviceNumsIterator = availableDeviceNums.iterator(); - List sortedDeviceNums = new ArrayList(); - - while (deviceNumsIterator.hasNext()) { - try { - sortedDeviceNums.add(Integer.valueOf(deviceNumsIterator.next())); + List usedDeviceNums = new ArrayList(); + Set vifs = vm.getVIFs(conn); + Iterator vifIter = vifs.iterator(); + while(vifIter.hasNext()){ + VIF vif = vifIter.next(); + try{ + usedDeviceNums.add(Integer.valueOf(vif.getDevice(conn))); } catch (NumberFormatException e) { - s_logger.debug("Obtained an invalid value for an available VIF device number for VM: " + vm.getNameLabel(conn)); + s_logger.debug("Obtained an invalid value for an allocated VIF device number for VM: " + vm.getNameLabel(conn)); return null; } } - Collections.sort(sortedDeviceNums); - return String.valueOf(sortedDeviceNums.get(0)); + for(Integer i=0; i< _maxNics; i++){ + if(!usedDeviceNums.contains(i)){ + s_logger.debug("Lowest available Vif device number: "+i+" for VM: " + vm.getNameLabel(conn)); + return i.toString(); + } + } } catch (XmlRpcException e) { String msg = "Caught XmlRpcException: " + e.getMessage(); s_logger.warn(msg, e); @@ -5655,6 +5644,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe value = (String) params.get("migratewait"); _migratewait = NumbersUtil.parseInt(value, 3600); + _maxNics = NumbersUtil.parseInt((String) params.get("xen.nics.max"), 7); + if (_pod == null) { throw new ConfigurationException("Unable to get the pod"); } @@ -7765,7 +7756,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn(msg); return new PlugNicAnswer(cmd, false, msg); } - String deviceId = getUnusedVIFNum(conn, 
vm); + String deviceId = getLowestAvailableVIFDeviceNum(conn, vm); nic.setDeviceId(Integer.parseInt(deviceId)); vif = createVif(conn, vmName, vm, nic); vif.plug(conn); diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index abc6a09e4ef..4a0306a1986 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -247,7 +247,7 @@ public enum Config { XenBondStorageNic("Advanced", ManagementServer.class, String.class, "xen.bond.storage.nics", null, "Attempt to bond the two networks if found", null), XenHeartBeatInterval("Advanced", ManagementServer.class, Integer.class, "xen.heartbeat.interval", "60", "heartbeat to use when implementing XenServer Self Fencing", null), XenGuestNetwork("Hidden", ManagementServer.class, String.class, "xen.guest.network.device", null, "Specify for guest network name label", null), - + XenMaxNics("Advanced", AgentManager.class, Integer.class, "xen.nics.max", "7", "Maximum allowed nics for Vms created on Xen", null), // VMware VmwarePrivateNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.private.vswitch", null, "Specify the vSwitch on host for private network", null), VmwarePublicNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.public.vswitch", null, "Specify the vSwitch on host for public network", null), @@ -360,7 +360,7 @@ public enum Config { VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null), DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null), ConcurrentSnapshotsThresholdPerHost("Advanced", ManagementServer.class, Long.class, "concurrent.snapshots.threshold.perhost", - null, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", null), + 
null, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", null), NetworkIPv6SearchRetryMax("Network", ManagementServer.class, Integer.class, "network.ipv6.search.retry.max", "10000", "The maximum number of retrying times to search for an available IPv6 address in the table", null), ExternalBaremetalSystemUrl("Advanced", ManagementServer.class, String.class, "external.baremetal.system.url", null, "url of external baremetal system that CloudStack will talk to", null), diff --git a/server/src/com/cloud/resource/DiscovererBase.java b/server/src/com/cloud/resource/DiscovererBase.java index 940608c4419..b7c5b6f58de 100644 --- a/server/src/com/cloud/resource/DiscovererBase.java +++ b/server/src/com/cloud/resource/DiscovererBase.java @@ -128,6 +128,7 @@ public abstract class DiscovererBase extends AdapterBase implements Discoverer { params.put("secondary.storage.vm", "false"); params.put("max.template.iso.size", _configDao.getValue(Config.MaxTemplateAndIsoSize.toString())); params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); + params.put(Config.XenMaxNics.toString().toLowerCase(), _configDao.getValue(Config.XenMaxNics.toString())); return params; } diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 8bd9bfd353e..65add75294b 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -23,3 +23,4 @@ ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware'; INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 
'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); \ No newline at end of file From c97653b42ecb5741fefa3ce45477744c223f6806 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Mon, 18 Feb 2013 21:32:30 +0530 Subject: [PATCH 057/486] systemvmtemplate: Clean any old vm before building appliance Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index ef3e384b597..e504d9cbf67 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -25,7 +25,8 @@ rootdir=$PWD # Initialize veewee and dependencies bundle -# Start building the appliance +# Clean and start building the appliance +veewee vbox destroy $appliance veewee vbox build $appliance --nogui veewee vbox halt $appliance From a6f5052958d56a2cba672dcda754d3328557e6b9 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Feb 2013 11:33:45 +0530 Subject: [PATCH 058/486] CLOUDSTACK-1066: Wait for appliance to shutdown before exporting to various fmts Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 14 +++++++++----- .../definitions/systemvmtemplate/definition.rb | 2 +- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index e504d9cbf67..38d71fd00d0 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -16,6 +16,7 @@ # under the License. 
set -x +set -e appliance="systemvmtemplate" build_date=`date +%Y-%m-%d` @@ -30,9 +31,15 @@ veewee vbox destroy $appliance veewee vbox build $appliance --nogui veewee vbox halt $appliance +while [[ `vboxmanage list runningvms | grep $appliance | wc -l` -ne 0 ]]; +do + echo "Waiting for $appliance to shutdown" + sleep 2; +done + # Get appliance uuids machine_uuid=`vboxmanage showvminfo $appliance | grep UUID | head -1 | awk '{print $2}'` -hdd_uuid=`vboxmanage showvminfo $appliance | grep vhd | head -1 | awk '{print $8}' | cut -d ')' -f 1` +hdd_uuid=`vboxmanage showvminfo $appliance | grep vmdk | head -1 | awk '{print $8}' | cut -d ')' -f 1` # Start exporting rm -fr dist @@ -45,18 +52,15 @@ vboxmanage export $machine_uuid --output $appliance-$build_date-$branch-vmware.o # Export for HyperV vboxmanage clonehd $hdd_uuid $appliance-$build_date-$branch-hyperv.vhd --format VHD bzip2 $appliance-$build_date-$branch-hyperv.vhd -rm $appliance-$build_date-$branch-hyperv.vhd # Export for KVM vboxmanage clonehd $hdd_uuid raw.img --format RAW qemu-img convert -f raw -O qcow2 raw.img $appliance-$build_date-$branch-kvm.qcow2 bzip2 $appliance-$build_date-$branch-kvm.qcow2 -rm $appliance-$build_date-$branch-kvm.qcow2 # Export for Xen # This will be an overwrite convert so, do it at the end vhd-util convert -s 0 -t 1 -i raw.img -o $appliance-$build_date-$branch-xen.vhd -bzip2 $appliance-$build_date-$branch-hyperv.vhd -rm $appliance-$build_date-$branch-hyperv.vhd +bzip2 $appliance-$build_date-$branch-xen.vhd cd $rootdir diff --git a/tools/appliance/definitions/systemvmtemplate/definition.rb b/tools/appliance/definitions/systemvmtemplate/definition.rb index 8fd85306018..a839182bbd9 100644 --- a/tools/appliance/definitions/systemvmtemplate/definition.rb +++ b/tools/appliance/definitions/systemvmtemplate/definition.rb @@ -1,7 +1,7 @@ Veewee::Definition.declare({ :cpu_count => '1', :memory_size=> '256', - :disk_size => '2048', :disk_format => 'VHD', :hostiocache => 'off', + :disk_size => 
'2048', :disk_format => 'VMDK', :hostiocache => 'off', :os_type_id => 'Debian', :iso_file => "debian-wheezy-DI-b4-i386-netinst.iso", :iso_src => "http://cdimage.debian.org/cdimage/wheezy_di_beta4/i386/iso-cd/debian-wheezy-DI-b4-i386-netinst.iso", From a5f77d348da1b83146e054966ea69b1a4c75013b Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Feb 2013 12:31:32 +0530 Subject: [PATCH 059/486] CLOUDSTACK-1066: RVM needs a login shell, fix build.sh to start with a login shell Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index 38d71fd00d0..4dcb200915e 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -1,3 +1,4 @@ +#!/bin/bash -xl # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information From 447b2d9c956337de51dfc1725b043b1f360e71a8 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Tue, 19 Feb 2013 12:26:23 +0530 Subject: [PATCH 060/486] listroutercmd: response should be set to the name of router host name of router returned instead of the name of the router. correcting the setter. 
Signed-off-by: Prasanna Santhanam --- server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index 96b91df79f9..3b780ee83ef 100644 --- a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -68,7 +68,7 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase Date: Tue, 19 Feb 2013 13:24:30 +0530 Subject: [PATCH 061/486] CLOUDSTACK-1066: Make appliance export verbose, put archived images in dist/ Signed-off-by: Rohit Yadav --- tools/appliance/build.sh | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index 4dcb200915e..366b246fbe2 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -45,23 +45,25 @@ hdd_uuid=`vboxmanage showvminfo $appliance | grep vmdk | head -1 | awk '{print $ # Start exporting rm -fr dist mkdir dist -cd dist # Export for VMWare vSphere -vboxmanage export $machine_uuid --output $appliance-$build_date-$branch-vmware.ova +vboxmanage export $machine_uuid --output dist/$appliance-$build_date-$branch-vmware.ova +echo "$appliance exported for VMWare: dist/$appliance-$build_date-$branch-vmware.ova" # Export for HyperV -vboxmanage clonehd $hdd_uuid $appliance-$build_date-$branch-hyperv.vhd --format VHD -bzip2 $appliance-$build_date-$branch-hyperv.vhd +vboxmanage clonehd $hdd_uuid dist/$appliance-$build_date-$branch-hyperv.vhd --format VHD +bzip2 dist/$appliance-$build_date-$branch-hyperv.vhd +echo "$appliance exported for HyperV: dist/$appliance-$build_date-$branch-hyperv.vhd.bz2" # Export for KVM -vboxmanage clonehd $hdd_uuid raw.img --format RAW -qemu-img convert -f raw -O qcow2 raw.img $appliance-$build_date-$branch-kvm.qcow2 -bzip2 
$appliance-$build_date-$branch-kvm.qcow2 +vboxmanage clonehd $hdd_uuid dist/raw.img --format RAW +qemu-img convert -f raw -O qcow2 dist/raw.img dist/$appliance-$build_date-$branch-kvm.qcow2 +bzip2 dist/$appliance-$build_date-$branch-kvm.qcow2 +echo "$appliance exported for KVM: dist/$appliance-$build_date-$branch-kvm.qcow2.bz2" # Export for Xen # This will be an overwrite convert so, do it at the end -vhd-util convert -s 0 -t 1 -i raw.img -o $appliance-$build_date-$branch-xen.vhd -bzip2 $appliance-$build_date-$branch-xen.vhd +vhd-util convert -s 0 -t 1 -i dist/raw.img -o dist/$appliance-$build_date-$branch-xen.vhd +bzip2 dist/$appliance-$build_date-$branch-xen.vhd +echo "$appliance exported for Xen: dist/$appliance-$build_date-$branch-xen.vhd.bz2" -cd $rootdir From ca432fe6b7d006952bd0a7d85b94b988cf68c623 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Feb 2013 13:36:21 +0530 Subject: [PATCH 062/486] db: Fix upgrade paths to 4.1.0 Signed-off-by: Rohit Yadav --- .../cloud/upgrade/DatabaseUpgradeChecker.java | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index f831a032385..f48709452b1 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -83,81 +83,81 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.1.8", new DbUpgrade[] { new 
Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new 
Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40() }); + new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); 
_upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30() - , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.11", new DbUpgrade[] { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.12", new DbUpgrade[] { new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new 
Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.13", new DbUpgrade[] { new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("2.2.14", new DbUpgrade[] { new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40() }); + new Upgrade302to40(), new Upgrade40to41() }); - _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); - _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40() }); + _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); - _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40() }); + _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40(), new Upgrade40to41() }); _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41() }); } From e85b1cbe8fd25eb8612ad9fe08f8802a554be707 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 19 Feb 2013 14:01:50 +0530 Subject: [PATCH 063/486] CLOUDSTACK-1066: Fix README for building systemvm appliance Signed-off-by: Rohit Yadav --- tools/appliance/README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/appliance/README.md b/tools/appliance/README.md index aa1001e1b1b..2f6f656212d 100644 --- a/tools/appliance/README.md +++ b/tools/appliance/README.md @@ -19,6 +19,8 @@ under the License. 
# Setting up Tools and Environment + - Install VirtualBox 4.2 or latest + - Tool for exporting appliances: qemu-img, vboxmanage, vhd-util - Install [RVM](https://rvm.io/rvm/install) - Setup paths: export PATH=~/.rvm/bin:$PATH @@ -36,7 +38,14 @@ Note, gem may require gcc-4.2, make sure link exists: sudo ln -s /usr/bin/gcc /usr/bin/gcc-4.2 -# How to build SystemVM template appliance +# How to build SystemVMs automatically + +Just run build.sh, it will export archived appliances for KVM, Xen, +VMWare and HyperV in `dist`: + + sh build.sh + +# Building SystemVM template appliance manually List available appliances one can build: @@ -50,3 +59,9 @@ Build systemvm template appliance: Start the box: veewee vbox up 'systemvmtemplate' + +Halt the box: + + veewee vbox halt 'systemvmtemplate' + +Now VirtualBox can be used to export appliance. From c94a75c0a6d4048fced1ff1db2e8219b534d3d2f Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Tue, 19 Feb 2013 14:14:52 +0530 Subject: [PATCH 064/486] CLOUDSTACK-752: Removed 1 private gateway for VPC limitation. Verified that more than 8 private gateways can be created. 
--- .../hypervisor/xen/resource/CitrixResourceBase.java | 11 +++++++---- .../router/VpcVirtualNetworkApplianceManagerImpl.java | 4 ++-- server/src/com/cloud/network/vpc/VpcManagerImpl.java | 10 ++-------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index e2e2d6696a9..33ad18d0331 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -3974,7 +3974,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } protected String getLowestAvailableVIFDeviceNum(Connection conn, VM vm) { + String vmName = ""; try { + vmName = vm.getNameLabel(conn); List usedDeviceNums = new ArrayList(); Set vifs = vm.getVIFs(conn); Iterator vifIter = vifs.iterator(); @@ -3983,14 +3985,15 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try{ usedDeviceNums.add(Integer.valueOf(vif.getDevice(conn))); } catch (NumberFormatException e) { - s_logger.debug("Obtained an invalid value for an allocated VIF device number for VM: " + vm.getNameLabel(conn)); - return null; + String msg = "Obtained an invalid value for an allocated VIF device number for VM: " + vmName; + s_logger.debug(msg, e); + throw new CloudRuntimeException(msg); } } for(Integer i=0; i< _maxNics; i++){ if(!usedDeviceNums.contains(i)){ - s_logger.debug("Lowest available Vif device number: "+i+" for VM: " + vm.getNameLabel(conn)); + s_logger.debug("Lowest available Vif device number: "+i+" for VM: " + vmName); return i.toString(); } } @@ -4002,7 +4005,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn(msg, e); } - return null; + throw new CloudRuntimeException("Could not find available VIF slot in VM 
with name: " + vmName); } protected VDI mount(Connection conn, StoragePoolType pooltype, String volumeFolder, String volumePath) { diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index d7fe3e05d97..4d1968d9f94 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -341,11 +341,11 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian _agentMgr.send(dest.getHost().getId(), cmds); PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (!(plugNicAnswer != null && plugNicAnswer.getResult())) { - s_logger.warn("Unable to plug nic for vm " + vm.getHostName()); + s_logger.warn("Unable to plug nic for vm " + vm.getName()); result = false; } } catch (OperationTimedoutException e) { - throw new AgentUnavailableException("Unable to plug nic for router " + vm.getHostName() + " in network " + network, + throw new AgentUnavailableException("Unable to plug nic for router " + vm.getName() + " in network " + network, dest.getHost().getId(), e); } } else { diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 7197c363264..fbb5788d362 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -1233,13 +1233,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ ex.addProxyObject("vpc", vpcId, "VPC"); throw ex; } - - //allow only one private gateway per vpc - VpcGatewayVO gatewayVO = _vpcGatewayDao.getPrivateGatewayForVpc(vpcId); - if (gatewayVO != null) { - throw new InvalidParameterValueException("Private ip address already exists for vpc " + vpc); - } - + //Validate physical network if (physicalNetworkId == null) { List pNtwks = 
_ntwkModel.getPhysicalNtwksSupportingTrafficType(vpc.getZoneId(), TrafficType.Guest); @@ -1258,7 +1252,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ vlan, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId); //2) create gateway entry - gatewayVO = new VpcGatewayVO(ipAddress, VpcGateway.Type.Private, vpcId, privateNtwk.getDataCenterId(), + VpcGatewayVO gatewayVO = new VpcGatewayVO(ipAddress, VpcGateway.Type.Private, vpcId, privateNtwk.getDataCenterId(), privateNtwk.getId(), vlan, gateway, netmask, vpc.getAccountId(), vpc.getDomainId()); _vpcGatewayDao.persist(gatewayVO); From f2b97db0f9409b3a45eb58d88d04f27709e3ace0 Mon Sep 17 00:00:00 2001 From: Abhinandan Prateek Date: Tue, 19 Feb 2013 15:36:39 +0530 Subject: [PATCH 065/486] CLOUDSTACK-1172: LDAP enhancements --- .../configuration/ConfigurationService.java | 2 + .../api/command/admin/ldap/LDAPConfigCmd.java | 61 ++++++++++++++++--- .../api/response/LDAPConfigResponse.java | 2 +- .../server/auth/LDAPUserAuthenticator.java | 9 ++- .../ConfigurationManagerImpl.java | 33 ++++++++-- .../vpc/MockConfigurationManagerImpl.java | 9 +++ 6 files changed, 99 insertions(+), 17 deletions(-) diff --git a/api/src/com/cloud/configuration/ConfigurationService.java b/api/src/com/cloud/configuration/ConfigurationService.java index a9595fe7574..e63fcece525 100644 --- a/api/src/com/cloud/configuration/ConfigurationService.java +++ b/api/src/com/cloud/configuration/ConfigurationService.java @@ -264,6 +264,8 @@ public interface ConfigurationService { boolean removeLDAP(LDAPRemoveCmd cmd); + LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd); + /** * @param offering * @return diff --git a/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java b/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java index fbe8ab000e6..2976de4bf28 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java @@ -31,6 +31,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @APICommand(name = "ldapConfig", description="Configure the LDAP context for this site.", responseObject=LDAPConfigResponse.class, since="3.0.0") @@ -43,7 +44,10 @@ public class LDAPConfigCmd extends BaseCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.HOST_NAME, type=CommandType.STRING, required=true, description="Hostname or ip address of the ldap server eg: my.ldap.com") + @Parameter(name=ApiConstants.LIST_ALL, type=CommandType.STRING, description="Hostname or ip address of the ldap server eg: my.ldap.com") + private String listall; + + @Parameter(name=ApiConstants.HOST_NAME, type=CommandType.STRING, description="Hostname or ip address of the ldap server eg: my.ldap.com") private String hostname; @Parameter(name=ApiConstants.PORT, type=CommandType.INTEGER, description="Specify the LDAP port if required, default is 389.") @@ -52,10 +56,10 @@ public class LDAPConfigCmd extends BaseCmd { @Parameter(name=ApiConstants.USE_SSL, type=CommandType.BOOLEAN, description="Check Use SSL if the external LDAP server is configured for LDAP over SSL.") private Boolean useSSL; - @Parameter(name=ApiConstants.SEARCH_BASE, type=CommandType.STRING, required=true, description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com.") + @Parameter(name=ApiConstants.SEARCH_BASE, type=CommandType.STRING, description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com.") private String 
searchBase; - @Parameter(name=ApiConstants.QUERY_FILTER, type=CommandType.STRING, required=true, description="You specify a query filter here, which narrows down the users, who can be part of this domain.") + @Parameter(name=ApiConstants.QUERY_FILTER, type=CommandType.STRING, description="You specify a query filter here, which narrows down the users, who can be part of this domain.") private String queryFilter; @Parameter(name=ApiConstants.BIND_DN, type=CommandType.STRING, description="Specify the distinguished name of a user with the search permission on the directory.") @@ -74,6 +78,10 @@ public class LDAPConfigCmd extends BaseCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// + public String getListAll() { + return listall == null ? "false" : listall; + } + public String getBindPassword() { return bindPassword; } @@ -82,30 +90,56 @@ public class LDAPConfigCmd extends BaseCmd { return bindDN; } + public void setBindDN(String bdn) { + this.bindDN=bdn; + } + public String getQueryFilter() { return queryFilter; } + public void setQueryFilter(String queryFilter) { + this.queryFilter=queryFilter; + } public String getSearchBase() { return searchBase; } + public void setSearchBase(String searchBase) { + this.searchBase=searchBase; + } + public Boolean getUseSSL() { - return useSSL == null ? Boolean.FALSE : Boolean.TRUE; + return useSSL == null ? Boolean.FALSE : useSSL; + } + + public void setUseSSL(Boolean useSSL) { + this.useSSL=useSSL; } public String getHostname() { return hostname; } + public void setHostname(String hostname) { + this.hostname=hostname; + } + public Integer getPort() { return port <= 0 ? 
389 : port; } + public void setPort(Integer port) { + this.port=port; + } + public String getTrustStore() { return trustStore; } + public void setTrustStore(String trustStore) { + this.trustStore=trustStore; + } public String getTrustStorePassword() { return trustStorePassword; @@ -122,12 +156,25 @@ public class LDAPConfigCmd extends BaseCmd { InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException { try { - boolean result = _configService.updateLDAP(this); - if (result){ - LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(getHostname(), getPort(), getUseSSL(), getQueryFilter(), getSearchBase(), getBindDN()); + if ("true".equalsIgnoreCase(getListAll())){ + // return the existing conf + LDAPConfigCmd cmd = _configService.listLDAPConfig(this); + LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(cmd.getHostname(), cmd.getPort(), cmd.getUseSSL(), + cmd.getQueryFilter(), cmd.getSearchBase(), cmd.getBindDN()); lr.setResponseName(getCommandName()); this.setResponseObject(lr); } + else if (getHostname()==null || getSearchBase() == null || getQueryFilter() == null) { + throw new InvalidParameterValueException("You need to provide hostname, serachbase and queryfilter to configure your LDAP server"); + } + else { + boolean result = _configService.updateLDAP(this); + if (result){ + LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(getHostname(), getPort(), getUseSSL(), getQueryFilter(), getSearchBase(), getBindDN()); + lr.setResponseName(getCommandName()); + this.setResponseObject(lr); + } + } } catch (NamingException ne){ ne.printStackTrace(); diff --git a/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java b/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java index aa10229f2bd..bbeec630d81 100644 --- a/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java +++ b/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java 
@@ -30,7 +30,7 @@ public class LDAPConfigResponse extends BaseResponse { @SerializedName(ApiConstants.PORT) @Param(description="Specify the LDAP port if required, default is 389") private String port; - @SerializedName(ApiConstants.PORT) @Param(description="Check Use SSL if the external LDAP server is configured for LDAP over SSL") + @SerializedName(ApiConstants.USE_SSL) @Param(description="Check Use SSL if the external LDAP server is configured for LDAP over SSL") private String useSSL; @SerializedName(ApiConstants.SEARCH_BASE) @Param(description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com") diff --git a/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java b/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java index fb0273e6ea3..61eebe5fc93 100644 --- a/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java +++ b/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java @@ -66,7 +66,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { String port = _configDao.getValue(LDAPParams.port.toString()); String queryFilter = _configDao.getValue(LDAPParams.queryfilter.toString()); String searchBase = _configDao.getValue(LDAPParams.searchbase.toString()); - String useSSL = _configDao.getValue(LDAPParams.usessl.toString()); + Boolean useSSL = Boolean.valueOf(_configDao.getValue(LDAPParams.usessl.toString())); String bindDN = _configDao.getValue(LDAPParams.dn.toString()); String bindPasswd = _configDao.getValue(LDAPParams.passwd.toString()); String trustStore = _configDao.getValue(LDAPParams.truststore.toString()); @@ -77,7 +77,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { Hashtable env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY,"com.sun.jndi.ldap.LdapCtxFactory"); String protocol = "ldap://" ; - if (new Boolean(useSSL)){ + 
if (useSSL){ env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol="ldaps://" ; System.setProperty("javax.net.ssl.trustStore", trustStore); @@ -123,7 +123,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY,"com.sun.jndi.ldap.LdapCtxFactory"); protocol = "ldap://" ; - if (new Boolean(useSSL)){ + if (useSSL){ env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol="ldaps://" ; } @@ -135,8 +135,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { ctx.close(); } catch (NamingException ne) { - ne.printStackTrace(); - s_logger.warn("Authentication failed due to " + ne.getMessage()); + s_logger.warn("Authentication Failed ! " + ne.getMessage() + (ne.getCause() != null ? ("; Caused by:" + ne.getCause().getMessage()) : "")); return false; } catch (Exception e){ diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 074675cdd33..b886bedbc48 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1246,6 +1246,27 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return true; } + + @Override + @DB + public LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd) { + String hostname = _configDao.getValue(LDAPParams.hostname.toString()); + cmd.setHostname(hostname == null ? "" : hostname); + String port = _configDao.getValue(LDAPParams.port.toString()); + cmd.setPort(port == null ? 0 : Integer.valueOf(port)); + String queryFilter = _configDao.getValue(LDAPParams.queryfilter.toString()); + cmd.setQueryFilter(queryFilter == null ? "" : queryFilter); + String searchBase = _configDao.getValue(LDAPParams.searchbase.toString()); + cmd.setSearchBase(searchBase == null ? 
"" : searchBase); + String useSSL = _configDao.getValue(LDAPParams.usessl.toString()); + cmd.setUseSSL(useSSL == null ? Boolean.FALSE : Boolean.valueOf(useSSL)); + String binddn = _configDao.getValue(LDAPParams.dn.toString()); + cmd.setBindDN(binddn == null ? "" : binddn); + String truststore = _configDao.getValue(LDAPParams.truststore.toString()); + cmd.setTrustStore(truststore == null ? "" : truststore); + return cmd; + } + @Override @DB public boolean updateLDAP(LDAPConfigCmd cmd) { @@ -1265,11 +1286,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("If you specify a bind name then you need to provide bind password too."); } + // check query filter if it contains valid substitution + if (!queryFilter.contains("%u") && !queryFilter.contains("%n") && !queryFilter.contains("%e")){ + throw new InvalidParameterValueException("QueryFilter should contain at least one of the substitutions: %u, %n or %e: " + queryFilter); + } + // check if the info is correct Hashtable env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory"); String protocol = "ldap://"; - if (new Boolean(useSSL)) { + if (useSSL) { env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol = "ldaps://"; if (trustStore == null || trustStorePassword == null) { @@ -1288,7 +1314,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati DirContext ctx = new InitialDirContext(env); ctx.close(); - // store the result in DB COnfiguration + // store the result in DB Configuration ConfigurationVO cvo = _configDao.findByName(LDAPParams.hostname.toString()); if (cvo == null) { cvo = new ConfigurationVO("Hidden", "DEFAULT", "management-server", LDAPParams.hostname.toString(), null, "Hostname or ip address of the ldap server eg: my.ldap.com"); @@ -1356,8 +1382,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati s_logger.debug("The ldap server 
is configured: " + hostname); } catch (NamingException ne) { - ne.printStackTrace(); - throw new InvalidParameterValueException("Naming Exception, check you ldap data ! " + ne.getMessage() + (ne.getCause() != null ? ("Caused by:" + ne.getCause().getMessage()) : "")); + throw new InvalidParameterValueException("Naming Exception, check you ldap data ! " + ne.getMessage() + (ne.getCause() != null ? ("; Caused by:" + ne.getCause().getMessage()) : "")); } return true; } diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index 574ce0a0352..180138ac136 100644 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -365,6 +365,15 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu return false; } + /* (non-Javadoc) + * @see com.cloud.configuration.ConfigurationService#listLDAPConfig(org.apache.cloudstack.api.commands.LDAPConfigCmd) + */ + @Override + public LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd) { + // TODO Auto-generated method stub + return null; + } + /* (non-Javadoc) * @see com.cloud.configuration.ConfigurationService#isOfferingForVpc(com.cloud.offering.NetworkOffering) */ From bff3d8ac9a0566addeb24d4c4c16ceab99b242b6 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Tue, 19 Feb 2013 16:40:47 +0530 Subject: [PATCH 066/486] LDAP-UI integration code --- ui/scripts/globalSettings.js | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/ui/scripts/globalSettings.js b/ui/scripts/globalSettings.js index 5f7fb742b2d..598c9c542c8 100644 --- a/ui/scripts/globalSettings.js +++ b/ui/scripts/globalSettings.js @@ -97,19 +97,17 @@ ssl: { label: 'SSL' - } - }, dataProvider:function(args){ var data = {}; listViewDataProvider(args, data); $.ajax({ - url: createURL(''), //Need a list LDAP configuration API call which needs to be implemented + url: 
createURL('ldapConfig&listall=true'), //Need a list LDAP configuration API call which needs to be implemented data: data, success: function(json) { - // var items = json.listldapconfigresponse; + var items = json.ldapconfigresponse.ldapconfig; args.response.success({data:items}); }, error: function(data) { @@ -185,7 +183,7 @@ dataType: "json", async: true, success: function(json) { - var items = json.ldapconfigresponse; + var items = json.ldapconfigresponse.ldapconfig; args.response.success({ data: items }); From edac894f318e79c5bd121ba4f914998bf48f21f5 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Tue, 19 Feb 2013 16:30:25 +0530 Subject: [PATCH 067/486] spring changes for simulator component allow spring to do DI for simulator plugin. componentContext.xml will have simulator components disabled by default. Signed-off-by: Prasanna Santhanam --- client/tomcatconf/componentContext.xml.in | 13 ++++ .../agent/manager/MockAgentManagerImpl.java | 2 + .../agent/manager/MockStorageManagerImpl.java | 2 + .../agent/manager/MockVmManagerImpl.java | 61 ++++----------- .../agent/manager/SimulatorManagerImpl.java | 76 +++---------------- .../cloud/resource/SimulatorDiscoverer.java | 2 +- .../SimulatorSecondaryDiscoverer.java | 2 + .../com/cloud/simulator/SimulatorGuru.java | 7 +- .../dao/MockConfigurationDaoImpl.java | 13 ++-- .../cloud/simulator/dao/MockHostDaoImpl.java | 6 +- .../simulator/dao/MockSecStorageDaoImpl.java | 6 +- .../dao/MockSecurityRulesDaoImpl.java | 14 ++-- .../simulator/dao/MockStoragePoolDaoImpl.java | 6 +- .../cloud/simulator/dao/MockVMDaoImpl.java | 17 +++-- .../simulator/dao/MockVolumeDaoImpl.java | 9 ++- 15 files changed, 90 insertions(+), 146 deletions(-) diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index c45ab1bd91b..370c0fc1518 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -174,6 +174,13 @@ + + + @@ -265,5 +272,11 @@ + + diff 
--git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java index 2178651403e..8542de3bd9f 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -62,7 +62,9 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import org.springframework.stereotype.Component; +@Component @Local(value = { MockAgentManager.class }) public class MockAgentManagerImpl extends ManagerBase implements MockAgentManager { private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java index f445bb32900..859acc85958 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -97,7 +97,9 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine.State; +import org.springframework.stereotype.Component; +@Component @Local(value = { MockStorageManager.class }) public class MockStorageManagerImpl extends ManagerBase implements MockStorageManager { private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index 60e1a61a0bd..40a5b9846db 100644 --- 
a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -17,58 +17,12 @@ package com.cloud.agent.manager; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.BumpUpPriorityCommand; -import com.cloud.agent.api.CheckRouterAnswer; -import com.cloud.agent.api.CheckRouterCommand; -import com.cloud.agent.api.CheckVirtualMachineAnswer; -import com.cloud.agent.api.CheckVirtualMachineCommand; -import com.cloud.agent.api.CleanupNetworkRulesCmd; -import com.cloud.agent.api.GetDomRVersionAnswer; -import com.cloud.agent.api.GetDomRVersionCmd; -import com.cloud.agent.api.GetVmStatsAnswer; -import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.GetVncPortAnswer; -import com.cloud.agent.api.GetVncPortCommand; -import com.cloud.agent.api.MigrateAnswer; -import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.NetworkUsageAnswer; -import com.cloud.agent.api.NetworkUsageCommand; -import com.cloud.agent.api.PrepareForMigrationAnswer; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.RebootAnswer; -import com.cloud.agent.api.RebootCommand; -import com.cloud.agent.api.SecurityGroupRuleAnswer; -import com.cloud.agent.api.SecurityGroupRulesCmd; -import com.cloud.agent.api.StartAnswer; -import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.StopCommand; -import com.cloud.agent.api.VmStatsEntry; +import com.cloud.agent.api.*; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; 
import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.agent.api.routing.IpAssocCommand; -import com.cloud.agent.api.routing.LoadBalancerConfigCommand; -import com.cloud.agent.api.routing.NetworkElementCommand; -import com.cloud.agent.api.routing.SavePasswordCommand; -import com.cloud.agent.api.routing.SetFirewallRulesCommand; -import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; -import com.cloud.agent.api.routing.SetStaticNatRulesCommand; -import com.cloud.agent.api.routing.VmDataCommand; +import com.cloud.agent.api.routing.*; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.network.Networks.TrafficType; @@ -86,7 +40,18 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +@Component @Local(value = { MockVmManager.class }) public class MockVmManagerImpl extends ManagerBase implements MockVmManager { private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index 41443572efd..c234cc5cb2e 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -16,73 +16,12 @@ // under the License. 
package com.cloud.agent.manager; -import java.util.HashMap; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.AttachIsoCommand; -import com.cloud.agent.api.AttachVolumeCommand; -import com.cloud.agent.api.BackupSnapshotCommand; -import com.cloud.agent.api.BumpUpPriorityCommand; -import com.cloud.agent.api.CheckHealthCommand; -import com.cloud.agent.api.CheckNetworkCommand; -import com.cloud.agent.api.CheckRouterCommand; -import com.cloud.agent.api.CheckVirtualMachineCommand; -import com.cloud.agent.api.CleanupNetworkRulesCmd; -import com.cloud.agent.api.ClusterSyncCommand; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.ComputeChecksumCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; -import com.cloud.agent.api.CreateStoragePoolCommand; -import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; -import com.cloud.agent.api.DeleteSnapshotBackupCommand; -import com.cloud.agent.api.DeleteStoragePoolCommand; -import com.cloud.agent.api.GetDomRVersionCmd; -import com.cloud.agent.api.GetHostStatsCommand; -import com.cloud.agent.api.GetStorageStatsCommand; -import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.GetVncPortCommand; -import com.cloud.agent.api.MaintainCommand; -import com.cloud.agent.api.ManageSnapshotCommand; -import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.NetworkUsageCommand; -import com.cloud.agent.api.PingTestCommand; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.RebootCommand; -import com.cloud.agent.api.SecStorageSetupCommand; -import com.cloud.agent.api.SecStorageVMSetupCommand; -import com.cloud.agent.api.SecurityGroupRulesCmd; 
-import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StopCommand; -import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.*; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.agent.api.routing.IpAssocCommand; -import com.cloud.agent.api.routing.LoadBalancerConfigCommand; -import com.cloud.agent.api.routing.SavePasswordCommand; -import com.cloud.agent.api.routing.SetFirewallRulesCommand; -import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; -import com.cloud.agent.api.routing.SetStaticNatRulesCommand; -import com.cloud.agent.api.routing.VmDataCommand; -import com.cloud.agent.api.storage.CopyVolumeCommand; -import com.cloud.agent.api.storage.CreateCommand; -import com.cloud.agent.api.storage.DeleteTemplateCommand; -import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.DownloadCommand; -import com.cloud.agent.api.storage.DownloadProgressCommand; -import com.cloud.agent.api.storage.ListTemplateCommand; -import com.cloud.agent.api.storage.ListVolumeCommand; -import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; +import com.cloud.agent.api.routing.*; +import com.cloud.agent.api.storage.*; import com.cloud.simulator.MockConfigurationVO; import com.cloud.simulator.MockHost; import com.cloud.simulator.MockVMVO; @@ -95,7 +34,16 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.HashMap; +import java.util.Map; + +@Component @Local(value = { SimulatorManager.class }) public class 
SimulatorManagerImpl extends ManagerBase implements SimulatorManager { private static final Logger s_logger = Logger.getLogger(SimulatorManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java index 5cb094184ba..00fe356103b 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java @@ -53,7 +53,7 @@ import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; - +import org.springframework.stereotype.Component; @Local(value = Discoverer.class) public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index cd0cd2725c9..1dd71c5c27f 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -42,7 +42,9 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.secondary.SecondaryStorageDiscoverer; import com.cloud.utils.exception.CloudRuntimeException; +import org.springframework.stereotype.Component; +@Component @Local(value=Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { private static final Logger s_logger = Logger.getLogger(SimulatorSecondaryDiscoverer.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java 
b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java index c9d308023ed..57a38f1d3d8 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java @@ -16,9 +16,6 @@ // under the License. package com.cloud.simulator; -import javax.ejb.Local; -import javax.inject.Inject; - import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuru; @@ -28,6 +25,10 @@ import com.cloud.storage.dao.GuestOSDao; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import javax.ejb.Local; +import javax.inject.Inject; + + @Local(value=HypervisorGuru.class) public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru { @Inject GuestOSDao _guestOsDao; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java index bd1b48dfde8..fd825b751ed 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java @@ -16,18 +16,19 @@ // under the License. 
package com.cloud.simulator.dao; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.util.Formatter; - -import javax.ejb.Local; - import com.cloud.simulator.MockConfigurationVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Formatter; + +@Component @Local(value={MockConfigurationDao.class}) public class MockConfigurationDaoImpl extends GenericDaoBase implements MockConfigurationDao { private SearchBuilder _searchByDcIdName; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java index 8a566d79ed2..4b60bc02d47 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java @@ -16,14 +16,16 @@ // under the License. 
package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockHost; import com.cloud.simulator.MockHostVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockHostDao.class}) public class MockHostDaoImpl extends GenericDaoBase implements MockHostDao { protected final SearchBuilder GuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java index 65a375f5843..d4903244179 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java @@ -16,13 +16,15 @@ // under the License. package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockSecStorageVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockSecStorageDao.class}) public class MockSecStorageDaoImpl extends GenericDaoBase implements MockSecStorageDao { protected final SearchBuilder urlSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java index 8831efef2ec..d35607e0ebc 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java @@ -16,16 +16,18 @@ // under the License. 
package com.cloud.simulator.dao; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - import com.cloud.simulator.MockSecurityRulesVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; +import java.util.List; +import java.util.Map; + +@Component @Local(value={MockSecurityRulesDao.class}) public class MockSecurityRulesDaoImpl extends GenericDaoBase implements MockSecurityRulesDao { protected SearchBuilder vmIdSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java index 3a64d27e30d..0fc41abdc4c 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java @@ -16,14 +16,16 @@ // under the License. 
package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockStoragePoolVO; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockStoragePoolDao.class}) public class MockStoragePoolDaoImpl extends GenericDaoBase implements MockStoragePoolDao { protected final SearchBuilder uuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java index be7a98859e2..5a8c66d69ef 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java @@ -16,14 +16,6 @@ // under the License. package com.cloud.simulator.dao; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - import com.cloud.simulator.MockHostVO; import com.cloud.simulator.MockVMVO; import com.cloud.utils.db.GenericDaoBase; @@ -31,7 +23,16 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VirtualMachine; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Component @Local(value={MockVMDao.class}) public class MockVMDaoImpl extends GenericDaoBase implements MockVMDao { protected SearchBuilder GuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java 
b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java index a3a35179337..5d64a9fa246 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java @@ -16,10 +16,6 @@ // under the License. package com.cloud.simulator.dao; -import java.util.List; - -import javax.ejb.Local; - import com.cloud.simulator.MockVolumeVO; import com.cloud.simulator.MockVolumeVO.MockVolumeType; import com.cloud.utils.db.GenericDaoBase; @@ -27,7 +23,12 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import java.util.List; + +@Component @Local(value={MockVolumeDao.class}) public class MockVolumeDaoImpl extends GenericDaoBase implements MockVolumeDao { protected final SearchBuilder idTypeSearch; From 07102fc8ab37d2688f88c732c337bac34224a5c5 Mon Sep 17 00:00:00 2001 From: Gavin Lee Date: Tue, 19 Feb 2013 23:22:16 +0800 Subject: [PATCH 068/486] CLOUDSTACK-1318:Fix build devcloud issue --- tools/devcloud/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index b3fd79bb0bb..f45a86d26f6 100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -62,8 +62,8 @@ - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override + ${project.parent.parent.basedir}/utils/conf/db.properties + ${project.parent.parent.basedir}/utils/conf/db.properties.override true From 018554ce29cbb4f6bfd590f46d5299429cb682c9 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Tue, 19 Feb 2013 12:57:25 -0500 Subject: [PATCH 069/486] Fixed strange duplication of content within the tools/whisker/LICENSE file. 
Signed-off-by: Chip Childers --- tools/whisker/LICENSE | 4757 ----------------------------------------- 1 file changed, 4757 deletions(-) diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index bada5182ce8..7ef2e45c4e6 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -4755,4760 +4755,3 @@ Within the utils/src/com/cloud/utils/db directory from Clinton Begin http://code.google.com/p/mybatis/ ScriptRunner.java from http://code.google.com/p/mybatis/ -Copyright (c) 2013 The Apache Software Foundation - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - -This distribution contains third party resources. -Within the . directory - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2005-2010 Thomas Nagy - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from Thomas Nagy http://code.google.com/p/waf/ - waf - -Within the awsapi directory - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2005-2010 Thomas Nagy - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from Thomas Nagy http://code.google.com/p/waf/ - waf - -Within the console-proxy/js directory - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2009, John Resig - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from John Resig - jquery.js - -Within the deps directory - licensed under the BSD (2-clause) for XenServerJava http://www.opensource.org/licenses/BSD-2-Clause (as follows) - - Copyright (c) Citrix Systems, Inc. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - 1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - from Citrix Systems, Inc http://www.citrix.com/ - XenServerJava from http://community.citrix.com/cdn/xs/sdks/ - -Within the deps/awsapi-lib directory - licensed under the ANTLR 2 License http://www.antlr2.org/license.html (as follows) - - - ANTLR 2 License - - We reserve no legal rights to the ANTLR--it is fully in the public domain. An - individual or company may do whatever they wish with source code distributed - with ANTLR or the code generated by ANTLR, including the incorporation of ANTLR, - or its output, into commerical software. We encourage users to develop software - with ANTLR. However, we do ask that credit is given to us for developing ANTLR. - By "credit", we mean that if you use ANTLR or incorporate any source code into - one of your programs (commercial product, research project, or otherwise) that - you acknowledge this fact somewhere in the documentation, research report, - etc... If you like ANTLR and have developed a nice tool with the output, please - mention that you developed it using ANTLR. In addition, we ask that the headers - remain intact in our source code. As long as these guidelines are kept, we - expect to continue enhancing this system and expect to make other tools - available as they are completed. 
- - from ANTLR Translator Generator Project http://www.antlr2.org/ - antlr-2.7.6.jar from http://repo1.maven.org/maven2/antlr/antlr/2.7.6/antlr-2.7.6-sources.jar - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2004-2008 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - XmlSchema-1.4.3.jar - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2004-2012 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - apache-log4j-extras-1.0.jar from http://logging.apache.org/log4j/companions/extras/ - axiom-api-1.2.8.jar from http://ws.apache.org/axiom/source-repository.html - axiom-impl-1.2.8.jar from http://ws.apache.org/axiom/source-repository.html - axis2-1.5.1.jar from http://axis.apache.org/axis/ - axis2-adb-1.5.1.jar from http://axis.apache.org/axis/ - axis2-ant-plugin-1.5.1.jar from http://axis.apache.org/axis/ - axis2-codegen-1.4.1.jar from http://axis.apache.org/axis/ - axis2-jaxbri-1.5.1.jar from http://axis.apache.org/axis/ - axis2-jaxws-1.5.1.jar from http://axis.apache.org/axis/ - axis2-jibx-1.5.1.jar from http://axis.apache.org/axis/ - axis2-json-1.5.1.jar from http://axis.apache.org/axis/ - axis2-kernel-1.5.1.jar from http://axis.apache.org/axis/ - axis2-transport-http-1.5.1.jar from http://axis.apache.org/axis/ - axis2-transport-local-1.5.1.jar from http://axis.apache.org/axis/ - axis2-webapp-1.5.1.war from http://axis.apache.org/axis/ - commons-codec-1.4.jar from http://commons.apache.org/codec/ - commons-collections-3.1.jar from http://commons.apache.org/collections/ - commons-fileupload-1.2.jar from http://commons.apache.org/fileupload/ - commons-httpclient-3.1.jar from http://hc.apache.org/httpclient-3.x/ - commons-io-1.4.jar from http://commons.apache.org/io/ - commons-logging-1.1.1.jar from http://commons.apache.org/logging/ - 
httpcore-4.0.jar from http://hc.apache.org/httpcomponents-core-ga/ - log4j-1.2.15.jar from http://logging.apache.org/log4j/ - neethi-2.0.4.jar from http://svn.apache.org/viewvc/webservices/commons/tags/neethi/2.0.4/ - rampart-lib from http://axis.apache.org/axis2/java/rampart/download/1.5/download.cgi - woden-api-1.0M8.jar from http://svn.apache.org/viewvc/webservices/woden/tags/1.0M8_20080423/ - woden-impl-dom-1.0M8.jar from http://svn.apache.org/viewvc/webservices/woden/tags/1.0M8_20080423/ - wss4j-1.5.8.jar from http://ws.apache.org/wss4j/source-repository.html - xercesImpl.jar from http://xerces.apache.org/xerces2-j/source-repository.html - xml-apis.jar from http://repo1.maven.org/maven2/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04-sources.jar - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2009 Google Inc. - from Google Inc. http://google.com - cloud-gson.jar from http://code.google.com/p/google-gson/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - - from Json.simple Project http://code.google.com/p/json-simple/ - json_simple-1.1.jar from http://code.google.com/p/json-simple/source/checkout - - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2002-2011 Atsuhiko Yamanaka, JCraft,Inc. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from JCraft http://www.jcraft.com/ - jsch-0.1.42.jar from http://www.jcraft.com/jsch/ - - licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) - - Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. - - COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - - 1. Definitions. - - 1.1. "Contributor" means each individual or entity that - creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the - Original Software, prior Modifications used by a - Contributor (if any), and the Modifications made by that - particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or - (b) Modifications, or (c) the combination of files - containing Original Software with files containing - Modifications, in each case including portions thereof. - - 1.4. 
"Executable" means the Covered Software in any form - other than Source Code. - - 1.5. "Initial Developer" means the individual or entity - that first makes Original Software available under this - License. - - 1.6. "Larger Work" means a work which combines Covered - Software or portions thereof with code not governed by the - terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the - maximum extent possible, whether at the time of the initial - grant or subsequently acquired, any and all of the rights - conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable - form of any of the following: - - A. Any file that results from an addition to, - deletion from or modification of the contents of a - file containing Original Software or previous - Modifications; - - B. Any new file that contains any part of the - Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and - Executable form of computer software code that is - originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned - or hereafter acquired, including without limitation, - method, process, and apparatus claims, in any patent - Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer - software code in which modifications are made and (b) - associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal - entity exercising rights under, and complying with all of - the terms of, this License. For legal entities, "You" - includes any entity which controls, is controlled by, or is - under common control with You. 
For purposes of this - definition, "control" means (a) the power, direct or - indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (b) ownership - of more than fifty percent (50%) of the outstanding shares - or beneficial ownership of such entity. - - 2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the - Initial Developer hereby grants You a world-wide, - royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Initial Developer, - to use, reproduce, modify, display, perform, - sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using or selling of Original Software, to make, have - made, use, practice, sell, and offer for sale, and/or - otherwise dispose of the Original Software (or - portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) - are effective on the date Initial Developer first - distributes or otherwise makes the Original Software - available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent - license is granted: (1) for code that You delete from - the Original Software, or (2) for infringements - caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original - Software with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Contributor to - use, reproduce, modify, display, perform, sublicense - and distribute the Modifications created by such - Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as - Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using, or selling of Modifications made by that - Contributor either alone and/or in combination with - its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, - have made, and/or otherwise dispose of: (1) - Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications - made by that Contributor with its Contributor Version - (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and - 2.2(b) are effective on the date Contributor first - distributes or otherwise makes the Modifications - available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent - license is granted: (1) for any code that Contributor - has deleted from the Contributor Version; (2) for - infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the - combination of Modifications made by that Contributor - with other software (except as part of the - Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the - absence of Modifications made by that Contributor. - - 3. Distribution Obligations. - - 3.1. Availability of Source Code. 
- - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in - Source Code form and that Source Code form must be - distributed only under the terms of this License. You must - include a copy of this License with every copy of the - Source Code form of the Covered Software You distribute or - otherwise make available. You must inform recipients of any - such Covered Software in Executable form as to how they can - obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used - for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You - contribute are governed by the terms of this License. You - represent that You believe Your Modifications are Your - original creation(s) and/or You have sufficient rights to - grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications - that identifies You as the Contributor of the Modification. - You may not remove or alter any copyright, patent or - trademark notices contained within the Covered Software, or - any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered - Software in Source Code form that alters or restricts the - applicable version of this License or the recipients' - rights hereunder. You may choose to offer, and to charge a - fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on - behalf of the Initial Developer or any Contributor. 
You - must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by - You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of - warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered - Software under the terms of this License or under the terms - of a license of Your choice, which may contain terms - different from this License, provided that You are in - compliance with the terms of this License and that the - license for the Executable form does not attempt to limit - or alter the recipient's rights in the Source Code form - from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a - different license, You must make it absolutely clear that - any terms which differ from this License are offered by You - alone, not by the Initial Developer or Contributor. You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms - You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software - with other code not governed by the terms of this License - and distribute the Larger Work as a single product. In such - a case, You must make sure the requirements of this License - are fulfilled for the Covered Software. - - 4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and - may publish revised and/or new versions of this License - from time to time. Each version will be given a - distinguishing version number. Except as provided in - Section 4.3, no one other than the license steward has the - right to modify this License. - - 4.2. 
Effect of New Versions. - - You may always continue to use, distribute or otherwise - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. If the Initial Developer includes a - notice in the Original Software prohibiting it from being - distributed or otherwise made available under any - subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to - use, distribute or otherwise make the Covered Software - available under the terms of any subsequent version of the - License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a - new license for Your Original Software, You may create and - use a modified version of this License if You: (a) rename - the license and remove any references to the name of the - license steward (except to note that the license differs - from this License); and (b) otherwise make it clear that - the license contains terms which differ from this License. - - 5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF - ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. 
- - 6. TERMINATION. - - 6.1. This License and the rights granted hereunder will - terminate automatically if You fail to comply with terms - herein and fail to cure such breach within 30 days of - becoming aware of the breach. Provisions which, by their - nature, must remain in effect beyond the termination of - this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or - a Contributor (the Initial Developer or Contributor against - whom You assert such claim is referred to as "Participant") - alleging that the Participant Software (meaning the - Contributor Version where the Participant is a Contributor - or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any - patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial - Developer (if the Initial Developer is not the Participant) - and all Contributors under Sections 2.1 and/or 2.2 of this - License shall, upon 60 days notice from Participant - terminate prospectively and automatically at the expiration - of such 60 day notice period, unless if within such 60 day - period You withdraw Your claim with respect to the - Participant Software against such Participant either - unilaterally or pursuant to a written agreement with - Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 - above, all end user licenses that have been validly granted - by You or any distributor hereunder prior to termination - (excluding licenses granted to You by any distributor) - shall survive termination. - - 7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - - 8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. ¤ - 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users - acquire Covered Software with only those rights set forth herein. - This U.S. Government Rights clause is in lieu of, and supersedes, - any other FAR, DFAR, or other clause or provision that addresses - Government rights in computer software under this License. - - 9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the - extent necessary to make it enforceable. 
This License shall be - governed by the law of the jurisdiction specified in a notice - contained within the Original Software (except to the extent - applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation - relating to this License shall be subject to the jurisdiction of - the courts located in the jurisdiction and venue specified in a - notice contained within the Original Software, with the losing - party responsible for costs, including, without limitation, court - costs and reasonable attorneys' fees and expenses. The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall - be construed against the drafter shall not apply to this License. - You agree that You alone are responsible for compliance with the - United States export administration regulations (and the export - control laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - - 10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. 
- - from Oracle and/or its affiliates http://oracle.com - jaxb-api-2.1.jar from http://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.1/jaxb-api-2.1-sources.jar - jaxb-impl-2.1.7.jar from http://repo1.maven.org/maven2/com/sun/xml/bind/jaxb-impl/2.1.7/jaxb-impl-2.1.7-sources.jar - jaxb-xjc-2.1.7.jar from http://repo1.maven.org/maven2/com/sun/xml/bind/jaxb-xjc/2.1.7/jaxb-xjc-2.1.7-sources.jar - - licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) - - Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved. - - COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - - 1. Definitions. - - 1.1. "Contributor" means each individual or entity that - creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the - Original Software, prior Modifications used by a - Contributor (if any), and the Modifications made by that - particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or - (b) Modifications, or (c) the combination of files - containing Original Software with files containing - Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form - other than Source Code. - - 1.5. "Initial Developer" means the individual or entity - that first makes Original Software available under this - License. - - 1.6. "Larger Work" means a work which combines Covered - Software or portions thereof with code not governed by the - terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the - maximum extent possible, whether at the time of the initial - grant or subsequently acquired, any and all of the rights - conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable - form of any of the following: - - A. 
Any file that results from an addition to, - deletion from or modification of the contents of a - file containing Original Software or previous - Modifications; - - B. Any new file that contains any part of the - Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and - Executable form of computer software code that is - originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned - or hereafter acquired, including without limitation, - method, process, and apparatus claims, in any patent - Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer - software code in which modifications are made and (b) - associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal - entity exercising rights under, and complying with all of - the terms of, this License. For legal entities, "You" - includes any entity which controls, is controlled by, or is - under common control with You. For purposes of this - definition, "control" means (a) the power, direct or - indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (b) ownership - of more than fifty percent (50%) of the outstanding shares - or beneficial ownership of such entity. - - 2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the - Initial Developer hereby grants You a world-wide, - royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Initial Developer, - to use, reproduce, modify, display, perform, - sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using or selling of Original Software, to make, have - made, use, practice, sell, and offer for sale, and/or - otherwise dispose of the Original Software (or - portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) - are effective on the date Initial Developer first - distributes or otherwise makes the Original Software - available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent - license is granted: (1) for code that You delete from - the Original Software, or (2) for infringements - caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original - Software with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Contributor to - use, reproduce, modify, display, perform, sublicense - and distribute the Modifications created by such - Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as - Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using, or selling of Modifications made by that - Contributor either alone and/or in combination with - its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, - have made, and/or otherwise dispose of: (1) - Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications - made by that Contributor with its Contributor Version - (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and - 2.2(b) are effective on the date Contributor first - distributes or otherwise makes the Modifications - available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent - license is granted: (1) for any code that Contributor - has deleted from the Contributor Version; (2) for - infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the - combination of Modifications made by that Contributor - with other software (except as part of the - Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the - absence of Modifications made by that Contributor. - - 3. Distribution Obligations. - - 3.1. Availability of Source Code. 
- - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in - Source Code form and that Source Code form must be - distributed only under the terms of this License. You must - include a copy of this License with every copy of the - Source Code form of the Covered Software You distribute or - otherwise make available. You must inform recipients of any - such Covered Software in Executable form as to how they can - obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used - for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You - contribute are governed by the terms of this License. You - represent that You believe Your Modifications are Your - original creation(s) and/or You have sufficient rights to - grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications - that identifies You as the Contributor of the Modification. - You may not remove or alter any copyright, patent or - trademark notices contained within the Covered Software, or - any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered - Software in Source Code form that alters or restricts the - applicable version of this License or the recipients' - rights hereunder. You may choose to offer, and to charge a - fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on - behalf of the Initial Developer or any Contributor. 
You - must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by - You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of - warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered - Software under the terms of this License or under the terms - of a license of Your choice, which may contain terms - different from this License, provided that You are in - compliance with the terms of this License and that the - license for the Executable form does not attempt to limit - or alter the recipient's rights in the Source Code form - from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a - different license, You must make it absolutely clear that - any terms which differ from this License are offered by You - alone, not by the Initial Developer or Contributor. You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms - You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software - with other code not governed by the terms of this License - and distribute the Larger Work as a single product. In such - a case, You must make sure the requirements of this License - are fulfilled for the Covered Software. - - 4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and - may publish revised and/or new versions of this License - from time to time. Each version will be given a - distinguishing version number. Except as provided in - Section 4.3, no one other than the license steward has the - right to modify this License. - - 4.2. 
Effect of New Versions. - - You may always continue to use, distribute or otherwise - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. If the Initial Developer includes a - notice in the Original Software prohibiting it from being - distributed or otherwise made available under any - subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to - use, distribute or otherwise make the Covered Software - available under the terms of any subsequent version of the - License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a - new license for Your Original Software, You may create and - use a modified version of this License if You: (a) rename - the license and remove any references to the name of the - license steward (except to note that the license differs - from this License); and (b) otherwise make it clear that - the license contains terms which differ from this License. - - 5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF - ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. 
- - 6. TERMINATION. - - 6.1. This License and the rights granted hereunder will - terminate automatically if You fail to comply with terms - herein and fail to cure such breach within 30 days of - becoming aware of the breach. Provisions which, by their - nature, must remain in effect beyond the termination of - this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or - a Contributor (the Initial Developer or Contributor against - whom You assert such claim is referred to as "Participant") - alleging that the Participant Software (meaning the - Contributor Version where the Participant is a Contributor - or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any - patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial - Developer (if the Initial Developer is not the Participant) - and all Contributors under Sections 2.1 and/or 2.2 of this - License shall, upon 60 days notice from Participant - terminate prospectively and automatically at the expiration - of such 60 day notice period, unless if within such 60 day - period You withdraw Your claim with respect to the - Participant Software against such Participant either - unilaterally or pursuant to a written agreement with - Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 - above, all end user licenses that have been validly granted - by You or any distributor hereunder prior to termination - (excluding licenses granted to You by any distributor) - shall survive termination. - - 7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - - 8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. ¤ - 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users - acquire Covered Software with only those rights set forth herein. - This U.S. Government Rights clause is in lieu of, and supersedes, - any other FAR, DFAR, or other clause or provision that addresses - Government rights in computer software under this License. - - 9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the - extent necessary to make it enforceable. 
This License shall be - governed by the law of the jurisdiction specified in a notice - contained within the Original Software (except to the extent - applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation - relating to this License shall be subject to the jurisdiction of - the courts located in the jurisdiction and venue specified in a - notice contained within the Original Software, with the losing - party responsible for costs, including, without limitation, court - costs and reasonable attorneys' fees and expenses. The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall - be construed against the drafter shall not apply to this License. - You agree that You alone are responsible for compliance with the - United States export administration regulations (and the export - control laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - - 10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - - from Project GlassFish http://glassfish.java.net/ - jta-1.1.jar from http://repo1.maven.org/maven2/javax/transaction/jta/1.1/jta-1.1-sources.jar - - licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) - - Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. 
- - COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - - 1. Definitions. - - 1.1. "Contributor" means each individual or entity that - creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the - Original Software, prior Modifications used by a - Contributor (if any), and the Modifications made by that - particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or - (b) Modifications, or (c) the combination of files - containing Original Software with files containing - Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form - other than Source Code. - - 1.5. "Initial Developer" means the individual or entity - that first makes Original Software available under this - License. - - 1.6. "Larger Work" means a work which combines Covered - Software or portions thereof with code not governed by the - terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the - maximum extent possible, whether at the time of the initial - grant or subsequently acquired, any and all of the rights - conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable - form of any of the following: - - A. Any file that results from an addition to, - deletion from or modification of the contents of a - file containing Original Software or previous - Modifications; - - B. Any new file that contains any part of the - Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and - Executable form of computer software code that is - originally released under this License. - - 1.11. 
"Patent Claims" means any patent claim(s), now owned - or hereafter acquired, including without limitation, - method, process, and apparatus claims, in any patent - Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer - software code in which modifications are made and (b) - associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal - entity exercising rights under, and complying with all of - the terms of, this License. For legal entities, "You" - includes any entity which controls, is controlled by, or is - under common control with You. For purposes of this - definition, "control" means (a) the power, direct or - indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (b) ownership - of more than fifty percent (50%) of the outstanding shares - or beneficial ownership of such entity. - - 2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the - Initial Developer hereby grants You a world-wide, - royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Initial Developer, - to use, reproduce, modify, display, perform, - sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using or selling of Original Software, to make, have - made, use, practice, sell, and offer for sale, and/or - otherwise dispose of the Original Software (or - portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) - are effective on the date Initial Developer first - distributes or otherwise makes the Original Software - available to a third party under the terms of this - License. 
- - (d) Notwithstanding Section 2.1(b) above, no patent - license is granted: (1) for code that You delete from - the Original Software, or (2) for infringements - caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original - Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Contributor to - use, reproduce, modify, display, perform, sublicense - and distribute the Modifications created by such - Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as - Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using, or selling of Modifications made by that - Contributor either alone and/or in combination with - its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, - have made, and/or otherwise dispose of: (1) - Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications - made by that Contributor with its Contributor Version - (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and - 2.2(b) are effective on the date Contributor first - distributes or otherwise makes the Modifications - available to a third party. 
- - (d) Notwithstanding Section 2.2(b) above, no patent - license is granted: (1) for any code that Contributor - has deleted from the Contributor Version; (2) for - infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the - combination of Modifications made by that Contributor - with other software (except as part of the - Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the - absence of Modifications made by that Contributor. - - 3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in - Source Code form and that Source Code form must be - distributed only under the terms of this License. You must - include a copy of this License with every copy of the - Source Code form of the Covered Software You distribute or - otherwise make available. You must inform recipients of any - such Covered Software in Executable form as to how they can - obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used - for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You - contribute are governed by the terms of this License. You - represent that You believe Your Modifications are Your - original creation(s) and/or You have sufficient rights to - grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications - that identifies You as the Contributor of the Modification. - You may not remove or alter any copyright, patent or - trademark notices contained within the Covered Software, or - any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. 
- - You may not offer or impose any terms on any Covered - Software in Source Code form that alters or restricts the - applicable version of this License or the recipients' - rights hereunder. You may choose to offer, and to charge a - fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on - behalf of the Initial Developer or any Contributor. You - must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by - You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of - warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered - Software under the terms of this License or under the terms - of a license of Your choice, which may contain terms - different from this License, provided that You are in - compliance with the terms of this License and that the - license for the Executable form does not attempt to limit - or alter the recipient's rights in the Source Code form - from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a - different license, You must make it absolutely clear that - any terms which differ from this License are offered by You - alone, not by the Initial Developer or Contributor. You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms - You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software - with other code not governed by the terms of this License - and distribute the Larger Work as a single product. 
In such - a case, You must make sure the requirements of this License - are fulfilled for the Covered Software. - - 4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and - may publish revised and/or new versions of this License - from time to time. Each version will be given a - distinguishing version number. Except as provided in - Section 4.3, no one other than the license steward has the - right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. If the Initial Developer includes a - notice in the Original Software prohibiting it from being - distributed or otherwise made available under any - subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to - use, distribute or otherwise make the Covered Software - available under the terms of any subsequent version of the - License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a - new license for Your Original Software, You may create and - use a modified version of this License if You: (a) rename - the license and remove any references to the name of the - license steward (except to note that the license differs - from this License); and (b) otherwise make it clear that - the license contains terms which differ from this License. - - 5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF - ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. - - 6. TERMINATION. - - 6.1. This License and the rights granted hereunder will - terminate automatically if You fail to comply with terms - herein and fail to cure such breach within 30 days of - becoming aware of the breach. Provisions which, by their - nature, must remain in effect beyond the termination of - this License shall survive. - - 6.2. 
If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or - a Contributor (the Initial Developer or Contributor against - whom You assert such claim is referred to as "Participant") - alleging that the Participant Software (meaning the - Contributor Version where the Participant is a Contributor - or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any - patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial - Developer (if the Initial Developer is not the Participant) - and all Contributors under Sections 2.1 and/or 2.2 of this - License shall, upon 60 days notice from Participant - terminate prospectively and automatically at the expiration - of such 60 day notice period, unless if within such 60 day - period You withdraw Your claim with respect to the - Participant Software against such Participant either - unilaterally or pursuant to a written agreement with - Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 - above, all end user licenses that have been validly granted - by You or any distributor hereunder prior to termination - (excluding licenses granted to You by any distributor) - shall survive termination. - - 7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - - 8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. ¤ - 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users - acquire Covered Software with only those rights set forth herein. - This U.S. Government Rights clause is in lieu of, and supersedes, - any other FAR, DFAR, or other clause or provision that addresses - Government rights in computer software under this License. - - 9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the - extent necessary to make it enforceable. This License shall be - governed by the law of the jurisdiction specified in a notice - contained within the Original Software (except to the extent - applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation - relating to this License shall be subject to the jurisdiction of - the courts located in the jurisdiction and venue specified in a - notice contained within the Original Software, with the losing - party responsible for costs, including, without limitation, court - costs and reasonable attorneys' fees and expenses. 
The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall - be construed against the drafter shall not apply to this License. - You agree that You alone are responsible for compliance with the - United States export administration regulations (and the export - control laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - - 10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - - from Oracle and/or its affiliates http://oracle.com - mail-1.4.jar from http://kenai.com/projects/javamail - - licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows) - - - Common Public License Version 1.0 (CPL) - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC - LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM - CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - - 1. DEFINITIONS - - "Contribution means: - - a) in the case of the initial Contributor, the initial code and documentation - distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - - i) changes to the Program, and - - ii) additions to the Program; - - where such changes and/or additions to the Program originate from and are - distributed by that particular Contributor. 
A Contribution 'originates' from a - Contributor if it was added to the Program by such Contributor itself or anyone - acting on such Contributor's behalf. Contributions do not include additions to - the Program which: (i) are separate modules of software distributed in - conjunction with the Program under their own license agreement, and (ii) are not - derivative works of the Program. - - "Contributor means any person or entity that distributes the Program. - - "Licensed Patents mean patent claims licensable by a Contributor which are - "necessarily infringed by the use or sale of its Contribution alone or when - "combined with the Program. - - "Program means the Contributions distributed in accordance with this Agreement. - - "Recipient means anyone who receives the Program under this Agreement, including - "all Contributors. - - 2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free copyright license to - reproduce, prepare derivative works of, publicly display, publicly perform, - distribute and sublicense the Contribution of such Contributor, if any, and such - derivative works, in source code and object code form. - - b) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed - Patents to make, use, sell, offer to sell, import and otherwise transfer the - Contribution of such Contributor, if any, in source code and object code form. - This patent license shall apply to the combination of the Contribution and the - Program if, at the time the Contribution is added by the Contributor, such - addition of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other combinations - which include the Contribution. No hardware per se is licensed hereunder. 
- - c) Recipient understands that although each Contributor grants the licenses to - its Contributions set forth herein, no assurances are provided by any - Contributor that the Program does not infringe the patent or other intellectual - property rights of any other entity. Each Contributor disclaims any liability to - Recipient for claims brought by any other entity based on infringement of - intellectual property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby assumes sole - responsibility to secure any other intellectual property rights needed, if any. - For example, if a third party patent license is required to allow Recipient to - distribute the Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has sufficient copyright - rights in its Contribution, if any, to grant the copyright license set forth in - this Agreement. - - 3. 
REQUIREMENTS - - A Contributor may choose to distribute the Program in object code form under its - own license agreement, provided that: - - a) it complies with the terms and conditions of this Agreement; and - - b) its license agreement: - - i) effectively disclaims on behalf of all Contributors all warranties and - conditions, express and implied, including warranties or conditions of title and - non-infringement, and implied warranties or conditions of merchantability and - fitness for a particular purpose; - - ii) effectively excludes on behalf of all Contributors all liability for - damages, including direct, indirect, special, incidental and consequential - damages, such as lost profits; - - iii) states that any provisions which differ from this Agreement are offered by - that Contributor alone and not by any other party; and - - iv) states that source code for the Program is available from such Contributor, - and informs licensees how to obtain it in a reasonable manner on or through a - medium customarily used for software exchange. - - When the Program is made available in source code form: - - a) it must be made available under this Agreement; and - - b) a copy of this Agreement must be included with each copy of the Program. - - Contributors may not remove or alter any copyright notices contained within the - Program. - - Each Contributor must identify itself as the originator of its Contribution, if - any, in a manner that reasonably allows subsequent Recipients to identify the - originator of the Contribution. - - 4. COMMERCIAL DISTRIBUTION - - Commercial distributors of software may accept certain responsibilities with - respect to end users, business partners and the like. While this license is - intended to facilitate the commercial use of the Program, the Contributor who - includes the Program in a commercial product offering should do so in a manner - which does not create potential liability for other Contributors. 
Therefore, if - a Contributor includes the Program in a commercial product offering, such - Contributor ("Commercial Contributor") hereby agrees to defend and indemnify - every other Contributor ("Indemnified Contributor") against any losses, damages - and costs (collectively "Losses") arising from claims, lawsuits and other legal - actions brought by a third party against the Indemnified Contributor to the - extent caused by the acts or omissions of such Commercial Contributor in - connection with its distribution of the Program in a commercial product - offering. The obligations in this section do not apply to any claims or Losses - relating to any actual or alleged intellectual property infringement. In order - to qualify, an Indemnified Contributor must: a) promptly notify the Commercial - Contributor in writing of such claim, and b) allow the Commercial Contributor to - control, and cooperate with the Commercial Contributor in, the defense and any - related settlement negotiations. The Indemnified Contributor may participate in - any such claim at its own expense. - - For example, a Contributor might include the Program in a commercial product - offering, Product X. That Contributor is then a Commercial Contributor. If that - Commercial Contributor then makes performance claims, or offers warranties - related to Product X, those performance claims and warranties are such - Commercial Contributor's responsibility alone. Under this section, the - Commercial Contributor would have to defend claims against the other - Contributors related to those performance claims and warranties, and if a court - requires any other Contributor to pay any damages as a result, the Commercial - Contributor must pay those damages. - - 5. 
NO WARRANTY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR - IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, - NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each - Recipient is solely responsible for determining the appropriateness of using and - distributing the Program and assumes all risks associated with its exercise of - rights under this Agreement, including but not limited to the risks and costs of - program errors, compliance with applicable laws, damage to or loss of data, - programs or equipment, and unavailability or interruption of operations. - - 6. DISCLAIMER OF LIABILITY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY - CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST - PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS - GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - 7. GENERAL - - If any provision of this Agreement is invalid or unenforceable under applicable - law, it shall not affect the validity or enforceability of the remainder of the - terms of this Agreement, and without further action by the parties hereto, such - provision shall be reformed to the minimum extent necessary to make such - provision valid and enforceable. 
- - If Recipient institutes patent litigation against a Contributor with respect to - a patent applicable to software (including a cross-claim or counterclaim in a - lawsuit), then any patent licenses granted by that Contributor to such Recipient - under this Agreement shall terminate as of the date such litigation is filed. In - addition, if Recipient institutes patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that the Program - itself (excluding combinations of the Program with other software or hardware) - infringes such Recipient's patent(s), then such Recipient's rights granted under - Section 2(b) shall terminate as of the date such litigation is filed. - - All Recipient's rights under this Agreement shall terminate if it fails to - comply with any of the material terms or conditions of this Agreement and does - not cure such failure in a reasonable period of time after becoming aware of - such noncompliance. If all Recipient's rights under this Agreement terminate, - Recipient agrees to cease use and distribution of the Program as soon as - reasonably practicable. However, Recipient's obligations under this Agreement - and any licenses granted by Recipient relating to the Program shall continue and - survive. - - Everyone is permitted to copy and distribute copies of this Agreement, but in - order to avoid inconsistency the Agreement is copyrighted and may only be - modified in the following manner. The Agreement Steward reserves the right to - publish new versions (including revisions) of this Agreement from time to time. - No one other than the Agreement Steward has the right to modify this Agreement. - IBM is the initial Agreement Steward. IBM may assign the responsibility to serve - as the Agreement Steward to a suitable separate entity. Each new version of the - Agreement will be given a distinguishing version number. 
The Program (including - Contributions) may always be distributed subject to the version of the Agreement - under which it was received. In addition, after a new version of the Agreement - is published, Contributor may elect to distribute the Program (including its - Contributions) under the new version. Except as expressly stated in Sections - 2(a) and 2(b) above, Recipient receives no rights or licenses to the - intellectual property of any Contributor under this Agreement, whether - expressly, by implication, estoppel or otherwise. All rights in the Program not - expressly granted under this Agreement are reserved. - - This Agreement is governed by the laws of the State of New York and the - intellectual property laws of the United States of America. No party to this - Agreement will bring a legal action under this Agreement more than one year - after the cause of action arose. Each party waives its rights to a jury trial in - any resulting litigation. - - from JUnit Project http://www.junit.org/ - junit-4.8.1.jar from http://kentbeck.github.com/junit/ - - licensed under the Dom4J License http://dom4j.cvs.sourceforge.net/viewvc/dom4j/dom4j/LICENSE.txt (as follows) - - - Copyright 2001-2005 (C) MetaStuff, Ltd. All Rights Reserved. - - Redistribution and use of this software and associated documentation - ("Software"), with or without modification, are permitted provided - that the following conditions are met: - - 1. Redistributions of source code must retain copyright - statements and notices. Redistributions must also contain a - copy of this document. - - 2. Redistributions in binary form must reproduce the - above copyright notice, this list of conditions and the - following disclaimer in the documentation and/or other - materials provided with the distribution. - - 3. The name "DOM4J" must not be used to endorse or promote - products derived from this Software without prior written - permission of MetaStuff, Ltd. 
For written permission, - please contact dom4j-info@metastuff.com. - - 4. Products derived from this Software may not be called "DOM4J" - nor may "DOM4J" appear in their names without prior written - permission of MetaStuff, Ltd. DOM4J is a registered - trademark of MetaStuff, Ltd. - - 5. Due credit should be given to the DOM4J Project - - http://www.dom4j.org - - THIS SOFTWARE IS PROVIDED BY METASTUFF, LTD. AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT - NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - METASTUFF, LTD. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - OF THE POSSIBILITY OF SUCH DAMAGE. - - from DOM4J Project http://dom4j.sourceforge.net/ - dom4j-1.6.1.jar from http://dom4j.sourceforge.net/source-repository.html - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2004-2011 QOS.ch - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from QOS.ch http://www.qos.ch/ - slf4j-api-1.5.11.jar from https://github.com/qos-ch/slf4j - slf4j-jdk14-1.5.11.jar from https://github.com/qos-ch/slf4j - - licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) - - - MOZILLA PUBLIC LICENSE - Version 1.1 - - --------------- - - 1. Definitions. - - 1.0.1. "Commercial Use" means distribution or otherwise making the - Covered Code available to a third party. - - 1.1. "Contributor" means each entity that creates or contributes to - the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Code, prior Modifications used by a Contributor, and the Modifications - made by that particular Contributor. - - 1.3. "Covered Code" means the Original Code or Modifications or the - combination of the Original Code and Modifications, in each case - including portions thereof. - - 1.4. "Electronic Distribution Mechanism" means a mechanism generally - accepted in the software development community for the electronic - transfer of data. - - 1.5. "Executable" means Covered Code in any form other than Source - Code. - - 1.6. "Initial Developer" means the individual or entity identified - as the Initial Developer in the Source Code notice required by Exhibit - A. - - 1.7. "Larger Work" means a work which combines Covered Code or - portions thereof with code not governed by the terms of this License. - - 1.8. "License" means this document. - - 1.8.1. 
"Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means any addition to or deletion from the - substance or structure of either the Original Code or any previous - Modifications. When Covered Code is released as a series of files, a - Modification is: - A. Any addition to or deletion from the contents of a file - containing Original Code or previous Modifications. - - B. Any new file that contains any part of the Original Code or - previous Modifications. - - 1.10. "Original Code" means Source Code of computer software code - which is described in the Source Code notice required by Exhibit A as - Original Code, and which, at the time of its release under this - License is not already Covered Code governed by this License. - - 1.10.1. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.11. "Source Code" means the preferred form of the Covered Code for - making modifications to it, including all modules it contains, plus - any associated interface definition files, scripts used to control - compilation and installation of an Executable, or source code - differential comparisons against either the Original Code or another - well known, available Covered Code of the Contributor's choice. The - Source Code can be in a compressed or archival form, provided the - appropriate decompression or de-archiving software is widely available - for no charge. - - 1.12. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, this - License or a future version of this License issued under Section 6.1. 
- For legal entities, "You" includes any entity which controls, is - controlled by, or is under common control with You. For purposes of - this definition, "control" means (a) the power, direct or indirect, - to cause the direction or management of such entity, whether by - contract or otherwise, or (b) ownership of more than fifty percent - (50%) of the outstanding shares or beneficial ownership of such - entity. - - 2. Source Code License. - - 2.1. The Initial Developer Grant. - The Initial Developer hereby grants You a world-wide, royalty-free, - non-exclusive license, subject to third party intellectual property - claims: - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Code (or portions thereof) with or without Modifications, and/or - as part of a Larger Work; and - - (b) under Patents Claims infringed by the making, using or - selling of Original Code, to make, have made, use, practice, - sell, and offer for sale, and/or otherwise dispose of the - Original Code (or portions thereof). - - (c) the licenses granted in this Section 2.1(a) and (b) are - effective on the date Initial Developer first distributes - Original Code under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: 1) for code that You delete from the Original Code; 2) - separate from the Original Code; or 3) for infringements caused - by: i) the modification of the Original Code or ii) the - combination of the Original Code with other software or devices. - - 2.2. Contributor Grant. 
- Subject to third party intellectual property claims, each Contributor - hereby grants You a world-wide, royalty-free, non-exclusive license - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor, to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof) either on an - unmodified basis, with other Modifications, as Covered Code - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or - selling of Modifications made by that Contributor either alone - and/or in combination with its Contributor Version (or portions - of such combination), to make, use, sell, offer for sale, have - made, and/or otherwise dispose of: 1) Modifications made by that - Contributor (or portions thereof); and 2) the combination of - Modifications made by that Contributor with its Contributor - Version (or portions of such combination). - - (c) the licenses granted in Sections 2.2(a) and 2.2(b) are - effective on the date Contributor first makes Commercial Use of - the Covered Code. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: 1) for any code that Contributor has deleted from the - Contributor Version; 2) separate from the Contributor Version; - 3) for infringements caused by: i) third party modifications of - Contributor Version or ii) the combination of Modifications made - by that Contributor with other software (except as part of the - Contributor Version) or other devices; or 4) under Patent Claims - infringed by Covered Code in the absence of Modifications made by - that Contributor. - - 3. Distribution Obligations. - - 3.1. Application of License. - The Modifications which You create or to which You contribute are - governed by the terms of this License, including without limitation - Section 2.2. 
The Source Code version of Covered Code may be - distributed only under the terms of this License or a future version - of this License released under Section 6.1, and You must include a - copy of this License with every copy of the Source Code You - distribute. You may not offer or impose any terms on any Source Code - version that alters or restricts the applicable version of this - License or the recipients' rights hereunder. However, You may include - an additional document offering the additional rights described in - Section 3.5. - - 3.2. Availability of Source Code. - Any Modification which You create or to which You contribute must be - made available in Source Code form under the terms of this License - either on the same media as an Executable version or via an accepted - Electronic Distribution Mechanism to anyone to whom you made an - Executable version available; and if made available via Electronic - Distribution Mechanism, must remain available for at least twelve (12) - months after the date it initially became available, or at least six - (6) months after a subsequent version of that particular Modification - has been made available to such recipients. You are responsible for - ensuring that the Source Code version remains available even if the - Electronic Distribution Mechanism is maintained by a third party. - - 3.3. Description of Modifications. - You must cause all Covered Code to which You contribute to contain a - file documenting the changes You made to create that Covered Code and - the date of any change. You must include a prominent statement that - the Modification is derived, directly or indirectly, from Original - Code provided by the Initial Developer and including the name of the - Initial Developer in (a) the Source Code, and (b) in any notice in an - Executable version or related documentation in which You describe the - origin or ownership of the Covered Code. - - 3.4. Intellectual Property Matters - (a) Third Party Claims. 
- If Contributor has knowledge that a license under a third party's - intellectual property rights is required to exercise the rights - granted by such Contributor under Sections 2.1 or 2.2, - Contributor must include a text file with the Source Code - distribution titled "LEGAL" which describes the claim and the - party making the claim in sufficient detail that a recipient will - know whom to contact. If Contributor obtains such knowledge after - the Modification is made available as described in Section 3.2, - Contributor shall promptly modify the LEGAL file in all copies - Contributor makes available thereafter and shall take other steps - (such as notifying appropriate mailing lists or newsgroups) - reasonably calculated to inform those who received the Covered - Code that new knowledge has been obtained. - - (b) Contributor APIs. - If Contributor's Modifications include an application programming - interface and Contributor has knowledge of patent licenses which - are reasonably necessary to implement that API, Contributor must - also include this information in the LEGAL file. - - (c) Representations. - Contributor represents that, except as disclosed pursuant to - Section 3.4(a) above, Contributor believes that Contributor's - Modifications are Contributor's original creation(s) and/or - Contributor has sufficient rights to grant the rights conveyed by - this License. - - 3.5. Required Notices. - You must duplicate the notice in Exhibit A in each file of the Source - Code. If it is not possible to put such notice in a particular Source - Code file due to its structure, then You must include such notice in a - location (such as a relevant directory) where a user would be likely - to look for such a notice. If You created one or more Modification(s) - You may add your name as a Contributor to the notice described in - Exhibit A. 
You must also duplicate this License in any documentation - for the Source Code where You describe recipients' rights or ownership - rights relating to Covered Code. You may choose to offer, and to - charge a fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Code. However, You - may do so only on Your own behalf, and not on behalf of the Initial - Developer or any Contributor. You must make it absolutely clear than - any such warranty, support, indemnity or liability obligation is - offered by You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred by the - Initial Developer or such Contributor as a result of warranty, - support, indemnity or liability terms You offer. - - 3.6. Distribution of Executable Versions. - You may distribute Covered Code in Executable form only if the - requirements of Section 3.1-3.5 have been met for that Covered Code, - and if You include a notice stating that the Source Code version of - the Covered Code is available under the terms of this License, - including a description of how and where You have fulfilled the - obligations of Section 3.2. The notice must be conspicuously included - in any notice in an Executable version, related documentation or - collateral in which You describe recipients' rights relating to the - Covered Code. You may distribute the Executable version of Covered - Code or ownership rights under a license of Your choice, which may - contain terms different from this License, provided that You are in - compliance with the terms of this License and that the license for the - Executable version does not attempt to limit or alter the recipient's - rights in the Source Code version from the rights set forth in this - License. 
If You distribute the Executable version under a different - license You must make it absolutely clear that any terms which differ - from this License are offered by You alone, not by the Initial - Developer or any Contributor. You hereby agree to indemnify the - Initial Developer and every Contributor for any liability incurred by - the Initial Developer or such Contributor as a result of any such - terms You offer. - - 3.7. Larger Works. - You may create a Larger Work by combining Covered Code with other code - not governed by the terms of this License and distribute the Larger - Work as a single product. In such a case, You must make sure the - requirements of this License are fulfilled for the Covered Code. - - 4. Inability to Comply Due to Statute or Regulation. - - If it is impossible for You to comply with any of the terms of this - License with respect to some or all of the Covered Code due to - statute, judicial order, or regulation then You must: (a) comply with - the terms of this License to the maximum extent possible; and (b) - describe the limitations and the code they affect. Such description - must be included in the LEGAL file described in Section 3.4 and must - be included with all distributions of the Source Code. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - - 5. Application of this License. - - This License applies to code to which the Initial Developer has - attached the notice in Exhibit A and to related Covered Code. - - 6. Versions of the License. - - 6.1. New Versions. - Netscape Communications Corporation ("Netscape") may publish revised - and/or new versions of the License from time to time. Each version - will be given a distinguishing version number. - - 6.2. Effect of New Versions. 
- Once Covered Code has been published under a particular version of the - License, You may always continue to use it under the terms of that - version. You may also choose to use such Covered Code under the terms - of any subsequent version of the License published by Netscape. No one - other than Netscape has the right to modify the terms applicable to - Covered Code created under this License. - - 6.3. Derivative Works. - If You create or use a modified version of this License (which you may - only do in order to apply it to code which is not already Covered Code - governed by this License), You must (a) rename Your license so that - the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", - "MPL", "NPL" or any confusingly similar phrase do not appear in your - license (except to note that your license differs from this License) - and (b) otherwise make it clear that Your version of the license - contains terms which differ from the Mozilla Public License and - Netscape Public License. (Filling in the name of the Initial - Developer, Original Code or Contributor in the notice described in - Exhibit A shall not of themselves be deemed to be modifications of - this License.) - - 7. DISCLAIMER OF WARRANTY. - - COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, - WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF - DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. - THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE - IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, - YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE - COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER - OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - - 8. TERMINATION. - - 8.1. 
This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to cure - such breach within 30 days of becoming aware of the breach. All - sublicenses to the Covered Code which are properly granted shall - survive any termination of this License. Provisions which, by their - nature, must remain in effect beyond the termination of this License - shall survive. - - 8.2. If You initiate litigation by asserting a patent infringement - claim (excluding declatory judgment actions) against Initial Developer - or a Contributor (the Initial Developer or Contributor against whom - You file such action is referred to as "Participant") alleging that: - - (a) such Participant's Contributor Version directly or indirectly - infringes any patent, then any and all rights granted by such - Participant to You under Sections 2.1 and/or 2.2 of this License - shall, upon 60 days notice from Participant terminate prospectively, - unless if within 60 days after receipt of notice You either: (i) - agree in writing to pay Participant a mutually agreeable reasonable - royalty for Your past and future use of Modifications made by such - Participant, or (ii) withdraw Your litigation claim with respect to - the Contributor Version against such Participant. If within 60 days - of notice, a reasonable royalty and payment arrangement are not - mutually agreed upon in writing by the parties or the litigation claim - is not withdrawn, the rights granted by Participant to You under - Sections 2.1 and/or 2.2 automatically terminate at the expiration of - the 60 day notice period specified above. 
- - (b) any software, hardware, or device, other than such Participant's - Contributor Version, directly or indirectly infringes any patent, then - any rights granted to You by such Participant under Sections 2.1(b) - and 2.2(b) are revoked effective as of the date You first made, used, - sold, distributed, or had made, Modifications made by that - Participant. - - 8.3. If You assert a patent infringement claim against Participant - alleging that such Participant's Contributor Version directly or - indirectly infringes any patent where such claim is resolved (such as - by license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 8.4. In the event of termination under Sections 8.1 or 8.2 above, - all end user license agreements (excluding distributors and resellers) - which have been validly granted by You or any distributor hereunder - prior to termination shall survive termination. - - 9. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL - DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, - OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR - ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY - CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, - WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY - RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW - PROHIBITS SUCH LIMITATION. 
SOME JURISDICTIONS DO NOT ALLOW THE - EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO - THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - - 10. U.S. GOVERNMENT END USERS. - - The Covered Code is a "commercial item," as that term is defined in - 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" and "commercial computer software documentation," as such - terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 - C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), - all U.S. Government End Users acquire Covered Code with only those - rights set forth herein. - - 11. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - California law provisions (except to the extent applicable law, if - any, provides otherwise), excluding its conflict-of-law provisions. - With respect to disputes in which at least one party is a citizen of, - or an entity chartered or registered to do business in the United - States of America, any litigation relating to this License shall be - subject to the jurisdiction of the Federal Courts of the Northern - District of California, with venue lying in Santa Clara County, - California, with the losing party responsible for costs, including - without limitation, court costs and reasonable attorneys' fees and - expenses. The application of the United Nations Convention on - Contracts for the International Sale of Goods is expressly excluded. - Any law or regulation which provides that the language of a contract - shall be construed against the drafter shall not apply to this - License. - - 12. RESPONSIBILITY FOR CLAIMS. 
- - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - - 13. MULTIPLE-LICENSED CODE. - - Initial Developer may designate portions of the Covered Code as - "Multiple-Licensed". "Multiple-Licensed" means that the Initial - Developer permits you to utilize portions of the Covered Code under - Your choice of the NPL or the alternative licenses, if any, specified - by the Initial Developer in the file described in Exhibit A. - - EXHIBIT A -Mozilla Public License. - - ``The contents of this file are subject to the Mozilla Public License - Version 1.1 (the "License"); you may not use this file except in - compliance with the License. You may obtain a copy of the License at - http://www.mozilla.org/MPL/ - - Software distributed under the License is distributed on an "AS IS" - basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the - License for the specific language governing rights and limitations - under the License. - - The Original Code is Javassist. - - The Initial Developer of the Original Code is Shigeru Chiba. - Portions created by Shigeru Chiba are Copyright (C) - 1999-2008 Shigeru Chiba. All Rights Reserved. - - Contributor(s): . - - Alternatively, the contents of this file may be used under the terms - of the GNU Lesser General Public License Version 2.1 or later license (the "[LGPL] License"), in which case the - provisions of [LGPL] License are applicable instead of those - above. 
If you wish to allow use of your version of this file only - under the terms of the [LGPL] License and not to allow others to use - your version of this file under the MPL, indicate your decision by - deleting the provisions above and replace them with the notice and - other provisions required by the [LGPL] License. If you do not delete - the provisions above, a recipient may use your version of this file - under either the MPL or the [LGPL] License." - - [NOTE: The text of this Exhibit A may differ slightly from the text of - the notices in the Source Code files of the Original Code. You should - use the text of this Exhibit A rather than the text found in the - Original Code Source Code for Your Modifications.] - - - from Shigeru Chiba http://www.csg.ci.i.u-tokyo.ac.jp/~chiba/javassist/ - javassist-3.9.0.GA.jar from http://sourceforge.net/projects/jboss/files/Javassist/ - - licensed under the Mozilla Public License, Version 1.1 http://www.mozilla.org/MPL/1.1/ (as follows) - - Copyright (c) 2007-2012 VMware, Inc. All Rights Reserved. - - MOZILLA PUBLIC LICENSE - Version 1.1 - - --------------- - - 1. Definitions. - - 1.0.1. "Commercial Use" means distribution or otherwise making the - Covered Code available to a third party. - - 1.1. "Contributor" means each entity that creates or contributes to - the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Code, prior Modifications used by a Contributor, and the Modifications - made by that particular Contributor. - - 1.3. "Covered Code" means the Original Code or Modifications or the - combination of the Original Code and Modifications, in each case - including portions thereof. - - 1.4. "Electronic Distribution Mechanism" means a mechanism generally - accepted in the software development community for the electronic - transfer of data. - - 1.5. "Executable" means Covered Code in any form other than Source - Code. - - 1.6. 
"Initial Developer" means the individual or entity identified - as the Initial Developer in the Source Code notice required by Exhibit - A. - - 1.7. "Larger Work" means a work which combines Covered Code or - portions thereof with code not governed by the terms of this License. - - 1.8. "License" means this document. - - 1.8.1. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means any addition to or deletion from the - substance or structure of either the Original Code or any previous - Modifications. When Covered Code is released as a series of files, a - Modification is: - A. Any addition to or deletion from the contents of a file - containing Original Code or previous Modifications. - - B. Any new file that contains any part of the Original Code or - previous Modifications. - - 1.10. "Original Code" means Source Code of computer software code - which is described in the Source Code notice required by Exhibit A as - Original Code, and which, at the time of its release under this - License is not already Covered Code governed by this License. - - 1.10.1. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.11. "Source Code" means the preferred form of the Covered Code for - making modifications to it, including all modules it contains, plus - any associated interface definition files, scripts used to control - compilation and installation of an Executable, or source code - differential comparisons against either the Original Code or another - well known, available Covered Code of the Contributor's choice. 
The - Source Code can be in a compressed or archival form, provided the - appropriate decompression or de-archiving software is widely available - for no charge. - - 1.12. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, this - License or a future version of this License issued under Section 6.1. - For legal entities, "You" includes any entity which controls, is - controlled by, or is under common control with You. For purposes of - this definition, "control" means (a) the power, direct or indirect, - to cause the direction or management of such entity, whether by - contract or otherwise, or (b) ownership of more than fifty percent - (50%) of the outstanding shares or beneficial ownership of such - entity. - - 2. Source Code License. - - 2.1. The Initial Developer Grant. - The Initial Developer hereby grants You a world-wide, royalty-free, - non-exclusive license, subject to third party intellectual property - claims: - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Code (or portions thereof) with or without Modifications, and/or - as part of a Larger Work; and - - (b) under Patents Claims infringed by the making, using or - selling of Original Code, to make, have made, use, practice, - sell, and offer for sale, and/or otherwise dispose of the - Original Code (or portions thereof). - - (c) the licenses granted in this Section 2.1(a) and (b) are - effective on the date Initial Developer first distributes - Original Code under the terms of this License. 
- - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: 1) for code that You delete from the Original Code; 2) - separate from the Original Code; or 3) for infringements caused - by: i) the modification of the Original Code or ii) the - combination of the Original Code with other software or devices. - - 2.2. Contributor Grant. - Subject to third party intellectual property claims, each Contributor - hereby grants You a world-wide, royalty-free, non-exclusive license - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor, to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof) either on an - unmodified basis, with other Modifications, as Covered Code - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or - selling of Modifications made by that Contributor either alone - and/or in combination with its Contributor Version (or portions - of such combination), to make, use, sell, offer for sale, have - made, and/or otherwise dispose of: 1) Modifications made by that - Contributor (or portions thereof); and 2) the combination of - Modifications made by that Contributor with its Contributor - Version (or portions of such combination). - - (c) the licenses granted in Sections 2.2(a) and 2.2(b) are - effective on the date Contributor first makes Commercial Use of - the Covered Code. 
- - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: 1) for any code that Contributor has deleted from the - Contributor Version; 2) separate from the Contributor Version; - 3) for infringements caused by: i) third party modifications of - Contributor Version or ii) the combination of Modifications made - by that Contributor with other software (except as part of the - Contributor Version) or other devices; or 4) under Patent Claims - infringed by Covered Code in the absence of Modifications made by - that Contributor. - - 3. Distribution Obligations. - - 3.1. Application of License. - The Modifications which You create or to which You contribute are - governed by the terms of this License, including without limitation - Section 2.2. The Source Code version of Covered Code may be - distributed only under the terms of this License or a future version - of this License released under Section 6.1, and You must include a - copy of this License with every copy of the Source Code You - distribute. You may not offer or impose any terms on any Source Code - version that alters or restricts the applicable version of this - License or the recipients' rights hereunder. However, You may include - an additional document offering the additional rights described in - Section 3.5. - - 3.2. Availability of Source Code. - Any Modification which You create or to which You contribute must be - made available in Source Code form under the terms of this License - either on the same media as an Executable version or via an accepted - Electronic Distribution Mechanism to anyone to whom you made an - Executable version available; and if made available via Electronic - Distribution Mechanism, must remain available for at least twelve (12) - months after the date it initially became available, or at least six - (6) months after a subsequent version of that particular Modification - has been made available to such recipients. 
You are responsible for - ensuring that the Source Code version remains available even if the - Electronic Distribution Mechanism is maintained by a third party. - - 3.3. Description of Modifications. - You must cause all Covered Code to which You contribute to contain a - file documenting the changes You made to create that Covered Code and - the date of any change. You must include a prominent statement that - the Modification is derived, directly or indirectly, from Original - Code provided by the Initial Developer and including the name of the - Initial Developer in (a) the Source Code, and (b) in any notice in an - Executable version or related documentation in which You describe the - origin or ownership of the Covered Code. - - 3.4. Intellectual Property Matters - (a) Third Party Claims. - If Contributor has knowledge that a license under a third party's - intellectual property rights is required to exercise the rights - granted by such Contributor under Sections 2.1 or 2.2, - Contributor must include a text file with the Source Code - distribution titled "LEGAL" which describes the claim and the - party making the claim in sufficient detail that a recipient will - know whom to contact. If Contributor obtains such knowledge after - the Modification is made available as described in Section 3.2, - Contributor shall promptly modify the LEGAL file in all copies - Contributor makes available thereafter and shall take other steps - (such as notifying appropriate mailing lists or newsgroups) - reasonably calculated to inform those who received the Covered - Code that new knowledge has been obtained. - - (b) Contributor APIs. - If Contributor's Modifications include an application programming - interface and Contributor has knowledge of patent licenses which - are reasonably necessary to implement that API, Contributor must - also include this information in the LEGAL file. - - (c) Representations. 
- Contributor represents that, except as disclosed pursuant to - Section 3.4(a) above, Contributor believes that Contributor's - Modifications are Contributor's original creation(s) and/or - Contributor has sufficient rights to grant the rights conveyed by - this License. - - 3.5. Required Notices. - You must duplicate the notice in Exhibit A in each file of the Source - Code. If it is not possible to put such notice in a particular Source - Code file due to its structure, then You must include such notice in a - location (such as a relevant directory) where a user would be likely - to look for such a notice. If You created one or more Modification(s) - You may add your name as a Contributor to the notice described in - Exhibit A. You must also duplicate this License in any documentation - for the Source Code where You describe recipients' rights or ownership - rights relating to Covered Code. You may choose to offer, and to - charge a fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Code. However, You - may do so only on Your own behalf, and not on behalf of the Initial - Developer or any Contributor. You must make it absolutely clear than - any such warranty, support, indemnity or liability obligation is - offered by You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred by the - Initial Developer or such Contributor as a result of warranty, - support, indemnity or liability terms You offer. - - 3.6. Distribution of Executable Versions. - You may distribute Covered Code in Executable form only if the - requirements of Section 3.1-3.5 have been met for that Covered Code, - and if You include a notice stating that the Source Code version of - the Covered Code is available under the terms of this License, - including a description of how and where You have fulfilled the - obligations of Section 3.2. 
The notice must be conspicuously included - in any notice in an Executable version, related documentation or - collateral in which You describe recipients' rights relating to the - Covered Code. You may distribute the Executable version of Covered - Code or ownership rights under a license of Your choice, which may - contain terms different from this License, provided that You are in - compliance with the terms of this License and that the license for the - Executable version does not attempt to limit or alter the recipient's - rights in the Source Code version from the rights set forth in this - License. If You distribute the Executable version under a different - license You must make it absolutely clear that any terms which differ - from this License are offered by You alone, not by the Initial - Developer or any Contributor. You hereby agree to indemnify the - Initial Developer and every Contributor for any liability incurred by - the Initial Developer or such Contributor as a result of any such - terms You offer. - - 3.7. Larger Works. - You may create a Larger Work by combining Covered Code with other code - not governed by the terms of this License and distribute the Larger - Work as a single product. In such a case, You must make sure the - requirements of this License are fulfilled for the Covered Code. - - 4. Inability to Comply Due to Statute or Regulation. - - If it is impossible for You to comply with any of the terms of this - License with respect to some or all of the Covered Code due to - statute, judicial order, or regulation then You must: (a) comply with - the terms of this License to the maximum extent possible; and (b) - describe the limitations and the code they affect. Such description - must be included in the LEGAL file described in Section 3.4 and must - be included with all distributions of the Source Code. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - - 5. Application of this License. - - This License applies to code to which the Initial Developer has - attached the notice in Exhibit A and to related Covered Code. - - 6. Versions of the License. - - 6.1. New Versions. - Netscape Communications Corporation ("Netscape") may publish revised - and/or new versions of the License from time to time. Each version - will be given a distinguishing version number. - - 6.2. Effect of New Versions. - Once Covered Code has been published under a particular version of the - License, You may always continue to use it under the terms of that - version. You may also choose to use such Covered Code under the terms - of any subsequent version of the License published by Netscape. No one - other than Netscape has the right to modify the terms applicable to - Covered Code created under this License. - - 6.3. Derivative Works. - If You create or use a modified version of this License (which you may - only do in order to apply it to code which is not already Covered Code - governed by this License), You must (a) rename Your license so that - the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", - "MPL", "NPL" or any confusingly similar phrase do not appear in your - license (except to note that your license differs from this License) - and (b) otherwise make it clear that Your version of the license - contains terms which differ from the Mozilla Public License and - Netscape Public License. (Filling in the name of the Initial - Developer, Original Code or Contributor in the notice described in - Exhibit A shall not of themselves be deemed to be modifications of - this License.) - - 7. DISCLAIMER OF WARRANTY. 
- - COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, - WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF - DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. - THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE - IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, - YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE - COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER - OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - - 8. TERMINATION. - - 8.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to cure - such breach within 30 days of becoming aware of the breach. All - sublicenses to the Covered Code which are properly granted shall - survive any termination of this License. Provisions which, by their - nature, must remain in effect beyond the termination of this License - shall survive. - - 8.2. 
If You initiate litigation by asserting a patent infringement - claim (excluding declatory judgment actions) against Initial Developer - or a Contributor (the Initial Developer or Contributor against whom - You file such action is referred to as "Participant") alleging that: - - (a) such Participant's Contributor Version directly or indirectly - infringes any patent, then any and all rights granted by such - Participant to You under Sections 2.1 and/or 2.2 of this License - shall, upon 60 days notice from Participant terminate prospectively, - unless if within 60 days after receipt of notice You either: (i) - agree in writing to pay Participant a mutually agreeable reasonable - royalty for Your past and future use of Modifications made by such - Participant, or (ii) withdraw Your litigation claim with respect to - the Contributor Version against such Participant. If within 60 days - of notice, a reasonable royalty and payment arrangement are not - mutually agreed upon in writing by the parties or the litigation claim - is not withdrawn, the rights granted by Participant to You under - Sections 2.1 and/or 2.2 automatically terminate at the expiration of - the 60 day notice period specified above. - - (b) any software, hardware, or device, other than such Participant's - Contributor Version, directly or indirectly infringes any patent, then - any rights granted to You by such Participant under Sections 2.1(b) - and 2.2(b) are revoked effective as of the date You first made, used, - sold, distributed, or had made, Modifications made by that - Participant. - - 8.3. 
If You assert a patent infringement claim against Participant - alleging that such Participant's Contributor Version directly or - indirectly infringes any patent where such claim is resolved (such as - by license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 8.4. In the event of termination under Sections 8.1 or 8.2 above, - all end user license agreements (excluding distributors and resellers) - which have been validly granted by You or any distributor hereunder - prior to termination shall survive termination. - - 9. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL - DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, - OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR - ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY - CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, - WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY - RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW - PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE - EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO - THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - - 10. U.S. GOVERNMENT END USERS. - - The Covered Code is a "commercial item," as that term is defined in - 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial computer - software" and "commercial computer software documentation," as such - terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 - C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), - all U.S. Government End Users acquire Covered Code with only those - rights set forth herein. - - 11. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - California law provisions (except to the extent applicable law, if - any, provides otherwise), excluding its conflict-of-law provisions. - With respect to disputes in which at least one party is a citizen of, - or an entity chartered or registered to do business in the United - States of America, any litigation relating to this License shall be - subject to the jurisdiction of the Federal Courts of the Northern - District of California, with venue lying in Santa Clara County, - California, with the losing party responsible for costs, including - without limitation, court costs and reasonable attorneys' fees and - expenses. The application of the United Nations Convention on - Contracts for the International Sale of Goods is expressly excluded. - Any law or regulation which provides that the language of a contract - shall be construed against the drafter shall not apply to this - License. - - 12. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - - 13. 
MULTIPLE-LICENSED CODE. - - Initial Developer may designate portions of the Covered Code as - "Multiple-Licensed". "Multiple-Licensed" means that the Initial - Developer permits you to utilize portions of the Covered Code under - Your choice of the NPL or the alternative licenses, if any, specified - by the Initial Developer in the file described in Exhibit A. - - EXHIBIT A -Mozilla Public License. - - ``The contents of this file are subject to the Mozilla Public License - Version 1.1 (the "License"); you may not use this file except in - compliance with the License. You may obtain a copy of the License at - http://www.mozilla.org/MPL/ - - Software distributed under the License is distributed on an "AS IS" - basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the - License for the specific language governing rights and limitations - under the License. - - The Original Code is RabbitMQ. - - The Initial Developer of the Original Code is VMware, Ltd.. - Portions created by VMware, Ltd. are Copyright (C) - 2007-2012 VMware, Inc.. All Rights Reserved. - - Contributor(s): . - - Alternatively, the contents of this file may be used under the terms - of the GNU General Public License Version 2 license (the "[GPL] License"), in which case the - provisions of [GPL] License are applicable instead of those - above. If you wish to allow use of your version of this file only - under the terms of the [GPL] License and not to allow others to use - your version of this file under the MPL, indicate your decision by - deleting the provisions above and replace them with the notice and - other provisions required by the [GPL] License. If you do not delete - the provisions above, a recipient may use your version of this file - under either the MPL or the [GPL] License." - - [NOTE: The text of this Exhibit A may differ slightly from the text of - the notices in the Source Code files of the Original Code. 
You should - use the text of this Exhibit A rather than the text found in the - Original Code Source Code for Your Modifications.] - - - from VMware, Inc http://www.vmware.com/ - rabbitmq-client.jar from http://www.rabbitmq.com/java-client.html - -Within the patches/systemvm/debian/config/etc directory - placed in the public domain - by Adiscon GmbH http://www.adiscon.com/ - rsyslog.conf - by Simon Kelley - dnsmasq.conf - vpcdnsmasq.conf - -Within the patches/systemvm/debian/config/etc/apache2 directory - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2012 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - httpd.conf - ports.conf - sites-available/default - sites-available/default-ssl - vhostexample.conf - -Within the patches/systemvm/debian/config/etc/ssh/ directory - licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) - - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list - of conditions and the following disclaimer. Redistributions in binary form must - reproduce the above copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with the - distribution. - - Neither the name of the author nor the names of contributors may be used to - endorse or promote products derived from this software without specific prior - written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from OpenSSH Project http://www.openssh.org/ - sshd_config - -Within the patches/systemvm/debian/config/root/redundant_router directory - placed in the public domain - by The netfilter.org project http://www.netfilter.org/ - conntrackd.conf.templ - -Within the scripts/storage/secondary directory - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2010-2011 OpenStack, LLC. - from OpenStack, LLC http://www.openstack.org - swift - -Within the scripts/vm/hypervisor/xenserver directory - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2010-2011 OpenStack, LLC. - from OpenStack, LLC http://www.openstack.org - swift - -Within the target/jar directory - placed in the public domain - by Distributed Computing Laboratory at Emory University http://creativecommons.org/licenses/publicdomain/ - cloud-backport-util-concurrent-3.0.jar from http://backport-jsr166.sourceforge.net/ - - licensed under the Apache License, Version 1.1 http://www.apache.org/licenses/LICENSE-1.1 (as follows) - - Copyright (c) 2012 The Apache Software Foundation - - /* ==================================================================== - * The Apache Software License, Version 1.1 - * - * Copyright (c) 2000 The Apache Software Foundation. All rights - * reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. The end-user documentation included with the redistribution, - * if any, must include the following acknowledgment: - * "This product includes software developed by the - * Apache Software Foundation (http://www.apache.org/)." - * Alternately, this acknowledgment may appear in the software itself, - * if and wherever such third-party acknowledgments normally appear. - * - * 4. The names "Apache" and "Apache Software Foundation" must - * not be used to endorse or promote products derived from this - * software without prior written permission. For written - * permission, please contact apache@apache.org. - * - * 5. Products derived from this software may not be called "Apache", - * nor may "Apache" appear in their name, without prior written - * permission of the Apache Software Foundation. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * ==================================================================== - * - * This software consists of voluntary contributions made by many - * individuals on behalf of the Apache Software Foundation. For more - * information on the Apache Software Foundation, please see - * <http://www.apache.org/>. - */ - - from The Apache Software Foundation http://www.apache.org/ - cloud-commons-discovery.jar from http://commons.apache.org/discovery/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2012 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - cloud-axis.jar from http://axis.apache.org/axis/ - cloud-cglib.jar from http://cglib.sourceforge.net/ - cloud-commons-codec-1.5.jar from http://commons.apache.org/codec/ - cloud-commons-collections-3.2.1.jar from http://commons.apache.org/collections/ - cloud-commons-configuration-1.8.jar from http://commons.apache.org/configuration/ - cloud-commons-dbcp-1.4.jar from http://commons.apache.org/dbcp/ - cloud-commons-httpclient-3.1.jar from http://hc.apache.org/httpclient-3.x/ - cloud-commons-lang-2.6.jar from http://commons.apache.org/lang/ - cloud-commons-logging-1.1.1.jar from http://commons.apache.org/logging/ - cloud-commons-pool-1.5.6.jar from http://commons.apache.org/pool/ - cloud-log4j-extras.jar from http://logging.apache.org/log4j/companions/extras/ - cloud-log4j.jar from 
http://logging.apache.org/log4j/ - cloud-ws-commons-util-1.0.2.jar from http://ws.apache.org/commons/util/ - cloud-xmlrpc-client-3.1.3.jar from http://ws.apache.org/xmlrpc/client.html - cloud-xmlrpc-common-3.1.3.jar from http://ws.apache.org/xmlrpc/xmlrpc-common/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2007-2010, The JASYPT team (http://www.jasypt.org) - from The JASYPT team http://www.jasypt.org - cloud-jasypt-1.9.jar from http://www.jasypt.org - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2003-2007 Luck Consulting Pty Ltd - from Luck Consulting Pty Ltd http://gregluck.com/blog/about/ - cloud-ehcache.jar from http://ehcache.org/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2009 Google Inc. - from Google Inc. http://google.com - cloud-google-gson-1.7.1.jar from http://code.google.com/p/google-gson/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - - from Jetty Committers http://jetty.codehaus.org/jetty/ - jetty-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26-sources.jar - jetty-util-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar - - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2009, Caringo, Inc. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from Caringo, Inc. http://www.caringo.com/ - CAStorSDK.jar from http://www.castor.org/download.html - - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2002-2011 Atsuhiko Yamanaka, JCraft,Inc. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. 
Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from JCraft http://www.jcraft.com/ - cloud-jsch-0.1.42.jar from http://www.jcraft.com/jsch/ - - licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) - - Copyright (c) 2007-2008 Trilead AG (http://www.trilead.com) - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. - - from Trilead AG http://www.trilead.com - cloud-trilead-ssh2-build213.jar from http://sourceforge.net/projects/orion-ssh2/ - - licensed under the Bouncy Castle adaptation of MIT X11 License http://www.bouncycastle.org/licence.html (as follows) - - - Please note: our license is an adaptation of the MIT X11 License and should be - read as such. - - LICENSE Copyright (c) 2000 - 2011 The Legion Of The Bouncy Castle - (http://www.bouncycastle.org) - - Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from The Legion Of The Bouncy Castle http://www.bouncycastle.org - cloud-bcprov-jdk16-1.45.jar from http://repo1.maven.org/maven2/org/bouncycastle/bcprov-jdk16/1.45/bcprov-jdk16-1.45-sources.jar - - licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) - - Copyright (c) 2006 Sun Microsystems, Inc. All rights reserved. - - COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - - 1. Definitions. - - 1.1. "Contributor" means each individual or entity that - creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the - Original Software, prior Modifications used by a - Contributor (if any), and the Modifications made by that - particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or - (b) Modifications, or (c) the combination of files - containing Original Software with files containing - Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form - other than Source Code. - - 1.5. "Initial Developer" means the individual or entity - that first makes Original Software available under this - License. - - 1.6. "Larger Work" means a work which combines Covered - Software or portions thereof with code not governed by the - terms of this License. - - 1.7. "License" means this document. - - 1.8. 
"Licensable" means having the right to grant, to the - maximum extent possible, whether at the time of the initial - grant or subsequently acquired, any and all of the rights - conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable - form of any of the following: - - A. Any file that results from an addition to, - deletion from or modification of the contents of a - file containing Original Software or previous - Modifications; - - B. Any new file that contains any part of the - Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and - Executable form of computer software code that is - originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned - or hereafter acquired, including without limitation, - method, process, and apparatus claims, in any patent - Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer - software code in which modifications are made and (b) - associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal - entity exercising rights under, and complying with all of - the terms of, this License. For legal entities, "You" - includes any entity which controls, is controlled by, or is - under common control with You. For purposes of this - definition, "control" means (a) the power, direct or - indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (b) ownership - of more than fifty percent (50%) of the outstanding shares - or beneficial ownership of such entity. - - 2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the - Initial Developer hereby grants You a world-wide, - royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Initial Developer, - to use, reproduce, modify, display, perform, - sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using or selling of Original Software, to make, have - made, use, practice, sell, and offer for sale, and/or - otherwise dispose of the Original Software (or - portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) - are effective on the date Initial Developer first - distributes or otherwise makes the Original Software - available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent - license is granted: (1) for code that You delete from - the Original Software, or (2) for infringements - caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original - Software with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Contributor to - use, reproduce, modify, display, perform, sublicense - and distribute the Modifications created by such - Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as - Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using, or selling of Modifications made by that - Contributor either alone and/or in combination with - its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, - have made, and/or otherwise dispose of: (1) - Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications - made by that Contributor with its Contributor Version - (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and - 2.2(b) are effective on the date Contributor first - distributes or otherwise makes the Modifications - available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent - license is granted: (1) for any code that Contributor - has deleted from the Contributor Version; (2) for - infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the - combination of Modifications made by that Contributor - with other software (except as part of the - Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the - absence of Modifications made by that Contributor. - - 3. Distribution Obligations. - - 3.1. Availability of Source Code. 
- - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in - Source Code form and that Source Code form must be - distributed only under the terms of this License. You must - include a copy of this License with every copy of the - Source Code form of the Covered Software You distribute or - otherwise make available. You must inform recipients of any - such Covered Software in Executable form as to how they can - obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used - for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You - contribute are governed by the terms of this License. You - represent that You believe Your Modifications are Your - original creation(s) and/or You have sufficient rights to - grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications - that identifies You as the Contributor of the Modification. - You may not remove or alter any copyright, patent or - trademark notices contained within the Covered Software, or - any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered - Software in Source Code form that alters or restricts the - applicable version of this License or the recipients' - rights hereunder. You may choose to offer, and to charge a - fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on - behalf of the Initial Developer or any Contributor. 
You - must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by - You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of - warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered - Software under the terms of this License or under the terms - of a license of Your choice, which may contain terms - different from this License, provided that You are in - compliance with the terms of this License and that the - license for the Executable form does not attempt to limit - or alter the recipient's rights in the Source Code form - from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a - different license, You must make it absolutely clear that - any terms which differ from this License are offered by You - alone, not by the Initial Developer or Contributor. You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms - You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software - with other code not governed by the terms of this License - and distribute the Larger Work as a single product. In such - a case, You must make sure the requirements of this License - are fulfilled for the Covered Software. - - 4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and - may publish revised and/or new versions of this License - from time to time. Each version will be given a - distinguishing version number. Except as provided in - Section 4.3, no one other than the license steward has the - right to modify this License. - - 4.2. 
Effect of New Versions. - - You may always continue to use, distribute or otherwise - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. If the Initial Developer includes a - notice in the Original Software prohibiting it from being - distributed or otherwise made available under any - subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to - use, distribute or otherwise make the Covered Software - available under the terms of any subsequent version of the - License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a - new license for Your Original Software, You may create and - use a modified version of this License if You: (a) rename - the license and remove any references to the name of the - license steward (except to note that the license differs - from this License); and (b) otherwise make it clear that - the license contains terms which differ from this License. - - 5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF - ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. 
- - 6. TERMINATION. - - 6.1. This License and the rights granted hereunder will - terminate automatically if You fail to comply with terms - herein and fail to cure such breach within 30 days of - becoming aware of the breach. Provisions which, by their - nature, must remain in effect beyond the termination of - this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or - a Contributor (the Initial Developer or Contributor against - whom You assert such claim is referred to as "Participant") - alleging that the Participant Software (meaning the - Contributor Version where the Participant is a Contributor - or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any - patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial - Developer (if the Initial Developer is not the Participant) - and all Contributors under Sections 2.1 and/or 2.2 of this - License shall, upon 60 days notice from Participant - terminate prospectively and automatically at the expiration - of such 60 day notice period, unless if within such 60 day - period You withdraw Your claim with respect to the - Participant Software against such Participant either - unilaterally or pursuant to a written agreement with - Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 - above, all end user licenses that have been validly granted - by You or any distributor hereunder prior to termination - (excluding licenses granted to You by any distributor) - shall survive termination. - - 7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - - 8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users - acquire Covered Software with only those rights set forth herein. - This U.S. Government Rights clause is in lieu of, and supersedes, - any other FAR, DFAR, or other clause or provision that addresses - Government rights in computer software under this License. - - 9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the - extent necessary to make it enforceable. 
This License shall be - governed by the law of the jurisdiction specified in a notice - contained within the Original Software (except to the extent - applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation - relating to this License shall be subject to the jurisdiction of - the courts located in the jurisdiction and venue specified in a - notice contained within the Original Software, with the losing - party responsible for costs, including, without limitation, court - costs and reasonable attorneys' fees and expenses. The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall - be construed against the drafter shall not apply to this License. - You agree that You alone are responsible for compliance with the - United States export administration regulations (and the export - control laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - - 10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - - from Project GlassFish http://glassfish.java.net/ - cloud-ejb-api-3.0.jar from http://repo1.maven.org/maven2/javax/ejb/ejb-api/3.0/ejb-api-3.0-sources.jar - cloud-jstl-1.2.jar from http://jstl.java.net/ - - licensed under the COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 http://www.opensource.org/licenses/CDDL-1.0 (as follows) - - Copyright (c) 1997-2010 Oracle and/or its affiliates. All rights reserved. 
- - COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - - 1. Definitions. - - 1.1. "Contributor" means each individual or entity that - creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the - Original Software, prior Modifications used by a - Contributor (if any), and the Modifications made by that - particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or - (b) Modifications, or (c) the combination of files - containing Original Software with files containing - Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form - other than Source Code. - - 1.5. "Initial Developer" means the individual or entity - that first makes Original Software available under this - License. - - 1.6. "Larger Work" means a work which combines Covered - Software or portions thereof with code not governed by the - terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the - maximum extent possible, whether at the time of the initial - grant or subsequently acquired, any and all of the rights - conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable - form of any of the following: - - A. Any file that results from an addition to, - deletion from or modification of the contents of a - file containing Original Software or previous - Modifications; - - B. Any new file that contains any part of the - Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. "Original Software" means the Source Code and - Executable form of computer software code that is - originally released under this License. - - 1.11. 
"Patent Claims" means any patent claim(s), now owned - or hereafter acquired, including without limitation, - method, process, and apparatus claims, in any patent - Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer - software code in which modifications are made and (b) - associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal - entity exercising rights under, and complying with all of - the terms of, this License. For legal entities, "You" - includes any entity which controls, is controlled by, or is - under common control with You. For purposes of this - definition, "control" means (a) the power, direct or - indirect, to cause the direction or management of such - entity, whether by contract or otherwise, or (b) ownership - of more than fifty percent (50%) of the outstanding shares - or beneficial ownership of such entity. - - 2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the - Initial Developer hereby grants You a world-wide, - royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Initial Developer, - to use, reproduce, modify, display, perform, - sublicense and distribute the Original Software (or - portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using or selling of Original Software, to make, have - made, use, practice, sell, and offer for sale, and/or - otherwise dispose of the Original Software (or - portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) - are effective on the date Initial Developer first - distributes or otherwise makes the Original Software - available to a third party under the terms of this - License. 
- - (d) Notwithstanding Section 2.1(b) above, no patent - license is granted: (1) for code that You delete from - the Original Software, or (2) for infringements - caused by: (i) the modification of the Original - Software, or (ii) the combination of the Original - Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than - patent or trademark) Licensable by Contributor to - use, reproduce, modify, display, perform, sublicense - and distribute the Modifications created by such - Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as - Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, - using, or selling of Modifications made by that - Contributor either alone and/or in combination with - its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, - have made, and/or otherwise dispose of: (1) - Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications - made by that Contributor with its Contributor Version - (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and - 2.2(b) are effective on the date Contributor first - distributes or otherwise makes the Modifications - available to a third party. 
- - (d) Notwithstanding Section 2.2(b) above, no patent - license is granted: (1) for any code that Contributor - has deleted from the Contributor Version; (2) for - infringements caused by: (i) third party - modifications of Contributor Version, or (ii) the - combination of Modifications made by that Contributor - with other software (except as part of the - Contributor Version) or other devices; or (3) under - Patent Claims infringed by Covered Software in the - absence of Modifications made by that Contributor. - - 3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in - Source Code form and that Source Code form must be - distributed only under the terms of this License. You must - include a copy of this License with every copy of the - Source Code form of the Covered Software You distribute or - otherwise make available. You must inform recipients of any - such Covered Software in Executable form as to how they can - obtain such Covered Software in Source Code form in a - reasonable manner on or through a medium customarily used - for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You - contribute are governed by the terms of this License. You - represent that You believe Your Modifications are Your - original creation(s) and/or You have sufficient rights to - grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications - that identifies You as the Contributor of the Modification. - You may not remove or alter any copyright, patent or - trademark notices contained within the Covered Software, or - any notices of licensing or any descriptive text giving - attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. 
- - You may not offer or impose any terms on any Covered - Software in Source Code form that alters or restricts the - applicable version of this License or the recipients' - rights hereunder. You may choose to offer, and to charge a - fee for, warranty, support, indemnity or liability - obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on - behalf of the Initial Developer or any Contributor. You - must make it absolutely clear that any such warranty, - support, indemnity or liability obligation is offered by - You alone, and You hereby agree to indemnify the Initial - Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of - warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered - Software under the terms of this License or under the terms - of a license of Your choice, which may contain terms - different from this License, provided that You are in - compliance with the terms of this License and that the - license for the Executable form does not attempt to limit - or alter the recipient's rights in the Source Code form - from the rights set forth in this License. If You - distribute the Covered Software in Executable form under a - different license, You must make it absolutely clear that - any terms which differ from this License are offered by You - alone, not by the Initial Developer or Contributor. You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms - You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software - with other code not governed by the terms of this License - and distribute the Larger Work as a single product. 
In such - a case, You must make sure the requirements of this License - are fulfilled for the Covered Software. - - 4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and - may publish revised and/or new versions of this License - from time to time. Each version will be given a - distinguishing version number. Except as provided in - Section 4.3, no one other than the license steward has the - right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. If the Initial Developer includes a - notice in the Original Software prohibiting it from being - distributed or otherwise made available under any - subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the - version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to - use, distribute or otherwise make the Covered Software - available under the terms of any subsequent version of the - License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a - new license for Your Original Software, You may create and - use a modified version of this License if You: (a) rename - the license and remove any references to the name of the - license steward (except to note that the license differs - from this License); and (b) otherwise make it clear that - the license contains terms which differ from this License. - - 5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF - ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. - - 6. TERMINATION. - - 6.1. This License and the rights granted hereunder will - terminate automatically if You fail to comply with terms - herein and fail to cure such breach within 30 days of - becoming aware of the breach. Provisions which, by their - nature, must remain in effect beyond the termination of - this License shall survive. - - 6.2. 
If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or - a Contributor (the Initial Developer or Contributor against - whom You assert such claim is referred to as "Participant") - alleging that the Participant Software (meaning the - Contributor Version where the Participant is a Contributor - or the Original Software where the Participant is the - Initial Developer) directly or indirectly infringes any - patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial - Developer (if the Initial Developer is not the Participant) - and all Contributors under Sections 2.1 and/or 2.2 of this - License shall, upon 60 days notice from Participant - terminate prospectively and automatically at the expiration - of such 60 day notice period, unless if within such 60 day - period You withdraw Your claim with respect to the - Participant Software against such Participant either - unilaterally or pursuant to a written agreement with - Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 - above, all end user licenses that have been validly granted - by You or any distributor hereunder prior to termination - (excluding licenses granted to You by any distributor) - shall survive termination. - - 7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - - 8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 C.F.R. ¤ - 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. - 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 - through 227.7202-4 (June 1995), all U.S. Government End Users - acquire Covered Software with only those rights set forth herein. - This U.S. Government Rights clause is in lieu of, and supersedes, - any other FAR, DFAR, or other clause or provision that addresses - Government rights in computer software under this License. - - 9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the - extent necessary to make it enforceable. This License shall be - governed by the law of the jurisdiction specified in a notice - contained within the Original Software (except to the extent - applicable law, if any, provides otherwise), excluding such - jurisdiction's conflict-of-law provisions. Any litigation - relating to this License shall be subject to the jurisdiction of - the courts located in the jurisdiction and venue specified in a - notice contained within the Original Software, with the losing - party responsible for costs, including, without limitation, court - costs and reasonable attorneys' fees and expenses. 
The - application of the United Nations Convention on Contracts for the - International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall - be construed against the drafter shall not apply to this License. - You agree that You alone are responsible for compliance with the - United States export administration regulations (and the export - control laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - - 10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - - from Oracle and/or its affiliates http://oracle.com - cloud-email.jar from http://kenai.com/projects/javamail - - licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows) - - Copyright (c) IBM Corp 2006 - - Common Public License Version 1.0 (CPL) - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC - LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM - CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - - 1. DEFINITIONS - - "Contribution means: - - a) in the case of the initial Contributor, the initial code and documentation - distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - - i) changes to the Program, and - - ii) additions to the Program; - - where such changes and/or additions to the Program originate from and are - distributed by that particular Contributor. 
A Contribution 'originates' from a - Contributor if it was added to the Program by such Contributor itself or anyone - acting on such Contributor's behalf. Contributions do not include additions to - the Program which: (i) are separate modules of software distributed in - conjunction with the Program under their own license agreement, and (ii) are not - derivative works of the Program. - - "Contributor means any person or entity that distributes the Program. - - "Licensed Patents mean patent claims licensable by a Contributor which are - "necessarily infringed by the use or sale of its Contribution alone or when - "combined with the Program. - - "Program means the Contributions distributed in accordance with this Agreement. - - "Recipient means anyone who receives the Program under this Agreement, including - "all Contributors. - - 2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free copyright license to - reproduce, prepare derivative works of, publicly display, publicly perform, - distribute and sublicense the Contribution of such Contributor, if any, and such - derivative works, in source code and object code form. - - b) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed - Patents to make, use, sell, offer to sell, import and otherwise transfer the - Contribution of such Contributor, if any, in source code and object code form. - This patent license shall apply to the combination of the Contribution and the - Program if, at the time the Contribution is added by the Contributor, such - addition of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other combinations - which include the Contribution. No hardware per se is licensed hereunder. 
- - c) Recipient understands that although each Contributor grants the licenses to - its Contributions set forth herein, no assurances are provided by any - Contributor that the Program does not infringe the patent or other intellectual - property rights of any other entity. Each Contributor disclaims any liability to - Recipient for claims brought by any other entity based on infringement of - intellectual property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby assumes sole - responsibility to secure any other intellectual property rights needed, if any. - For example, if a third party patent license is required to allow Recipient to - distribute the Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has sufficient copyright - rights in its Contribution, if any, to grant the copyright license set forth in - this Agreement. - - 3. 
REQUIREMENTS - - A Contributor may choose to distribute the Program in object code form under its - own license agreement, provided that: - - a) it complies with the terms and conditions of this Agreement; and - - b) its license agreement: - - i) effectively disclaims on behalf of all Contributors all warranties and - conditions, express and implied, including warranties or conditions of title and - non-infringement, and implied warranties or conditions of merchantability and - fitness for a particular purpose; - - ii) effectively excludes on behalf of all Contributors all liability for - damages, including direct, indirect, special, incidental and consequential - damages, such as lost profits; - - iii) states that any provisions which differ from this Agreement are offered by - that Contributor alone and not by any other party; and - - iv) states that source code for the Program is available from such Contributor, - and informs licensees how to obtain it in a reasonable manner on or through a - medium customarily used for software exchange. - - When the Program is made available in source code form: - - a) it must be made available under this Agreement; and - - b) a copy of this Agreement must be included with each copy of the Program. - - Contributors may not remove or alter any copyright notices contained within the - Program. - - Each Contributor must identify itself as the originator of its Contribution, if - any, in a manner that reasonably allows subsequent Recipients to identify the - originator of the Contribution. - - 4. COMMERCIAL DISTRIBUTION - - Commercial distributors of software may accept certain responsibilities with - respect to end users, business partners and the like. While this license is - intended to facilitate the commercial use of the Program, the Contributor who - includes the Program in a commercial product offering should do so in a manner - which does not create potential liability for other Contributors. 
Therefore, if - a Contributor includes the Program in a commercial product offering, such - Contributor ("Commercial Contributor") hereby agrees to defend and indemnify - every other Contributor ("Indemnified Contributor") against any losses, damages - and costs (collectively "Losses") arising from claims, lawsuits and other legal - actions brought by a third party against the Indemnified Contributor to the - extent caused by the acts or omissions of such Commercial Contributor in - connection with its distribution of the Program in a commercial product - offering. The obligations in this section do not apply to any claims or Losses - relating to any actual or alleged intellectual property infringement. In order - to qualify, an Indemnified Contributor must: a) promptly notify the Commercial - Contributor in writing of such claim, and b) allow the Commercial Contributor to - control, and cooperate with the Commercial Contributor in, the defense and any - related settlement negotiations. The Indemnified Contributor may participate in - any such claim at its own expense. - - For example, a Contributor might include the Program in a commercial product - offering, Product X. That Contributor is then a Commercial Contributor. If that - Commercial Contributor then makes performance claims, or offers warranties - related to Product X, those performance claims and warranties are such - Commercial Contributor's responsibility alone. Under this section, the - Commercial Contributor would have to defend claims against the other - Contributors related to those performance claims and warranties, and if a court - requires any other Contributor to pay any damages as a result, the Commercial - Contributor must pay those damages. - - 5. 
NO WARRANTY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR - IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, - NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each - Recipient is solely responsible for determining the appropriateness of using and - distributing the Program and assumes all risks associated with its exercise of - rights under this Agreement, including but not limited to the risks and costs of - program errors, compliance with applicable laws, damage to or loss of data, - programs or equipment, and unavailability or interruption of operations. - - 6. DISCLAIMER OF LIABILITY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY - CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST - PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS - GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - 7. GENERAL - - If any provision of this Agreement is invalid or unenforceable under applicable - law, it shall not affect the validity or enforceability of the remainder of the - terms of this Agreement, and without further action by the parties hereto, such - provision shall be reformed to the minimum extent necessary to make such - provision valid and enforceable. 
- - If Recipient institutes patent litigation against a Contributor with respect to - a patent applicable to software (including a cross-claim or counterclaim in a - lawsuit), then any patent licenses granted by that Contributor to such Recipient - under this Agreement shall terminate as of the date such litigation is filed. In - addition, if Recipient institutes patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that the Program - itself (excluding combinations of the Program with other software or hardware) - infringes such Recipient's patent(s), then such Recipient's rights granted under - Section 2(b) shall terminate as of the date such litigation is filed. - - All Recipient's rights under this Agreement shall terminate if it fails to - comply with any of the material terms or conditions of this Agreement and does - not cure such failure in a reasonable period of time after becoming aware of - such noncompliance. If all Recipient's rights under this Agreement terminate, - Recipient agrees to cease use and distribution of the Program as soon as - reasonably practicable. However, Recipient's obligations under this Agreement - and any licenses granted by Recipient relating to the Program shall continue and - survive. - - Everyone is permitted to copy and distribute copies of this Agreement, but in - order to avoid inconsistency the Agreement is copyrighted and may only be - modified in the following manner. The Agreement Steward reserves the right to - publish new versions (including revisions) of this Agreement from time to time. - No one other than the Agreement Steward has the right to modify this Agreement. - IBM is the initial Agreement Steward. IBM may assign the responsibility to serve - as the Agreement Steward to a suitable separate entity. Each new version of the - Agreement will be given a distinguishing version number. 
The Program (including - Contributions) may always be distributed subject to the version of the Agreement - under which it was received. In addition, after a new version of the Agreement - is published, Contributor may elect to distribute the Program (including its - Contributions) under the new version. Except as expressly stated in Sections - 2(a) and 2(b) above, Recipient receives no rights or licenses to the - intellectual property of any Contributor under this Agreement, whether - expressly, by implication, estoppel or otherwise. All rights in the Program not - expressly granted under this Agreement are reserved. - - This Agreement is governed by the laws of the State of New York and the - intellectual property laws of the United States of America. No party to this - Agreement will bring a legal action under this Agreement more than one year - after the cause of action arose. Each party waives its rights to a jury trial in - any resulting litigation. - - from IBM Corp http://www.ibm.com/ - cloud-wsdl4j-1.6.2.jar from http://sourceforge.net/projects/wsdl4j/ - cloud-wsdl4j.jar from http://sourceforge.net/projects/wsdl4j/ - - licensed under the Common Public License - v 1.0 http://opensource.org/licenses/cpl1.0 (as follows) - - - Common Public License Version 1.0 (CPL) - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC - LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM - CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - - 1. DEFINITIONS - - "Contribution means: - - a) in the case of the initial Contributor, the initial code and documentation - distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - - i) changes to the Program, and - - ii) additions to the Program; - - where such changes and/or additions to the Program originate from and are - distributed by that particular Contributor. 
A Contribution 'originates' from a - Contributor if it was added to the Program by such Contributor itself or anyone - acting on such Contributor's behalf. Contributions do not include additions to - the Program which: (i) are separate modules of software distributed in - conjunction with the Program under their own license agreement, and (ii) are not - derivative works of the Program. - - "Contributor means any person or entity that distributes the Program. - - "Licensed Patents mean patent claims licensable by a Contributor which are - "necessarily infringed by the use or sale of its Contribution alone or when - "combined with the Program. - - "Program means the Contributions distributed in accordance with this Agreement. - - "Recipient means anyone who receives the Program under this Agreement, including - "all Contributors. - - 2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free copyright license to - reproduce, prepare derivative works of, publicly display, publicly perform, - distribute and sublicense the Contribution of such Contributor, if any, and such - derivative works, in source code and object code form. - - b) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed - Patents to make, use, sell, offer to sell, import and otherwise transfer the - Contribution of such Contributor, if any, in source code and object code form. - This patent license shall apply to the combination of the Contribution and the - Program if, at the time the Contribution is added by the Contributor, such - addition of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other combinations - which include the Contribution. No hardware per se is licensed hereunder. 
- - c) Recipient understands that although each Contributor grants the licenses to - its Contributions set forth herein, no assurances are provided by any - Contributor that the Program does not infringe the patent or other intellectual - property rights of any other entity. Each Contributor disclaims any liability to - Recipient for claims brought by any other entity based on infringement of - intellectual property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby assumes sole - responsibility to secure any other intellectual property rights needed, if any. - For example, if a third party patent license is required to allow Recipient to - distribute the Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has sufficient copyright - rights in its Contribution, if any, to grant the copyright license set forth in - this Agreement. - - 3. 
REQUIREMENTS - - A Contributor may choose to distribute the Program in object code form under its - own license agreement, provided that: - - a) it complies with the terms and conditions of this Agreement; and - - b) its license agreement: - - i) effectively disclaims on behalf of all Contributors all warranties and - conditions, express and implied, including warranties or conditions of title and - non-infringement, and implied warranties or conditions of merchantability and - fitness for a particular purpose; - - ii) effectively excludes on behalf of all Contributors all liability for - damages, including direct, indirect, special, incidental and consequential - damages, such as lost profits; - - iii) states that any provisions which differ from this Agreement are offered by - that Contributor alone and not by any other party; and - - iv) states that source code for the Program is available from such Contributor, - and informs licensees how to obtain it in a reasonable manner on or through a - medium customarily used for software exchange. - - When the Program is made available in source code form: - - a) it must be made available under this Agreement; and - - b) a copy of this Agreement must be included with each copy of the Program. - - Contributors may not remove or alter any copyright notices contained within the - Program. - - Each Contributor must identify itself as the originator of its Contribution, if - any, in a manner that reasonably allows subsequent Recipients to identify the - originator of the Contribution. - - 4. COMMERCIAL DISTRIBUTION - - Commercial distributors of software may accept certain responsibilities with - respect to end users, business partners and the like. While this license is - intended to facilitate the commercial use of the Program, the Contributor who - includes the Program in a commercial product offering should do so in a manner - which does not create potential liability for other Contributors. 
Therefore, if - a Contributor includes the Program in a commercial product offering, such - Contributor ("Commercial Contributor") hereby agrees to defend and indemnify - every other Contributor ("Indemnified Contributor") against any losses, damages - and costs (collectively "Losses") arising from claims, lawsuits and other legal - actions brought by a third party against the Indemnified Contributor to the - extent caused by the acts or omissions of such Commercial Contributor in - connection with its distribution of the Program in a commercial product - offering. The obligations in this section do not apply to any claims or Losses - relating to any actual or alleged intellectual property infringement. In order - to qualify, an Indemnified Contributor must: a) promptly notify the Commercial - Contributor in writing of such claim, and b) allow the Commercial Contributor to - control, and cooperate with the Commercial Contributor in, the defense and any - related settlement negotiations. The Indemnified Contributor may participate in - any such claim at its own expense. - - For example, a Contributor might include the Program in a commercial product - offering, Product X. That Contributor is then a Commercial Contributor. If that - Commercial Contributor then makes performance claims, or offers warranties - related to Product X, those performance claims and warranties are such - Commercial Contributor's responsibility alone. Under this section, the - Commercial Contributor would have to defend claims against the other - Contributors related to those performance claims and warranties, and if a court - requires any other Contributor to pay any damages as a result, the Commercial - Contributor must pay those damages. - - 5. 
NO WARRANTY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR - IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, - NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each - Recipient is solely responsible for determining the appropriateness of using and - distributing the Program and assumes all risks associated with its exercise of - rights under this Agreement, including but not limited to the risks and costs of - program errors, compliance with applicable laws, damage to or loss of data, - programs or equipment, and unavailability or interruption of operations. - - 6. DISCLAIMER OF LIABILITY - - EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY - CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST - PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS - GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - - 7. GENERAL - - If any provision of this Agreement is invalid or unenforceable under applicable - law, it shall not affect the validity or enforceability of the remainder of the - terms of this Agreement, and without further action by the parties hereto, such - provision shall be reformed to the minimum extent necessary to make such - provision valid and enforceable. 
- - If Recipient institutes patent litigation against a Contributor with respect to - a patent applicable to software (including a cross-claim or counterclaim in a - lawsuit), then any patent licenses granted by that Contributor to such Recipient - under this Agreement shall terminate as of the date such litigation is filed. In - addition, if Recipient institutes patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that the Program - itself (excluding combinations of the Program with other software or hardware) - infringes such Recipient's patent(s), then such Recipient's rights granted under - Section 2(b) shall terminate as of the date such litigation is filed. - - All Recipient's rights under this Agreement shall terminate if it fails to - comply with any of the material terms or conditions of this Agreement and does - not cure such failure in a reasonable period of time after becoming aware of - such noncompliance. If all Recipient's rights under this Agreement terminate, - Recipient agrees to cease use and distribution of the Program as soon as - reasonably practicable. However, Recipient's obligations under this Agreement - and any licenses granted by Recipient relating to the Program shall continue and - survive. - - Everyone is permitted to copy and distribute copies of this Agreement, but in - order to avoid inconsistency the Agreement is copyrighted and may only be - modified in the following manner. The Agreement Steward reserves the right to - publish new versions (including revisions) of this Agreement from time to time. - No one other than the Agreement Steward has the right to modify this Agreement. - IBM is the initial Agreement Steward. IBM may assign the responsibility to serve - as the Agreement Steward to a suitable separate entity. Each new version of the - Agreement will be given a distinguishing version number. 
The Program (including - Contributions) may always be distributed subject to the version of the Agreement - under which it was received. In addition, after a new version of the Agreement - is published, Contributor may elect to distribute the Program (including its - Contributions) under the new version. Except as expressly stated in Sections - 2(a) and 2(b) above, Recipient receives no rights or licenses to the - intellectual property of any Contributor under this Agreement, whether - expressly, by implication, estoppel or otherwise. All rights in the Program not - expressly granted under this Agreement are reserved. - - This Agreement is governed by the laws of the State of New York and the - intellectual property laws of the United States of America. No party to this - Agreement will bring a legal action under this Agreement more than one year - after the cause of action arose. Each party waives its rights to a jury trial in - any resulting litigation. - - from JUnit Project http://www.junit.org/ - cloud-junit.jar from http://kentbeck.github.com/junit/ - - licensed under the Eclipse Distribution License Version 1.0 http://www.eclipse.org/org/documents/edl-v10.php (as follows) - - Copyright (c) 2012 The Eclipse Foundation. - - Eclipse Distribution License Version 1.0 - - Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. - - All rights reserved. - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list - of conditions and the following disclaimer. Redistributions in binary form must - reproduce the above copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with the - distribution. Neither the name of the Eclipse Foundation, Inc. 
nor the names of - its contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from The Eclipse Foundation http://www.eclipse.org - cloud-javax.persistence-2.0.0.jar from http://wiki.eclipse.org/EclipseLink/Release/2.0.0 - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (C) 2008 Tóth István - 2008-2012 Daniel Veillard - 2009-2011 Bryan Kearney - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from The libvirt project http://libvirt.org/ - libvirt-java-0.4.9 - - licensed under the XStream BSD Style License https://fisheye.codehaus.org/browse/xstream/trunk/LICENSE.txt?hb=true (as follows) - - - (BSD Style License) - - Copyright (c) 2003-2006, Joe Walnes Copyright (c) 2006-2011, XStream Committers - All rights reserved. - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list - of conditions and the following disclaimer. Redistributions in binary form must - reproduce the above copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with the - distribution. - - Neither the name of XStream nor the names of its contributors may be used to - endorse or promote products derived from this software without specific prior - written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from XStream Committers http://xstream.codehaus.org/ - cloud-xstream-1.3.1.jar from http://xstream.codehaus.org/repository.html - -Within the ui/lib directory - placed in the public domain - by Eric Meyer http://meyerweb.com/eric/ - reset.css from http://meyerweb.com/eric/tools/css/reset/ - - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2006 Google Inc. - from Google Inc. http://google.com - excanvas.js from http://code.google.com/p/explorercanvas/ - - licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) - - Copyright (c) 2008 George McGinley Smith - All rights reserved. - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list - of conditions and the following disclaimer. Redistributions in binary form must - reproduce the above copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided with the - distribution. - - Neither the name of the author nor the names of contributors may be used to - endorse or promote products derived from this software without specific prior - written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from George McGinley Smith - jquery.easing.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from The Dojo Foundation http://dojofoundation.org/ - require.js from http://github.com/jrburke/requirejs - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2011, John Resig - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from John Resig - jquery.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2006 - 2011 Jörn Zaefferer - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from Jorn Zaefferer - jquery.validate.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2010, Sebastian Tschan - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from Sebastian Tschan https://blueimp.net - jquery.md5.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2006 Klaus Hartl (stilbuero.de) - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Klaus Hartl http://stilbuero.de - jquery.cookies.js - -Within the ui/lib/flot directory - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Released under the MIT license by IOLA, December 2007. 
- - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from IOLA http://www.iola.dk/ - jquery.flot.crosshair.js - jquery.flot.fillbetween.js - jquery.flot.image.js - jquery.flot.js - jquery.flot.navigate.js - jquery.flot.resize.js - jquery.flot.selection.js - jquery.flot.stack.js - jquery.flot.symbol.js - jquery.flot.threshold.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Created by Brian Medendorp, June 2009 - Updated November 2009 with contributions from: btburnett3, Anthony Aragues and Xavi Ivars - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from Brian Medendorp - jquery.pie.js - - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Ole Laursen - jquery.colorhelpers.js - -Within the ui/lib/jquery-ui directory - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from jQuery UI Developers http://jqueryui.com/about - css/jquery-ui.css - index.html - js/jquery-ui.js - -Within the ui/lib/qunit directory - licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - Copyright (c) 2012 John Resig, Jörn Zaefferer - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from Jorn Zaefferer - qunit.css from http://docs.jquery.com/QUnit - qunit.js from http://docs.jquery.com/QUnit - -Within the utils/src/com/cloud/utils/db directory - licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - Copyright (c) 2004 Clinton Begin - from Clinton Begin http://code.google.com/p/mybatis/ - ScriptRunner.java from http://code.google.com/p/mybatis/ - From e4baff2281526c7725785a3f14bb352222f7eb9c Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Tue, 19 Feb 2013 13:00:11 -0500 Subject: [PATCH 070/486] Adding legal docs for java-ipv6.jar Signed-off-by: Chip Childers --- tools/whisker/LICENSE | 5 +++++ tools/whisker/descriptor-for-packaging.xml | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index 7ef2e45c4e6..4080d9cb253 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -2994,6 +2994,11 @@ Within the target/jar directory jetty-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26-sources.jar jetty-util-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + + from Jan Van Besien mailto:janvanbesien@gmail.com + java-ipv6.jar from http://code.google.com/p/java-ipv6 + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) Copyright (c) 2009, Caringo, Inc. diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index 3db74183089..cc661b7a691 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -2431,6 +2431,10 @@ Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). 
id='dojofoundation.org' name='The Dojo Foundation' url='http://dojofoundation.org/' /> + Copyright (c) 2013 The Apache Software Foundation @@ -2827,6 +2831,12 @@ All rights reserved. + + + + + +
From 58bcd0eefb67f6fcc362e0581acd29f5448c6e77 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Tue, 19 Feb 2013 13:22:42 -0500 Subject: [PATCH 071/486] Adding springsource artifacts to package legal docs. Signed-off-by: Chip Childers --- tools/whisker/LICENSE | 7 +++++++ tools/whisker/descriptor-for-packaging.xml | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index 4080d9cb253..f192c134552 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -2999,6 +2999,13 @@ Within the target/jar directory from Jan Van Besien mailto:janvanbesien@gmail.com java-ipv6.jar from http://code.google.com/p/java-ipv6 + licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) + Copyright 2002-2012 the original author or authors. + from SpringSource, a division of VMware. http://www.springsource.org/ + spring-context-3.1.2.RELEASE.jar from https://github.com/SpringSource/spring-framework + spring-core-3.1.2.RELEASE.jar from https://github.com/SpringSource/spring-framework + spring-web-3.1.2.RELEASE.jar from https://github.com/SpringSource/spring-framework + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) Copyright (c) 2009, Caringo, Inc. diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index cc661b7a691..2e93f4dc642 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -2435,6 +2435,10 @@ Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). id='person:janvanbesien' name='Jan Van Besien' url='mailto:janvanbesien@gmail.com' /> + Copyright (c) 2013 The Apache Software Foundation @@ -2837,6 +2841,16 @@ All rights reserved. + + +Copyright 2002-2012 the original author or authors. 
+ + + + + + + From 9333f979db443bb091715c68367d1b9dc3995647 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Tue, 19 Feb 2013 13:50:09 -0500 Subject: [PATCH 072/486] Adding legal docs for esapi-2.0.1.jar to the package LICENSE Signed-off-by: Chip Childers --- tools/whisker/LICENSE | 32 ++++++++++++++++++++++ tools/whisker/descriptor-for-packaging.xml | 13 +++++++++ 2 files changed, 45 insertions(+) diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index f192c134552..2f99021f9bc 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -3006,6 +3006,38 @@ Within the target/jar directory spring-core-3.1.2.RELEASE.jar from https://github.com/SpringSource/spring-framework spring-web-3.1.2.RELEASE.jar from https://github.com/SpringSource/spring-framework + licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) + + Copyright (c) 2007, The OWASP Foundation + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list + of conditions and the following disclaimer. Redistributions in binary form must + reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the + distribution. + + Neither the name of the author nor the names of contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + from OWASP Foundation Inc. https://www.owasp.org/ + esapi-2.0.1.jar from http://code.google.com/p/owasp-esapi-java/ + licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows) Copyright (c) 2009, Caringo, Inc. diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index 2e93f4dc642..5f6a49ade62 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -2439,6 +2439,10 @@ Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). id='springsource.com' name='SpringSource, a division of VMware.' url='http://www.springsource.org/' /> + Copyright (c) 2013 The Apache Software Foundation @@ -2851,6 +2855,15 @@ Copyright 2002-2012 the original author or authors. + + +Copyright (c) 2007, The OWASP Foundation +All rights reserved. 
+ + + + + From 05069bb4d92384f9ff6e39571c7b850b19a8acea Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 19 Feb 2013 11:54:43 -0800 Subject: [PATCH 073/486] Add db upgrade path for ipv6 --- setup/db/db/schema-40to410.sql | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 9e639c623a2..82688714bd2 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -1656,3 +1656,37 @@ CREATE VIEW `cloud`.`data_center_view` AS left join `cloud`.`domain` ON data_center.domain_id = domain.id; +CREATE TABLE `cloud`.`user_ipv6_address` ( + `id` bigint unsigned NOT NULL UNIQUE auto_increment, + `uuid` varchar(40), + `account_id` bigint unsigned NULL, + `domain_id` bigint unsigned NULL, + `ip_address` char(50) NOT NULL, + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', + `vlan_id` bigint unsigned NOT NULL, + `state` char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', + `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', + `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', + `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', + `physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', + `created` datetime NULL COMMENT 'Date this ip was allocated to someone', + PRIMARY KEY (`id`), + UNIQUE (`ip_address`, `source_network_id`), + CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), + CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, + CONSTRAINT 
`fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), + CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'IPv6 gateway for this network'; +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network'; + +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address'; +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address'; + +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_gateway` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_cidr` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_range` varchar(255); From 5b94646d5ea7df1e7be2ba58e7dae24c0da77433 Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Tue, 19 Feb 2013 14:58:19 -0700 Subject: [PATCH 074/486] Summary: Increase tomcat servers memory size per dev list discussion Detail: Per Kelven Yang when Javelin was merged into master/4.1, the memory settings for management server should be a heap of 2g and a max perm size of 512m, due to increased footprint introduced by spring framework/Javelin. 
Signed-off-by: Marcus Sorensen 1361311099 -0700 --- client/tomcatconf/tomcat6-nonssl.conf.in | 2 +- client/tomcatconf/tomcat6-ssl.conf.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/client/tomcatconf/tomcat6-nonssl.conf.in b/client/tomcatconf/tomcat6-nonssl.conf.in index d69d6ed94c0..8b7a23d7230 100644 --- a/client/tomcatconf/tomcat6-nonssl.conf.in +++ b/client/tomcatconf/tomcat6-nonssl.conf.in @@ -41,7 +41,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M" +JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M -XX:MaxPermSize=512m" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/client/tomcatconf/tomcat6-ssl.conf.in b/client/tomcatconf/tomcat6-ssl.conf.in index ecb93b23abc..c6cdf27b152 100644 --- a/client/tomcatconf/tomcat6-ssl.conf.in +++ b/client/tomcatconf/tomcat6-ssl.conf.in @@ -40,7 +40,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M" +JAVA_OPTS="-Djava.awt.headless=true 
-Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=512m -XX:PermSize=256M" # What user should run tomcat TOMCAT_USER="@MSUSER@" From 11d5eadc7221ceaa4adf3df83b3b4062710bce1b Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Tue, 19 Feb 2013 15:28:05 -0700 Subject: [PATCH 075/486] Summary: Re-adjusting tomcat memory settings per Rohit's emails Detail: adjusting MaxPerm to 800, Perm to 512. Signed-off-by: Marcus Sorensen 1361312885 -0700 --- client/tomcatconf/tomcat6-nonssl.conf.in | 2 +- client/tomcatconf/tomcat6-ssl.conf.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/client/tomcatconf/tomcat6-nonssl.conf.in b/client/tomcatconf/tomcat6-nonssl.conf.in index 8b7a23d7230..4a9a70f619e 100644 --- a/client/tomcatconf/tomcat6-nonssl.conf.in +++ b/client/tomcatconf/tomcat6-nonssl.conf.in @@ -41,7 +41,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M -XX:MaxPermSize=512m" +JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=512M -XX:MaxPermSize=800m" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/client/tomcatconf/tomcat6-ssl.conf.in b/client/tomcatconf/tomcat6-ssl.conf.in index c6cdf27b152..84b6d6275bb 
100644 --- a/client/tomcatconf/tomcat6-ssl.conf.in +++ b/client/tomcatconf/tomcat6-ssl.conf.in @@ -40,7 +40,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=512m -XX:PermSize=256M" +JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=800m -XX:PermSize=512M" # What user should run tomcat TOMCAT_USER="@MSUSER@" From 8296d41b5bd340e98d9e48ced884aab56f61e0ac Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Tue, 19 Feb 2013 15:12:06 -0800 Subject: [PATCH 076/486] CLOUDSTACK-1330: ec2-run-instances - When -n option is used to deploy multiple Vms API returns error even though few of the Vms have been deployed successfully. Changes: - Instead of throwing out error, return the response containing the info about the instances that were launched. 
--- .../bridge/service/core/ec2/EC2Engine.java | 72 ++++++++++--------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java index 281ecbd90bf..e92f845f2b1 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java @@ -1423,47 +1423,53 @@ public class EC2Engine extends ManagerBase { // now actually deploy the vms for( int i=0; i < createInstances; i++ ) { - CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), - request.getTemplateId(), zoneId, null, null, null, null, - null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), - null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); - EC2Instance vm = new EC2Instance(); - vm.setId(resp.getId().toString()); - vm.setName(resp.getName()); - vm.setZoneName(resp.getZoneName()); - vm.setTemplateId(resp.getTemplateId().toString()); - if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { - List securityGroupList = resp.getSecurityGroupList(); - for (CloudStackSecurityGroup securityGroup : securityGroupList) { - EC2SecurityGroup param = new EC2SecurityGroup(); - param.setId(securityGroup.getId()); - param.setName(securityGroup.getName()); - vm.addGroupName(param); + try{ + CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), + request.getTemplateId(), zoneId, null, null, null, null, + null, null, null, request.getKeyName(), null, (network != null ? 
network.getId() : null), + null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); + EC2Instance vm = new EC2Instance(); + vm.setId(resp.getId().toString()); + vm.setName(resp.getName()); + vm.setZoneName(resp.getZoneName()); + vm.setTemplateId(resp.getTemplateId().toString()); + if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { + List securityGroupList = resp.getSecurityGroupList(); + for (CloudStackSecurityGroup securityGroup : securityGroupList) { + EC2SecurityGroup param = new EC2SecurityGroup(); + param.setId(securityGroup.getId()); + param.setName(securityGroup.getName()); + vm.addGroupName(param); + } } - } - vm.setState(resp.getState()); - vm.setCreated(resp.getCreated()); - List nicList = resp.getNics(); - for (CloudStackNic nic : nicList) { - if (nic.getIsDefault()) { - vm.setPrivateIpAddress(nic.getIpaddress()); - break; + vm.setState(resp.getState()); + vm.setCreated(resp.getCreated()); + List nicList = resp.getNics(); + for (CloudStackNic nic : nicList) { + if (nic.getIsDefault()) { + vm.setPrivateIpAddress(nic.getIpaddress()); + break; + } } + vm.setIpAddress(resp.getIpAddress()); + vm.setAccountName(resp.getAccountName()); + vm.setDomainId(resp.getDomainId()); + vm.setHypervisor(resp.getHypervisor()); + vm.setServiceOffering( svcOffering.getName()); + vm.setKeyPairName(resp.getKeyPairName()); + instances.addInstance(vm); + countCreated++; + }catch(Exception e){ + logger.error("Failed to deploy VM number: "+ (i+1) +" due to error: "+e.getMessage()); + break; } - vm.setIpAddress(resp.getIpAddress()); - vm.setAccountName(resp.getAccountName()); - vm.setDomainId(resp.getDomainId()); - vm.setHypervisor(resp.getHypervisor()); - vm.setServiceOffering( svcOffering.getName()); - vm.setKeyPairName(resp.getKeyPairName()); - instances.addInstance(vm); - countCreated++; } if (0 == countCreated) { // TODO, we actually need to destroy left-over VMs when the exception is thrown - throw new 
EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); + throw new EC2ServiceException(ServerError.InternalError, "Failed to deploy instances" ); } + logger.debug("Could deploy "+ countCreated + " VM's successfully"); return instances; } catch( Exception e ) { From da75e4ac8addb1464ff602136336123e9347ec52 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 19 Feb 2013 15:04:34 -0800 Subject: [PATCH 077/486] IPv6: Add IPv6 for domain router view --- .../query/dao/DomainRouterJoinDaoImpl.java | 6 +++ .../api/query/vo/DomainRouterJoinVO.java | 51 +++++++++++++++++++ setup/db/db/schema-40to410.sql | 3 ++ 3 files changed, 60 insertions(+) diff --git a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index 3b780ee83ef..22e3badabac 100644 --- a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -116,6 +116,9 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase Date: Tue, 19 Feb 2013 15:16:50 -0800 Subject: [PATCH 078/486] IPv6: Add ipv6 for user vm view --- .../api/query/dao/UserVmJoinDaoImpl.java | 6 +++ .../com/cloud/api/query/vo/UserVmJoinVO.java | 47 +++++++++++++++++++ setup/db/db/schema-40to410.sql | 3 ++ 3 files changed, 56 insertions(+) diff --git a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 4a5ac33bea9..f561449fe2a 100644 --- a/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -186,6 +186,9 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setNetmask(userVm.getNetmask()); nicResponse.setNetworkid(userVm.getNetworkUuid()); nicResponse.setMacAddress(userVm.getMacAddress()); + nicResponse.setIp6Address(userVm.getIp6Address()); + 
nicResponse.setIp6Gateway(userVm.getIp6Gateway()); + nicResponse.setIp6Cidr(userVm.getIp6Cidr()); if (userVm.getBroadcastUri() != null) { nicResponse.setBroadcastUri(userVm.getBroadcastUri().toString()); } @@ -244,6 +247,9 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setNetmask(uvo.getNetmask()); nicResponse.setNetworkid(uvo.getNetworkUuid()); nicResponse.setMacAddress(uvo.getMacAddress()); + nicResponse.setIp6Address(uvo.getIp6Address()); + nicResponse.setIp6Gateway(uvo.getIp6Gateway()); + nicResponse.setIp6Cidr(uvo.getIp6Cidr()); if (uvo.getBroadcastUri() != null) { nicResponse.setBroadcastUri(uvo.getBroadcastUri().toString()); } diff --git a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java index 025db47d599..d7238224e4e 100644 --- a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java @@ -269,6 +269,15 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name = "netmask") private String netmask; + @Column(name = "ip6_address") + private String ip6Address; + + @Column(name = "ip6_gateway") + private String ip6Gateway; + + @Column(name = "ip6_cidr") + private String ip6Cidr; + @Column(name = "mac_address") private String macAddress; @@ -1611,4 +1620,42 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return toString; } + public String getIp6Address() { + return ip6Address; + } + + + + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + + + + public String getIp6Gateway() { + return ip6Gateway; + } + + + + + public void setIp6Gateway(String ip6Gateway) { + this.ip6Gateway = ip6Gateway; + } + + + + + public String getIp6Cidr() { + return ip6Cidr; + } + + + + + public void setIp6Cidr(String ip6Cidr) { + this.ip6Cidr = ip6Cidr; + } } diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 
fa0a17977a0..e1ab8151392 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -598,6 +598,9 @@ CREATE VIEW `cloud`.`user_vm_view` AS nics.uuid nic_uuid, nics.network_id network_id, nics.ip4_address ip_address, + nics.ip6_address ip6_address, + nics.ip6_gateway ip6_gateway, + nics.ip6_cidr ip6_cidr, nics.default_nic is_default_nic, nics.gateway gateway, nics.netmask netmask, From 2b76114bba7e25356b9d16d83839bb513374d1a3 Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Tue, 19 Feb 2013 19:21:53 -0500 Subject: [PATCH 079/486] CloudStack-1267: initialize log4j in agent main() --- agent/src/com/cloud/agent/AgentShell.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java index 0e020935e90..e736978a7a8 100644 --- a/agent/src/com/cloud/agent/AgentShell.java +++ b/agent/src/com/cloud/agent/AgentShell.java @@ -48,6 +48,7 @@ import com.cloud.agent.dao.StorageComponent; import com.cloud.agent.dao.impl.PropertiesStorage; import com.cloud.host.Host; import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.ProcessUtil; import com.cloud.utils.PropertiesUtil; @@ -611,6 +612,8 @@ public class AgentShell implements IAgentShell { public static void main(String[] args) { try { + LogUtils.initLog4j("log4j-cloud.xml"); + AgentShell shell = new AgentShell(); shell.init(args); shell.start(); From 4d80b71ec08ff4fb1f829175d571978186289692 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 19 Feb 2013 16:42:20 -0800 Subject: [PATCH 080/486] IPv6: Revoke the change of create-schema.sql The change is in scheme-40to410.sql now. 
--- setup/db/create-schema.sql | 7 ------- 1 file changed, 7 deletions(-) diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 3570dfeaad0..e59fb7d68f8 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -247,8 +247,6 @@ CREATE TABLE `cloud`.`networks` ( `broadcast_uri` varchar(255) COMMENT 'broadcast domain specifier', `gateway` varchar(15) COMMENT 'gateway for this network configuration', `cidr` varchar(18) COMMENT 'network cidr', - `ip6_gateway` varchar(50) COMMENT 'IPv6 gateway for this network', - `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network', `mode` varchar(32) COMMENT 'How to retrieve ip address in this network', `network_offering_id` bigint unsigned NOT NULL COMMENT 'network offering id that this configuration is created from', `physical_network_id` bigint unsigned COMMENT 'physical network id that this configuration is based on', @@ -312,8 +310,6 @@ CREATE TABLE `cloud`.`nics` ( `update_time` timestamp NOT NULL COMMENT 'time the state was changed', `isolation_uri` varchar(255) COMMENT 'id for isolation', `ip6_address` char(40) COMMENT 'ip6 address', - `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address', - `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address', `default_nic` tinyint NOT NULL COMMENT "None", `vm_type` varchar(32) COMMENT 'type of vm: System or User vm', `created` datetime NOT NULL COMMENT 'date created', @@ -541,10 +537,7 @@ CREATE TABLE `cloud`.`vlan` ( `vlan_id` varchar(255), `vlan_gateway` varchar(255), `vlan_netmask` varchar(255), - `ip6_gateway` varchar(255), - `ip6_cidr` varchar(255), `description` varchar(255), - `ip6_range` varchar(255), `vlan_type` varchar(255), `data_center_id` bigint unsigned NOT NULL, `network_id` bigint unsigned NOT NULL COMMENT 'id of corresponding network offering', From 73d5926d0fa9913b5c50037c9222189c42232f3f Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Tue, 19 Feb 2013 20:47:57 -0500 Subject: [PATCH 081/486] CLOUDSTACK-1333: enable 
logging at Spring startup phase --- client/WEB-INF/web.xml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/client/WEB-INF/web.xml b/client/WEB-INF/web.xml index 0d75165659e..626305b82fa 100644 --- a/client/WEB-INF/web.xml +++ b/client/WEB-INF/web.xml @@ -19,6 +19,14 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd" version="2.5"> + + + log4jConfigLocation + /WEB-INF/classes/log4j-cloud.xml + + + org.springframework.web.util.Log4jConfigListener + org.springframework.web.context.ContextLoaderListener From 483a11ce959046b35c78ab5bde94e3adb00a3597 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 19 Feb 2013 21:12:24 -0800 Subject: [PATCH 082/486] IPv6: Fix build of master All the table column should be modified before view was added --- setup/db/db/schema-40to410.sql | 43 +++++++--------------------------- 1 file changed, 9 insertions(+), 34 deletions(-) diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index e1ab8151392..9a59318354c 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -519,6 +519,15 @@ CREATE TABLE `cloud`.`user_ipv6_address` ( CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'IPv6 gateway for this network'; +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network'; + +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address'; +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address'; + +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_gateway` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_cidr` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD 
COLUMN `ip6_range` varchar(255); -- DB views for list api @@ -1662,37 +1671,3 @@ CREATE VIEW `cloud`.`data_center_view` AS left join `cloud`.`domain` ON data_center.domain_id = domain.id; -CREATE TABLE `cloud`.`user_ipv6_address` ( - `id` bigint unsigned NOT NULL UNIQUE auto_increment, - `uuid` varchar(40), - `account_id` bigint unsigned NULL, - `domain_id` bigint unsigned NULL, - `ip_address` char(50) NOT NULL, - `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', - `vlan_id` bigint unsigned NOT NULL, - `state` char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', - `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', - `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', - `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', - `physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', - `created` datetime NULL COMMENT 'Date this ip was allocated to someone', - PRIMARY KEY (`id`), - UNIQUE (`ip_address`, `source_network_id`), - CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), - CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), - CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_gateway` 
varchar(50) COMMENT 'IPv6 gateway for this network'; -ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network'; - -ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address'; -ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address'; - -ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_gateway` varchar(255); -ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_cidr` varchar(255); -ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_range` varchar(255); From 091682cf67665dce941f31a184e23d5af773b774 Mon Sep 17 00:00:00 2001 From: Chiradeep Vittal Date: Tue, 19 Feb 2013 23:36:08 -0800 Subject: [PATCH 083/486] CLOUDSTACK-1066: remove useless config.dat --- .../definitions/systemvmtemplate/config.dat | 878 ------------------ 1 file changed, 878 deletions(-) delete mode 100644 tools/appliance/definitions/systemvmtemplate/config.dat diff --git a/tools/appliance/definitions/systemvmtemplate/config.dat b/tools/appliance/definitions/systemvmtemplate/config.dat deleted file mode 100644 index bc71fb981db..00000000000 --- a/tools/appliance/definitions/systemvmtemplate/config.dat +++ /dev/null @@ -1,878 +0,0 @@ -Name: adduser/homedir-permission -Template: adduser/homedir-permission -Value: true -Owners: adduser - -Name: adduser/title -Template: adduser/title -Owners: adduser - -Name: apt-listchanges/confirm -Template: apt-listchanges/confirm -Value: false -Owners: apt-listchanges - -Name: apt-listchanges/email-address -Template: apt-listchanges/email-address -Value: root -Owners: apt-listchanges - -Name: apt-listchanges/frontend -Template: apt-listchanges/frontend -Value: pager -Owners: apt-listchanges - -Name: apt-listchanges/save-seen -Template: apt-listchanges/save-seen -Value: true -Owners: apt-listchanges - -Name: apt-listchanges/which -Template: apt-listchanges/which -Value: news -Owners: apt-listchanges - -Name: ca-certificates/enable_crts -Template: ca-certificates/enable_crts 
-Value: cacert.org/cacert.org.crt, debconf.org/ca.crt, mozilla/ACEDICOM_Root.crt, mozilla/AC_Raíz_Certicámara_S.A..crt, mozilla/Actalis_Authentication_Root_CA.crt, mozilla/AddTrust_External_Root.crt, mozilla/AddTrust_Low-Value_Services_Root.crt, mozilla/AddTrust_Public_Services_Root.crt, mozilla/AddTrust_Qualified_Certificates_Root.crt, mozilla/AffirmTrust_Commercial.crt, mozilla/AffirmTrust_Networking.crt, mozilla/AffirmTrust_Premium.crt, mozilla/AffirmTrust_Premium_ECC.crt, mozilla/America_Online_Root_Certification_Authority_1.crt, mozilla/America_Online_Root_Certification_Authority_2.crt, mozilla/ApplicationCA_-_Japanese_Government.crt, mozilla/A-Trust-nQual-03.crt, mozilla/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.crt, mozilla/Baltimore_CyberTrust_Root.crt, mozilla/Buypass_Class_2_CA_1.crt, mozilla/Buypass_Class_2_Root_CA.crt, mozilla/Buypass_Class_3_CA_1.crt, mozilla/Buypass_Class_3_Root_CA.crt, mozilla/CA_Disig.crt, mozilla/Camerfirma_Chambers_of_Commerce_Root.crt, mozilla/Camerfirma_Global_Chambersign_Root.crt, mozilla/Certigna.crt, mozilla/Certinomis_-_Autorité_Racine.crt, mozilla/Certplus_Class_2_Primary_CA.crt, mozilla/certSIGN_ROOT_CA.crt, mozilla/Certum_Root_CA.crt, mozilla/Certum_Trusted_Network_CA.crt, mozilla/Chambers_of_Commerce_Root_-_2008.crt, mozilla/CNNIC_ROOT.crt, mozilla/Comodo_AAA_Services_root.crt, mozilla/COMODO_Certification_Authority.crt, mozilla/COMODO_ECC_Certification_Authority.crt, mozilla/Comodo_Secure_Services_root.crt, mozilla/Comodo_Trusted_Services_root.crt, mozilla/ComSign_CA.crt, mozilla/ComSign_Secured_CA.crt, mozilla/Cybertrust_Global_Root.crt, mozilla/Deutsche_Telekom_Root_CA_2.crt, mozilla/DigiCert_Assured_ID_Root_CA.crt, mozilla/DigiCert_Global_Root_CA.crt, mozilla/DigiCert_High_Assurance_EV_Root_CA.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_1.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_3.crt, mozilla/DST_ACES_CA_X6.crt, mozilla/DST_Root_CA_X3.crt, 
mozilla/EBG_Elektronik_Sertifika_Hizmet_SaÄŸlayıcısı.crt, mozilla/EC-ACC.crt, mozilla/EE_Certification_Centre_Root_CA.crt, mozilla/E-Guven_Kok_Elektronik_Sertifika_Hizmet_Saglayicisi.crt, mozilla/Entrust.net_Premium_2048_Secure_Server_CA.crt, mozilla/Entrust.net_Secure_Server_CA.crt, mozilla/Entrust_Root_Certification_Authority.crt, mozilla/ePKI_Root_Certification_Authority.crt, mozilla/Equifax_Secure_CA.crt, mozilla/Equifax_Secure_eBusiness_CA_1.crt, mozilla/Equifax_Secure_eBusiness_CA_2.crt, mozilla/Equifax_Secure_Global_eBusiness_CA.crt, mozilla/Firmaprofesional_Root_CA.crt, mozilla/GeoTrust_Global_CA_2.crt, mozilla/GeoTrust_Global_CA.crt, mozilla/GeoTrust_Primary_Certification_Authority.crt, mozilla/GeoTrust_Primary_Certification_Authority_-_G2.crt, mozilla/GeoTrust_Primary_Certification_Authority_-_G3.crt, mozilla/GeoTrust_Universal_CA_2.crt, mozilla/GeoTrust_Universal_CA.crt, mozilla/Global_Chambersign_Root_-_2008.crt, mozilla/GlobalSign_Root_CA.crt, mozilla/GlobalSign_Root_CA_-_R2.crt, mozilla/GlobalSign_Root_CA_-_R3.crt, mozilla/Go_Daddy_Class_2_CA.crt, mozilla/Go_Daddy_Root_Certificate_Authority_-_G2.crt, mozilla/GTE_CyberTrust_Global_Root.crt, mozilla/Hellenic_Academic_and_Research_Institutions_RootCA_2011.crt, mozilla/Hongkong_Post_Root_CA_1.crt, mozilla/IGC_A.crt, mozilla/Izenpe.com.crt, mozilla/Juur-SK.crt, mozilla/Microsec_e-Szigno_Root_CA_2009.crt, mozilla/Microsec_e-Szigno_Root_CA.crt, mozilla/NetLock_Arany_=Class_Gold=_FÅ‘tanúsítvány.crt, mozilla/NetLock_Business_=Class_B=_Root.crt, mozilla/NetLock_Express_=Class_C=_Root.crt, mozilla/NetLock_Notary_=Class_A=_Root.crt, mozilla/NetLock_Qualified_=Class_QA=_Root.crt, mozilla/Network_Solutions_Certificate_Authority.crt, mozilla/OISTE_WISeKey_Global_Root_GA_CA.crt, mozilla/QuoVadis_Root_CA_2.crt, mozilla/QuoVadis_Root_CA_3.crt, mozilla/QuoVadis_Root_CA.crt, mozilla/Root_CA_Generalitat_Valenciana.crt, mozilla/RSA_Root_Certificate_1.crt, mozilla/RSA_Security_2048_v3.crt, mozilla/Secure_Global_CA.crt, 
mozilla/SecureSign_RootCA11.crt, mozilla/SecureTrust_CA.crt, mozilla/Security_Communication_EV_RootCA1.crt, mozilla/Security_Communication_RootCA2.crt, mozilla/Security_Communication_Root_CA.crt, mozilla/Sonera_Class_1_Root_CA.crt, mozilla/Sonera_Class_2_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA_-_G2.crt, mozilla/Starfield_Class_2_CA.crt, mozilla/Starfield_Root_Certificate_Authority_-_G2.crt, mozilla/Starfield_Services_Root_Certificate_Authority_-_G2.crt, mozilla/StartCom_Certification_Authority.crt, mozilla/StartCom_Certification_Authority_G2.crt, mozilla/S-TRUST_Authentication_and_Encryption_Root_CA_2005_PN.crt, mozilla/Swisscom_Root_CA_1.crt, mozilla/SwissSign_Gold_CA_-_G2.crt, mozilla/SwissSign_Platinum_CA_-_G2.crt, mozilla/SwissSign_Silver_CA_-_G2.crt, mozilla/TÜBİTAK_UEKAE_Kök_Sertifika_Hizmet_SaÄŸlayıcısı_-_Sürüm_3.crt, mozilla/Taiwan_GRCA.crt, mozilla/TC_TrustCenter_Class_2_CA_II.crt, mozilla/TC_TrustCenter_Class_3_CA_II.crt, mozilla/TC_TrustCenter_Universal_CA_I.crt, mozilla/TC_TrustCenter_Universal_CA_III.crt, mozilla/TDC_Internet_Root_CA.crt, mozilla/TDC_OCES_Root_CA.crt, mozilla/Thawte_Premium_Server_CA.crt, mozilla/thawte_Primary_Root_CA.crt, mozilla/thawte_Primary_Root_CA_-_G2.crt, mozilla/thawte_Primary_Root_CA_-_G3.crt, mozilla/Thawte_Server_CA.crt, mozilla/Trustis_FPS_Root_CA.crt, mozilla/T-TeleSec_GlobalRoot_Class_3.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_1.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_2.crt, mozilla/TWCA_Root_Certification_Authority.crt, mozilla/UTN_DATACorp_SGC_Root_CA.crt, mozilla/UTN_USERFirst_Email_Root_CA.crt, mozilla/UTN_USERFirst_Hardware_Root_CA.crt, mozilla/ValiCert_Class_1_VA.crt, mozilla/ValiCert_Class_2_VA.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G3.crt, 
mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G4.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Universal_Root_Certification_Authority.crt, mozilla/Visa_eCommerce_Root.crt, mozilla/Wells_Fargo_Root_CA.crt, mozilla/WellsSecure_Public_Root_Certificate_Authority.crt, mozilla/XRamp_Global_CA_Root.crt, spi-inc.org/spi-ca-2003.crt, spi-inc.org/spi-cacert-2008.crt -Owners: ca-certificates -Variables: - enable_crts = cacert.org/cacert.org.crt, debconf.org/ca.crt, mozilla/ACEDICOM_Root.crt, mozilla/AC_Raíz_Certicámara_S.A..crt, mozilla/Actalis_Authentication_Root_CA.crt, mozilla/AddTrust_External_Root.crt, mozilla/AddTrust_Low-Value_Services_Root.crt, mozilla/AddTrust_Public_Services_Root.crt, mozilla/AddTrust_Qualified_Certificates_Root.crt, mozilla/AffirmTrust_Commercial.crt, mozilla/AffirmTrust_Networking.crt, mozilla/AffirmTrust_Premium.crt, mozilla/AffirmTrust_Premium_ECC.crt, mozilla/America_Online_Root_Certification_Authority_1.crt, mozilla/America_Online_Root_Certification_Authority_2.crt, mozilla/ApplicationCA_-_Japanese_Government.crt, mozilla/A-Trust-nQual-03.crt, mozilla/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.crt, mozilla/Baltimore_CyberTrust_Root.crt, mozilla/Buypass_Class_2_CA_1.crt, mozilla/Buypass_Class_2_Root_CA.crt, mozilla/Buypass_Class_3_CA_1.crt, mozilla/Buypass_Class_3_Root_CA.crt, mozilla/CA_Disig.crt, mozilla/Camerfirma_Chambers_of_Commerce_Root.crt, mozilla/Camerfirma_Global_Chambersign_Root.crt, mozilla/Certigna.crt, 
mozilla/Certinomis_-_Autorité_Racine.crt, mozilla/Certplus_Class_2_Primary_CA.crt, mozilla/certSIGN_ROOT_CA.crt, mozilla/Certum_Root_CA.crt, mozilla/Certum_Trusted_Network_CA.crt, mozilla/Chambers_of_Commerce_Root_-_2008.crt, mozilla/CNNIC_ROOT.crt, mozilla/Comodo_AAA_Services_root.crt, mozilla/COMODO_Certification_Authority.crt, mozilla/COMODO_ECC_Certification_Authority.crt, mozilla/Comodo_Secure_Services_root.crt, mozilla/Comodo_Trusted_Services_root.crt, mozilla/ComSign_CA.crt, mozilla/ComSign_Secured_CA.crt, mozilla/Cybertrust_Global_Root.crt, mozilla/Deutsche_Telekom_Root_CA_2.crt, mozilla/DigiCert_Assured_ID_Root_CA.crt, mozilla/DigiCert_Global_Root_CA.crt, mozilla/DigiCert_High_Assurance_EV_Root_CA.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_1.crt, mozilla/Digital_Signature_Trust_Co._Global_CA_3.crt, mozilla/DST_ACES_CA_X6.crt, mozilla/DST_Root_CA_X3.crt, mozilla/EBG_Elektronik_Sertifika_Hizmet_SaÄŸlayıcısı.crt, mozilla/EC-ACC.crt, mozilla/EE_Certification_Centre_Root_CA.crt, mozilla/E-Guven_Kok_Elektronik_Sertifika_Hizmet_Saglayicisi.crt, mozilla/Entrust.net_Premium_2048_Secure_Server_CA.crt, mozilla/Entrust.net_Secure_Server_CA.crt, mozilla/Entrust_Root_Certification_Authority.crt, mozilla/ePKI_Root_Certification_Authority.crt, mozilla/Equifax_Secure_CA.crt, mozilla/Equifax_Secure_eBusiness_CA_1.crt, mozilla/Equifax_Secure_eBusiness_CA_2.crt, mozilla/Equifax_Secure_Global_eBusiness_CA.crt, mozilla/Firmaprofesional_Root_CA.crt, mozilla/GeoTrust_Global_CA_2.crt, mozilla/GeoTrust_Global_CA.crt, mozilla/GeoTrust_Primary_Certification_Authority.crt, mozilla/GeoTrust_Primary_Certification_Authority_-_G2.crt, mozilla/GeoTrust_Primary_Certification_Authority_-_G3.crt, mozilla/GeoTrust_Universal_CA_2.crt, mozilla/GeoTrust_Universal_CA.crt, mozilla/Global_Chambersign_Root_-_2008.crt, mozilla/GlobalSign_Root_CA.crt, mozilla/GlobalSign_Root_CA_-_R2.crt, mozilla/GlobalSign_Root_CA_-_R3.crt, mozilla/Go_Daddy_Class_2_CA.crt, 
mozilla/Go_Daddy_Root_Certificate_Authority_-_G2.crt, mozilla/GTE_CyberTrust_Global_Root.crt, mozilla/Hellenic_Academic_and_Research_Institutions_RootCA_2011.crt, mozilla/Hongkong_Post_Root_CA_1.crt, mozilla/IGC_A.crt, mozilla/Izenpe.com.crt, mozilla/Juur-SK.crt, mozilla/Microsec_e-Szigno_Root_CA_2009.crt, mozilla/Microsec_e-Szigno_Root_CA.crt, mozilla/NetLock_Arany_=Class_Gold=_FÅ‘tanúsítvány.crt, mozilla/NetLock_Business_=Class_B=_Root.crt, mozilla/NetLock_Express_=Class_C=_Root.crt, mozilla/NetLock_Notary_=Class_A=_Root.crt, mozilla/NetLock_Qualified_=Class_QA=_Root.crt, mozilla/Network_Solutions_Certificate_Authority.crt, mozilla/OISTE_WISeKey_Global_Root_GA_CA.crt, mozilla/QuoVadis_Root_CA_2.crt, mozilla/QuoVadis_Root_CA_3.crt, mozilla/QuoVadis_Root_CA.crt, mozilla/Root_CA_Generalitat_Valenciana.crt, mozilla/RSA_Root_Certificate_1.crt, mozilla/RSA_Security_2048_v3.crt, mozilla/Secure_Global_CA.crt, mozilla/SecureSign_RootCA11.crt, mozilla/SecureTrust_CA.crt, mozilla/Security_Communication_EV_RootCA1.crt, mozilla/Security_Communication_RootCA2.crt, mozilla/Security_Communication_Root_CA.crt, mozilla/Sonera_Class_1_Root_CA.crt, mozilla/Sonera_Class_2_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA.crt, mozilla/Staat_der_Nederlanden_Root_CA_-_G2.crt, mozilla/Starfield_Class_2_CA.crt, mozilla/Starfield_Root_Certificate_Authority_-_G2.crt, mozilla/Starfield_Services_Root_Certificate_Authority_-_G2.crt, mozilla/StartCom_Certification_Authority.crt, mozilla/StartCom_Certification_Authority_G2.crt, mozilla/S-TRUST_Authentication_and_Encryption_Root_CA_2005_PN.crt, mozilla/Swisscom_Root_CA_1.crt, mozilla/SwissSign_Gold_CA_-_G2.crt, mozilla/SwissSign_Platinum_CA_-_G2.crt, mozilla/SwissSign_Silver_CA_-_G2.crt, mozilla/TÜBİTAK_UEKAE_Kök_Sertifika_Hizmet_SaÄŸlayıcısı_-_Sürüm_3.crt, mozilla/Taiwan_GRCA.crt, mozilla/TC_TrustCenter_Class_2_CA_II.crt, mozilla/TC_TrustCenter_Class_3_CA_II.crt, mozilla/TC_TrustCenter_Universal_CA_I.crt, 
mozilla/TC_TrustCenter_Universal_CA_III.crt, mozilla/TDC_Internet_Root_CA.crt, mozilla/TDC_OCES_Root_CA.crt, mozilla/Thawte_Premium_Server_CA.crt, mozilla/thawte_Primary_Root_CA.crt, mozilla/thawte_Primary_Root_CA_-_G2.crt, mozilla/thawte_Primary_Root_CA_-_G3.crt, mozilla/Thawte_Server_CA.crt, mozilla/Trustis_FPS_Root_CA.crt, mozilla/T-TeleSec_GlobalRoot_Class_3.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_1.crt, mozilla/TURKTRUST_Certificate_Services_Provider_Root_2.crt, mozilla/TWCA_Root_Certification_Authority.crt, mozilla/UTN_DATACorp_SGC_Root_CA.crt, mozilla/UTN_USERFirst_Email_Root_CA.crt, mozilla/UTN_USERFirst_Hardware_Root_CA.crt, mozilla/ValiCert_Class_1_VA.crt, mozilla/ValiCert_Class_2_VA.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_1_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_2_Public_Primary_Certification_Authority_-_G3.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G2.crt, mozilla/Verisign_Class_3_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G4.crt, mozilla/VeriSign_Class_3_Public_Primary_Certification_Authority_-_G5.crt, mozilla/Verisign_Class_4_Public_Primary_Certification_Authority_-_G3.crt, mozilla/VeriSign_Universal_Root_Certification_Authority.crt, mozilla/Visa_eCommerce_Root.crt, mozilla/Wells_Fargo_Root_CA.crt, mozilla/WellsSecure_Public_Root_Certificate_Authority.crt, mozilla/XRamp_Global_CA_Root.crt, spi-inc.org/spi-ca-2003.crt, spi-inc.org/spi-cacert-2008.crt - -Name: ca-certificates/new_crts -Template: ca-certificates/new_crts -Owners: ca-certificates -Variables: - new_crts = - -Name: ca-certificates/title -Template: ca-certificates/title 
-Owners: ca-certificates - -Name: ca-certificates/trust_new_crts -Template: ca-certificates/trust_new_crts -Value: yes -Owners: ca-certificates - -Name: console-setup/charmap47 -Template: console-setup/charmap47 -Value: ISO-8859-1 -Owners: console-setup -Variables: - CHOICES = ARMSCII-8, CP1251, CP1255, CP1256, GEORGIAN-ACADEMY, GEORGIAN-PS, IBM1133, ISIRI-3342, ISO-8859-1, ISO-8859-10, ISO-8859-11, ISO-8859-13, ISO-8859-14, ISO-8859-15, ISO-8859-16, ISO-8859-2, ISO-8859-3, ISO-8859-4, ISO-8859-5, ISO-8859-6, ISO-8859-7, ISO-8859-8, ISO-8859-9, KOI8-R, KOI8-U, TIS-620, UTF-8, VISCII - -Name: console-setup/codeset47 -Template: console-setup/codeset47 -Value: # Latin1 and Latin5 - western Europe and Turkic languages -Owners: console-setup - -Name: console-setup/codesetcode -Template: console-setup/codesetcode -Value: Lat15 -Owners: console-setup - -Name: console-setup/fontface47 -Template: console-setup/fontface47 -Value: Fixed -Owners: console-setup -Variables: - CHOICES = Fixed, Terminus, TerminusBold, TerminusBoldVGA, VGA, Do not change the boot/kernel font, Let the system select a suitable font - -Name: console-setup/fontsize -Template: console-setup/fontsize -Value: 8x16 -Owners: console-setup - -Name: console-setup/fontsize-fb47 -Template: console-setup/fontsize-fb47 -Value: 8x16 -Owners: console-setup -Variables: - CHOICES = 8x13, 8x14, 8x15, 8x16, 8x18 - -Name: console-setup/fontsize-text47 -Template: console-setup/fontsize-text47 -Value: 8x16 -Owners: console-setup - -Name: console-setup/framebuffer_only -Template: console-setup/framebuffer_only -Owners: console-setup - -Name: console-setup/guess_font -Template: console-setup/guess_font -Owners: console-setup - -Name: console-setup/store_defaults_in_debconf_db -Template: console-setup/store_defaults_in_debconf_db -Value: true -Owners: console-setup - -Name: console-setup/use_system_font -Template: console-setup/use_system_font -Owners: console-setup - -Name: dash/sh -Template: dash/sh -Value: true -Owners: 
dash -Flags: seen - -Name: debconf-apt-progress/info -Template: debconf-apt-progress/info -Owners: debconf - -Name: debconf-apt-progress/media-change -Template: debconf-apt-progress/media-change -Owners: debconf - -Name: debconf-apt-progress/preparing -Template: debconf-apt-progress/preparing -Owners: debconf - -Name: debconf-apt-progress/title -Template: debconf-apt-progress/title -Owners: debconf - -Name: debconf/frontend -Template: debconf/frontend -Value: Dialog -Owners: debconf - -Name: debconf/priority -Template: debconf/priority -Value: high -Owners: debconf - -Name: debian-installer/console-setup-udeb/title -Template: debian-installer/console-setup-udeb/title -Owners: keyboard-configuration - -Name: debian-installer/country -Template: debian-installer/country -Value: US -Owners: d-i - -Name: debian-installer/language -Template: debian-installer/language -Value: en -Owners: d-i - -Name: dictionaries-common/default-ispell -Template: dictionaries-common/default-ispell -Value: american (American English) -Owners: dictionaries-common -Flags: seen -Variables: - choices = american (American English), british (British English) - echoices = american (American English), british (British English) - -Name: dictionaries-common/default-wordlist -Template: dictionaries-common/default-wordlist -Value: american (American English) -Owners: dictionaries-common -Flags: seen -Variables: - choices = american (American English) - echoices = american (American English) - -Name: dictionaries-common/invalid_debconf_value -Template: dictionaries-common/invalid_debconf_value -Owners: dictionaries-common - -Name: dictionaries-common/ispell-autobuildhash-message -Template: dictionaries-common/ispell-autobuildhash-message -Owners: dictionaries-common - -Name: dictionaries-common/move_old_usr_dict -Template: dictionaries-common/move_old_usr_dict -Owners: dictionaries-common - -Name: dictionaries-common/old_wordlist_link -Template: dictionaries-common/old_wordlist_link -Owners: 
dictionaries-common - -Name: dictionaries-common/remove_old_usr_dict_link -Template: dictionaries-common/remove_old_usr_dict_link -Value: false -Owners: dictionaries-common - -Name: dictionaries-common/selecting_ispell_wordlist_default -Template: dictionaries-common/selecting_ispell_wordlist_default -Owners: dictionaries-common - -Name: discover/install_hw_packages -Template: discover/install_hw_packages -Owners: discover - -Name: exim4-base/drec -Template: exim4-base/drec -Owners: exim4-base - -Name: exim4-daemon-light/drec -Template: exim4-daemon-light/drec -Owners: exim4-daemon-light - -Name: exim4/dc_eximconfig_configtype -Template: exim4/dc_eximconfig_configtype -Value: local delivery only; not on a network -Owners: exim4-config - -Name: exim4/dc_local_interfaces -Template: exim4/dc_local_interfaces -Value: 127.0.0.1 ; ::1 -Owners: exim4-config - -Name: exim4/dc_localdelivery -Template: exim4/dc_localdelivery -Value: mbox format in /var/mail/ -Owners: exim4-config - -Name: exim4/dc_minimaldns -Template: exim4/dc_minimaldns -Value: false -Owners: exim4-config - -Name: exim4/dc_other_hostnames -Template: exim4/dc_other_hostnames -Value: ahha.citrite.net -Owners: exim4-config -Flags: mailname -Variables: - fqdn = ahha.citrite.net - -Name: exim4/dc_postmaster -Template: exim4/dc_postmaster -Value: vagrant -Owners: exim4-config - -Name: exim4/dc_readhost -Template: exim4/dc_readhost -Owners: exim4-config - -Name: exim4/dc_relay_domains -Template: exim4/dc_relay_domains -Owners: exim4-config - -Name: exim4/dc_relay_nets -Template: exim4/dc_relay_nets -Owners: exim4-config - -Name: exim4/dc_smarthost -Template: exim4/dc_smarthost -Owners: exim4-config - -Name: exim4/drec -Template: exim4/drec -Owners: exim4 - -Name: exim4/exim4-config-title -Template: exim4/exim4-config-title -Owners: exim4-config - -Name: exim4/hide_mailname -Template: exim4/hide_mailname -Owners: exim4-config - -Name: exim4/mailname -Template: exim4/mailname -Value: ahha.citrite.net -Owners: 
exim4-config - -Name: exim4/no_config -Template: exim4/no_config -Owners: exim4-config - -Name: exim4/purge_spool -Template: exim4/purge_spool -Owners: exim4-base - -Name: exim4/use_split_config -Template: exim4/use_split_config -Value: false -Owners: exim4-config - -Name: glibc/disable-screensaver -Template: glibc/disable-screensaver -Owners: libc6, libc6:i386 - -Name: glibc/restart-failed -Template: glibc/restart-failed -Owners: libc6, libc6:i386 - -Name: glibc/restart-services -Template: glibc/restart-services -Owners: libc6, libc6:i386 - -Name: glibc/upgrade -Template: glibc/upgrade -Owners: libc6, libc6:i386 - -Name: grub-pc/chainload_from_menu.lst -Template: grub-pc/chainload_from_menu.lst -Owners: grub-pc - -Name: grub-pc/disk_description -Template: grub-pc/disk_description -Owners: grub-pc -Variables: - DEVICE = /dev/mapper/ahha-root - MODEL = ahha-root - SIZE = 1753 - -Name: grub-pc/install_devices -Template: grub-pc/install_devices -Value: /dev/disk/by-id/ata-VBOX_HARDDISK_VBe38481e3-55a686f1 -Owners: grub-pc -Flags: seen -Variables: - CHOICES = /dev/sda (2147 MB; VBOX_HARDDISK), - /dev/sda1 (254 MB; /boot), /dev/mapper/ahha-root (1753 MB; ahha-root) - RAW_CHOICES = /dev/disk/by-id/ata-VBOX_HARDDISK_VBe38481e3-55a686f1, /dev/disk/by-id/ata-VBOX_HARDDISK_VBe38481e3-55a686f1-part1, /dev/mapper/ahha-root - -Name: grub-pc/install_devices_disks_changed -Template: grub-pc/install_devices_disks_changed -Owners: grub-pc - -Name: grub-pc/install_devices_empty -Template: grub-pc/install_devices_empty -Value: false -Owners: grub-pc - -Name: grub-pc/install_devices_failed -Template: grub-pc/install_devices_failed -Owners: grub-pc - -Name: grub-pc/install_devices_failed_upgrade -Template: grub-pc/install_devices_failed_upgrade -Owners: grub-pc - -Name: grub-pc/kopt_extracted -Template: grub-pc/kopt_extracted -Owners: grub-pc - -Name: grub-pc/mixed_legacy_and_grub2 -Template: grub-pc/mixed_legacy_and_grub2 -Owners: grub-pc - -Name: grub-pc/partition_description 
-Template: grub-pc/partition_description -Owners: grub-pc -Variables: - DEVICE = /dev/sda1 - PATH = /boot - SIZE = 254 - -Name: grub-pc/postrm_purge_boot_grub -Template: grub-pc/postrm_purge_boot_grub -Owners: grub-pc - -Name: grub2/device_map_regenerated -Template: grub2/device_map_regenerated -Owners: grub-pc - -Name: grub2/kfreebsd_cmdline -Template: grub2/kfreebsd_cmdline -Owners: grub-pc - -Name: grub2/kfreebsd_cmdline_default -Template: grub2/kfreebsd_cmdline_default -Owners: grub-pc - -Name: grub2/linux_cmdline -Template: grub2/linux_cmdline -Value: debian-installer=en_US -Owners: grub-pc -Flags: seen - -Name: grub2/linux_cmdline_default -Template: grub2/linux_cmdline_default -Value: quiet -Owners: grub-pc -Flags: seen - -Name: iamerican/languages -Template: iamerican/languages -Owners: iamerican - -Name: ibritish/languages -Template: ibritish/languages -Owners: ibritish - -Name: keyboard-configuration/altgr -Template: keyboard-configuration/altgr -Value: The default for the keyboard layout -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/compose -Template: keyboard-configuration/compose -Value: No compose key -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/ctrl_alt_bksp -Template: keyboard-configuration/ctrl_alt_bksp -Value: false -Owners: d-i, keyboard-configuration - -Name: keyboard-configuration/layout -Template: keyboard-configuration/layout -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/layoutcode -Template: keyboard-configuration/layoutcode -Value: us -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/model -Template: keyboard-configuration/model -Value: Generic 105-key (Intl) PC -Owners: d-i, keyboard-configuration -Flags: seen -Variables: - CHOICES = A4Tech KB-21, A4Tech KBS-8, A4Tech Wireless Desktop RFKB-23, Acer AirKey V, Acer C300, Acer Ferrari 4000, Acer Laptop, Advance Scorpius KI, Amiga, Apple, Apple Aluminium 
Keyboard (ANSI), Apple Aluminium Keyboard (ISO), Apple Aluminium Keyboard (JIS), Apple Laptop, Asus Laptop, Atari TT, Azona RF2300 wireless Internet Keyboard, BenQ X-Touch, BenQ X-Touch 730, BenQ X-Touch 800, Brother Internet Keyboard, BTC 5090, BTC 5113RF Multimedia, BTC 5126T, BTC 6301URF, BTC 9000, BTC 9000A, BTC 9001AH, BTC 9019U, BTC 9116U Mini Wireless Internet and Gaming, Cherry Blue Line CyBo@rd, Cherry Blue Line CyBo@rd (alternate option), Cherry B.UNLIMITED, Cherry CyBo@rd USB-Hub, Cherry CyMotion Expert, Cherry CyMotion Master Linux, Cherry CyMotion Master XPress, Chicony Internet Keyboard, Chicony KB-9885, Chicony KU-0108, Chicony KU-0420, Classmate PC, Compaq Easy Access Keyboard, Compaq Internet Keyboard (13 keys), Compaq Internet Keyboard (18 keys), Compaq Internet Keyboard (7 keys), Compaq iPaq Keyboard, Creative Desktop Wireless 7000, Dell, Dell 101-key PC, Dell Laptop/notebook Inspiron 6xxx/8xxx, Dell Laptop/notebook Precision M series, Dell Latitude series laptop, Dell Precision M65, Dell SK-8125, Dell SK-8135, Dell USB Multimedia Keyboard, Dexxa Wireless Desktop Keyboard, Diamond 9801 / 9802 series, DTK2000, Ennyah DKB-1008, Everex STEPnote, FL90, Fujitsu-Siemens Computers AMILO laptop, Generic 101-key PC, Generic 102-key (Intl) PC, Generic 104-key PC, Generic 105-key (Intl) PC, Genius Comfy KB-12e, Genius Comfy KB-16M / Genius MM Keyboard KWD-910, Genius Comfy KB-21e-Scroll, Genius KB-19e NB, Genius KKB-2050HS, Gyration, Happy Hacking Keyboard, Happy Hacking Keyboard for Mac, Hewlett-Packard Internet Keyboard, Hewlett-Packard Mini 110 Notebook, Hewlett-Packard nx9020, Hewlett-Packard Omnibook 500 FA, Hewlett-Packard Omnibook 5xx, Hewlett-Packard Omnibook 6000/6100, Hewlett-Packard Omnibook XE3 GC, Hewlett-Packard Omnibook XE3 GF, Hewlett-Packard Omnibook XT1000, Hewlett-Packard Pavilion dv5, Hewlett-Packard Pavilion ZT11xx, Hewlett-Packard SK-250x Multimedia Keyboard, Honeywell Euroboard, HTC Dream, Htc Dream phone, IBM Rapid Access, IBM Rapid 
Access II, IBM Space Saver, IBM ThinkPad 560Z/600/600E/A22E, IBM ThinkPad R60/T60/R61/T61, IBM ThinkPad Z60m/Z60t/Z61m/Z61t, Keytronic FlexPro, Kinesis, Laptop/notebook Compaq (eg. Armada) Laptop Keyboard, Laptop/notebook Compaq (eg. Presario) Internet Keyboard, Laptop/notebook eMachines m68xx, Logitech Access Keyboard, Logitech Cordless Desktop, Logitech Cordless Desktop (alternate option), Logitech Cordless Desktop EX110, Logitech Cordless Desktop iTouch, Logitech Cordless Desktop LX-300, Logitech Cordless Desktop Navigator, Logitech Cordless Desktop Optical, Logitech Cordless Desktop Pro (alternate option 2), Logitech Cordless Freedom/Desktop Navigator, Logitech diNovo Edge Keyboard, Logitech diNovo Keyboard, Logitech G15 extra keys via G15daemon, Logitech Generic Keyboard, Logitech Internet 350 Keyboard, Logitech Internet Keyboard, Logitech Internet Navigator Keyboard, Logitech iTouch, Logitech iTouch Cordless Keyboard (model Y-RB6), Logitech iTouch Internet Navigator Keyboard SE, Logitech iTouch Internet Navigator Keyboard SE (USB), Logitech Media Elite Keyboard, Logitech Ultra-X Cordless Media Desktop Keyboard, Logitech Ultra-X Keyboard, MacBook/MacBook Pro, MacBook/MacBook Pro (Intl), Macintosh, Macintosh Old, Memorex MX1998, Memorex MX2500 EZ-Access Keyboard, Memorex MX2750, Microsoft Comfort Curve Keyboard 2000, Microsoft Internet Keyboard, Microsoft Internet Keyboard Pro\, Swedish, Microsoft Natural, Microsoft Natural Keyboard Elite, Microsoft Natural Keyboard Pro OEM, Microsoft Natural Keyboard Pro / Microsoft Internet Keyboard Pro, Microsoft Natural Keyboard Pro USB / Microsoft Internet Keyboard Pro, Microsoft Natural Wireless Ergonomic Keyboard 4000, Microsoft Natural Wireless Ergonomic Keyboard 7000, Microsoft Office Keyboard, Microsoft Wireless Multimedia Keyboard 1.0A, Northgate OmniKey 101, OLPC, Ortek MCK-800 MM/Internet keyboard, PC-98xx Series, Propeller Voyager (KTEZ-1000), QTronix Scorpius 98N+, Samsung SDM 4500P, Samsung SDM 4510P, Sanwa 
Supply SKB-KG3, SILVERCREST Multimedia Wireless Keyboard, SK-1300, SK-2500, SK-6200, SK-7100, Sun Type 4, Sun Type 5, Sun Type 5/6, Super Power Multimedia Keyboard, SVEN Ergonomic 2500, SVEN Slim 303, Symplon PaceBook (tablet PC), Targa Visionary 811, Toshiba Satellite S3000, Trust Direct Access Keyboard, Trust Slimline, Trust Wireless Keyboard Classic, TypeMatrix EZ-Reach 2020, TypeMatrix EZ-Reach 2030 PS2, TypeMatrix EZ-Reach 2030 USB, TypeMatrix EZ-Reach 2030 USB (102/105:EU mode), TypeMatrix EZ-Reach 2030 USB (106:JP mode), Unitek KB-1925, ViewSonic KU-306 Internet Keyboard, Winbook Model XP5, Yahoo! Internet Keyboard - -Name: keyboard-configuration/modelcode -Template: keyboard-configuration/modelcode -Value: pc105 -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/optionscode -Template: keyboard-configuration/optionscode -Value: -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/other -Template: keyboard-configuration/other -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/store_defaults_in_debconf_db -Template: keyboard-configuration/store_defaults_in_debconf_db -Value: true -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/switch -Template: keyboard-configuration/switch -Value: No temporary switch -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/toggle -Template: keyboard-configuration/toggle -Value: No toggling -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/unsupported_config_layout -Template: keyboard-configuration/unsupported_config_layout -Value: true -Owners: d-i, keyboard-configuration - -Name: keyboard-configuration/unsupported_config_options -Template: keyboard-configuration/unsupported_config_options -Value: true -Owners: d-i, keyboard-configuration - -Name: keyboard-configuration/unsupported_layout -Template: keyboard-configuration/unsupported_layout 
-Value: true -Owners: d-i, keyboard-configuration - -Name: keyboard-configuration/unsupported_options -Template: keyboard-configuration/unsupported_options -Value: true -Owners: d-i, keyboard-configuration - -Name: keyboard-configuration/variant -Template: keyboard-configuration/variant -Value: English (US) -Owners: d-i, keyboard-configuration -Flags: seen -Variables: - CHOICES = English (US), English (US) - Cherokee, English (US) - English (classic Dvorak), English (US) - English (Colemak), English (US) - English (Dvorak), English (US) - English (Dvorak alternative international no dead keys), English (US) - English (Dvorak international with dead keys), English (US) - English (international AltGr dead keys), English (US) - English (layout toggle on multiply/divide key), English (US) - English (left handed Dvorak), English (US) - English (Macintosh), English (US) - English (programmer Dvorak), English (US) - English (right handed Dvorak), English (US) - English (US\, alternative international), English (US) - English (US\, international with dead keys), English (US) - English (US\, with euro on 5), English (US) - Russian (US\, phonetic), English (US) - Serbo-Croatian (US), Other - -Name: keyboard-configuration/variantcode -Template: keyboard-configuration/variantcode -Value: -Owners: d-i, keyboard-configuration -Flags: seen - -Name: keyboard-configuration/xkb-keymap -Template: keyboard-configuration/xkb-keymap -Value: us -Owners: d-i, keyboard-configuration -Flags: seen - -Name: libpam-modules/disable-screensaver -Template: libpam-modules/disable-screensaver -Owners: libpam-modules - -Name: libpam-runtime/conflicts -Template: libpam-runtime/conflicts -Owners: libpam-runtime - -Name: libpam-runtime/no_profiles_chosen -Template: libpam-runtime/no_profiles_chosen -Owners: libpam-runtime - -Name: libpam-runtime/override -Template: libpam-runtime/override -Value: false -Owners: libpam-runtime - -Name: libpam-runtime/profiles -Template: libpam-runtime/profiles -Value: 
unix -Owners: libpam-runtime -Variables: - profile_names = unix - profiles = Unix authentication - -Name: libpam-runtime/title -Template: libpam-runtime/title -Owners: libpam-runtime - -Name: libpam0g/restart-failed -Template: libpam0g/restart-failed -Owners: libpam0g:i386 - -Name: libpam0g/restart-services -Template: libpam0g/restart-services -Owners: libpam0g:i386 - -Name: libpam0g/xdm-needs-restart -Template: libpam0g/xdm-needs-restart -Owners: libpam0g:i386 - -Name: libraries/restart-without-asking -Template: libraries/restart-without-asking -Owners: libc6, libc6:i386, libpam0g:i386 - -Name: libssl1.0.0/restart-failed -Template: libssl1.0.0/restart-failed -Owners: libssl1.0.0:i386 - -Name: libssl1.0.0/restart-services -Template: libssl1.0.0/restart-services -Owners: libssl1.0.0:i386 - -Name: linux-base/disk-id-convert-auto -Template: linux-base/disk-id-convert-auto -Owners: linux-base - -Name: linux-base/disk-id-convert-plan -Template: linux-base/disk-id-convert-plan -Owners: linux-base - -Name: linux-base/disk-id-convert-plan-no-relabel -Template: linux-base/disk-id-convert-plan-no-relabel -Owners: linux-base - -Name: linux-base/disk-id-manual -Template: linux-base/disk-id-manual -Owners: linux-base - -Name: linux-base/disk-id-manual-boot-loader -Template: linux-base/disk-id-manual-boot-loader -Owners: linux-base - -Name: linux-base/disk-id-update-failed -Template: linux-base/disk-id-update-failed -Owners: linux-base - -Name: linux-base/do-bootloader-default-changed -Template: linux-base/do-bootloader-default-changed -Owners: linux-base - -Name: linux-image-3.2.0-4-686-pae/postinst/depmod-error-initrd-3.2.0-4-686-pae -Template: linux-image-3.2.0-4-686-pae/postinst/depmod-error-initrd-3.2.0-4-686-pae -Owners: linux-image-3.2.0-4-686-pae - -Name: linux-image-3.2.0-4-686-pae/postinst/ignoring-ramdisk -Template: linux-image-3.2.0-4-686-pae/postinst/ignoring-ramdisk -Owners: linux-image-3.2.0-4-686-pae - -Name: 
linux-image-3.2.0-4-686-pae/postinst/missing-firmware-3.2.0-4-686-pae -Template: linux-image-3.2.0-4-686-pae/postinst/missing-firmware-3.2.0-4-686-pae -Owners: linux-image-3.2.0-4-686-pae - -Name: linux-image-3.2.0-4-686-pae/prerm/removing-running-kernel-3.2.0-4-686-pae -Template: linux-image-3.2.0-4-686-pae/prerm/removing-running-kernel-3.2.0-4-686-pae -Owners: linux-image-3.2.0-4-686-pae - -Name: locales/default_environment_locale -Template: locales/default_environment_locale -Owners: locales - -Name: locales/locales_to_be_generated -Template: locales/locales_to_be_generated -Value: -Owners: locales -Variables: - locales = aa_DJ ISO-8859-1, aa_DJ.UTF-8 UTF-8, aa_ER UTF-8, aa_ER@saaho UTF-8, aa_ET UTF-8, af_ZA ISO-8859-1, af_ZA.UTF-8 UTF-8, am_ET UTF-8, an_ES ISO-8859-15, an_ES.UTF-8 UTF-8, ar_AE ISO-8859-6, ar_AE.UTF-8 UTF-8, ar_BH ISO-8859-6, ar_BH.UTF-8 UTF-8, ar_DZ ISO-8859-6, ar_DZ.UTF-8 UTF-8, ar_EG ISO-8859-6, ar_EG.UTF-8 UTF-8, ar_IN UTF-8, ar_IQ ISO-8859-6, ar_IQ.UTF-8 UTF-8, ar_JO ISO-8859-6, ar_JO.UTF-8 UTF-8, ar_KW ISO-8859-6, ar_KW.UTF-8 UTF-8, ar_LB ISO-8859-6, ar_LB.UTF-8 UTF-8, ar_LY ISO-8859-6, ar_LY.UTF-8 UTF-8, ar_MA ISO-8859-6, ar_MA.UTF-8 UTF-8, ar_OM ISO-8859-6, ar_OM.UTF-8 UTF-8, ar_QA ISO-8859-6, ar_QA.UTF-8 UTF-8, ar_SA ISO-8859-6, ar_SA.UTF-8 UTF-8, ar_SD ISO-8859-6, ar_SD.UTF-8 UTF-8, ar_SY ISO-8859-6, ar_SY.UTF-8 UTF-8, ar_TN ISO-8859-6, ar_TN.UTF-8 UTF-8, ar_YE ISO-8859-6, ar_YE.UTF-8 UTF-8, as_IN.UTF-8 UTF-8, ast_ES ISO-8859-15, ast_ES.UTF-8 UTF-8, az_AZ.UTF-8 UTF-8, be_BY CP1251, be_BY.UTF-8 UTF-8, be_BY@latin UTF-8, bem_ZM UTF-8, ber_DZ UTF-8, ber_MA UTF-8, bg_BG CP1251, bg_BG.UTF-8 UTF-8, bn_BD UTF-8, bn_IN UTF-8, bo_CN UTF-8, bo_IN UTF-8, br_FR ISO-8859-1, br_FR.UTF-8 UTF-8, br_FR@euro ISO-8859-15, bs_BA ISO-8859-2, bs_BA.UTF-8 UTF-8, byn_ER UTF-8, ca_AD ISO-8859-15, ca_AD.UTF-8 UTF-8, ca_ES ISO-8859-1, ca_ES.UTF-8 UTF-8, ca_ES.UTF-8@valencia UTF-8, ca_ES@euro ISO-8859-15, ca_ES@valencia ISO-8859-15, ca_FR ISO-8859-15, ca_FR.UTF-8 
UTF-8, ca_IT ISO-8859-15, ca_IT.UTF-8 UTF-8, crh_UA UTF-8, cs_CZ ISO-8859-2, cs_CZ.UTF-8 UTF-8, csb_PL UTF-8, cv_RU UTF-8, cy_GB ISO-8859-14, cy_GB.UTF-8 UTF-8, da_DK ISO-8859-1, da_DK.UTF-8 UTF-8, de_AT ISO-8859-1, de_AT.UTF-8 UTF-8, de_AT@euro ISO-8859-15, de_BE ISO-8859-1, de_BE.UTF-8 UTF-8, de_BE@euro ISO-8859-15, de_CH ISO-8859-1, de_CH.UTF-8 UTF-8, de_DE ISO-8859-1, de_DE.UTF-8 UTF-8, de_DE@euro ISO-8859-15, de_LI.UTF-8 UTF-8, de_LU ISO-8859-1, de_LU.UTF-8 UTF-8, de_LU@euro ISO-8859-15, dv_MV UTF-8, dz_BT UTF-8, el_CY ISO-8859-7, el_CY.UTF-8 UTF-8, el_GR ISO-8859-7, el_GR.UTF-8 UTF-8, en_AG UTF-8, en_AU ISO-8859-1, en_AU.UTF-8 UTF-8, en_BW ISO-8859-1, en_BW.UTF-8 UTF-8, en_CA ISO-8859-1, en_CA.UTF-8 UTF-8, en_DK ISO-8859-1, en_DK.ISO-8859-15 ISO-8859-15, en_DK.UTF-8 UTF-8, en_GB ISO-8859-1, en_GB.ISO-8859-15 ISO-8859-15, en_GB.UTF-8 UTF-8, en_HK ISO-8859-1, en_HK.UTF-8 UTF-8, en_IE ISO-8859-1, en_IE.UTF-8 UTF-8, en_IE@euro ISO-8859-15, en_IN UTF-8, en_NG UTF-8, en_NZ ISO-8859-1, en_NZ.UTF-8 UTF-8, en_PH ISO-8859-1, en_PH.UTF-8 UTF-8, en_SG ISO-8859-1, en_SG.UTF-8 UTF-8, en_US ISO-8859-1, en_US.ISO-8859-15 ISO-8859-15, en_US.UTF-8 UTF-8, en_ZA ISO-8859-1, en_ZA.UTF-8 UTF-8, en_ZM UTF-8, en_ZW ISO-8859-1, en_ZW.UTF-8 UTF-8, eo ISO-8859-3, eo.UTF-8 UTF-8, es_AR ISO-8859-1, es_AR.UTF-8 UTF-8, es_BO ISO-8859-1, es_BO.UTF-8 UTF-8, es_CL ISO-8859-1, es_CL.UTF-8 UTF-8, es_CO ISO-8859-1, es_CO.UTF-8 UTF-8, es_CR ISO-8859-1, es_CR.UTF-8 UTF-8, es_DO ISO-8859-1, es_DO.UTF-8 UTF-8, es_EC ISO-8859-1, es_EC.UTF-8 UTF-8, es_ES ISO-8859-1, es_ES.UTF-8 UTF-8, es_ES@euro ISO-8859-15, es_GT ISO-8859-1, es_GT.UTF-8 UTF-8, es_HN ISO-8859-1, es_HN.UTF-8 UTF-8, es_MX ISO-8859-1, es_MX.UTF-8 UTF-8, es_NI ISO-8859-1, es_NI.UTF-8 UTF-8, es_PA ISO-8859-1, es_PA.UTF-8 UTF-8, es_PE ISO-8859-1, es_PE.UTF-8 UTF-8, es_PR ISO-8859-1, es_PR.UTF-8 UTF-8, es_PY ISO-8859-1, es_PY.UTF-8 UTF-8, es_SV ISO-8859-1, es_SV.UTF-8 UTF-8, es_US ISO-8859-1, es_US.UTF-8 UTF-8, es_UY ISO-8859-1, es_UY.UTF-8 
UTF-8, es_VE ISO-8859-1, es_VE.UTF-8 UTF-8, et_EE ISO-8859-1, et_EE.ISO-8859-15 ISO-8859-15, et_EE.UTF-8 UTF-8, eu_ES ISO-8859-1, eu_ES.UTF-8 UTF-8, eu_ES@euro ISO-8859-15, eu_FR ISO-8859-1, eu_FR.UTF-8 UTF-8, eu_FR@euro ISO-8859-15, fa_IR UTF-8, ff_SN UTF-8, fi_FI ISO-8859-1, fi_FI.UTF-8 UTF-8, fi_FI@euro ISO-8859-15, fil_PH UTF-8, fo_FO ISO-8859-1, fo_FO.UTF-8 UTF-8, fr_BE ISO-8859-1, fr_BE.UTF-8 UTF-8, fr_BE@euro ISO-8859-15, fr_CA ISO-8859-1, fr_CA.UTF-8 UTF-8, fr_CH ISO-8859-1, fr_CH.UTF-8 UTF-8, fr_FR ISO-8859-1, fr_FR.UTF-8 UTF-8, fr_FR@euro ISO-8859-15, fr_LU ISO-8859-1, fr_LU.UTF-8 UTF-8, fr_LU@euro ISO-8859-15, fur_IT UTF-8, fy_DE UTF-8, fy_NL UTF-8, ga_IE ISO-8859-1, ga_IE.UTF-8 UTF-8, ga_IE@euro ISO-8859-15, gd_GB ISO-8859-15, gd_GB.UTF-8 UTF-8, gez_ER UTF-8, gez_ER@abegede UTF-8, gez_ET UTF-8, gez_ET@abegede UTF-8, gl_ES ISO-8859-1, gl_ES.UTF-8 UTF-8, gl_ES@euro ISO-8859-15, gu_IN UTF-8, gv_GB ISO-8859-1, gv_GB.UTF-8 UTF-8, ha_NG UTF-8, he_IL ISO-8859-8, he_IL.UTF-8 UTF-8, hi_IN UTF-8, hne_IN UTF-8, hr_HR ISO-8859-2, hr_HR.UTF-8 UTF-8, hsb_DE ISO-8859-2, hsb_DE.UTF-8 UTF-8, ht_HT UTF-8, hu_HU ISO-8859-2, hu_HU.UTF-8 UTF-8, hy_AM UTF-8, hy_AM.ARMSCII-8 ARMSCII-8, ia UTF-8, id_ID ISO-8859-1, id_ID.UTF-8 UTF-8, ig_NG UTF-8, ik_CA UTF-8, is_IS ISO-8859-1, is_IS.UTF-8 UTF-8, it_CH ISO-8859-1, it_CH.UTF-8 UTF-8, it_IT ISO-8859-1, it_IT.UTF-8 UTF-8, it_IT@euro ISO-8859-15, iu_CA UTF-8, iw_IL ISO-8859-8, iw_IL.UTF-8 UTF-8, ja_JP.EUC-JP EUC-JP, ja_JP.UTF-8 UTF-8, ka_GE GEORGIAN-PS, ka_GE.UTF-8 UTF-8, kk_KZ PT154, kk_KZ RK1048, kk_KZ.UTF-8 UTF-8, kl_GL ISO-8859-1, kl_GL.UTF-8 UTF-8, km_KH UTF-8, kn_IN UTF-8, ko_KR.EUC-KR EUC-KR, ko_KR.UTF-8 UTF-8, kok_IN UTF-8, ks_IN UTF-8, ks_IN@devanagari UTF-8, ku_TR ISO-8859-9, ku_TR.UTF-8 UTF-8, kw_GB ISO-8859-1, kw_GB.UTF-8 UTF-8, ky_KG UTF-8, lg_UG ISO-8859-10, lg_UG.UTF-8 UTF-8, li_BE UTF-8, li_NL UTF-8, lo_LA UTF-8, lt_LT ISO-8859-13, lt_LT.UTF-8 UTF-8, lv_LV ISO-8859-13, lv_LV.UTF-8 UTF-8, mai_IN UTF-8, mg_MG 
ISO-8859-15, mg_MG.UTF-8 UTF-8, mi_NZ ISO-8859-13, mi_NZ.UTF-8 UTF-8, mk_MK ISO-8859-5, mk_MK.UTF-8 UTF-8, ml_IN UTF-8, mn_MN UTF-8, mr_IN UTF-8, ms_MY ISO-8859-1, ms_MY.UTF-8 UTF-8, mt_MT ISO-8859-3, mt_MT.UTF-8 UTF-8, my_MM UTF-8, nan_TW@latin UTF-8, nb_NO ISO-8859-1, nb_NO.UTF-8 UTF-8, nds_DE UTF-8, nds_NL UTF-8, ne_NP UTF-8, nl_AW UTF-8, nl_BE ISO-8859-1, nl_BE.UTF-8 UTF-8, nl_BE@euro ISO-8859-15, nl_NL ISO-8859-1, nl_NL.UTF-8 UTF-8, nl_NL@euro ISO-8859-15, nn_NO ISO-8859-1, nn_NO.UTF-8 UTF-8, nr_ZA UTF-8, nso_ZA UTF-8, oc_FR ISO-8859-1, oc_FR.UTF-8 UTF-8, om_ET UTF-8, om_KE ISO-8859-1, om_KE.UTF-8 UTF-8, or_IN UTF-8, os_RU UTF-8, pa_IN UTF-8, pa_PK UTF-8, pap_AN UTF-8, pl_PL ISO-8859-2, pl_PL.UTF-8 UTF-8, ps_AF UTF-8, pt_BR ISO-8859-1, pt_BR.UTF-8 UTF-8, pt_PT ISO-8859-1, pt_PT.UTF-8 UTF-8, pt_PT@euro ISO-8859-15, ro_RO ISO-8859-2, ro_RO.UTF-8 UTF-8, ru_RU ISO-8859-5, ru_RU.CP1251 CP1251, ru_RU.KOI8-R KOI8-R, ru_RU.UTF-8 UTF-8, ru_UA KOI8-U, ru_UA.UTF-8 UTF-8, rw_RW UTF-8, sa_IN UTF-8, sc_IT UTF-8, sd_IN UTF-8, sd_IN@devanagari UTF-8, se_NO UTF-8, shs_CA UTF-8, si_LK UTF-8, sid_ET UTF-8, sk_SK ISO-8859-2, sk_SK.UTF-8 UTF-8, sl_SI ISO-8859-2, sl_SI.UTF-8 UTF-8, so_DJ ISO-8859-1, so_DJ.UTF-8 UTF-8, so_ET UTF-8, so_KE ISO-8859-1, so_KE.UTF-8 UTF-8, so_SO ISO-8859-1, so_SO.UTF-8 UTF-8, sq_AL ISO-8859-1, sq_AL.UTF-8 UTF-8, sq_MK UTF-8, sr_ME UTF-8, sr_RS UTF-8, sr_RS@latin UTF-8, ss_ZA UTF-8, st_ZA ISO-8859-1, st_ZA.UTF-8 UTF-8, sv_FI ISO-8859-1, sv_FI.UTF-8 UTF-8, sv_FI@euro ISO-8859-15, sv_SE ISO-8859-1, sv_SE.ISO-8859-15 ISO-8859-15, sv_SE.UTF-8 UTF-8, sw_KE UTF-8, sw_TZ UTF-8, ta_IN UTF-8, te_IN UTF-8, tg_TJ KOI8-T, tg_TJ.UTF-8 UTF-8, th_TH TIS-620, th_TH.UTF-8 UTF-8, ti_ER UTF-8, ti_ET UTF-8, tig_ER UTF-8, tk_TM UTF-8, tl_PH ISO-8859-1, tl_PH.UTF-8 UTF-8, tn_ZA UTF-8, tr_CY ISO-8859-9, tr_CY.UTF-8 UTF-8, tr_TR ISO-8859-9, tr_TR.UTF-8 UTF-8, ts_ZA UTF-8, tt_RU.UTF-8 UTF-8, tt_RU.UTF-8@iqtelif UTF-8, ug_CN UTF-8, uk_UA KOI8-U, uk_UA.UTF-8 UTF-8, ur_PK UTF-8, 
uz_UZ ISO-8859-1, uz_UZ.UTF-8 UTF-8, uz_UZ@cyrillic UTF-8, ve_ZA UTF-8, vi_VN UTF-8, vi_VN.TCVN TCVN5712-1, wa_BE ISO-8859-1, wa_BE.UTF-8 UTF-8, wa_BE@euro ISO-8859-15, wo_SN UTF-8, xh_ZA ISO-8859-1, xh_ZA.UTF-8 UTF-8, yi_US CP1255, yi_US.UTF-8 UTF-8, yo_NG UTF-8, zh_CN GB2312, zh_CN.GB18030 GB18030, zh_CN.GBK GBK, zh_CN.UTF-8 UTF-8, zh_HK BIG5-HKSCS, zh_HK.UTF-8 UTF-8, zh_SG GB2312, zh_SG.GBK GBK, zh_SG.UTF-8 UTF-8, zh_TW BIG5, zh_TW.EUC-TW EUC-TW, zh_TW.UTF-8 UTF-8, zu_ZA ISO-8859-1, zu_ZA.UTF-8 UTF-8 - -Name: make-ssl-cert/altname -Template: make-ssl-cert/altname -Owners: ssl-cert - -Name: make-ssl-cert/hostname -Template: make-ssl-cert/hostname -Owners: ssl-cert - -Name: make-ssl-cert/title -Template: make-ssl-cert/title -Owners: ssl-cert - -Name: make-ssl-cert/vulnerable_prng -Template: make-ssl-cert/vulnerable_prng -Owners: ssl-cert - -Name: man-db/auto-update -Template: man-db/auto-update -Owners: man-db - -Name: man-db/install-setuid -Template: man-db/install-setuid -Value: false -Owners: man-db - -Name: openswan/existing_x509_certificate_filename -Template: openswan/existing_x509_certificate_filename -Owners: openswan - -Name: openswan/existing_x509_key_filename -Template: openswan/existing_x509_key_filename -Owners: openswan - -Name: openswan/existing_x509_rootca_filename -Template: openswan/existing_x509_rootca_filename -Owners: openswan - -Name: openswan/how_to_get_x509_certificate -Template: openswan/how_to_get_x509_certificate -Owners: openswan - -Name: openswan/install_x509_certificate -Template: openswan/install_x509_certificate -Value: false -Owners: openswan -Flags: seen - -Name: openswan/no-oe_include_file -Template: openswan/no-oe_include_file -Owners: openswan - -Name: openswan/restart -Template: openswan/restart -Value: true -Owners: openswan - -Name: openswan/rsa_key_length -Template: openswan/rsa_key_length -Owners: openswan - -Name: openswan/runlevel_changes -Template: openswan/runlevel_changes -Owners: openswan - -Name: 
openswan/x509_common_name -Template: openswan/x509_common_name -Owners: openswan - -Name: openswan/x509_country_code -Template: openswan/x509_country_code -Owners: openswan - -Name: openswan/x509_email_address -Template: openswan/x509_email_address -Owners: openswan - -Name: openswan/x509_locality_name -Template: openswan/x509_locality_name -Owners: openswan - -Name: openswan/x509_organization_name -Template: openswan/x509_organization_name -Owners: openswan - -Name: openswan/x509_organizational_unit -Template: openswan/x509_organizational_unit -Owners: openswan - -Name: openswan/x509_self_signed -Template: openswan/x509_self_signed -Owners: openswan - -Name: openswan/x509_state_name -Template: openswan/x509_state_name -Owners: openswan - -Name: passwd/username -Template: passwd/username -Value: vagrant -Owners: user-setup-udeb -Flags: seen - -Name: shared/packages-ispell -Template: shared/packages-ispell -Owners: iamerican, ibritish - -Name: shared/packages-wordlist -Template: shared/packages-wordlist -Owners: wamerican - -Name: ssh/disable_cr_auth -Template: ssh/disable_cr_auth -Owners: openssh-server - -Name: ssh/encrypted_host_key_but_no_keygen -Template: ssh/encrypted_host_key_but_no_keygen -Owners: openssh-server - -Name: ssh/use_old_init_script -Template: ssh/use_old_init_script -Value: true -Owners: openssh-server -Flags: seen - -Name: ssh/vulnerable_host_keys -Template: ssh/vulnerable_host_keys -Owners: openssh-server - -Name: sysstat/enable -Template: sysstat/enable -Value: true -Owners: sysstat -Flags: seen - -Name: sysstat/remove_files -Template: sysstat/remove_files -Value: true -Owners: sysstat - -Name: tasksel/desktop -Template: tasksel/desktop -Owners: tasksel - -Name: tasksel/first -Template: tasksel/first -Value: ssh-server -Owners: tasksel -Variables: - CHOICES = Debian desktop environment, Web server, Print server, SQL database, DNS Server, File server, Mail server, SSH server, Laptop, Standard system utilities - CHOICES_C = desktop, web-server, 
print-server, database-server, dns-server, file-server, mail-server, ssh-server, laptop, standard - -Name: tasksel/tasks -Template: tasksel/tasks -Owners: tasksel - -Name: tasksel/title -Template: tasksel/title -Owners: tasksel - -Name: tzdata/Areas -Template: tzdata/Areas -Value: Etc -Owners: tzdata -Flags: seen - -Name: tzdata/Zones/Africa -Template: tzdata/Zones/Africa -Owners: tzdata - -Name: tzdata/Zones/America -Template: tzdata/Zones/America -Owners: tzdata - -Name: tzdata/Zones/Antarctica -Template: tzdata/Zones/Antarctica -Owners: tzdata - -Name: tzdata/Zones/Arctic -Template: tzdata/Zones/Arctic -Owners: tzdata - -Name: tzdata/Zones/Asia -Template: tzdata/Zones/Asia -Owners: tzdata - -Name: tzdata/Zones/Atlantic -Template: tzdata/Zones/Atlantic -Owners: tzdata - -Name: tzdata/Zones/Australia -Template: tzdata/Zones/Australia -Owners: tzdata - -Name: tzdata/Zones/Etc -Template: tzdata/Zones/Etc -Value: UTC -Owners: tzdata -Flags: seen - -Name: tzdata/Zones/Europe -Template: tzdata/Zones/Europe -Owners: tzdata - -Name: tzdata/Zones/Indian -Template: tzdata/Zones/Indian -Owners: tzdata - -Name: tzdata/Zones/Pacific -Template: tzdata/Zones/Pacific -Owners: tzdata - -Name: tzdata/Zones/SystemV -Template: tzdata/Zones/SystemV -Owners: tzdata - -Name: tzdata/Zones/US -Template: tzdata/Zones/US -Owners: tzdata - -Name: ucf/changeprompt -Template: ucf/changeprompt -Owners: ucf - -Name: ucf/changeprompt_threeway -Template: ucf/changeprompt_threeway -Owners: ucf - -Name: ucf/conflicts_found -Template: ucf/conflicts_found -Owners: ucf - -Name: ucf/show_diff -Template: ucf/show_diff -Owners: ucf - -Name: ucf/title -Template: ucf/title -Owners: ucf - -Name: udev/new_kernel_needed -Template: udev/new_kernel_needed -Owners: udev - -Name: udev/reboot_needed -Template: udev/reboot_needed -Owners: udev - -Name: udev/sysfs_deprecated_incompatibility -Template: udev/sysfs_deprecated_incompatibility -Owners: udev - -Name: udev/title/upgrade -Template: udev/title/upgrade 
-Owners: udev - -Name: wamerican/languages -Template: wamerican/languages -Owners: wamerican - From c8a2eb5387faa2c359eaab6a4f2d9aaaa0b8238d Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Feb 2013 15:30:22 +0530 Subject: [PATCH 084/486] CLOUDSTACK-1312: Remove remaining db schema diversions since 4.0 Signed-off-by: Rohit Yadav --- setup/db/create-schema.sql | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index e59fb7d68f8..7361681da47 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -1634,7 +1634,6 @@ CREATE TABLE `cloud`.`hypervisor_capabilities` ( `max_guests_limit` bigint unsigned DEFAULT 50, `security_group_enabled` int(1) unsigned DEFAULT 1 COMMENT 'Is security group supported', `max_data_volumes_limit` int unsigned DEFAULT 6 COMMENT 'Max. data volumes per VM supported by hypervisor', - `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. hosts in cluster supported by hypervisor', PRIMARY KEY (`id`), CONSTRAINT `uc_hypervisor_capabilities__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; @@ -1646,11 +1645,10 @@ INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 SP2', 50, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0', 50, 1, 13); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit) VALUES ('XenServer', '6.0.2', 50, 1, 13); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) 
VALUES ('VMware', 'default', 128, 0, 32); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '4.0', 128, 0, 32); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '4.1', 128, 0, 32); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.0', 128, 0, 32); -INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', 'default', 128, 0); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.0', 128, 0); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '4.1', 128, 0); +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', '5.0', 128, 0); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('KVM', 'default', 50, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('Ovm', 'default', 25, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('Ovm', '2.3', 25, 1); From 
35b83b2adcc4bae88a13ba48d7906c82d45322f6 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Feb 2013 16:01:14 +0530 Subject: [PATCH 085/486] CLOUDSTACK-1317: In DatabaseUpgradeChecker, add upgrade path to 4.2.0 Signed-off-by: Rohit Yadav --- .../cloud/upgrade/DatabaseUpgradeChecker.java | 45 ++++++++++--------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index f48709452b1..00b4b043b64 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -57,6 +57,7 @@ import com.cloud.upgrade.dao.Upgrade301to302; import com.cloud.upgrade.dao.Upgrade302to40; import com.cloud.upgrade.dao.Upgrade30to301; import com.cloud.upgrade.dao.Upgrade40to41; +import com.cloud.upgrade.dao.Upgrade410to420; import com.cloud.upgrade.dao.UpgradeSnapshot217to224; import com.cloud.upgrade.dao.UpgradeSnapshot223to224; import com.cloud.upgrade.dao.VersionDao; @@ -83,83 +84,85 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.1.8", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new 
Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2214to30(), new 
Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new 
Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30() - , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.11", new DbUpgrade[] { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + 
new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.12", new DbUpgrade[] { new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.13", new DbUpgrade[] { new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.14", new DbUpgrade[] { new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40(), new Upgrade40to41() }); + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41() }); + _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40(), new Upgrade40to41() }); + _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41() }); + _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + + _upgradeMap.put("4.1.0", new DbUpgrade[] { new Upgrade410to420() }); } protected void runScript(Connection 
conn, File file) { From 1121977513ce2f911b489e7b4e89cbaaa142abe9 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Feb 2013 16:15:59 +0530 Subject: [PATCH 086/486] packaging: Don't depend on cloudstack snapshot version, copy using wildcard path Signed-off-by: Rohit Yadav --- debian/rules | 2 +- packaging/centos63/cloud.spec | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/rules b/debian/rules index 36b611ddc04..69fba7a07ba 100755 --- a/debian/rules +++ b/debian/rules @@ -60,7 +60,7 @@ install: mkdir -p debian/tmp/usr/share/cloud/management/webapps/client cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloud/setup/ - cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* debian/tmp/usr/share/cloud/management/webapps/client/ + cp -r client/target/cloud-client-ui-*-SNAPSHOT/* debian/tmp/usr/share/cloud/management/webapps/client/ dh_installdirs -s dh_install -s diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 5a15ce40929..1267f47c8f3 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -215,7 +215,7 @@ install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup -cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client +cp -r client/target/cloud-client-ui-*-SNAPSHOT/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client # Don't package the scripts in the management webapp rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/scripts From 80d58b6c73b72df906eb0d94e937df6fe5b5e3ba Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 20 Feb 2013 16:26:29 +0530 Subject: [PATCH 087/486] CLOUDSTACK-1317: Bump CloudStack 
package version to 4.2.0-SNAPSHOT in all poms Signed-off-by: Rohit Yadav --- agent/pom.xml | 2 +- api/pom.xml | 2 +- awsapi/pom.xml | 2 +- client/pom.xml | 2 +- core/pom.xml | 2 +- deps/XenServerJava/pom.xml | 2 +- developer/pom.xml | 2 +- engine/api/pom.xml | 2 +- engine/components-api/pom.xml | 2 +- engine/compute/pom.xml | 2 +- engine/network/pom.xml | 2 +- engine/orchestration/pom.xml | 2 +- engine/pom.xml | 2 +- engine/schema/pom.xml | 2 +- engine/service/pom.xml | 2 +- engine/storage/backup/pom.xml | 2 +- engine/storage/image/pom.xml | 2 +- engine/storage/imagemotion/pom.xml | 2 +- engine/storage/integration-test/pom.xml | 2 +- engine/storage/pom.xml | 2 +- engine/storage/snapshot/pom.xml | 2 +- engine/storage/volume/pom.xml | 2 +- framework/events/pom.xml | 2 +- framework/ipc/pom.xml | 6 +++--- framework/jobs/pom.xml | 2 +- framework/pom.xml | 2 +- framework/rest/pom.xml | 2 +- patches/pom.xml | 2 +- plugins/acl/static-role-based/pom.xml | 2 +- plugins/api/discovery/pom.xml | 2 +- plugins/api/rate-limit/pom.xml | 2 +- plugins/deployment-planners/user-concentrated-pod/pom.xml | 2 +- plugins/deployment-planners/user-dispersing/pom.xml | 2 +- plugins/event-bus/rabbitmq/pom.xml | 2 +- plugins/file-systems/netapp/pom.xml | 2 +- plugins/host-allocators/random/pom.xml | 2 +- plugins/hypervisors/baremetal/pom.xml | 2 +- plugins/hypervisors/kvm/pom.xml | 2 +- plugins/hypervisors/ovm/pom.xml | 2 +- plugins/hypervisors/simulator/pom.xml | 2 +- plugins/hypervisors/ucs/pom.xml | 4 ++-- plugins/hypervisors/vmware/pom.xml | 2 +- plugins/hypervisors/xen/pom.xml | 2 +- plugins/network-elements/bigswitch-vns/pom.xml | 2 +- plugins/network-elements/dns-notifier/pom.xml | 2 +- plugins/network-elements/elastic-loadbalancer/pom.xml | 2 +- plugins/network-elements/f5/pom.xml | 2 +- plugins/network-elements/juniper-srx/pom.xml | 2 +- plugins/network-elements/netscaler/pom.xml | 2 +- plugins/network-elements/nicira-nvp/pom.xml | 2 +- plugins/network-elements/ovs/pom.xml | 2 +- 
plugins/pom.xml | 2 +- plugins/storage-allocators/random/pom.xml | 2 +- plugins/storage/image/s3/pom.xml | 2 +- plugins/storage/volume/solidfire/pom.xml | 2 +- plugins/user-authenticators/ldap/pom.xml | 2 +- plugins/user-authenticators/md5/pom.xml | 2 +- plugins/user-authenticators/plain-text/pom.xml | 2 +- plugins/user-authenticators/sha256salted/pom.xml | 2 +- pom.xml | 2 +- server/pom.xml | 2 +- services/console-proxy/plugin/pom.xml | 2 +- services/console-proxy/pom.xml | 2 +- services/console-proxy/server/pom.xml | 2 +- services/pom.xml | 2 +- test/pom.xml | 2 +- tools/apidoc/pom.xml | 4 ++-- tools/cli/pom.xml | 2 +- tools/devcloud-kvm/pom.xml | 2 +- tools/devcloud/pom.xml | 2 +- tools/marvin/pom.xml | 2 +- tools/pom.xml | 2 +- usage/pom.xml | 2 +- utils/pom.xml | 2 +- vmware-base/pom.xml | 2 +- 75 files changed, 79 insertions(+), 79 deletions(-) diff --git a/agent/pom.xml b/agent/pom.xml index 810f33fc572..a3d071b1c6a 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/api/pom.xml b/api/pom.xml index 7461c67aaa2..8ca258f12e3 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/awsapi/pom.xml b/awsapi/pom.xml index 5a0ad7b0cb4..8e07f9e2124 100644 --- a/awsapi/pom.xml +++ b/awsapi/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/client/pom.xml b/client/pom.xml index 38d0c4ee943..be37cc10f7e 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/core/pom.xml b/core/pom.xml index 3d6356e561e..acc742fd964 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/deps/XenServerJava/pom.xml b/deps/XenServerJava/pom.xml index 18ba54f56a3..0f2cdf427c8 100644 --- 
a/deps/XenServerJava/pom.xml +++ b/deps/XenServerJava/pom.xml @@ -21,7 +21,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml xapi diff --git a/developer/pom.xml b/developer/pom.xml index b448e1bf0fd..a20200f2174 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -18,7 +18,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/engine/api/pom.xml b/engine/api/pom.xml index cbb83e46add..ca03e590286 100644 --- a/engine/api/pom.xml +++ b/engine/api/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/components-api/pom.xml b/engine/components-api/pom.xml index a4f8a44fa2a..6d6ad4d14b9 100644 --- a/engine/components-api/pom.xml +++ b/engine/components-api/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/compute/pom.xml b/engine/compute/pom.xml index 8fb3ab4fb2b..0875bb63f39 100644 --- a/engine/compute/pom.xml +++ b/engine/compute/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/network/pom.xml b/engine/network/pom.xml index 3396a42321c..60cb7e950ec 100644 --- a/engine/network/pom.xml +++ b/engine/network/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml index 95426eae9dd..c98373aa353 100755 --- a/engine/orchestration/pom.xml +++ b/engine/orchestration/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/pom.xml b/engine/pom.xml index 9a5f6d57987..1a3d896d50d 100644 --- a/engine/pom.xml +++ b/engine/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 
3e38a840571..da40d9cc4a3 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/service/pom.xml b/engine/service/pom.xml index 38ff81591f2..47c0edcefc0 100644 --- a/engine/service/pom.xml +++ b/engine/service/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT cloud-engine-service war diff --git a/engine/storage/backup/pom.xml b/engine/storage/backup/pom.xml index 8b4fd277055..019e09c7204 100644 --- a/engine/storage/backup/pom.xml +++ b/engine/storage/backup/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/image/pom.xml b/engine/storage/image/pom.xml index c05714b9b54..c4cf14ca9b5 100644 --- a/engine/storage/image/pom.xml +++ b/engine/storage/image/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/imagemotion/pom.xml b/engine/storage/imagemotion/pom.xml index 856b9d995e5..9a7f3e017a2 100644 --- a/engine/storage/imagemotion/pom.xml +++ b/engine/storage/imagemotion/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/integration-test/pom.xml b/engine/storage/integration-test/pom.xml index 782bc7d218e..368a4e301ab 100644 --- a/engine/storage/integration-test/pom.xml +++ b/engine/storage/integration-test/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/pom.xml b/engine/storage/pom.xml index e8a2eb75193..270fe47c743 100644 --- a/engine/storage/pom.xml +++ b/engine/storage/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index 
45439c4726a..723c21081ad 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/volume/pom.xml b/engine/storage/volume/pom.xml index e424cab5d0e..19357ab11e4 100644 --- a/engine/storage/volume/pom.xml +++ b/engine/storage/volume/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/framework/events/pom.xml b/framework/events/pom.xml index d21275a6744..7c788c35bbd 100644 --- a/framework/events/pom.xml +++ b/framework/events/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/framework/ipc/pom.xml b/framework/ipc/pom.xml index 6e01b7ec5d2..b7f4fcc78ce 100644 --- a/framework/ipc/pom.xml +++ b/framework/ipc/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml @@ -25,13 +25,13 @@ org.apache.cloudstack cloud-core - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT org.apache.cloudstack cloud-utils - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/framework/jobs/pom.xml b/framework/jobs/pom.xml index 8b12f5d4bb5..56490216f16 100644 --- a/framework/jobs/pom.xml +++ b/framework/jobs/pom.xml @@ -26,4 +26,4 @@ quartz 2.1.6 - \ No newline at end of file + diff --git a/framework/pom.xml b/framework/pom.xml index dafc0eb5a2d..4dfb409f04e 100644 --- a/framework/pom.xml +++ b/framework/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml index e8322e03e59..2a22155603a 100644 --- a/framework/rest/pom.xml +++ b/framework/rest/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml cloud-framework-rest diff --git a/patches/pom.xml b/patches/pom.xml index a662bcb9791..00eec02ddc9 100644 
--- a/patches/pom.xml +++ b/patches/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install diff --git a/plugins/acl/static-role-based/pom.xml b/plugins/acl/static-role-based/pom.xml index a2e8d05d48e..e40cecb9d65 100644 --- a/plugins/acl/static-role-based/pom.xml +++ b/plugins/acl/static-role-based/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/discovery/pom.xml b/plugins/api/discovery/pom.xml index 1cfc5c2eaf2..5d9ad75ea3a 100644 --- a/plugins/api/discovery/pom.xml +++ b/plugins/api/discovery/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/rate-limit/pom.xml b/plugins/api/rate-limit/pom.xml index 1f0330916a9..5645f0b3a32 100644 --- a/plugins/api/rate-limit/pom.xml +++ b/plugins/api/rate-limit/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/deployment-planners/user-concentrated-pod/pom.xml b/plugins/deployment-planners/user-concentrated-pod/pom.xml index 78829356170..df7c660630e 100644 --- a/plugins/deployment-planners/user-concentrated-pod/pom.xml +++ b/plugins/deployment-planners/user-concentrated-pod/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/deployment-planners/user-dispersing/pom.xml b/plugins/deployment-planners/user-dispersing/pom.xml index 33f6582e72f..0e5dbd58eb6 100644 --- a/plugins/deployment-planners/user-dispersing/pom.xml +++ b/plugins/deployment-planners/user-dispersing/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/rabbitmq/pom.xml b/plugins/event-bus/rabbitmq/pom.xml index 6a47983a9b5..bd4d0977c04 100644 --- a/plugins/event-bus/rabbitmq/pom.xml 
+++ b/plugins/event-bus/rabbitmq/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/file-systems/netapp/pom.xml b/plugins/file-systems/netapp/pom.xml index e1c8866d15d..0e6f427da36 100644 --- a/plugins/file-systems/netapp/pom.xml +++ b/plugins/file-systems/netapp/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/host-allocators/random/pom.xml b/plugins/host-allocators/random/pom.xml index ba7e1ae1e65..6fc76fe8dad 100644 --- a/plugins/host-allocators/random/pom.xml +++ b/plugins/host-allocators/random/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml index 600eedb1440..328bd963c91 100755 --- a/plugins/hypervisors/baremetal/pom.xml +++ b/plugins/hypervisors/baremetal/pom.xml @@ -21,7 +21,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml cloud-plugin-hypervisor-baremetal diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 8fc8f739460..579244014f9 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/ovm/pom.xml b/plugins/hypervisors/ovm/pom.xml index 5700c14d4eb..84beff0d4eb 100644 --- a/plugins/hypervisors/ovm/pom.xml +++ b/plugins/hypervisors/ovm/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/simulator/pom.xml b/plugins/hypervisors/simulator/pom.xml index a1ab9c08639..ff1664ad85f 100644 --- a/plugins/hypervisors/simulator/pom.xml +++ b/plugins/hypervisors/simulator/pom.xml @@ -22,7 +22,7 @@ 
org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack diff --git a/plugins/hypervisors/ucs/pom.xml b/plugins/hypervisors/ucs/pom.xml index 54cd68fd6b7..24bdc948e73 100755 --- a/plugins/hypervisors/ucs/pom.xml +++ b/plugins/hypervisors/ucs/pom.xml @@ -24,12 +24,12 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack cloud-plugin-hypervisor-ucs - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT Apache CloudStack Plugin - Hypervisor UCS http://maven.apache.org diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml index d990e89b388..5d954f331e6 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/xen/pom.xml b/plugins/hypervisors/xen/pom.xml index 0a57afca284..72d32f3029c 100644 --- a/plugins/hypervisors/xen/pom.xml +++ b/plugins/hypervisors/xen/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/bigswitch-vns/pom.xml b/plugins/network-elements/bigswitch-vns/pom.xml index 32650f31497..95a7692ce75 100644 --- a/plugins/network-elements/bigswitch-vns/pom.xml +++ b/plugins/network-elements/bigswitch-vns/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/dns-notifier/pom.xml b/plugins/network-elements/dns-notifier/pom.xml index ea35d788653..1dea4b933d1 100644 --- a/plugins/network-elements/dns-notifier/pom.xml +++ b/plugins/network-elements/dns-notifier/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack diff --git a/plugins/network-elements/elastic-loadbalancer/pom.xml 
b/plugins/network-elements/elastic-loadbalancer/pom.xml index dac500d8fd2..4d02a61e93e 100644 --- a/plugins/network-elements/elastic-loadbalancer/pom.xml +++ b/plugins/network-elements/elastic-loadbalancer/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/f5/pom.xml b/plugins/network-elements/f5/pom.xml index bf40332cfbb..d0f8133f2b4 100644 --- a/plugins/network-elements/f5/pom.xml +++ b/plugins/network-elements/f5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/juniper-srx/pom.xml b/plugins/network-elements/juniper-srx/pom.xml index 6040720da6e..28f2c29eda7 100644 --- a/plugins/network-elements/juniper-srx/pom.xml +++ b/plugins/network-elements/juniper-srx/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/netscaler/pom.xml b/plugins/network-elements/netscaler/pom.xml index b11009d8b1a..1eb73a236dc 100644 --- a/plugins/network-elements/netscaler/pom.xml +++ b/plugins/network-elements/netscaler/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/nicira-nvp/pom.xml b/plugins/network-elements/nicira-nvp/pom.xml index 70f85607e4a..4e05a4f9fae 100644 --- a/plugins/network-elements/nicira-nvp/pom.xml +++ b/plugins/network-elements/nicira-nvp/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/ovs/pom.xml b/plugins/network-elements/ovs/pom.xml index ab7ffab8465..7964b931e19 100644 --- a/plugins/network-elements/ovs/pom.xml +++ b/plugins/network-elements/ovs/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT 
../../pom.xml diff --git a/plugins/pom.xml b/plugins/pom.xml index 02459b4c1b5..88f617b4560 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install diff --git a/plugins/storage-allocators/random/pom.xml b/plugins/storage-allocators/random/pom.xml index b476d1de49f..06754ffc133 100644 --- a/plugins/storage-allocators/random/pom.xml +++ b/plugins/storage-allocators/random/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/storage/image/s3/pom.xml b/plugins/storage/image/s3/pom.xml index 4ea6517527b..7ab0d3e9301 100644 --- a/plugins/storage/image/s3/pom.xml +++ b/plugins/storage/image/s3/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/solidfire/pom.xml b/plugins/storage/volume/solidfire/pom.xml index cbbc54c368d..9db0685e91b 100644 --- a/plugins/storage/volume/solidfire/pom.xml +++ b/plugins/storage/volume/solidfire/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/user-authenticators/ldap/pom.xml b/plugins/user-authenticators/ldap/pom.xml index 05e9466d825..5c45f1177b6 100644 --- a/plugins/user-authenticators/ldap/pom.xml +++ b/plugins/user-authenticators/ldap/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/md5/pom.xml b/plugins/user-authenticators/md5/pom.xml index f358f8f1c21..605014ff953 100644 --- a/plugins/user-authenticators/md5/pom.xml +++ b/plugins/user-authenticators/md5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/plain-text/pom.xml b/plugins/user-authenticators/plain-text/pom.xml 
index 6406fa92489..60336ebb22d 100644 --- a/plugins/user-authenticators/plain-text/pom.xml +++ b/plugins/user-authenticators/plain-text/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/sha256salted/pom.xml b/plugins/user-authenticators/sha256salted/pom.xml index 3f530f76e17..22e97632e3d 100644 --- a/plugins/user-authenticators/sha256salted/pom.xml +++ b/plugins/user-authenticators/sha256salted/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/pom.xml b/pom.xml index 1ee889112ef..e83ff666a5b 100644 --- a/pom.xml +++ b/pom.xml @@ -28,7 +28,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT pom Apache CloudStack Apache CloudStack is an IaaS (“Infrastracture as a Serviceâ€) cloud orchestration platform. diff --git a/server/pom.xml b/server/pom.xml index 602ed5b977b..59d1b15b911 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/services/console-proxy/plugin/pom.xml b/services/console-proxy/plugin/pom.xml index 8cf3d76aa00..4cbe6d1c8f4 100644 --- a/services/console-proxy/plugin/pom.xml +++ b/services/console-proxy/plugin/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-service-console-proxy - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/pom.xml b/services/console-proxy/pom.xml index cd57526273d..1453e8cc264 100644 --- a/services/console-proxy/pom.xml +++ b/services/console-proxy/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-services - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/server/pom.xml b/services/console-proxy/server/pom.xml index 71e83933ca9..0df7559781e 100644 --- a/services/console-proxy/server/pom.xml +++ b/services/console-proxy/server/pom.xml @@ -23,7 +23,7 @@ 
org.apache.cloudstack cloud-service-console-proxy - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/pom.xml b/services/pom.xml index 26488513999..35ec2e186ba 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/test/pom.xml b/test/pom.xml index 4507e8cc916..d4b88326fa2 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index d75286b81b0..b324ad4b567 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -17,11 +17,11 @@ org.apache.cloudstack cloud-tools - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml - ../../client/target/cloud-client-ui-4.1.0-SNAPSHOT/WEB-INF/ + ../../client/target/cloud-client-ui-4.2.0-SNAPSHOT/WEB-INF/ ${client.config.base}/lib ${client.config.base}/classes diff --git a/tools/cli/pom.xml b/tools/cli/pom.xml index a145d814c38..b4820cd1e36 100644 --- a/tools/cli/pom.xml +++ b/tools/cli/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloud-tools - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index 713b12d87bd..9ae36ee6788 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index f45a86d26f6..d7b82c9d7fe 100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index f5561ca7722..80099be1ecb 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/tools/pom.xml 
b/tools/pom.xml index 982306f2cce..09961bb28d8 100644 --- a/tools/pom.xml +++ b/tools/pom.xml @@ -28,7 +28,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/usage/pom.xml b/usage/pom.xml index f7d9dd18c9e..1e880a28615 100644 --- a/usage/pom.xml +++ b/usage/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/utils/pom.xml b/utils/pom.xml index e4fd2b0f7e6..d4bafbdcaf0 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/vmware-base/pom.xml b/vmware-base/pom.xml index bd536fb574a..765182baee4 100644 --- a/vmware-base/pom.xml +++ b/vmware-base/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT From a223691e0264121fd0677924eba784430b2c6513 Mon Sep 17 00:00:00 2001 From: radhikap Date: Tue, 19 Feb 2013 15:53:16 +0530 Subject: [PATCH 088/486] cloudstack-806 Signed-off-by: radhikap --- docs/en-US/site-to-site-vpn.xml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/en-US/site-to-site-vpn.xml b/docs/en-US/site-to-site-vpn.xml index 6570aabe0bd..a5899eac4f1 100644 --- a/docs/en-US/site-to-site-vpn.xml +++ b/docs/en-US/site-to-site-vpn.xml @@ -55,6 +55,9 @@ Create VPN connection from the VPC VPN gateway to the customer VPN gateway. + Appropriate events are generated on the &PRODUCT; UI when status of a Site-to-Site VPN + connection changes from connected to disconnected, or vice versa. Currently no events are generated + when establishing a VPN connection fails or pending. 
From 476ad0be625de2eb712980e5fd9899ace6c1affe Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Wed, 20 Feb 2013 10:13:00 -0500 Subject: [PATCH 089/486] CLOUDSTACK-1341: Corrected the URL for the KEYS file Signed-off-by: Chip Childers --- docs/en-US/verifying-source.xml | 2 +- docs/pot/verifying-source.pot | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en-US/verifying-source.xml b/docs/en-US/verifying-source.xml index f8bd102379d..b445aa4bd67 100644 --- a/docs/en-US/verifying-source.xml +++ b/docs/en-US/verifying-source.xml @@ -32,7 +32,7 @@ Getting the KEYS To enable you to verify the GPG signature, you will need to download the - KEYS + KEYS file. diff --git a/docs/pot/verifying-source.pot b/docs/pot/verifying-source.pot index 9c1effe6bf5..9b2d586aacf 100644 --- a/docs/pot/verifying-source.pot +++ b/docs/pot/verifying-source.pot @@ -40,7 +40,7 @@ msgstr "" #. Tag: para #, no-c-format -msgid "To enable you to verify the GPG signature, you will need to download the KEYS file." +msgid "To enable you to verify the GPG signature, you will need to download the KEYS file." msgstr "" #. Tag: para From 525fe14c25877aeb0c49a6ca8aa9d18f62ff97e2 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Wed, 20 Feb 2013 10:57:52 -0800 Subject: [PATCH 090/486] Trust all certificate in vcenter connect. 
--- .../hypervisor/vmware/util/VmwareClient.java | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java index ae5f47fca22..f29a8c0077a 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -58,6 +58,36 @@ import com.vmware.vim25.ObjectContent; */ public class VmwareClient { + private static class TrustAllTrustManager implements javax.net.ssl.TrustManager, javax.net.ssl.X509TrustManager { + + @Override + public java.security.cert.X509Certificate[] getAcceptedIssuers() { + return null; + } + + @Override + public void checkServerTrusted(java.security.cert.X509Certificate[] certs, String authType) throws java.security.cert.CertificateException { + return; + } + + @Override + public void checkClientTrusted(java.security.cert.X509Certificate[] certs, String authType) throws java.security.cert.CertificateException { + return; + } + } + + private static void trustAllHttpsCertificates() throws Exception { + // Create a trust manager that does not validate certificate chains: + javax.net.ssl.TrustManager[] trustAllCerts = new javax.net.ssl.TrustManager[1]; + javax.net.ssl.TrustManager tm = new TrustAllTrustManager(); + trustAllCerts[0] = tm; + javax.net.ssl.SSLContext sc = javax.net.ssl.SSLContext.getInstance("SSL"); + javax.net.ssl.SSLSessionContext sslsc = sc.getServerSessionContext(); + sslsc.setSessionTimeout(0); + sc.init(null, trustAllCerts, null); + javax.net.ssl.HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); + } + private ManagedObjectReference SVC_INST_REF = new ManagedObjectReference(); private ManagedObjectReference propCollectorRef; private ManagedObjectReference rootRef; @@ -86,6 +116,7 @@ public class VmwareClient { return true; } }; + trustAllHttpsCertificates(); 
HttpsURLConnection.setDefaultHostnameVerifier(hv); SVC_INST_REF.setType(SVC_INST_NAME); From 0f3a3a9a6d84c97e53769cca3b76d07ad273c4c9 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Wed, 20 Feb 2013 10:58:18 -0800 Subject: [PATCH 091/486] Enable logging in systemvm. --- agent/src/com/cloud/agent/AgentShell.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java index 0e020935e90..7297ab285e5 100644 --- a/agent/src/com/cloud/agent/AgentShell.java +++ b/agent/src/com/cloud/agent/AgentShell.java @@ -48,6 +48,7 @@ import com.cloud.agent.dao.StorageComponent; import com.cloud.agent.dao.impl.PropertiesStorage; import com.cloud.host.Host; import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.ProcessUtil; import com.cloud.utils.PropertiesUtil; @@ -611,6 +612,8 @@ public class AgentShell implements IAgentShell { public static void main(String[] args) { try { + LogUtils.initLog4j("log4j-cloud.xml"); + AgentShell shell = new AgentShell(); shell.init(args); shell.start(); From ec1f48a4b2c31b1f7c3854b0b2ada5322a18e42f Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Wed, 20 Feb 2013 11:22:28 -0800 Subject: [PATCH 092/486] CLOUDSTACK-1331: Upgrade fails for a 2.2.14 Zone having multiple guest networks using network_tags and Public Vlan Changes: -We need to update the physical_network_id for public vlans. -Physical network ID should be retrieved from the network if present. 
--- .../cloud/network/guru/NiciraNvpGuestNetworkGuru.java | 7 ++++++- .../src/com/cloud/network/guru/GuestNetworkGuru.java | 7 ++++++- server/src/com/cloud/upgrade/dao/Upgrade2214to30.java | 11 +++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index 3ba6167a47d..b78d165ddd6 100644 --- a/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -151,7 +151,12 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { long dcId = dest.getDataCenter().getId(); //get physical network id - long physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + Long physicalNetworkId = network.getPhysicalNetworkId(); + + // physical network id can be null in Guest Network in Basic zone, so locate the physical network + if (physicalNetworkId == null) { + physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + } NetworkVO implemented = new NetworkVO(network.getTrafficType(), network.getMode(), network.getBroadcastDomainType(), network.getNetworkOfferingId(), State.Allocated, network.getDataCenterId(), physicalNetworkId); diff --git a/server/src/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/com/cloud/network/guru/GuestNetworkGuru.java index ab8a06958da..cc79d300788 100755 --- a/server/src/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/GuestNetworkGuru.java @@ -314,7 +314,12 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur long dcId = dest.getDataCenter().getId(); //get physical network id - long physicalNetworkId = 
_networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + Long physicalNetworkId = network.getPhysicalNetworkId(); + + // physical network id can be null in Guest Network in Basic zone, so locate the physical network + if (physicalNetworkId == null) { + physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + } NetworkVO implemented = new NetworkVO(network.getTrafficType(), network.getMode(), network.getBroadcastDomainType(), network.getNetworkOfferingId(), State.Allocated, diff --git a/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java index 88370c10c8c..c0f827e655e 100755 --- a/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -266,8 +266,19 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId,networkId); } pstmt3.close(); + + // add the reference to this physical network for the default public network entries in vlan / user_ip_address tables // add first physicalNetworkId to op_dc_vnet_alloc for this zone - just a placeholder since direct networking dont need this if(isFirstPhysicalNtwk){ + s_logger.debug("Adding PhysicalNetwork to default Public network entries in vlan and user_ip_address"); + pstmt3 = conn.prepareStatement("SELECT id FROM `cloud`.`networks` where traffic_type = 'Public' and data_center_id = "+zoneId); + ResultSet rsPubNet = pstmt3.executeQuery(); + if(rsPubNet.next()){ + Long publicNetworkId = rsPubNet.getLong(1); + addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId,publicNetworkId); + } + pstmt3.close(); + s_logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); String updateVnet = "UPDATE `cloud`.`op_dc_vnet_alloc` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = 
conn.prepareStatement(updateVnet); From 6c9eb6c11c26ef0ff6e2b5d0e6188079470f57e0 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Wed, 20 Feb 2013 14:50:36 -0500 Subject: [PATCH 093/486] Updating the java-ipv6 legal docs to reflect the newly added NOTICE and copyright statements for version 0.10 Signed-off-by: Chip Childers --- tools/whisker/LICENSE | 2 +- tools/whisker/NOTICE | 8 ++++++++ tools/whisker/descriptor-for-packaging.xml | 10 ++++++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/tools/whisker/LICENSE b/tools/whisker/LICENSE index 2f99021f9bc..2e17633f464 100644 --- a/tools/whisker/LICENSE +++ b/tools/whisker/LICENSE @@ -2995,7 +2995,7 @@ Within the target/jar directory jetty-util-6.1.26.jar from http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) - + Copyright 2013 Jan Van Besien from Jan Van Besien mailto:janvanbesien@gmail.com java-ipv6.jar from http://code.google.com/p/java-ipv6 diff --git a/tools/whisker/NOTICE b/tools/whisker/NOTICE index 18679580c47..69869906cf4 100644 --- a/tools/whisker/NOTICE +++ b/tools/whisker/NOTICE @@ -242,6 +242,14 @@ this distribution. + For + java-ipv6.jar + + + Java IPv6 + Copyright 2013 Jan Van Besien + + For cloud-jasypt-1.9.jar diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index 5f6a49ade62..ed2c9360112 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -2261,6 +2261,10 @@ The Apache Software Foundation (http://www.apache.org/). This product includes software Copyright University of Southampton IT Innovation Centre, 2006 (http://www.it-innovation.soton.ac.uk). 
+ + Java IPv6 + Copyright 2013 Jan Van Besien + - + +Copyright 2013 Jan Van Besien + - + From 70ac773f69a2d752cd6bf191fb4dd4e4f42f573e Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Wed, 20 Feb 2013 11:57:14 -0800 Subject: [PATCH 094/486] CLOUDSTACK-1344 Typo in use.external.dns setting description Fixed the typo. --- server/src/com/cloud/configuration/Config.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 4a0306a1986..c0c23b6b641 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -318,7 +318,7 @@ public enum Config { //disabling lb as cluster sync does not work with distributed cluster AgentLbEnable("Advanced", ManagementServer.class, Boolean.class, "agent.lb.enabled", "false", "If agent load balancing enabled in cluster setup", null), SubDomainNetworkAccess("Advanced", NetworkManager.class, Boolean.class, "allow.subdomain.network.access", "true", "Allow subdomains to use networks dedicated to their parent domain(s)", null), - UseExternalDnsServers("Advanced", NetworkManager.class, Boolean.class, "use.external.dns", "false", "Bypass internal dns, use exetrnal dns1 and dns2", null), + UseExternalDnsServers("Advanced", NetworkManager.class, Boolean.class, "use.external.dns", "false", "Bypass internal dns, use external dns1 and dns2", null), EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null), DnsBasicZoneUpdates("Advanced", NetworkManager.class, String.class, "network.dns.basiczone.updates", "all", "This parameter can take 2 values: all (default) and pod. 
It defines if DHCP/DNS requests have to be send to all dhcp servers in cloudstack, or only to the one in the same pod", "all,pod"), From b176654b19a88e62b0e138acf526e9d38d3db377 Mon Sep 17 00:00:00 2001 From: Alex Huang Date: Wed, 20 Feb 2013 12:37:21 -0800 Subject: [PATCH 095/486] CLOUDSTACK-1273: Fixed the problem with the paths on the systemvm --- .../console-proxy/server/systemvm-descriptor.xml | 14 +++++++------- tools/pom.xml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/services/console-proxy/server/systemvm-descriptor.xml b/services/console-proxy/server/systemvm-descriptor.xml index 7efe7fdfcb0..e34026bc3a6 100644 --- a/services/console-proxy/server/systemvm-descriptor.xml +++ b/services/console-proxy/server/systemvm-descriptor.xml @@ -31,13 +31,13 @@ - ../scripts/storage/secondary/ + ../../../scripts/storage/secondary/ scripts/storage/secondary 555 555 - ../scripts/storage/secondary/ + ../../../scripts/storage/secondary/ scripts/storage/secondary 555 555 @@ -60,7 +60,7 @@ - ../console-proxy/images + images images 555 555 @@ -72,7 +72,7 @@ - ../console-proxy/js + js js 555 555 @@ -81,7 +81,7 @@ - ../console-proxy/ui + ui ui 555 555 @@ -90,7 +90,7 @@ - ../console-proxy/css + css css 555 555 @@ -99,7 +99,7 @@ - ../console-proxy/certs + certs certs 555 555 diff --git a/tools/pom.xml b/tools/pom.xml index 09961bb28d8..2e31955891a 100644 --- a/tools/pom.xml +++ b/tools/pom.xml @@ -35,7 +35,7 @@ install - apidoc + marvin cli devcloud From 48ec4395bd6c681bdaba548ea9956c652759adc0 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Wed, 20 Feb 2013 15:13:46 -0800 Subject: [PATCH 096/486] CLOUDSTACK-1343: cloudstack UI - baremetal - enable baremetal providers who are selected in the network offering. 
--- ui/scripts/installWizard.js | 1 + ui/scripts/zoneWizard.js | 84 ++++++++++++++++++++++++++++++++----- 2 files changed, 75 insertions(+), 10 deletions(-) diff --git a/ui/scripts/installWizard.js b/ui/scripts/installWizard.js index dff12a0c0b7..a5e13b8c640 100644 --- a/ui/scripts/installWizard.js +++ b/ui/scripts/installWizard.js @@ -278,6 +278,7 @@ }, pluginFrom: { name: 'installWizard', + selectedNetworkOffering: selectedNetworkOffering, selectedNetworkOfferingHavingSG: true } }, diff --git a/ui/scripts/zoneWizard.js b/ui/scripts/zoneWizard.js index 60656e05850..8b5171bd9c2 100755 --- a/ui/scripts/zoneWizard.js +++ b/ui/scripts/zoneWizard.js @@ -23,7 +23,10 @@ var returnedPublicVlanIpRanges = []; //public VlanIpRanges returned by API var configurationUseLocalStorage = false; var skipGuestTrafficStep = false; - + var selectedNetworkOfferingObj = {}; + var baremetalProviders = ["BaremetalDhcpProvider", "BaremetalPxeProvider", "BaremetaUserdataProvider"]; + var selectedBaremetalProviders = []; + // Makes URL string for traffic label var trafficLabelParam = function(trafficTypeID, data, physicalNetworkID) { var zoneType = data.zone.networkType; @@ -441,14 +444,16 @@ var thisNetworkOffering = this; $(this.service).each(function(){ var thisService = this; - + $(thisService.provider).each(function(){ if(this.name == "Netscaler") { - thisNetworkOffering.havingNetscaler = true; - return false; //break each loop + thisNetworkOffering.havingNetscaler = true; + } + else if($.inArray(this.name, baremetalProviders) != -1) { + selectedBaremetalProviders.push(this.name); } - }); - + }); + if(thisService.name == "SecurityGroup") { thisNetworkOffering.havingSG = true; } @@ -1787,10 +1792,19 @@ if (result.jobstatus == 1) { //alert("configureVirtualRouterElement succeeded."); - + + if(args.data.pluginFrom != null && args.data.pluginFrom.name == "installWizard") { + selectedNetworkOfferingObj = args.data.pluginFrom.selectedNetworkOffering; + } + + var data = { + id: 
virtualRouterProviderId, + state: 'Enabled' + }; + $.ajax({ - url: createURL("updateNetworkServiceProvider&state=Enabled&id=" + virtualRouterProviderId), - dataType: "json", + url: createURL("updateNetworkServiceProvider"), + data: data, async: false, success: function(json) { var enableVirtualRouterProviderIntervalID = setInterval(function() { @@ -1806,7 +1820,57 @@ clearInterval(enableVirtualRouterProviderIntervalID); if (result.jobstatus == 1) { - //alert("Virtual Router Provider is enabled"); + //alert("Virtual Router Provider is enabled"); + for(var i = 0; i < selectedBaremetalProviders.length; i++) { + $.ajax({ + url: createURL("listNetworkServiceProviders"), + data: { + name: selectedBaremetalProviders[i], + physicalNetworkId: args.data.returnedBasicPhysicalNetwork.id + }, + async: false, + success: function(json) { + var items = json.listnetworkserviceprovidersresponse.networkserviceprovider; + if(items != null && items.length > 0) { + var providerId = items[0].id; + $.ajax({ + url: createURL("updateNetworkServiceProvider"), + data: { + id: providerId, + state: 'Enabled' + }, + async: false, + success: function(json) { + var updateNetworkServiceProviderIntervalID = setInterval(function() { + $.ajax({ + url: createURL("queryAsyncJobResult&jobId=" + json.updatenetworkserviceproviderresponse.jobid), + dataType: "json", + success: function(json) { + var result = json.queryasyncjobresultresponse; + if (result.jobstatus == 0) { + return; //Job has not completed + } + else { + clearInterval(updateNetworkServiceProviderIntervalID); + if (result.jobstatus == 1) { //baremetal provider has been enabled successfully + + } + else if (result.jobstatus == 2) { + alert(_s(result.jobresult.errortext)); + } + } + }, + error: function(XMLHttpResponse) { + alert(parseXMLHttpResponse(XMLHttpResponse)); + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + } + }); + } if(args.data.pluginFrom != null && args.data.pluginFrom.name == "installWizard") { 
selectedNetworkOfferingHavingSG = args.data.pluginFrom.selectedNetworkOfferingHavingSG; From 744c24ee68ff9d44d340120e3abd8f6502d3d0ad Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Wed, 20 Feb 2013 15:35:14 -0800 Subject: [PATCH 097/486] CLOUDSTACK-1346: Check to see if external devices are used in the network, is hardcoded for specific devices - Changes the check to list all providers of a network and check if it is an external provider --- .../com/cloud/network/NetworkModelImpl.java | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index beebb871d8a..ebe5aa4e02a 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -1762,17 +1762,26 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public boolean networkIsConfiguredForExternalNetworking(long zoneId, long networkId) { - boolean netscalerInNetwork = isProviderForNetwork(Network.Provider.Netscaler, networkId); - boolean juniperInNetwork = isProviderForNetwork(Network.Provider.JuniperSRX, networkId); - boolean f5InNetwork = isProviderForNetwork(Network.Provider.F5BigIp, networkId); - - if (netscalerInNetwork || juniperInNetwork || f5InNetwork) { - return true; - } else { - return false; + List networkProviders = getNetworkProviders(networkId); + for(Provider provider : networkProviders){ + if(provider.isExternal()){ + return true; + } } + return false; } + private List getNetworkProviders(long networkId) { + List providerNames = _ntwkSrvcDao.getDistinctProviders(networkId); + Map providers = new HashMap(); + for (String providerName : providerNames) { + if(!providers.containsKey(providerName)){ + providers.put(providerName, Network.Provider.getProvider(providerName)); + } + } + + return new ArrayList(providers.values()); + } @Override public boolean configure(String 
name, Map params) throws ConfigurationException { From 99653ea328be5ab9fcef416f2c5feefec38d0b32 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Wed, 20 Feb 2013 18:40:59 -0800 Subject: [PATCH 098/486] IPv6: Update java-ipv6 dependency to 0.10 --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e83ff666a5b..beeccaf936a 100644 --- a/pom.xml +++ b/pom.xml @@ -89,7 +89,7 @@ 2.6 1.4 0.9.8 - 0.8 + 0.10 From f7b600b43921ee2324e049432c9cf18906fc6772 Mon Sep 17 00:00:00 2001 From: hongtu_zang Date: Wed, 20 Feb 2013 09:52:25 +0800 Subject: [PATCH 099/486] CLOUDSTACK-1334 global config vmware.root.disk.controller doesn't work --- .../src/com/cloud/hypervisor/guru/VMwareGuru.java | 12 ++++++++++++ .../hypervisor/vmware/manager/VmwareManager.java | 2 ++ .../hypervisor/vmware/manager/VmwareManagerImpl.java | 5 +++++ 3 files changed, 19 insertions(+) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java index 819d3999f92..bb7c29745d9 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java @@ -1,3 +1,4 @@ + // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. 
See the NOTICE file // distributed with this work for additional information @@ -129,6 +130,17 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru { } } } + + String diskDeviceType = details.get(VmDetailConstants.ROOK_DISK_CONTROLLER); + if (!(vm.getVirtualMachine() instanceof DomainRouterVO || vm.getVirtualMachine() instanceof ConsoleProxyVO + || vm.getVirtualMachine() instanceof SecondaryStorageVmVO)){ + // user vm + if (diskDeviceType != null){ + details.remove(VmDetailConstants.ROOK_DISK_CONTROLLER); + } + details.put(VmDetailConstants.ROOK_DISK_CONTROLLER, _vmwareMgr.getRootDiskController()); + } + to.setDetails(details); if(vm.getVirtualMachine() instanceof DomainRouterVO) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java index e219c1cc426..445b2f0debc 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java @@ -67,4 +67,6 @@ public interface VmwareManager { String getPublicVSwitchName(long dcId, HypervisorType hypervisorType); String getGuestVSwitchName(long dcId, HypervisorType hypervisorType); + + public String getRootDiskController(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index e6d57426db2..70f98cc64b4 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -906,4 +906,9 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } return nexusVSMCredentials; } + + @Override + public String getRootDiskController() { + return 
_rootDiskController; + } } From 7a3f072a50f533c652721f613ba60f1ef78c44b9 Mon Sep 17 00:00:00 2001 From: Harikrishna Patnala Date: Thu, 21 Feb 2013 14:59:35 +0530 Subject: [PATCH 100/486] CLOUDSTACK-745: Reset a VM on reboot. On every reboot of VM, the root disk state is reset. This is for the VMs created using service offering where an optional parameter isvolatile is set true. Signed-off-by: Abhinandan Prateek --- .../com/cloud/offering/ServiceOffering.java | 5 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../offering/CreateServiceOfferingCmd.java | 11 +- .../configuration/ConfigurationManager.java | 3 +- .../ConfigurationManagerImpl.java | 14 +- .../cloud/migration/ServiceOffering21VO.java | 5 + .../com/cloud/service/ServiceOfferingVO.java | 30 ++-- .../src/com/cloud/vm/UserVmManagerImpl.java | 42 +++-- .../test/com/cloud/vm/UserVmManagerTest.java | 148 ++++++++++++++++++ .../vpc/MockConfigurationManagerImpl.java | 2 +- setup/db/db/schema-410to420.sql | 5 +- 11 files changed, 231 insertions(+), 35 deletions(-) create mode 100755 server/test/com/cloud/vm/UserVmManagerTest.java diff --git a/api/src/com/cloud/offering/ServiceOffering.java b/api/src/com/cloud/offering/ServiceOffering.java index 4d715898a75..d6c215f42f0 100755 --- a/api/src/com/cloud/offering/ServiceOffering.java +++ b/api/src/com/cloud/offering/ServiceOffering.java @@ -77,6 +77,11 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity, */ boolean getLimitCpuUse(); + /** + * @return Does this service plan support Volatile VM that is, discard VM's root disk and create a new one on reboot? 
+ */ + boolean getVolatileVm(); + /** * @return the rate in megabits per sec to which a VM's network interface is throttled to */ diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index cd7d700d2b5..35a11dd7a53 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -218,6 +218,7 @@ public class ApiConstants { public static final String VM_LIMIT = "vmlimit"; public static final String VM_TOTAL = "vmtotal"; public static final String VNET = "vnet"; + public static final String IS_VOLATILE = "isvolatile"; public static final String VOLUME_ID = "volumeid"; public static final String ZONE_ID = "zoneid"; public static final String ZONE_NAME = "zonename"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index ee1e1b20bfc..e915c48e9b6 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -59,6 +59,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name=ApiConstants.LIMIT_CPU_USE, type=CommandType.BOOLEAN, description="restrict the CPU usage to committed service offering") private Boolean limitCpuUse; + @Parameter(name=ApiConstants.IS_VOLATILE, type=CommandType.BOOLEAN, description="true if the virtual machine needs to be volatile so that on every reboot of VM, original root disk is dettached then destroyed and a fresh root disk is created and attached to VM") + private Boolean isVolatile; + @Parameter(name=ApiConstants.STORAGE_TYPE, type=CommandType.STRING, description="the storage type of the service offering. 
Values are local and shared.") private String storageType; @@ -106,11 +109,15 @@ public class CreateServiceOfferingCmd extends BaseCmd { } public Boolean getOfferHa() { - return offerHa; + return offerHa == null ? false : offerHa; } public Boolean GetLimitCpuUse() { - return limitCpuUse; + return limitCpuUse == null ? false : limitCpuUse; + } + + public Boolean getVolatileVm() { + return isVolatile == null ? false : isVolatile; } public String getStorageType() { diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java index 5c1b0d58c6f..7193928ca33 100644 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -72,6 +72,7 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * @param localStorageRequired * @param offerHA * @param domainId + * @param volatileVm * @param hostTag * @param networkRate * TODO @@ -80,7 +81,7 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * @return ID */ ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, - boolean offerHA, boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate); + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate); /** * Creates a new disk offering diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index b886bedbc48..cf3a9080a1f 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1777,14 +1777,8 @@ public class ConfigurationManagerImpl extends ManagerBase 
implements Configurati } Boolean offerHA = cmd.getOfferHa(); - if (offerHA == null) { - offerHA = false; - } - Boolean limitCpuUse = cmd.GetLimitCpuUse(); - if (limitCpuUse == null) { - limitCpuUse = false; - } + Boolean volatileVm = cmd.getVolatileVm(); String vmTypeString = cmd.getSystemVmType(); VirtualMachine.Type vmType = null; @@ -1811,15 +1805,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), - localStorageRequired, offerHA, limitCpuUse, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); + localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); } @Override @ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering") public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText, - boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { tags = cleanupTags(tags); - ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, displayText, localStorageRequired, false, tags, isSystem, vm_type, + ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type, domainId, hostTag); if ((offering = _serviceOfferingDao.persist(offering)) != null) { diff --git 
a/server/src/com/cloud/migration/ServiceOffering21VO.java b/server/src/com/cloud/migration/ServiceOffering21VO.java index fdec30e3b8a..d07be6462f1 100644 --- a/server/src/com/cloud/migration/ServiceOffering21VO.java +++ b/server/src/com/cloud/migration/ServiceOffering21VO.java @@ -169,5 +169,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe return null; } + @Override + public boolean getVolatileVm() { + return false; + } + } diff --git a/server/src/com/cloud/service/ServiceOfferingVO.java b/server/src/com/cloud/service/ServiceOfferingVO.java index c199a86afd1..7be939c3a15 100755 --- a/server/src/com/cloud/service/ServiceOfferingVO.java +++ b/server/src/com/cloud/service/ServiceOfferingVO.java @@ -53,6 +53,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering @Column(name="limit_cpu_use") private boolean limitCpuUse; + @Column(name="is_volatile") + private boolean volatileVm; + @Column(name="host_tag") private String hostTag; @@ -78,11 +81,12 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.multicastRateMbps = multicastRateMbps; this.offerHA = offerHA; this.limitCpuUse = false; + this.volatileVm = false; this.default_use = defaultUse; this.vm_type = vm_type == null ? 
null : vm_type.toString().toLowerCase(); } - public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitCpuUse, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId) { + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitCpuUse, boolean volatileVm, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId) { super(name, displayText, false, tags, recreatable, useLocalStorage, systemUse, true, domainId); this.cpu = cpu; this.ramSize = ramSize; @@ -91,11 +95,12 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.multicastRateMbps = multicastRateMbps; this.offerHA = offerHA; this.limitCpuUse = limitCpuUse; + this.volatileVm = volatileVm; this.vm_type = vm_type == null ? 
null : vm_type.toString().toLowerCase(); } - public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitResourceUse, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, String hostTag) { - this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId); + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, String hostTag) { + this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm, displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId); this.hostTag = hostTag; } @@ -189,13 +194,18 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering public String getSystemVmType(){ return vm_type; } + + public void setSortKey(int key) { + sortKey = key; + } + + public int getSortKey() { + return sortKey; + } - public void setSortKey(int key) { - sortKey = key; + @Override + public boolean getVolatileVm() { + return volatileVm; } - - public int getSortKey() { - return sortKey; - } - + } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index df976099f1d..ea25c663428 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -2672,6 +2672,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use _accountMgr.checkAccess(caller, null, true, vmInstance); + // If the VM is Volatile in nature, on reboot discard 
the VM's root disk and create a new root disk for it: by calling restoreVM + long serviceOfferingId = vmInstance.getServiceOfferingId(); + ServiceOfferingVO offering = _serviceOfferingDao.findById(serviceOfferingId); + if(offering.getVolatileVm()){ + return restoreVMInternal(caller, vmInstance); + } + return rebootVirtualMachine(UserContext.current().getCallerUserId(), vmId); } @@ -4789,19 +4796,27 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use // Input validation Account caller = UserContext.current().getCaller(); Long userId = UserContext.current().getCallerUserId(); - UserVO user = _userDao.findById(userId); - boolean needRestart = false; long vmId = cmd.getVmId(); UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { - InvalidParameterValueException ex = new InvalidParameterValueException( - "Cann not find VM with ID " + vmId); + InvalidParameterValueException ex = new InvalidParameterValueException("Cannot find VM with ID " + vmId); ex.addProxyObject(vm, vmId, "vmId"); throw ex; } + return restoreVMInternal(caller, vm); + } + + public UserVm restoreVMInternal(Account caller, UserVmVO vm){ + + Long userId = caller.getId(); Account owner = _accountDao.findById(vm.getAccountId()); + UserVO user = _userDao.findById(userId); + long vmId = vm.getId(); + boolean needRestart = false; + + // Input validation if (owner == null) { throw new InvalidParameterValueException("The owner of " + vm + " does not exist: " + vm.getAccountId()); @@ -4816,7 +4831,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use && vm.getState() != VirtualMachine.State.Stopped) { throw new CloudRuntimeException( "Vm " - + vmId + + vm.getUuid() + " currently in " + vm.getState() + " state, restore vm can only execute when VM in Running or Stopped"); @@ -4829,13 +4844,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List rootVols = _volsDao.findByInstance(vmId); if (rootVols.isEmpty()) { 
InvalidParameterValueException ex = new InvalidParameterValueException( - "Can not find root volume for VM " + vmId); + "Can not find root volume for VM " + vm.getUuid()); ex.addProxyObject(vm, vmId, "vmId"); throw ex; } VolumeVO root = rootVols.get(0); - long templateId = root.getTemplateId(); + Long templateId = root.getTemplateId(); + if(templateId == null) { + InvalidParameterValueException ex = new InvalidParameterValueException("Currently there is no support to reset a vm that is deployed using ISO " + vm.getUuid()); + ex.addProxyObject(vm, vmId, "vmId"); + throw ex; + } + VMTemplateVO template = _templateDao.findById(templateId); if (template == null) { InvalidParameterValueException ex = new InvalidParameterValueException( @@ -4849,7 +4870,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use try { _itMgr.stop(vm, user, caller); } catch (ResourceUnavailableException e) { - s_logger.debug("Stop vm " + vmId + " failed", e); + s_logger.debug("Stop vm " + vm.getUuid() + " failed", e); CloudRuntimeException ex = new CloudRuntimeException( "Stop vm failed for specified vmId"); ex.addProxyObject(vm, vmId, "vmId"); @@ -4874,7 +4895,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use try { _itMgr.start(vm, null, user, caller); } catch (Exception e) { - s_logger.debug("Unable to start VM " + vmId, e); + s_logger.debug("Unable to start VM " + vm.getUuid(), e); CloudRuntimeException ex = new CloudRuntimeException( "Unable to start VM with specified id" + e.getMessage()); ex.addProxyObject(vm, vmId, "vmId"); @@ -4882,9 +4903,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - s_logger.debug("Restore VM " + vmId + " with template " + s_logger.debug("Restore VM " + vm.getUuid() + " with template " + root.getTemplateId() + " successfully"); return vm; + } @Override diff --git a/server/test/com/cloud/vm/UserVmManagerTest.java 
b/server/test/com/cloud/vm/UserVmManagerTest.java new file mode 100755 index 00000000000..46069ede5d6 --- /dev/null +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -0,0 +1,148 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm; + +import java.util.List; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd; +import org.apache.log4j.Logger; +import org.junit.Test; +import org.junit.Before; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.UserVO; +import 
com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.dao.UserVmDao; + +import static org.mockito.Mockito.*; + +public class UserVmManagerTest { + + @Spy UserVmManagerImpl _userVmMgr = new UserVmManagerImpl(); + @Mock VirtualMachineManager _itMgr; + @Mock StorageManager _storageMgr; + @Mock Account account; + @Mock AccountManager _accountMgr; + @Mock AccountDao _accountDao; + @Mock UserDao _userDao; + @Mock UserVmDao _vmDao; + @Mock VMTemplateDao _templateDao; + @Mock VolumeDao _volsDao; + @Mock RestoreVMCmd restoreVMCmd; + @Mock AccountVO accountMock; + @Mock UserVO userMock; + @Mock UserVmVO vmMock; + @Mock VMTemplateVO templateMock; + @Mock VolumeVO volumeMock; + @Mock List rootVols; + @Before + public void setup(){ + MockitoAnnotations.initMocks(this); + + _userVmMgr._vmDao = _vmDao; + _userVmMgr._templateDao = _templateDao; + _userVmMgr._volsDao = _volsDao; + _userVmMgr._itMgr = _itMgr; + _userVmMgr._storageMgr = _storageMgr; + _userVmMgr._accountDao = _accountDao; + _userVmMgr._userDao = _userDao; + + doReturn(3L).when(account).getId(); + doReturn(8L).when(vmMock).getAccountId(); + when(_accountDao.findById(anyLong())).thenReturn(accountMock); + when(_userDao.findById(anyLong())).thenReturn(userMock); + doReturn(Account.State.enabled).when(account).getState(); + when(vmMock.getId()).thenReturn(314L); + + } + + // VM state not in running/stopped case + @Test(expected=CloudRuntimeException.class) + public void testRestoreVMF1() throws ResourceAllocationException { + + when(_vmDao.findById(anyLong())).thenReturn(vmMock); + when(_templateDao.findById(anyLong())).thenReturn(templateMock); + doReturn(VirtualMachine.State.Error).when(vmMock).getState(); + _userVmMgr.restoreVMInternal(account, vmMock); + } + + // when VM is in stopped state + @Test + public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + 
ConcurrentOperationException, ResourceAllocationException { + + doReturn(VirtualMachine.State.Stopped).when(vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(rootVols); + doReturn(false).when(rootVols).isEmpty(); + when(rootVols.get(eq(0))).thenReturn(volumeMock); + doReturn(3L).when(volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(templateMock); + when(_storageMgr.allocateDuplicateVolume(volumeMock, null)).thenReturn(volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + when(_storageMgr.destroyVolume(volumeMock)).thenReturn(true); + + _userVmMgr.restoreVMInternal(account, vmMock); + + } + + // when VM is in running state + @Test + public void testRestoreVMF3() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + + doReturn(VirtualMachine.State.Running).when(vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(rootVols); + doReturn(false).when(rootVols).isEmpty(); + when(rootVols.get(eq(0))).thenReturn(volumeMock); + doReturn(3L).when(volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(templateMock); + when(_itMgr.stop(vmMock, userMock, account)).thenReturn(true); + when(_itMgr.start(vmMock, null, userMock, account)).thenReturn(vmMock); + when(_storageMgr.allocateDuplicateVolume(volumeMock, null)).thenReturn(volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + when(_storageMgr.destroyVolume(volumeMock)).thenReturn(true); + + _userVmMgr.restoreVMInternal(account, vmMock); + + } + +} \ No 
newline at end of file diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index 180138ac136..e93b2a14c52 100644 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -433,7 +433,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA, - boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { // TODO Auto-generated method stub return null; } diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 65add75294b..0335f2a0781 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -23,4 +23,7 @@ ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware'; INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); \ No newline at end of file +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); + + +ALTER TABLE `cloud`.`service_offering` ADD COLUMN `is_volatile` 
tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk'; From 59db01ce6e9a428d5853786eef5279ee30aa23b2 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 21 Feb 2013 14:56:43 +0530 Subject: [PATCH 101/486] CLOUDSTACK-1347: Don't enforce not null rule on queue_proc_time of sync_item_queue In c63dbb88042d7eabea2664c2b608c51792fc9f18 I removed the rule from create-schema: - `queue_proc_time` datetime COMMENT 'when processing started for the item', But, upgrade path schema-40to410.sql had a different rule which caused the bug: +ALTER TABLE `cloud`.`sync_queue_item` ADD `queue_proc_time` DATETIME NOT NULL COMMENT 'when processing started for the item' AFTER `queue_proc_number`; In this fix we just revert to whatever rule was defined in create-schema as the developer may have forgetten to fix same rule in create-schema and upgrade path. This commit can be reverted or the code be fixed if we want that queue_proc_time cannot be null. 
Signed-off-by: Rohit Yadav --- setup/db/db/schema-40to410.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 9a59318354c..47b7cbe14d7 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -200,7 +200,7 @@ ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_time`; ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_msid`; -ALTER TABLE `cloud`.`sync_queue_item` ADD `queue_proc_time` DATETIME NOT NULL COMMENT 'when processing started for the item' AFTER `queue_proc_number`; +ALTER TABLE `cloud`.`sync_queue_item` ADD `queue_proc_time` DATETIME COMMENT 'when processing started for the item' AFTER `queue_proc_number`; ALTER TABLE `cloud`.`sync_queue_item` ADD INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`); From 6c01b62cdc2fe068d50b4e37739721dbc722cc41 Mon Sep 17 00:00:00 2001 From: Harikrishna Patnala Date: Thu, 21 Feb 2013 15:03:18 +0530 Subject: [PATCH 102/486] CLOUDSTACK-667: VM's base image update facility --- .../api/command/user/vm/RestoreVMCmd.java | 10 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 51 ++++--- .../test/com/cloud/vm/UserVmManagerTest.java | 127 +++++++++++------- 3 files changed, 124 insertions(+), 64 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java index e98c2f2eddc..9c33f97c317 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.log4j.Logger; @@ -34,7 
+35,7 @@ import com.cloud.user.Account; import com.cloud.user.UserContext; import com.cloud.uservm.UserVm; -@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template or specific snapshot", responseObject=UserVmResponse.class, since="3.0.0") +@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template or new template", responseObject=UserVmResponse.class, since="3.0.0") public class RestoreVMCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(RestoreVMCmd.class); private static final String s_name = "restorevmresponse"; @@ -43,6 +44,9 @@ public class RestoreVMCmd extends BaseAsyncCmd { required=true, description="Virtual Machine ID") private Long vmId; + @Parameter(name=ApiConstants.TEMPLATE_ID, type=CommandType.UUID, entityType = TemplateResponse.class, description="an optional template Id to restore vm from the new template") + private Long templateId; + @Override public String getEventType() { return EventTypes.EVENT_VM_RESTORE; @@ -85,4 +89,8 @@ public class RestoreVMCmd extends BaseAsyncCmd { public long getVmId() { return vmId; } + + public Long getTemplateId() { + return templateId; + } } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index ea25c663428..ed8cd3630a8 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -2675,8 +2675,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use // If the VM is Volatile in nature, on reboot discard the VM's root disk and create a new root disk for it: by calling restoreVM long serviceOfferingId = vmInstance.getServiceOfferingId(); ServiceOfferingVO offering = _serviceOfferingDao.findById(serviceOfferingId); - if(offering.getVolatileVm()){ - return restoreVMInternal(caller, vmInstance); + if(offering != null && offering.getRemoved() == null) { + if(offering.getVolatileVm()){ 
+ return restoreVMInternal(caller, vmInstance, null); + } + } else { + throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId + " corresponding to the vm"); } return rebootVirtualMachine(UserContext.current().getCallerUserId(), @@ -4795,9 +4799,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use public UserVm restoreVM(RestoreVMCmd cmd) { // Input validation Account caller = UserContext.current().getCaller(); - Long userId = UserContext.current().getCallerUserId(); long vmId = cmd.getVmId(); + Long newTemplateId = cmd.getTemplateId(); UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Cannot find VM with ID " + vmId); @@ -4805,10 +4809,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw ex; } - return restoreVMInternal(caller, vm); + _accountMgr.checkAccess(caller, null, true, vm); + + return restoreVMInternal(caller, vm, newTemplateId); } - public UserVm restoreVMInternal(Account caller, UserVmVO vm){ + public UserVm restoreVMInternal(Account caller, UserVmVO vm, Long newTemplateId){ Long userId = caller.getId(); Account owner = _accountDao.findById(vm.getAccountId()); @@ -4857,13 +4863,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw ex; } - VMTemplateVO template = _templateDao.findById(templateId); - if (template == null) { - InvalidParameterValueException ex = new InvalidParameterValueException( - "Cannot find template for specified volumeid and vmId"); - ex.addProxyObject(vm, vmId, "vmId"); - ex.addProxyObject(root, root.getId(), "volumeId"); - throw ex; + VMTemplateVO template = null; + if(newTemplateId != null) { + template = _templateDao.findById(newTemplateId); + _accountMgr.checkAccess(caller, null, true, template); + } else { + template = _templateDao.findById(templateId); + if (template == null) { + 
InvalidParameterValueException ex = new InvalidParameterValueException( + "Cannot find template for specified volumeid and vmId"); + ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(root, root.getId(), "volumeId"); + throw ex; + } } if (needRestart) { @@ -4878,8 +4890,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - /* allocate a new volume from original template */ - VolumeVO newVol = _storageMgr.allocateDuplicateVolume(root, null); + /* If new template is provided allocate a new volume from new template otherwise allocate new volume from original template */ + VolumeVO newVol = null; + if (newTemplateId != null){ + newVol = _storageMgr.allocateDuplicateVolume(root, newTemplateId); + vm.setGuestOSId(template.getGuestOSId()); + vm.setTemplateId(newTemplateId); + _vmDao.update(vmId, vm); + } else newVol = _storageMgr.allocateDuplicateVolume(root, null); + _volsDao.attachVolume(newVol.getId(), vmId, newVol.getDeviceId()); /* Detach and destory the old root volume */ @@ -4903,8 +4922,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - s_logger.debug("Restore VM " + vm.getUuid() + " with template " - + root.getTemplateId() + " successfully"); + s_logger.debug("Restore VM " + vmId + " with template " + + template.getUuid() + " done successfully"); return vm; } diff --git a/server/test/com/cloud/vm/UserVmManagerTest.java b/server/test/com/cloud/vm/UserVmManagerTest.java index 46069ede5d6..07cad479771 100755 --- a/server/test/com/cloud/vm/UserVmManagerTest.java +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -54,20 +54,20 @@ public class UserVmManagerTest { @Spy UserVmManagerImpl _userVmMgr = new UserVmManagerImpl(); @Mock VirtualMachineManager _itMgr; @Mock StorageManager _storageMgr; - @Mock Account account; + @Mock Account _account; @Mock AccountManager _accountMgr; @Mock AccountDao _accountDao; @Mock UserDao _userDao; @Mock UserVmDao _vmDao; @Mock VMTemplateDao 
_templateDao; @Mock VolumeDao _volsDao; - @Mock RestoreVMCmd restoreVMCmd; - @Mock AccountVO accountMock; - @Mock UserVO userMock; - @Mock UserVmVO vmMock; - @Mock VMTemplateVO templateMock; - @Mock VolumeVO volumeMock; - @Mock List rootVols; + @Mock RestoreVMCmd _restoreVMCmd; + @Mock AccountVO _accountMock; + @Mock UserVO _userMock; + @Mock UserVmVO _vmMock; + @Mock VMTemplateVO _templateMock; + @Mock VolumeVO _volumeMock; + @Mock List _rootVols; @Before public void setup(){ MockitoAnnotations.initMocks(this); @@ -79,69 +79,102 @@ public class UserVmManagerTest { _userVmMgr._storageMgr = _storageMgr; _userVmMgr._accountDao = _accountDao; _userVmMgr._userDao = _userDao; + _userVmMgr._accountMgr = _accountMgr; - doReturn(3L).when(account).getId(); - doReturn(8L).when(vmMock).getAccountId(); - when(_accountDao.findById(anyLong())).thenReturn(accountMock); - when(_userDao.findById(anyLong())).thenReturn(userMock); - doReturn(Account.State.enabled).when(account).getState(); - when(vmMock.getId()).thenReturn(314L); + doReturn(3L).when(_account).getId(); + doReturn(8L).when(_vmMock).getAccountId(); + when(_accountDao.findById(anyLong())).thenReturn(_accountMock); + when(_userDao.findById(anyLong())).thenReturn(_userMock); + doReturn(Account.State.enabled).when(_account).getState(); + when(_vmMock.getId()).thenReturn(314L); } - // VM state not in running/stopped case + // Test restoreVm when VM state not in running/stopped case @Test(expected=CloudRuntimeException.class) public void testRestoreVMF1() throws ResourceAllocationException { - when(_vmDao.findById(anyLong())).thenReturn(vmMock); - when(_templateDao.findById(anyLong())).thenReturn(templateMock); - doReturn(VirtualMachine.State.Error).when(vmMock).getState(); - _userVmMgr.restoreVMInternal(account, vmMock); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + doReturn(VirtualMachine.State.Error).when(_vmMock).getState(); + 
_userVmMgr.restoreVMInternal(_account, _vmMock, null); } - // when VM is in stopped state + // Test restoreVm when VM is in stopped state @Test public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, - ConcurrentOperationException, ResourceAllocationException { + ConcurrentOperationException, ResourceAllocationException { - doReturn(VirtualMachine.State.Stopped).when(vmMock).getState(); - when(_vmDao.findById(anyLong())).thenReturn(vmMock); - when(_volsDao.findByInstance(anyLong())).thenReturn(rootVols); - doReturn(false).when(rootVols).isEmpty(); - when(rootVols.get(eq(0))).thenReturn(volumeMock); - doReturn(3L).when(volumeMock).getTemplateId(); - when(_templateDao.findById(anyLong())).thenReturn(templateMock); - when(_storageMgr.allocateDuplicateVolume(volumeMock, null)).thenReturn(volumeMock); + doReturn(VirtualMachine.State.Stopped).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); - when(volumeMock.getId()).thenReturn(3L); + when(_volumeMock.getId()).thenReturn(3L); doNothing().when(_volsDao).detachVolume(anyLong()); - when(_storageMgr.destroyVolume(volumeMock)).thenReturn(true); + when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); - _userVmMgr.restoreVMInternal(account, vmMock); + _userVmMgr.restoreVMInternal(_account, _vmMock, null); } - // when VM is in running state + // Test restoreVM when VM is in running state @Test public void 
testRestoreVMF3() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, - ConcurrentOperationException, ResourceAllocationException { + ConcurrentOperationException, ResourceAllocationException { - doReturn(VirtualMachine.State.Running).when(vmMock).getState(); - when(_vmDao.findById(anyLong())).thenReturn(vmMock); - when(_volsDao.findByInstance(anyLong())).thenReturn(rootVols); - doReturn(false).when(rootVols).isEmpty(); - when(rootVols.get(eq(0))).thenReturn(volumeMock); - doReturn(3L).when(volumeMock).getTemplateId(); - when(_templateDao.findById(anyLong())).thenReturn(templateMock); - when(_itMgr.stop(vmMock, userMock, account)).thenReturn(true); - when(_itMgr.start(vmMock, null, userMock, account)).thenReturn(vmMock); - when(_storageMgr.allocateDuplicateVolume(volumeMock, null)).thenReturn(volumeMock); + doReturn(VirtualMachine.State.Running).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); + when(_itMgr.start(_vmMock, null, _userMock, _account)).thenReturn(_vmMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); - when(volumeMock.getId()).thenReturn(3L); + when(_volumeMock.getId()).thenReturn(3L); doNothing().when(_volsDao).detachVolume(anyLong()); - when(_storageMgr.destroyVolume(volumeMock)).thenReturn(true); + when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); - _userVmMgr.restoreVMInternal(account, vmMock); + 
_userVmMgr.restoreVMInternal(_account, _vmMock, null); + + } + + // Test restoreVM on providing new template Id, when VM is in running state + @Test + public void testRestoreVMF4() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + doReturn(VirtualMachine.State.Running).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + doNothing().when(_accountMgr).checkAccess(_account, null, true, _templateMock); + when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, 14L)).thenReturn(_volumeMock); + when(_templateMock.getGuestOSId()).thenReturn(5L); + doNothing().when(_vmMock).setGuestOSId(anyLong()); + doNothing().when(_vmMock).setTemplateId(3L); + when(_vmDao.update(314L, _vmMock)).thenReturn(true); + when(_itMgr.start(_vmMock, null, _userMock, _account)).thenReturn(_vmMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(_volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d"); + + _userVmMgr.restoreVMInternal(_account, _vmMock, 14L); } From 3279b4146526e2f4e77a06699887cb7ea141fc0b Mon Sep 17 00:00:00 2001 From: Pradeep Soundararajan Date: Thu, 21 Feb 2013 10:40:16 +0100 Subject: [PATCH 103/486] Commit review 9409 Added a global package name in the packaging script Signed-off-by: Hugo Trippaers --- 
packaging/centos63/package.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/centos63/package.sh b/packaging/centos63/package.sh index fa45210b2f8..2515ecba11f 100755 --- a/packaging/centos63/package.sh +++ b/packaging/centos63/package.sh @@ -18,7 +18,7 @@ CWD=`pwd` RPMDIR=$CWD/../../dist/rpmbuild - +PACK_PROJECT=cloudstack VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` @@ -34,12 +34,12 @@ else fi mkdir -p $RPMDIR/SPECS -mkdir -p $RPMDIR/SOURCES/cloudstack-$VERSION +mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -(cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/cloudstack-$VERSION -x ) -(cd $RPMDIR/SOURCES/; tar -czf cloudstack-$VERSION.tgz cloudstack-$VERSION) +(cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x ) +(cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) cp cloud.spec $RPMDIR/SPECS -(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE" ) +(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE") From 35d89050817324876c4411998c81abdac3501563 Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Thu, 21 Feb 2013 16:26:31 +0530 Subject: [PATCH 104/486] Reset a VM detail view display --- ui/scripts/configuration.js | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/scripts/configuration.js b/ui/scripts/configuration.js index 1e44ecfd688..4a64eeac1a5 100644 --- a/ui/scripts/configuration.js +++ b/ui/scripts/configuration.js @@ -361,6 +361,7 @@ label: 'label.CPU.cap', converter: cloudStack.converters.toBooleanText }, + isvolatile:{ label:'Volatile' , converter: cloudStack.converters.toBooleanText }, tags: { label: 'label.storage.tags' }, hosttags: { label: 'label.host.tags' }, domain: { label: 'label.domain' }, From 368a5d5a9a37d60988c15d2f3b9acde9904509cd 
Mon Sep 17 00:00:00 2001 From: Hugo Trippaers Date: Thu, 21 Feb 2013 12:27:18 +0100 Subject: [PATCH 105/486] Prachi's commit 20a747601c2664b2b8128f7a180f7e94f6b0b1e1 introduced a new call to network.getPhysicalNetworkId() which wasn't mocked yet in the nvp plugin unittests.(cherry picked from commit aea5b268b4590775eff6291ddfbbd6de777d1b63) Signed-off-by: Hugo Trippaers --- .../com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java b/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java index f86e705336c..0e4f8fd4f84 100644 --- a/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java +++ b/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java @@ -252,6 +252,7 @@ public class NiciraNvpGuestNetworkGuruTest { NetworkVO network = mock(NetworkVO.class); when(network.getName()).thenReturn("testnetwork"); when(network.getState()).thenReturn(State.Implementing); + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); @@ -308,7 +309,7 @@ public class NiciraNvpGuestNetworkGuruTest { when(network.getState()).thenReturn(State.Implementing); when(network.getGateway()).thenReturn("10.1.1.1"); when(network.getCidr()).thenReturn("10.1.1.0/24"); - + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); @@ -365,6 +366,7 @@ public class NiciraNvpGuestNetworkGuruTest { NetworkVO network = mock(NetworkVO.class); when(network.getName()).thenReturn("testnetwork"); when(network.getState()).thenReturn(State.Implementing); + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); From 8134885e2dcd4e825d4e187540e3b13c114b359a Mon 
Sep 17 00:00:00 2001 From: Parth Jagirdar Date: Tue, 19 Feb 2013 14:40:13 -0800 Subject: [PATCH 106/486] CLOUDSTACK-1180: UI test automation using selenium Description: Adding selenium test scripts. --- test/selenium/ReadMe.txt | 52 ++ test/selenium/lib/Global_Locators.py | 207 ++++++++ test/selenium/lib/initialize.py | 15 + test/selenium/smoke/Login_and_Accounts.py | 236 +++++++++ test/selenium/smoke/Service_Offering.py | 409 +++++++++++++++ test/selenium/smoke/TemplatesAndISO.py | 227 ++++++++ test/selenium/smoke/VM_lifeCycle.py | 596 ++++++++++++++++++++++ test/selenium/smoke/main.py | 128 +++++ 8 files changed, 1870 insertions(+) create mode 100644 test/selenium/ReadMe.txt create mode 100644 test/selenium/lib/Global_Locators.py create mode 100644 test/selenium/lib/initialize.py create mode 100644 test/selenium/smoke/Login_and_Accounts.py create mode 100644 test/selenium/smoke/Service_Offering.py create mode 100644 test/selenium/smoke/TemplatesAndISO.py create mode 100644 test/selenium/smoke/VM_lifeCycle.py create mode 100644 test/selenium/smoke/main.py diff --git a/test/selenium/ReadMe.txt b/test/selenium/ReadMe.txt new file mode 100644 index 00000000000..30b0e0df7a0 --- /dev/null +++ b/test/selenium/ReadMe.txt @@ -0,0 +1,52 @@ +############################################## +This files contains following: + +1) Installation requirements +2) Test Pre requisites +3) Running the Test and Generating the report +############################################## + + + +########################################################################################################################################## + +1) Installtion Requirements + + +1)Firefox depending on your OS (Good to have Firebug and Selenium IDE for troubleshooting and dev work) + + +2)Install Python 2.7. 
Recommend to use Active State Python + + +3) Now Open CMD/Terminal and type all of following + +- pypm install pycrypto (Installs Pycrypto) +- pypm install paramiko (Install paramiko) +- pip install unittest-xml-reporting (Install XML Test Runner) +- pip install -U selenium (Installs Selenium) + + +5) Now get the HTMLTestRunner for nice looking report generation. +- http://tungwaiyip.info/software/HTMLTestRunner.html +- Download and put this file into Lib of your python installation. + + +########################################################################################################################################## + +2) Test Prerequisites + +- Download and install CS +- Log into the management server nad Add a Zone. (Must be Advance Zone and Hypervisor type must be Xen) + +########################################################################################################################################## + +3) Running the Test and Generating the report + +- Folder smoke contains main.py +- main.py is the file where all the tests are serialized. +- main.py supports HTML and XML reporting. Please refer to end of file to choose either. +- Typical usage is: python main.py for XML Reporting +- And python main.py >> results.html for HTML Reporting. + +########################################################################################################################################## diff --git a/test/selenium/lib/Global_Locators.py b/test/selenium/lib/Global_Locators.py new file mode 100644 index 00000000000..ec3de57d32b --- /dev/null +++ b/test/selenium/lib/Global_Locators.py @@ -0,0 +1,207 @@ +''' +Variable Names are as follows +Logical Page Descriptor_____What Element Represents and/or where it is_____LocatorType + + +For Example :: + +instances_xpath = "//div[@id='navigation']/ul/li[2]/span[2]" + +Means this is:: xpath link for Instances which is present on Dashboard. 
+Any test cases that requires to go into Instances from Dashboard can use this variable now. + +This may not be intuitive as you go deep into the tree. + + + +for example + +stopinstanceforce_id + +The best way to know what this represents is to track by variable name +Under Instances / any instance is click on any instance (applies to any instance) / stop instance has a force stop check box when you click. +This link represents that. + + +Steps below do not have global locators. + +PF rule steps including and after filling port numbers. (Refer to vmLifeAndNetwork.py / def test_PF) +FW rule steps including and after filling port numbers. (Refer to vmLifeAndNetwork.py / def test_PF) +ADD Disk Offering page has Names, description, storage type etc etc +ADD Compute Offering page has Names, description, CPU Cores, CPU clocks type etc etc + +Create Acc, Delete Acc, Login and Logout are for test flow and are not test cases. They do not have global Locators. + +Such and many more data entry points that appear only once and hence we do not need glonal names for them. They are hard coded as and when needed in the scripts. + + +''' + +################################################################################################################################################################################################ + +## Links on the Main UI page (Dash board). 
Listed in the order they appear on screen +dashboard_xpath = "//div[@id='navigation']/ul/li" +instances_xpath = "//div[@id='navigation']/ul/li[2]/span[2]" # Link for Instance and following as self explanatory +storage_xpath = "//div[@id='navigation']/ul/li[3]/span[2]" +network_xpath = "//div[@id='navigation']/ul/li[4]/span[2]" +templates_xpath = "//div[@id='navigation']/ul/li[5]/span[2]" +events_xpath = "//div[@id='navigation']/ul/li[6]/span[2]" +projects_xpath = "//div[@id='navigation']/ul/li[7]/span[2]" +accounts_xpath = "//div[@id='navigation']/ul/li[8]/span[2]" +domains_xpath = "//div[@id='navigation']/ul/li[9]/span[2]" +infrastructure_xpath = "//div[@id='navigation']/ul/li[10]/span[2]" +globalSettings_xpath = "//div[@id='navigation']/ul/li[11]/span[2]" +serviceOfferings_xpath = "//div[@id='navigation']/ul/li[12]/span[2]" + +################################################################################################################################################################################################ + +## Instances Page +## Instances Main page + + +# Add Instance Button on top right corner of Instances page +add_instance_xpath = "//div[2]/div/div[2]/div/div[2]/span" + +# Add Instance Wizard next button +add_instance_next_xpath = "//div[4]/div[2]/div[3]/div[3]/span" + +# Table that lists all VM's under Instances page; General usage is to traverse through this table and search for the VM we are interested in. +instances_table_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + + +# Click any instance and following are available + +# Click ok on confirmation pop-up box for most actions listed below +actionconfirm_xpath = ("//button[@type='button']") + +# status of VM running. 
Click on VM > 3rd row in table +state_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[2]/div[2]/div/div/div[2]/div/table/tbody/tr[3]/td[2]/span" + +# Stop instance icon +stopinstance_css = "a[alt=\"Stop Instance\"] > span.icon" + +# stop instance forcefully check box available after stop instance is executed in separate pop up +stopinstanceforce_id = ("force_stop") + +# start instance icon +startinstance_css = "a[alt=\"Start Instance\"] > span.icon" + +yesconfirmation_xapth = "(//button[@type='button'])[2]" + + +# Destroy instance icon +destroyinstance_css = "a[alt=\"Destroy Instance\"] > span.icon" + +#Restore Instance icon +restoreinstance_css = "a[alt=\"Restore Instance\"] > span.icon" + +# Reboot instance +rebootinstance_css = "a[alt=\"Reboot Instance\"] > span.icon" + +################################################################################################################################################################################################ + + +## Network Page + +# Table that lists all Networks under Network page; General usage is to traverse through this table and search for the network we are interested in. 
+network_networktable_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/table/tbody/tr/td/span" + +# View IP addresses button on each network page +viewIp_css="div.view-all > a > span" + +# Acquire a new ip +acquireIP_xpath="//div[2]/div/div/div[2]/span" +# List of IP's within a netork table +network_iptables_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/table/tbody/tr/td/span" +# Configuration tab for each IP +ipConfiguration_text="Configuration" +# PF under configuration for each IP +ip_PF = "li.portForwarding > div.view-details" + + +################################################################################################################################################################################################ + + +## Servivce Offering Page + +# Selects Compute offering from drop down menu +Offering_compute_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[1]" + +# Selects System offering from drop down menu +Offering_system_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[2]" + +# Selects Disk offering from drop down menu +Offering_disk_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[3]" + +# Selects Network offering from drop down menu +Offering_network_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[4]" + +# Add Offering +Offering_add_xpath ="//div[3]/span" + +# Points to tbale that lists Offerings +Offering_table_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + +# Edit Button +Offering_edit_css = "a[alt=\"Edit\"] > span.icon" + +# Edit name box +Offering_editname_name = "name" + +# Edit description box +Offering_editdescription_name = "displaytext" + +# Edit finished click ok +Offering_editdone_css="div.button.done" + +# delete offering button for Disk only +Offering_delete_css = "a[alt=\"Delete Disk 
Offering\"] > span.icon" + +# delete offering button for Compute only +Offering_deletecompute_css = "a[alt=\"Delete Service Offering\"] > span.icon" + + + + +################################################################################################################################################################################################ + + +#### Templates Page + +# Selects Templates from drop down +template_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[1]" + +# Selects ISO from drop down +iso_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[2]" + +# Add Template +AddTemplate_xpath = "//div[3]/span" + +# Points to table where all templates are +template_table_xpath ="/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + +# Edit Template Button +template_edit_css = "a[alt=\"Edit\"] > span.icon" + +# Edit finished click OK +template_editdone_css = "div.button.done" + +# Delete Template button +template_delete_css = "a[alt=\"Delete Template\"] > span.icon" + + +################################################################################################################################################################################################ + + +## Login Page + +# Username box +login_username_css = "body.login > div.login > form > div.fields > div.field.username > input[name=\"username\"]" # Login>Username TextBox + +# Password Box +login_password_css = "body.login > div.login > form > div.fields > div.field.password > input[name=\"password\"]" # LoginPassword TextBox + +# Click ok to login +login_submit_css = "body.login > div.login > form > div.fields > input[type=\"submit\"]" # Login>Login Button (Submit button) + + diff --git a/test/selenium/lib/initialize.py b/test/selenium/lib/initialize.py new file mode 100644 index 00000000000..6da7166e9ac --- /dev/null +++ b/test/selenium/lib/initialize.py @@ -0,0 +1,15 @@ +''' +This will help pass 
webdriver (Browser instance) across our test cases. +''' + + + +from selenium import webdriver + +DRIVER = None + +def getOrCreateWebdriver(): + global DRIVER + DRIVER = DRIVER or webdriver.Firefox() + return DRIVER + diff --git a/test/selenium/smoke/Login_and_Accounts.py b/test/selenium/smoke/Login_and_Accounts.py new file mode 100644 index 00000000000..8ff17f466b9 --- /dev/null +++ b/test/selenium/smoke/Login_and_Accounts.py @@ -0,0 +1,236 @@ +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import Global_Locators +import initialize + + + +class login(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.base_url = "http://10.223.49.206:8080/" # Your management Server IP goes here + self.verificationErrors = [] + + + def test_login(self): + + # Here we will clear the test box for Username and Password and fill them with actual login data. 
+ # After that we will click Login (Submit button) + driver = self.driver + driver.maximize_window() + driver.get(self.base_url + "client/") + driver.find_element_by_css_selector(Global_Locators.login_username_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("admin") + driver.find_element_by_css_selector(Global_Locators.login_password_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password") + driver.find_element_by_css_selector(Global_Locators.login_submit_css).click() + time.sleep(5) + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + + +class logout(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.driver.implicitly_wait(100) + self.verificationErrors = [] + + + + def test_logout(self): + + # Here we will clear the test box for Username and Password and fill them with actual login data. 
+ # After that we will click Login (Submit button) + driver = self.driver + driver.find_element_by_xpath("//div[@id='navigation']/ul/li").click() + driver.find_element_by_css_selector("div.icon.options").click() + driver.find_element_by_link_text("Logout").click() + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + + +class login_test(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_logintest(self): + + # Here we will clear the test box for Username and Password and fill them with actual login data. + # After that we will click Login (Submit button) + driver = self.driver + driver.find_element_by_css_selector(Global_Locators.login_username_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("test") + driver.find_element_by_css_selector(Global_Locators.login_password_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password") + driver.find_element_by_css_selector(Global_Locators.login_submit_css).click() + time.sleep(5) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + +################################################################################################################################################ + + +class createAcc(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + 
self.verificationErrors = [] + + + + def test_createacc(self): + + driver = self.driver + self.driver.implicitly_wait(100) + driver.find_element_by_xpath("//div[@id='navigation']/ul/li[8]/span[2]").click() + driver.find_element_by_xpath("//div[3]/span").click() + driver.find_element_by_id("label_username").clear() + driver.find_element_by_id("label_username").send_keys("test") + driver.find_element_by_id("password").clear() + driver.find_element_by_id("password").send_keys("password") + driver.find_element_by_id("label_confirm_password").clear() + driver.find_element_by_id("label_confirm_password").send_keys("password") + driver.find_element_by_id("label_email").clear() + driver.find_element_by_id("label_email").send_keys("test@citrix.com") + driver.find_element_by_id("label_first_name").clear() + driver.find_element_by_id("label_first_name").send_keys("test") + driver.find_element_by_id("label_last_name").clear() + driver.find_element_by_id("label_last_name").send_keys("test") + driver.find_element_by_id("label_domain").click() + Select(driver.find_element_by_id("label_type")).select_by_visible_text("Admin") + Select(driver.find_element_by_id("label_timezone")).select_by_visible_text("[UTC-08:00] Pacific Standard Time") + driver.find_element_by_xpath("//button[@type='button']").click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + +class tearAcc(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_tearacc(self): + + 
driver = self.driver + driver.find_element_by_css_selector("li.navigation-item.accounts").click() + driver.find_element_by_css_selector("tr.odd > td.name.first").click() + driver.find_element_by_css_selector("a[alt=\"Delete account\"] > span.icon").click() + driver.find_element_by_xpath("(//button[@type='button'])[2]").click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + + self.driver.quit() + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ \ No newline at end of file diff --git a/test/selenium/smoke/Service_Offering.py b/test/selenium/smoke/Service_Offering.py new file mode 100644 index 00000000000..fa9b449e703 --- /dev/null +++ b/test/selenium/smoke/Service_Offering.py @@ -0,0 +1,409 @@ +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + + +class Disk_offering_Add(unittest.TestCase): + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_diskadd(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + 
driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + # Add offering + driver.find_element_by_xpath(Global_Locators.Offering_add_xpath).click() + + # Following have names.. so they do not have their global entries. + driver.find_element_by_name("name").clear() + driver.find_element_by_name("name").send_keys("Test Disk Name") + driver.find_element_by_name("description").clear() + driver.find_element_by_name("description").send_keys("Test Disk Description") + driver.find_element_by_name("disksize").clear() + driver.find_element_by_name("disksize").send_keys("1") + driver.find_element_by_xpath("//button[@type='button']").click() + time.sleep(20) + + ##Verification will be if this offering shows up into table and we can actually edit it. + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + +class Disk_offering_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_diskedit(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Disk Name": + link.click() + + time.sleep(2) + + # Click Edit + 
driver.find_element_by_css_selector(Global_Locators.Offering_edit_css).click() + + #Change name + driver.find_element_by_name(Global_Locators.Offering_editname_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editname_name).send_keys("Test Name") + + # Change Description + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).send_keys("Test Description") + + #Click Done + driver.find_element_by_css_selector(Global_Locators.Offering_editdone_css).click() + time.sleep(10) + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + # Now we will find this offering and delete it!! + + + + + + +class Disk_offering_Delete(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_diskdelete(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + ## Action part + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Name": + link.click() + + time.sleep(2) + + # Click Delete + driver.find_element_by_css_selector(Global_Locators.Offering_delete_css).click() + time.sleep(2) + 
driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(20) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + + +class Compute_offering_Add(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computeadd(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + ## Action part + + # Add offering + driver.find_element_by_xpath(Global_Locators.Offering_add_xpath).click() + + # Following do not have Global locators + driver.find_element_by_id("label_name").clear() + driver.find_element_by_id("label_name").send_keys("Test Compute Name") + driver.find_element_by_id("label_description").clear() + driver.find_element_by_id("label_description").send_keys("Test Compute Description") + driver.find_element_by_id("label_num_cpu_cores").clear() + driver.find_element_by_id("label_num_cpu_cores").send_keys("2") + driver.find_element_by_id("label_cpu_mhz").clear() + driver.find_element_by_id("label_cpu_mhz").send_keys("2000") + driver.find_element_by_id("label_memory_mb").clear() + driver.find_element_by_id("label_memory_mb").send_keys("2048") + driver.find_element_by_id("label_network_rate").clear() + driver.find_element_by_id("label_network_rate").send_keys("10") + driver.find_element_by_id("label_offer_ha").click() + driver.find_element_by_xpath("//button[@type='button']").click() + + time.sleep(2) + + 
#Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + +class Compute_offering_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computeedit(self): + + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + ## Action part + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Compute Name": + link.click() + + time.sleep(2) + + + # Click Edit + driver.find_element_by_css_selector(Global_Locators.Offering_edit_css).click() + + #Change name + driver.find_element_by_name(Global_Locators.Offering_editname_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editname_name).send_keys("Test Name") + + # Change Description + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).send_keys("Test Description") + + #Click Done + driver.find_element_by_css_selector(Global_Locators.Offering_editdone_css).click() + time.sleep(10) + + + + + def is_element_present(self, how, what): + + try: 
self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + + +class Compute_offering_Delete(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computedelete(self): + + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + ## Action part + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Name": + link.click() + + time.sleep(2) + + # Click Delete + + driver.find_element_by_css_selector(Global_Locators.Offering_deletecompute_css).click() + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + + time.sleep(20) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) \ No newline at end of file diff --git a/test/selenium/smoke/TemplatesAndISO.py b/test/selenium/smoke/TemplatesAndISO.py new file mode 100644 index 00000000000..3bb6f5bf699 --- /dev/null +++ b/test/selenium/smoke/TemplatesAndISO.py @@ -0,0 +1,227 @@ +''' +ISO PART YET TO BE ADDED:: remove this after adding it. 
+''' + +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + + +class Template_Add(unittest.TestCase): + + + + def setUp(self): + + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_templateadd(self): + + + driver = self.driver + + ## Action part + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Templates + driver.find_element_by_xpath(Global_Locators.templates_xpath).click() + + #Select Template from drop down list + driver.find_element_by_xpath(Global_Locators.template_xpath).click() + + # Add Template + driver.find_element_by_xpath(Global_Locators.AddTemplate_xpath).click() + + # Following have names.. so they do not have their global entries. 
+ driver.find_element_by_id("label_name").clear() + driver.find_element_by_id("label_name").send_keys("Test Template Ubuntu") + driver.find_element_by_id("label_description").clear() + driver.find_element_by_id("label_description").send_keys("Ubuntu 10.04") + driver.find_element_by_id("URL").clear() + driver.find_element_by_id("URL").send_keys("http://nfs1.lab.vmops.com/templates/Ubuntu/Ubuntuu-10-04-64bit-server.vhd") + Select(driver.find_element_by_id("label_os_type")).select_by_visible_text("Ubuntu 10.04 (64-bit)") + driver.find_element_by_id("label_public").click() + driver.find_element_by_id("label_featured").click() + driver.find_element_by_xpath("//button[@type='button']").click() + + time.sleep(2) + + # Go to Dash Board + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + + time.sleep(600) + + ##Verification will be if this offering shows up into table and we can actually edit it. + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + +class Template_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_templateedit(self): + + driver = self.driver + + ## Action part + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Templates + driver.find_element_by_xpath(Global_Locators.templates_xpath).click() + + #Select Template from drop down list + driver.find_element_by_xpath(Global_Locators.template_xpath).click() + + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list + + for link in linkclass: + + if link.text == "Test Template Ubuntu": # We will search for our VM in this table + link.click() + 
+        time.sleep(2)
+
+        # Change name
+        driver.find_element_by_name("name").clear()
+        driver.find_element_by_name("name").send_keys("Test template")
+
+
+        # Change Description
+        driver.find_element_by_name("displaytext").clear()
+        driver.find_element_by_name("displaytext").send_keys("ubuntu")
+
+        driver.find_element_by_css_selector(Global_Locators.template_editdone_css).click()
+        time.sleep(2)
+
+        #Dashboard
+        driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
+        time.sleep(10)
+
+
+
+    def is_element_present(self, how, what):
+
+        try: self.driver.find_element(by=how, value=what)
+        except NoSuchElementException, e: return False
+        return True
+
+
+
+    def tearDown(self):
+
+        self.assertEqual([], self.verificationErrors)
+
+
+# Now we will find this offering and delete it!!
+
+
+
+
+
+
+class Template_Delete(unittest.TestCase):
+
+
+    def setUp(self):
+
+        self.driver = initialize.getOrCreateWebdriver()
+        self.verificationErrors = []
+
+
+
+    def test_templatedelete(self):
+
+        driver = self.driver
+
+        ## Action part
+        #Make sure you are on Dashboard
+        driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
+        time.sleep(2)
+
+        # Go to Templates
+        driver.find_element_by_xpath(Global_Locators.templates_xpath).click()
+
+        #Select Template from drop down list
+        driver.find_element_by_xpath(Global_Locators.template_xpath).click()
+
+        linkclass = None
+        linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list
+
+        for link in linkclass:
+
+            if link.text == "Test template": # We will search for our VM in this table
+                link.click()
+
+        time.sleep(2)
+
+        driver.find_element_by_css_selector(Global_Locators.template_delete_css).click()
+        driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click()
+
+        time.sleep(2)
+
+        #Dashboard
+        driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click()
+
+        time.sleep(20)
+
+
+
+    def is_element_present(self, how, what):
+
+        try: 
self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) \ No newline at end of file diff --git a/test/selenium/smoke/VM_lifeCycle.py b/test/selenium/smoke/VM_lifeCycle.py new file mode 100644 index 00000000000..cd17f86332e --- /dev/null +++ b/test/selenium/smoke/VM_lifeCycle.py @@ -0,0 +1,596 @@ +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + +class deployVM(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_deployvm(self): + + + ## Action Part + # VM will be named Auto-VM and this VM will be used in all subsequent tests. + # Deploy an Instance named Auto-VM Default CentOS no GUI Template + + driver = self.driver + self.driver.implicitly_wait(30) + driver.refresh() ## Most Important step. Failure to do this will change XPATH location and Scripts will fail. + + + # Click on Instances link + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + # Click on add Instance on Instances page + driver.find_element_by_xpath(Global_Locators.add_instance_xpath).click() + + # Following select template action will fire automatically... ignore it. And leave following commented. + # driver.find_element_by_xpath("(//input[@name='select-template'])[3]").click() + #Click on Next button on Instances Wizard. + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here as we will be using all default settings. (Default CentOS no GUI template should be highlighted here. 
Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here. Medium Instance compute offering should be selected here. Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here. Data Disk Offering : No Thanks!!. Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Since this is our first instance; we must provide a network name. We will use Test-Network as out network name. + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").click() + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").clear() + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").send_keys("Test-Network") + + #Click next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Give our VM a name here. Use Auto-VM as name + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").click() + + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").clear() + + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").send_keys("Auto-VM") + + # All data filled. Click Launch VM. (It has the same xpath as Next button. So we will use Next Variable here. + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + print '\n' + '\n' + "VM Deployment is complete... wait for 5 mins to check deployment status" + '\n' + '\n' + + + + ## Verification Part + + + ## Now we must wait for some random time (Educated guess based on experience) and check if VM has been deployed and if it is in running state. + ## Should take about 4 min to deploy VM.. but we will wait 5 mins and check the status , we will do this twice. So total 2 check within 10 mins with first check occuring at 5th min. + + + driver.refresh() # Refresh UI Page; This polls latest status. 
+ + # Click on Instances link + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + count = 1 + + while (count > 0): + + time.sleep(300) + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Running" : + print "VM is in running state... continuing with other tests."+ '\n' + '\n' + break + else: + print "Need to check one more time after 5 mins" + continue + count = count - 1 + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + +################################################################################################################################################################################################ + + + +class destroyVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_destroyvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + # Click on Destroy Instance button and confirm + time.sleep(2) + 
driver.find_element_by_css_selector(Global_Locators.destroyinstance_css).click() + time.sleep(2) + + # Click ok on confirmation + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + # driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + ## Verification part + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + if status == "Destroyed" : + print "VM is Destroyed...."+ '\n' + '\n' + else: + print "Something went wrong" + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + +################################################################################################################################################################################################ + + + + +class rebootVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_rebootvm(self): + + driver = self.driver + self.driver.implicitly_wait(30) + print "Verify this test manually for now" + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(vmLifeAndNetwork.Server_Ip, username='root', password='password') + print '\n' + '\n' + "Before Reboot ...Executing command date ... 
" + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('date') + print stdout.readlines() + print '\n' + '\n' + "Before Reboot ...Executing command last reboot | head -1 ..." + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('last reboot | head -1') + print '\n' + '\n' + "Before Reboot ...Executing command uptime..." + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('uptime') + print stdout.readlines() + ssh.close() + + + driver.refresh() + + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + count = 1 + + while (count > 0): + + #time.sleep(300) + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. Rebooting now..." + '\n' + '\n' + link.click() + + driver.find_element_by_css_selector(Global_Locators.rebootinstance_css).click() + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + + # Sleep for 5 mins to ensure system gets rebooted. + time.sleep(300) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(vmLifeAndNetwork.Server_Ip, username='root', password='password') + print '\n' + '\n' + "After Reboot ...Executing command date ... " + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('date') + print stdout.readlines() + print '\n' + '\n' + "After Reboot ...Executing command last reboot | head -1 ..." + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('last reboot | head -1') + print '\n' + '\n' + "After Reboot ...Executing command uptime..." 
+ '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('uptime') + print stdout.readlines() + ssh.close() + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + +######################################################################################################################################################### + + + +class restoreVM(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_restorevm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + + link.click() + + # Click on Destroy Instance button and confirm + driver.find_element_by_css_selector(Global_Locators.restoreinstance_css).click() + + # Click ok on confirmation + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + + ## Verification part + + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = 
driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Stopped" : + print "VM is Restored. but in stopped state.. will start now."+ '\n' + '\n' + + else: + print "Something went wrong" + + + + + #VM will be in stop state so we must start it now + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + # Click on Start Instance. + driver.find_element_by_css_selector(Global_Locators.startinstance_css).click() + time.sleep(2) + + # Dismiss confirmation by clicking Yes + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + print "VM is Started."+ '\n' + '\n' + + # status = None + time.sleep(60) + + # Dismiss the Start Instance information box. + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +######################################################################################################################################################### + + + +class startVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_startvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + #driver.refresh() ## Most Important step. 
Failure to do this will change XPATH location and Scripts will fail. + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + + + # Click on Start Instance. + driver.find_element_by_css_selector(Global_Locators.startinstance_css).click() + time.sleep(2) + + # Dismiss confirmation by clicking Yes + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + #driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + + ## Verification part + # status = None + time.sleep(60) + + # Dismiss the Start Instance information box. 
+ driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Running" : + print "VM is in Running state..."+ '\n' + '\n' + + else: + print "Something went wrong" + + # Go to Dashboard + driver.refresh() + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + +######################################################################################################################################################### + + + +class stopVM(unittest.TestCase): + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_stopvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + driver.refresh() ## Important step. + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + + # HWe are on our VM information page. 
+ driver.find_element_by_css_selector(Global_Locators.stopinstance_css).click() + time.sleep(2) + + # a Pop up must appear; below we will check the force stop check box and then we will click ok. + driver.find_element_by_id(Global_Locators.stopinstanceforce_id).click() + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + # Go to Dahsboard + #driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + # Should take less than min to stop the instance. We will check twice at interval of 45 seconds o be safe. + ## Verification part + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Stopped" : + print "VM is in Stopped state...."+ '\n' + '\n' + else: + print "Something went wrong" + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + +######################################################################################################################################################### \ No newline at end of file diff --git a/test/selenium/smoke/main.py b/test/selenium/smoke/main.py new file mode 100644 index 00000000000..d7835317496 --- /dev/null +++ b/test/selenium/smoke/main.py @@ -0,0 +1,128 @@ +import unittest +import HTMLTestRunner +import xmlrunner + + +global DRIVER + + +# Import test cases + 
+################################## +from Login_and_Accounts import * +from Service_Offering import * + +from TemplatesAndISO import * +from VM_lifeCycle import * + +################################### + + +# Following are BVT Tests +# serialize the test cases + + +suite = unittest.TestSuite() # setup new test suite + + +#################################################################################################### + +# Following logs admin user in and creates test account then logs admin user out and logs in as test to run tests. +# You should leave this as is for all the tests. + +suite.addTest(unittest.makeSuite(login)) #Login Admin + +time.sleep(5) +suite.addTest(unittest.makeSuite(createAcc)) # Create an Account test. We will use test account for all our tests + +time.sleep(5) +suite.addTest(unittest.makeSuite(logout)) #Logout Admin + +time.sleep(5) +suite.addTest(unittest.makeSuite(login_test)) # Login Test + + + +#################################################################################################### + + + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Add)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Edit)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Delete)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Add)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Edit)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Delete)) + + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(deployVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(stopVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(startVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(destroyVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(restoreVM)) + + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(Template_Add)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(Template_Edit)) + +# time.sleep(5) +# 
suite.addTest(unittest.makeSuite(Template_Delete)) + + +#################################################################################################### + +# Following logs test user out and logs back in as Admin and tears down the test account. +# You should leave this as is for all the tests. + +suite.addTest(unittest.makeSuite(logout)) #Logout test +time.sleep(5) +suite.addTest(unittest.makeSuite(login)) #Login Admin +time.sleep(5) +suite.addTest(unittest.makeSuite(tearAcc)) # Delete Account test + +#################################################################################################### + + + +# If XML reports compatible with junit's XML output are desired then leave folowing code as is. +# If HTML reports are desired follow instructions + + +#Comment following line for HTML and uncomment for XML +runner = xmlrunner.XMLTestRunner(output='test-reports') + +#Comment following line for XML and uncomment for HTML +#runner = HTMLTestRunner.HTMLTestRunner() + +#header is required for displaying the website +#Comment following line for XML and uncomment for HTML +#print "Content-Type: text/html\n" + +# Leave following as is for either XML or HTML +runner.run(suite) + + + From 16f51227c690956949a9e169b594136e7c5009f4 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 21 Feb 2013 17:19:32 +0530 Subject: [PATCH 107/486] client: Fix web.xml string processing based on profile, oss or nonoss Signed-off-by: Rohit Yadav --- client/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/pom.xml b/client/pom.xml index be37cc10f7e..71fe413b56d 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -371,8 +371,8 @@ test + match="classpath:componentContext.xml" + replace="classpath:nonossComponentContext.xml" byline="true" /> From d81209210c565e19beb576be865da6fa421d0935 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 21 Feb 2013 18:13:57 +0530 Subject: [PATCH 108/486] client: Add baremetal and ucs as dependencies Signed-off-by: 
Rohit Yadav --- client/pom.xml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/client/pom.xml b/client/pom.xml index 71fe413b56d..178ddc6d128 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -90,6 +90,16 @@ cloud-plugin-hypervisor-ovm ${project.version} + + org.apache.cloudstack + cloud-plugin-hypervisor-baremetal + ${project.version} + + + org.apache.cloudstack + cloud-plugin-hypervisor-ucs + ${project.version} + org.apache.cloudstack cloud-plugin-hypervisor-kvm From c9f0af42a6206ddf21f760469e21ab6dac5feddd Mon Sep 17 00:00:00 2001 From: Min Chen Date: Thu, 21 Feb 2013 10:22:14 -0800 Subject: [PATCH 109/486] Fix systemvm.iso path search issue. --- .../vmware/manager/VmwareManagerImpl.java | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 21cd914689d..64dbea18495 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -584,10 +584,15 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw private File getSystemVMPatchIsoFile() { // locate systemvm.iso - URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); - File file = new File(url.getFile()); - File isoFile = new File(file.getParent() + "/vms/systemvm.iso"); - if (!isoFile.exists()) { + //URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); + //File file = new File(url.getFile()); + //File isoFile = new File(file.getParent() + "/vms/systemvm.iso"); + URL url = this.getClass().getClassLoader().getResource("vms/systemvm.iso"); + File isoFile = null; + if (url != null) { + isoFile = new File(url.getPath()); + } + if (isoFile == null || !isoFile.exists()) { 
isoFile = new File("/usr/lib64/cloud/common/" + "/vms/systemvm.iso"); if (!isoFile.exists()) { isoFile = new File("/usr/lib/cloud/common/" + "/vms/systemvm.iso"); @@ -598,11 +603,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Override public File getSystemVMKeyFile() { - URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); - File file = new File(url.getFile()); - - File keyFile = new File(file.getParent(), "/scripts/vm/systemvm/id_rsa.cloud"); - if (!keyFile.exists()) { + // URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); + // File file = new File(url.getFile()); + URL url = this.getClass().getClassLoader().getResource("scripts/vm/systemvm/id_rsa.cloud"); + File keyFile = null; + if ( url != null ){ + keyFile = new File(url.getPath()); + } + if (keyFile == null || !keyFile.exists()) { keyFile = new File("/usr/lib64/cloud/common" + "/scripts/vm/systemvm/id_rsa.cloud"); if (!keyFile.exists()) { keyFile = new File("/usr/lib/cloud/common" + "/scripts/vm/systemvm/id_rsa.cloud"); From a22403edcd656562e2e71613fd11cf8e69554be8 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 30 Jan 2013 19:13:34 -0800 Subject: [PATCH 110/486] squash changes into one giant patch --- .../api/CreateVolumeFromSnapshotCommand.java | 1 + api/src/com/cloud/storage/StoragePool.java | 9 +- .../com/cloud/storage/StoragePoolStatus.java | 9 +- api/src/com/cloud/storage/StorageService.java | 53 - api/src/com/cloud/storage/Volume.java | 17 +- .../com/cloud/storage/VolumeApiService.java | 80 + .../com/cloud/template/TemplateService.java | 10 + api/src/com/cloud/vm/UserVmService.java | 41 - .../apache/cloudstack/api/ApiConstants.java | 1 + .../org/apache/cloudstack/api/BaseCmd.java | 3 +- .../admin/storage/CreateStoragePoolCmd.java | 17 + .../api/command/user/iso/RegisterIsoCmd.java | 8 + .../user/template/CreateTemplateCmd.java | 4 +- .../user/template/RegisterTemplateCmd.java | 10 +- 
.../command/user/volume/AttachVolumeCmd.java | 2 +- .../command/user/volume/CreateVolumeCmd.java | 4 +- .../command/user/volume/DeleteVolumeCmd.java | 2 +- .../command/user/volume/DetachVolumeCmd.java | 2 +- .../command/user/volume/MigrateVolumeCmd.java | 2 +- .../command/user/volume/ResizeVolumeCmd.java | 2 +- .../command/user/volume/UploadVolumeCmd.java | 10 +- .../agent/test/BackupSnapshotCommandTest.java | 6 +- .../api/agent/test/SnapshotCommandTest.java | 12 +- .../api/test/ResizeVolumeCommandTest.java | 6 +- client/pom.xml | 3 +- core/pom.xml | 6 +- .../cloud/storage/StoragePoolDiscoverer.java | 2 + core/src/com/cloud/storage/StoragePoolVO.java | 346 -- .../com/cloud/storage/VMTemplateHostVO.java | 41 +- .../storage/VMTemplateStoragePoolVO.java | 43 +- core/src/com/cloud/storage/VMTemplateVO.java | 70 +- core/src/com/cloud/storage/VolumeHostVO.java | 41 +- core/src/com/cloud/storage/VolumeVO.java | 8 +- engine/api/pom.xml | 2 +- .../subsystem/api/storage/DataObject.java | 1 + .../api/storage/DataObjectInStore.java} | 27 +- .../subsystem/api/storage/DataStore.java | 3 + .../api/storage/DataStoreLifeCycle.java | 14 +- .../api/storage}/DataStoreManager.java | 8 +- .../api/storage}/DataStoreProvider.java | 4 +- .../storage}/DataStoreProviderManager.java | 3 +- .../api/storage}/DataStoreStatus.java | 2 +- .../api/storage/HypervisorHostListener.java | 24 + .../api/storage}/ImageDataFactory.java | 5 +- .../subsystem/api/storage}/ImageService.java | 7 +- .../ObjectInDataStoreStateMachine.java | 3 +- .../api/storage/PrimaryDataStoreInfo.java | 16 +- .../api/storage}/SnapshotDataFactory.java | 5 +- .../subsystem/api/storage}/SnapshotInfo.java | 4 +- .../subsystem/api/storage}/TemplateEvent.java | 2 +- .../subsystem/api/storage}/TemplateInfo.java | 7 +- .../subsystem/api/storage}/TemplateState.java | 2 +- .../api/storage}/VolumeDataFactory.java | 6 +- .../subsystem/api/storage/VolumeInfo.java | 6 +- .../subsystem/api/storage}/VolumeService.java | 39 +- 
.../datastore/db/DataStoreProviderDao.java | 0 .../db/DataStoreProviderDaoImpl.java | 0 .../datastore/db/DataStoreProviderVO.java | 0 .../datastore/db/PrimaryDataStoreDao.java | 32 +- .../datastore/db/PrimaryDataStoreDaoImpl.java | 82 +- .../db/PrimaryDataStoreDetailVO.java | 0 .../db/PrimaryDataStoreDetailsDao.java | 1 - .../db/PrimaryDataStoreDetailsDaoImpl.java | 0 .../storage/datastore/db/StoragePoolVO.java} | 105 +- .../cloud/entity/api/VMEntityManagerImpl.java | 13 +- .../storage/backup/BackupService.java | 2 +- .../storage/image/ImageDataFactoryImpl.java | 56 +- .../storage/image/ImageServiceImpl.java | 95 +- .../image/downloader/ImageDownloader.java | 2 +- .../AncientImageDataStoreDriverImpl.java | 187 + .../DefaultImageDataStoreDriverImpl.java | 6 +- .../image/manager/ImageDataManager.java | 8 +- .../image/manager/ImageDataManagerImpl.java | 12 +- .../manager/ImageDataStoreManagerImpl.java | 38 +- .../store/AncientImageDataStoreProvider.java | 92 + ...pl.java => DefaultImageDataStoreImpl.java} | 53 +- .../store/DefaultImageDataStoreProvider.java | 2 - .../storage/image/store/TemplateObject.java | 82 +- .../DefaultImageDataStoreLifeCycle.java | 23 +- .../motion/DefaultImageMotionStrategy.java | 9 +- .../image/motion/ImageMotionServiceImpl.java | 12 +- .../storage/test/ChildTestConfiguration.java | 58 +- .../test/MockStorageMotionStrategy.java | 42 + .../storage/test/volumeServiceTest.java | 65 +- .../test/resource/component.xml | 201 + .../test/resource/storageContext.xml | 1 + .../snapshot/SnapshotDataFactoryImpl.java | 22 +- .../storage/snapshot/SnapshotObject.java | 8 + .../storage/snapshot/SnapshotServiceImpl.java | 1 + .../strategy/HypervisorBasedSnapshot.java | 2 +- .../strategy/StorageBasedSnapshot.java | 2 +- .../datastore/DataObjectManagerImpl.java | 76 +- .../datastore/DataStoreManagerImpl.java | 20 + .../datastore/ObjectInDataStoreManager.java | 18 +- .../ObjectInDataStoreManagerImpl.java | 185 +- .../storage/datastore/PrimaryDataStore.java | 25 
+- .../datastore/PrimaryDataStoreEntityImpl.java | 23 +- .../PrimaryDataStoreProviderManager.java | 3 + .../DataStoreProviderManagerImpl.java | 30 +- .../provider/ImageDataStoreProvider.java | 2 + .../provider/PrimaryDataStoreProvider.java | 2 + .../storage/db/ObjectInDataStoreDao.java | 6 +- .../storage/db/ObjectInDataStoreDaoImpl.java | 11 +- .../storage/db/ObjectInDataStoreVO.java | 39 +- .../storage/image/TemplateEntityImpl.java | 1 + .../image/datastore/ImageDataStore.java | 4 +- .../image/datastore/ImageDataStoreHelper.java | 16 +- .../datastore/ImageDataStoreManager.java | 5 + .../storage/image/db/ImageDataDao.java | 85 - .../storage/image/db/ImageDataDaoImpl.java | 975 ----- .../storage/image/db/ImageDataVO.java | 450 -- .../image/motion/ImageMotionService.java | 2 +- .../motion/AncientDataMotionStrategy.java | 581 +++ .../storage/snapshot/SnapshotService.java | 1 + .../storage/snapshot/SnapshotStrategy.java | 2 + .../storage/to/PrimaryDataStoreTO.java | 4 +- .../cloudstack/storage/to/TemplateTO.java | 2 +- .../volume/PrimaryDataStoreDriver.java | 2 +- .../TemplateOnPrimaryDataStoreInfo.java | 2 +- .../datastore/PrimaryDataStoreHelper.java | 28 +- .../db/TemplatePrimaryDataStoreDao.java | 2 +- .../db/TemplatePrimaryDataStoreDaoImpl.java | 6 +- .../volume/db/TemplatePrimaryDataStoreVO.java | 4 +- .../datastore/DefaultPrimaryDataStore.java | 189 +- .../AncientPrimaryDataStoreDriverImpl.java | 289 ++ .../DefaultPrimaryDataStoreDriverImpl.java | 2 +- .../AncientPrimaryDataStoreLifeCyclImpl.java | 952 +++++ .../DefaultPrimaryDataStoreLifeCycleImpl.java | 31 +- ...ltPrimaryDataStoreProviderManagerImpl.java | 34 +- .../storage/datastore/manager/data model.ucls | 32 +- .../AncientPrimaryDataStoreProviderImpl.java | 75 + .../provider/DefaultHostListener.java | 90 + .../DefaultPrimaryDatastoreProviderImpl.java | 4 + .../volume/TemplateInstallStrategy.java | 2 +- .../volume/TemplateInstallStrategyImpl.java | 33 +- .../storage/volume/VolumeDataFactoryImpl.java | 42 +- 
.../storage/volume/VolumeEntityImpl.java | 11 +- .../storage/volume/VolumeManagerImpl.java | 112 - .../storage/volume/VolumeObject.java | 187 +- .../storage/volume/VolumeServiceImpl.java | 270 +- framework/api/pom.xml | 42 + .../framework/async/AsyncCallFuture.java | 0 .../async/AsyncCompletionCallback.java | 0 framework/pom.xml | 1 + .../vmware/VmwareServerDiscoverer.java | 2 +- .../xen/discoverer/XcpServerDiscoverer.java | 2 +- .../resource/XenServerStorageResource.java | 5 + .../allocator/RandomStoragePoolAllocator.java | 5 +- .../SolidfirePrimaryDataStoreDriver.java | 2 +- .../src/com/cloud/alert/AlertManagerImpl.java | 6 +- server/src/com/cloud/api/ApiDBUtils.java | 15 +- .../src/com/cloud/api/ApiResponseHelper.java | 2 + .../api/commands/GetUsageRecordsCmd.java | 372 ++ .../baremetal/BareMetalTemplateAdapter.java | 4 +- .../baremetal/BareMetalVmManagerImpl.java | 24 +- .../com/cloud/capacity/CapacityManager.java | 3 +- .../cloud/capacity/CapacityManagerImpl.java | 29 +- .../cloud/capacity/dao/CapacityDaoImpl.java | 2 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 16 +- .../src/com/cloud/deploy/FirstFitPlanner.java | 9 +- .../cloud/ha/HighAvailabilityManagerImpl.java | 6 +- .../cloud/resource/ResourceManagerImpl.java | 20 +- .../com/cloud/server/ManagementServer.java | 3 +- .../cloud/server/ManagementServerImpl.java | 26 +- .../src/com/cloud/server/StatsCollector.java | 11 +- .../storage/LocalStoragePoolListener.java | 66 +- .../com/cloud/storage/OCFS2ManagerImpl.java | 1 + .../cloud/storage/RegisterVolumePayload.java | 43 + .../src/com/cloud/storage/StorageManager.java | 175 +- .../com/cloud/storage/StorageManagerImpl.java | 3695 ++++------------- .../com/cloud/storage/TemplateProfile.java | 11 +- .../src/com/cloud/storage/VolumeManager.java | 99 + .../com/cloud/storage/VolumeManagerImpl.java | 2620 ++++++++++++ .../AbstractStoragePoolAllocator.java | 14 +- .../FirstFitStoragePoolAllocator.java | 9 +- .../allocator/LocalStoragePoolAllocator.java | 5 +- 
.../storage/dao/LaunchPermissionDao.java | 1 + .../com/cloud/storage/dao/StoragePoolDao.java | 3 +- .../cloud/storage/dao/StoragePoolDaoImpl.java | 2 +- .../com/cloud/storage/dao/VMTemplateDao.java | 6 +- .../cloud/storage/dao/VMTemplateDaoImpl.java | 59 +- .../cloud/storage/dao/VMTemplateHostDao.java | 8 +- .../storage/dao/VMTemplateHostDaoImpl.java | 57 + .../cloud/storage/dao/VMTemplatePoolDao.java | 7 +- .../storage/dao/VMTemplatePoolDaoImpl.java | 49 + .../com/cloud/storage/dao/VolumeDaoImpl.java | 3 +- .../com/cloud/storage/dao/VolumeHostDao.java | 6 +- .../cloud/storage/dao/VolumeHostDaoImpl.java | 56 +- .../storage/download/DownloadListener.java | 2 +- .../storage/download/DownloadMonitor.java | 1 + .../storage/download/DownloadMonitorImpl.java | 53 +- .../storage/listener/StoragePoolMonitor.java | 14 +- .../DummySecondaryStorageResource.java | 2 +- .../src/com/cloud/storage/s3/S3Manager.java | 1 + .../com/cloud/storage/s3/S3ManagerImpl.java | 2 +- .../SecondaryStorageManagerImpl.java | 2 +- .../storage/snapshot/SnapshotManagerImpl.java | 89 +- .../cloud/storage/upload/UploadMonitor.java | 1 + .../cloud/tags/dao/ResourceTagsDaoImpl.java | 1 + .../template/HyervisorTemplateAdapter.java | 33 +- .../com/cloud/template/TemplateAdapter.java | 3 +- .../cloud/template/TemplateAdapterBase.java | 35 +- .../com/cloud/template/TemplateManager.java | 28 +- .../cloud/template/TemplateManagerImpl.java | 703 +++- .../com/cloud/user/AccountManagerImpl.java | 5 +- server/src/com/cloud/vm/UserVmManager.java | 13 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 1118 +---- .../com/cloud/vm/VirtualMachineManager.java | 1 + .../cloud/vm/VirtualMachineManagerImpl.java | 27 +- .../cloud/vm/VirtualMachineProfileImpl.java | 1 + .../com/cloud/vm/MockUserVmManagerImpl.java | 50 +- setup/db/templates.sql | 32 +- 212 files changed, 9639 insertions(+), 7522 deletions(-) create mode 100644 api/src/com/cloud/storage/VolumeApiService.java delete mode 100644 
core/src/com/cloud/storage/StoragePoolVO.java rename engine/{storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java => api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java} (51%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/DataStoreManager.java (79%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore/provider => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/DataStoreProvider.java (83%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore/provider => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/DataStoreProviderManager.java (90%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/DataStoreStatus.java (94%) create mode 100644 engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java rename engine/{storage/src/org/apache/cloudstack/storage/image => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/ImageDataFactory.java (84%) rename engine/{storage/src/org/apache/cloudstack/storage/image => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/ImageService.java (77%) rename engine/{storage/src/org/apache/cloudstack/storage/volume => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/ObjectInDataStoreStateMachine.java (95%) rename engine/{storage/src/org/apache/cloudstack/storage/snapshot => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/SnapshotDataFactory.java (83%) rename engine/{storage/src/org/apache/cloudstack/storage/snapshot => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/SnapshotInfo.java (83%) rename engine/{storage/src/org/apache/cloudstack/storage/image => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/TemplateEvent.java (93%) rename engine/{storage/src/org/apache/cloudstack/storage/image 
=> api/src/org/apache/cloudstack/engine/subsystem/api/storage}/TemplateInfo.java (78%) rename engine/{storage/src/org/apache/cloudstack/storage/image => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/TemplateState.java (93%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/VolumeDataFactory.java (82%) rename engine/{storage/src/org/apache/cloudstack/storage/volume => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/VolumeService.java (63%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java (100%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java (100%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java (100%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java (65%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java (77%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java (100%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java (95%) rename engine/{storage => api}/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java (100%) rename engine/{storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java => api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java} (64%) create mode 100644 engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java create mode 100644 engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java rename engine/storage/image/src/org/apache/cloudstack/storage/image/store/{HttpDataStoreImpl.java => 
DefaultImageDataStoreImpl.java} (79%) create mode 100644 engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java create mode 100644 engine/storage/integration-test/test/resource/component.xml delete mode 100644 engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java delete mode 100644 engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java delete mode 100644 engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java create mode 100644 engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java create mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java create mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java create mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java create mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java delete mode 100644 engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java create mode 100644 framework/api/pom.xml rename framework/{ipc => api}/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java (100%) rename framework/{ipc => api}/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java (100%) create mode 100644 server/src/com/cloud/api/commands/GetUsageRecordsCmd.java create mode 100644 server/src/com/cloud/storage/RegisterVolumePayload.java create mode 100644 server/src/com/cloud/storage/VolumeManager.java create mode 100644 server/src/com/cloud/storage/VolumeManagerImpl.java diff --git a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java index a19d34436f7..fbf6121f543 100644 --- 
a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java +++ b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java @@ -18,6 +18,7 @@ package com.cloud.agent.api; import com.cloud.storage.StoragePool; + /** * This currently assumes that both primary and secondary storage are mounted on the XenServer. */ diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java index f517927eac1..091eef182cc 100644 --- a/api/src/com/cloud/storage/StoragePool.java +++ b/api/src/com/cloud/storage/StoragePool.java @@ -99,10 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity { /** * @return */ - String getStorageProvider(); - - /** - * @return - */ - String getStorageType(); + Long getStorageProviderId(); + + boolean isInMaintenance(); } diff --git a/api/src/com/cloud/storage/StoragePoolStatus.java b/api/src/com/cloud/storage/StoragePoolStatus.java index 94dd686a8f0..a35f706d702 100644 --- a/api/src/com/cloud/storage/StoragePoolStatus.java +++ b/api/src/com/cloud/storage/StoragePoolStatus.java @@ -17,11 +17,6 @@ package com.cloud.storage; public enum StoragePoolStatus { - Creating, - Up, - PrepareForMaintenance, - ErrorInMaintenance, - CancelMaintenance, - Maintenance, - Removed; + Initial, Initialized, Creating, Attaching, Up, PrepareForMaintenance, + ErrorInMaintenance, CancelMaintenance, Maintenance, Removed; } diff --git a/api/src/com/cloud/storage/StorageService.java b/api/src/com/cloud/storage/StorageService.java index bd7dfd3a67a..63c5023ee91 100644 --- a/api/src/com/cloud/storage/StorageService.java +++ b/api/src/com/cloud/storage/StorageService.java @@ -22,17 +22,10 @@ import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaint import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; -import 
org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; -import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.user.Account; public interface StorageService{ /** @@ -51,37 +44,6 @@ public interface StorageService{ StoragePool createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException; - /** - * Creates the database object for a volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, - * name) - * @return the volume object - * @throws PermissionDeniedException - */ - Volume allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; - - /** - * Creates the volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, - * name) - * @return the volume object - */ - Volume createVolume(CreateVolumeCmd cmd); - - - /** - * Resizes the volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria - * @return the volume object - */ - Volume resizeVolume(ResizeVolumeCmd cmd); - /** * Delete the storage pool * @@ -120,19 +82,4 @@ public interface StorageService{ public StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException; public StoragePool getStoragePool(long id); - - Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException; - - - /** - * 
Uploads the volume to secondary storage - * - * @param UploadVolumeCmd cmd - * - * @return Volume object - */ - Volume uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException; - - boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; - } diff --git a/api/src/com/cloud/storage/Volume.java b/api/src/com/cloud/storage/Volume.java index 284c83d9e93..4903594f0af 100755 --- a/api/src/com/cloud/storage/Volume.java +++ b/api/src/com/cloud/storage/Volume.java @@ -39,9 +39,12 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba Snapshotting("There is a snapshot created on this volume, not backed up to secondary storage yet"), Resizing("The volume is being resized"), Expunging("The volume is being expunging"), + Expunged("The volume is being expunging"), Destroy("The volume is destroyed, and can't be recovered."), Destroying("The volume is destroying, and can't be recovered."), - UploadOp ("The volume upload operation is in progress or in short the volume is on secondary storage"); + UploadOp ("The volume upload operation is in progress or in short the volume is on secondary storage"), + Uploading("volume is uploading"), + Uploaded("volume is uploaded"); String _description; @@ -70,12 +73,15 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba s_fsm.addTransition(Resizing, Event.OperationSucceeded, Ready); s_fsm.addTransition(Resizing, Event.OperationFailed, Ready); s_fsm.addTransition(Allocated, Event.UploadRequested, UploadOp); - s_fsm.addTransition(UploadOp, Event.CopyRequested, Creating);// CopyRequested for volume from sec to primary storage + s_fsm.addTransition(Uploaded, Event.CopyRequested, Creating);// CopyRequested for volume from sec to primary storage s_fsm.addTransition(Creating, Event.CopySucceeded, Ready); - s_fsm.addTransition(Creating, Event.CopyFailed, UploadOp);// Copying volume from sec to primary failed. 
+ s_fsm.addTransition(Creating, Event.CopyFailed, Uploaded);// Copying volume from sec to primary failed. s_fsm.addTransition(UploadOp, Event.DestroyRequested, Destroy); s_fsm.addTransition(Ready, Event.DestroyRequested, Destroy); s_fsm.addTransition(Destroy, Event.ExpungingRequested, Expunging); + s_fsm.addTransition(Expunging, Event.ExpungingRequested, Expunging); + s_fsm.addTransition(Expunging, Event.OperationSucceeded, Expunged); + s_fsm.addTransition(Expunging, Event.OperationFailed, Expunging); s_fsm.addTransition(Ready, Event.SnapshotRequested, Snapshotting); s_fsm.addTransition(Snapshotting, Event.OperationSucceeded, Ready); s_fsm.addTransition(Snapshotting, Event.OperationFailed, Ready); @@ -83,6 +89,9 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba s_fsm.addTransition(Migrating, Event.OperationSucceeded, Ready); s_fsm.addTransition(Migrating, Event.OperationFailed, Ready); s_fsm.addTransition(Destroy, Event.OperationSucceeded, Destroy); + s_fsm.addTransition(UploadOp, Event.OperationSucceeded, Uploaded); + s_fsm.addTransition(UploadOp, Event.OperationFailed, Allocated); + s_fsm.addTransition(Uploaded, Event.DestroyRequested, Destroy); } } @@ -110,7 +119,7 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba /** * @return total size of the partition */ - long getSize(); + Long getSize(); /** * @return the vm instance id diff --git a/api/src/com/cloud/storage/VolumeApiService.java b/api/src/com/cloud/storage/VolumeApiService.java new file mode 100644 index 00000000000..92880f4184d --- /dev/null +++ b/api/src/com/cloud/storage/VolumeApiService.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.user.Account; + +public interface VolumeApiService { + /** + * Creates the database object for a volume based on the given criteria + * + * @param cmd + * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, + * name) + * @return the volume object + * @throws PermissionDeniedException + */ + Volume allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; + + /** + * Creates the volume based on the given criteria + * + * @param cmd + * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, + * name) + * @return the volume object + */ + Volume createVolume(CreateVolumeCmd cmd); + + + /** + * Resizes the volume based on the given criteria + * + * @param cmd + * the API command wrapping the criteria + * @return the 
volume object + */ + Volume resizeVolume(ResizeVolumeCmd cmd); + + Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException; + + /** + * Uploads the volume to secondary storage + * + * @param UploadVolumeCmd cmd + * + * @return Volume object + */ + Volume uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException; + + boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; + + Volume attachVolumeToVM(AttachVolumeCmd command); + + Volume detachVolumeFromVM(DetachVolumeCmd cmmd); +} diff --git a/api/src/com/cloud/template/TemplateService.java b/api/src/com/cloud/template/TemplateService.java index 11475d46b8a..7e831fb0055 100755 --- a/api/src/com/cloud/template/TemplateService.java +++ b/api/src/com/cloud/template/TemplateService.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd; +import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; @@ -32,6 +33,8 @@ import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import com.cloud.exception.InternalErrorException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; public interface TemplateService { @@ -87,4 +90,11 @@ public interface TemplateService { List listTemplatePermissions(BaseListTemplateOrIsoPermissionsCmd cmd); boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissionsCmd cmd); + + 
VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, + Account templateOwner) throws ResourceAllocationException; + + VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) + throws CloudRuntimeException; + } diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java index fb574fa5848..ea89eda89d2 100755 --- a/api/src/com/cloud/vm/UserVmService.java +++ b/api/src/com/cloud/vm/UserVmService.java @@ -23,7 +23,6 @@ import javax.naming.InsufficientResourcesException; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -103,24 +102,6 @@ public interface UserVmService { */ UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException; - /** - * Attaches the specified volume to the specified VM - * - * @param cmd - * - the command specifying volumeId and vmId - * @return the Volume object if attach worked successfully. - */ - Volume attachVolumeToVM(AttachVolumeCmd cmd); - - /** - * Detaches the specified volume from the VM it is currently attached to. - * - * @param cmd - * - the command specifying volumeId - * @return the Volume object if detach worked successfully. 
- */ - Volume detachVolumeFromVM(DetachVolumeCmd cmmd); - UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, ExecutionException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; @@ -151,28 +132,6 @@ public interface UserVmService { UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationException; - /** - * Create a template database record in preparation for creating a private template. - * - * @param cmd - * the command object that defines the name, display text, snapshot/volume, bits, public/private, etc. - * for the - * private template - * @param templateOwner - * TODO - * @return the vm template object if successful, null otherwise - * @throws ResourceAllocationException - */ - VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException; - - /** - * Creates a private template from a snapshot of a VM - * - * @param cmd - * - the command specifying snapshotId, name, description - * @return a template if successfully created, null otherwise - */ - VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd cmd); /** * Creates a Basic Zone User VM in the database and returns the VM to the caller. 
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 35a11dd7a53..252e31d82a5 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -444,6 +444,7 @@ public class ApiConstants { public static final String VM_SNAPSHOT_ID = "vmsnapshotid"; public static final String VM_SNAPSHOT_DISK_IDS = "vmsnapshotdiskids"; public static final String VM_SNAPSHOT_MEMORY = "snapshotmemory"; + public static final String IMAGE_STORE_UUID = "imagestoreuuid"; public enum HostDetails { all, capacity, events, stats, min; diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 17f789f88fa..816b6deed77 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -62,6 +62,7 @@ import com.cloud.resource.ResourceService; import com.cloud.server.ManagementService; import com.cloud.server.TaggedResourceService; import com.cloud.storage.StorageService; +import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotService; import com.cloud.template.TemplateService; import com.cloud.user.Account; @@ -69,7 +70,6 @@ import com.cloud.user.AccountService; import com.cloud.user.DomainService; import com.cloud.user.ResourceLimitService; import com.cloud.utils.Pair; -import com.cloud.vm.BareMetalVmService; import com.cloud.vm.UserVmService; import com.cloud.vm.snapshot.VMSnapshotService; @@ -102,6 +102,7 @@ public abstract class BaseCmd { @Inject public UserVmService _userVmService; @Inject public ManagementService _mgr; @Inject public StorageService _storageService; + @Inject public VolumeApiService _volumeService; @Inject public ResourceService _resourceService; @Inject public NetworkService _networkService; @Inject public TemplateService _templateService; diff --git 
a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index a3497a89f98..cae58886015 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -36,6 +36,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.StoragePool; import com.cloud.user.Account; + @SuppressWarnings("rawtypes") @APICommand(name = "createStoragePool", description="Creates a storage pool.", responseObject=StoragePoolResponse.class) public class CreateStoragePoolCmd extends BaseCmd { @@ -70,6 +71,14 @@ public class CreateStoragePoolCmd extends BaseCmd { @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, required=true, description="the Zone ID for the storage pool") private Long zoneId; + + @Parameter(name=ApiConstants.PROVIDER, type=CommandType.UUID, + required=false, description="the storage provider uuid") + private String storageProviderUuid; + + @Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING, + required=false, description="the scope of the storage: cluster or zone") + private String scope; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -102,6 +111,14 @@ public class CreateStoragePoolCmd extends BaseCmd { public Long getZoneId() { return zoneId; } + + public String getStorageProviderUuid() { + return this.storageProviderUuid; + } + + public String getScope() { + return this.scope; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index a4a37c8fd58..5bc4d6e6a79 100644 --- 
a/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -88,6 +88,10 @@ public class RegisterIsoCmd extends BaseCmd { @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, description="Register iso for the project") private Long projectId; + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.UUID, + description="Image store uuid") + private String imageStoreUuid; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -140,6 +144,10 @@ public class RegisterIsoCmd extends BaseCmd { public String getChecksum() { return checksum; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index 84fa197d12c..ba1f924fe02 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -240,7 +240,7 @@ import com.cloud.user.UserContext; @Override public void create() throws ResourceAllocationException { VirtualMachineTemplate template = null; - template = _userVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); + template = this._templateService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); if (template != null) { this.setEntityId(template.getId()); this.setEntityUuid(template.getUuid()); @@ -255,7 +255,7 @@ import com.cloud.user.UserContext; public void execute() { UserContext.current().setEventDetails("Template Id: "+getEntityId()+((getSnapshotId() == null) ? 
" from volume Id: " + getVolumeId() : " from snapshot Id: " + getSnapshotId())); VirtualMachineTemplate template = null; - template = _userVmService.createPrivateTemplate(this); + template = this._templateService.createPrivateTemplate(this); if (template != null){ List templateResponses; diff --git a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index 3e98ca624ab..ed719671587 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -110,7 +110,11 @@ public class RegisterTemplateCmd extends BaseCmd { @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, description="Register template for the project") private Long projectId; - + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.UUID, + description="Image store uuid") + private String imageStoreUuid; + @Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, description="Template details in key/value pairs.") protected Map details; @@ -189,6 +193,10 @@ public class RegisterTemplateCmd extends BaseCmd { public String getTemplateTag() { return templateTag; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } public Map getDetails() { if (details == null || details.isEmpty()) { diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java index 4d82534c2b2..e577e35795e 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java @@ -119,7 +119,7 @@ public class AttachVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: 
"+getId()+" VmId: "+getVirtualMachineId()); - Volume result = _userVmService.attachVolumeToVM(this); + Volume result = _volumeService.attachVolumeToVM(this); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 2f77862b3b9..5db06bcd47f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -153,7 +153,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd { @Override public void create() throws ResourceAllocationException{ - Volume volume = _storageService.allocVolume(this); + Volume volume = this._volumeService.allocVolume(this); if (volume != null) { this.setEntityId(volume.getId()); this.setEntityUuid(volume.getUuid()); @@ -165,7 +165,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: "+getEntityId()+((getSnapshotId() == null) ? 
"" : " from snapshot: " + getSnapshotId())); - Volume volume = _storageService.createVolume(this); + Volume volume = _volumeService.createVolume(this); if (volume != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(volume); //FIXME - have to be moved to ApiResponseHelper diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java index 39c3de3fac9..394b0092123 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java @@ -80,7 +80,7 @@ public class DeleteVolumeCmd extends BaseCmd { @Override public void execute() throws ConcurrentOperationException { UserContext.current().setEventDetails("Volume Id: "+getId()); - boolean result = _storageService.deleteVolume(id, UserContext.current().getCaller()); + boolean result = this._volumeService.deleteVolume(id, UserContext.current().getCaller()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java index 6153e17448b..9a5929eccca 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java @@ -130,7 +130,7 @@ public class DetachVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: "+getId()+" VmId: "+getVirtualMachineId()); - Volume result = _userVmService.detachVolumeFromVM(this); + Volume result = _volumeService.detachVolumeFromVM(this); if (result != null){ VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName("volume"); diff --git 
a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java index d43ad5500e1..8c09f8fbb72 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java @@ -92,7 +92,7 @@ public class MigrateVolumeCmd extends BaseAsyncCmd { public void execute(){ Volume result; try { - result = _storageService.migrateVolume(getVolumeId(), getStoragePoolId()); + result = _volumeService.migrateVolume(getVolumeId(), getStoragePoolId()); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java index 52863444507..955727a7d82 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java @@ -133,7 +133,7 @@ public class ResizeVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: " + getEntityId() + " to size " + getSize() + "G"); - Volume volume = _storageService.resizeVolume(this); + Volume volume = _volumeService.resizeVolume(this); if (volume != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(volume); //FIXME - have to be moved to ApiResponseHelper diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java index 107d938b106..94c3d38e5a4 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java @@ -67,6 +67,10 @@ public class 
UploadVolumeCmd extends BaseAsyncCmd { @Parameter(name=ApiConstants.CHECKSUM, type=CommandType.STRING, description="the MD5 checksum value of this volume") private String checksum; + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.UUID, + description="Image store uuid") + private String imageStoreUuid; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -99,6 +103,10 @@ public class UploadVolumeCmd extends BaseAsyncCmd { public String getChecksum() { return checksum; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -110,7 +118,7 @@ public class UploadVolumeCmd extends BaseAsyncCmd { ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - Volume volume = _storageService.uploadVolume(this); + Volume volume = _volumeService.uploadVolume(this); if (volume != null){ VolumeResponse response = _responseGenerator.createVolumeResponse(volume); response.setResponseName(getCommandName()); diff --git a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index 7836b6d6e8e..71004977d89 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -133,15 +133,15 @@ public class BackupSnapshotCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } @Override - public String getStorageType() { + public boolean isInMaintenance() { // TODO Auto-generated method stub - return null; + return false; }; }; diff --git a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java 
b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java index 3545d0f1c29..767d7c37c5e 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java @@ -115,16 +115,16 @@ public class SnapshotCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } - @Override - public String getStorageType() { - // TODO Auto-generated method stub - return null; - }; + @Override + public boolean isInMaintenance() { + // TODO Auto-generated method stub + return false; + }; }; SnapshotCommand ssc = new SnapshotCommand(pool, diff --git a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java index 7f5540fa4d3..852e52b1b86 100644 --- a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java +++ b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java @@ -134,15 +134,15 @@ public class ResizeVolumeCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } @Override - public String getStorageType() { + public boolean isInMaintenance() { // TODO Auto-generated method stub - return null; + return false; }; }; diff --git a/client/pom.xml b/client/pom.xml index 178ddc6d128..108cc3ac05c 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -282,7 +282,8 @@ - + + diff --git a/core/pom.xml b/core/pom.xml index acc742fd964..0da69529400 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -31,7 +31,11 @@ cloud-api ${project.version} - + + org.apache.cloudstack + cloud-engine-api + ${project.version} + commons-httpclient commons-httpclient diff --git a/core/src/com/cloud/storage/StoragePoolDiscoverer.java b/core/src/com/cloud/storage/StoragePoolDiscoverer.java index 816e899f941..c7dd362a5c3 100644 
--- a/core/src/com/cloud/storage/StoragePoolDiscoverer.java +++ b/core/src/com/cloud/storage/StoragePoolDiscoverer.java @@ -19,6 +19,8 @@ package com.cloud.storage; import java.net.URI; import java.util.Map; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.exception.DiscoveryException; import com.cloud.utils.component.Adapter; diff --git a/core/src/com/cloud/storage/StoragePoolVO.java b/core/src/com/cloud/storage/StoragePoolVO.java deleted file mode 100644 index af6e4e2905c..00000000000 --- a/core/src/com/cloud/storage/StoragePoolVO.java +++ /dev/null @@ -1,346 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.storage; - -import java.util.Date; -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.Id; -import javax.persistence.Table; -import javax.persistence.TableGenerator; -import javax.persistence.Temporal; -import javax.persistence.TemporalType; -import javax.persistence.Transient; - -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.utils.db.GenericDao; - -@Entity -@Table(name="storage_pool") -public class StoragePoolVO implements StoragePool { - @Id - @TableGenerator(name="storage_pool_sq", table="sequence", pkColumnName="name", valueColumnName="value", pkColumnValue="storage_pool_seq", allocationSize=1) - @Column(name="id", updatable=false, nullable = false) - private long id; - - @Column(name="name", updatable=false, nullable=false, length=255) - private String name = null; - - @Column(name="uuid", length=255) - private String uuid = null; - - @Column(name="pool_type", updatable=false, nullable=false, length=32) - @Enumerated(value=EnumType.STRING) - private StoragePoolType poolType; - - @Column(name=GenericDao.CREATED_COLUMN) - Date created; - - @Column(name=GenericDao.REMOVED_COLUMN) - private Date removed; - - @Column(name="update_time", updatable=true) - @Temporal(value=TemporalType.TIMESTAMP) - private Date updateTime; - - @Column(name="data_center_id", updatable=true, nullable=false) - private long dataCenterId; - - @Column(name="pod_id", updatable=true) - private Long podId; - - @Column(name="available_bytes", updatable=true, nullable=true) - private long availableBytes; - - @Column(name="capacity_bytes", updatable=true, nullable=true) - private long capacityBytes; - - @Column(name="status", updatable=true, nullable=false) - @Enumerated(value=EnumType.STRING) - private StoragePoolStatus status; - - // TODO, disable persisency of storageProvider and storageType, javelin new code not 
- // sync with the schema! - - // @Column(name="storage_provider", updatable=true, nullable=false) - @Transient private String storageProvider; - - // Column(name="storage_type", nullable=false) - @Transient private String storageType; - - @Override - public long getId() { - return id; - } - - @Override - public StoragePoolStatus getStatus() { - return status; - } - - public StoragePoolVO() { - // TODO Auto-generated constructor stub - } - - @Override - public String getName() { - return name; - } - - @Override - public String getUuid() { - return uuid; - } - - @Override - public StoragePoolType getPoolType() { - return poolType; - } - - @Override - public Date getCreated() { - return created; - } - - public Date getRemoved() { - return removed; - } - - @Override - public Date getUpdateTime() { - return updateTime; - } - - @Override - public long getDataCenterId() { - return dataCenterId; - } - - @Override - public long getAvailableBytes() { - return availableBytes; - } - - @Override - public String getStorageProvider() { - return storageProvider; - } - - public void setStorageProvider(String provider) { - storageProvider = provider; - } - - @Override - public String getStorageType() { - return storageType; - } - - public void setStorageType(String type) { - storageType = type; - } - - @Override - public long getCapacityBytes() { - return capacityBytes; - } - - public void setAvailableBytes(long available) { - availableBytes = available; - } - - public void setCapacityBytes(long capacity) { - capacityBytes = capacity; - } - - @Column(name="host_address") - private String hostAddress; - - @Column(name="path") - private String path; - - @Column(name="port") - private int port; - - @Column(name="user_info") - private String userInfo; - - @Column(name="cluster_id") - private Long clusterId; - - - @Override - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - @Override - public String 
getHostAddress() { - return hostAddress; - } - - @Override - public String getPath() { - return path; - } - - @Override - public String getUserInfo() { - return userInfo; - } - - public StoragePoolVO(long poolId, String name, String uuid, StoragePoolType type, - long dataCenterId, Long podId, long availableBytes, long capacityBytes, String hostAddress, int port, String hostPath) { - this.name = name; - this.id = poolId; - this.uuid = uuid; - this.poolType = type; - this.dataCenterId = dataCenterId; - this.availableBytes = availableBytes; - this.capacityBytes = capacityBytes; - this.hostAddress = hostAddress; - this.path = hostPath; - this.port = port; - this.podId = podId; - this.setStatus(StoragePoolStatus.Creating); - } - - public StoragePoolVO(StoragePoolVO that) { - this(that.id, that.name, that.uuid, that.poolType, that.dataCenterId, that.podId, that.availableBytes, that.capacityBytes, that.hostAddress, that.port, that.path); - } - - public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path) { - this.poolType = type; - this.hostAddress = hostAddress; - this.port = port; - this.path = path; - this.setStatus(StoragePoolStatus.Creating); - this.uuid = UUID.randomUUID().toString(); - } - - public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path, String userInfo) { - this.poolType = type; - this.hostAddress = hostAddress; - this.port = port; - this.path = path; - this.userInfo = userInfo; - this.setStatus(StoragePoolStatus.Creating); - this.uuid = UUID.randomUUID().toString(); - } - - public void setStatus(StoragePoolStatus status) - { - this.status = status; - } - - public void setId(long id) { - this.id = id; - } - - public void setDataCenterId(long dcId) { - this.dataCenterId = dcId; - } - - public void setPodId(Long podId) { - this.podId = podId; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public void setPath(String path) { - this.path = path; - } - - public void 
setUserInfo(String userInfo) { - this.userInfo = userInfo; - } - - @Override - public int getPort() { - return port; - } - - @Override - public boolean isShared() { - return poolType.isShared(); - } - - @Override - public boolean isLocal() { - return !poolType.isShared(); - } - - @Transient - public String toUri() { - /* - URI uri = new URI(); - try { - if (type == StoragePoolType.Filesystem) { - uri.setScheme("file"); - } else if (type == StoragePoolType.NetworkFilesystem) { - uri.setScheme("nfs"); - } else if (type == StoragePoolType.IscsiLUN) { - } - } catch (MalformedURIException e) { - throw new VmopsRuntimeException("Unable to form the uri " + id); - } - return uri.toString(); - */ - return null; - } - - @Override - public Long getPodId() { - return podId; - } - - public void setName(String name) { - this.name = name; - } - - public boolean isInMaintenance() { - return status == StoragePoolStatus.PrepareForMaintenance || status == StoragePoolStatus.Maintenance || status == StoragePoolStatus.ErrorInMaintenance || removed != null; - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof StoragePoolVO) || obj == null) { - return false; - } - StoragePoolVO that = (StoragePoolVO)obj; - return this.id == that.id; - } - - @Override - public int hashCode() { - return new Long(id).hashCode(); - } - - @Override - public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); - } -} diff --git a/core/src/com/cloud/storage/VMTemplateHostVO.java b/core/src/com/cloud/storage/VMTemplateHostVO.java index 9eae1a00303..b8dfc41d51b 100755 --- a/core/src/com/cloud/storage/VMTemplateHostVO.java +++ b/core/src/com/cloud/storage/VMTemplateHostVO.java @@ -29,8 +29,10 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDaoBase; -import org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage hosts and templates @@ -38,7 +40,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="template_host_ref") -public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { +public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) Long id; @@ -90,6 +92,18 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { @Column(name="destroyed") boolean destroyed = false; + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; + + @Override public String getInstallPath() { return installPath; @@ -162,6 +176,7 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { super(); this.hostId = hostId; this.templateId = templateId; + this.state = ObjectInDataStoreStateMachine.State.Allocated; } public VMTemplateHostVO(long hostId, long templateId, Date lastUpdated, @@ -282,4 +297,26 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { return new StringBuilder("TmplHost[").append(id).append("-").append(templateId).append("-").append(hostId).append(installPath).append("]").toString(); } + @Override + public ObjectInDataStoreStateMachine.State getState() { + // TODO Auto-generated method stub + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + } 
diff --git a/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java b/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java index 32c9dd2ece5..9b761764359 100644 --- a/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java +++ b/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java @@ -29,8 +29,11 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; + import com.cloud.utils.db.GenericDaoBase; -import org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage pools and templates @@ -38,7 +41,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="template_spool_ref") -public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { +public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) long id; @@ -69,7 +72,18 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { @Column (name="template_size") long templateSize; @Column (name="marked_for_gc") boolean markedForGC; - + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; + @Override public String getInstallPath() { return installPath; @@ -148,6 +162,7 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { this.poolId = poolId; this.templateId = templateId; this.downloadState = Status.NOT_DOWNLOADED; + this.state = 
ObjectInDataStoreStateMachine.State.Allocated; this.markedForGC = false; } @@ -235,4 +250,26 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { return new StringBuilder("TmplPool[").append(id).append("-").append(templateId).append("-").append("poolId").append("-").append(installPath).append("]").toString(); } + @Override + public State getState() { + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + } diff --git a/core/src/com/cloud/storage/VMTemplateVO.java b/core/src/com/cloud/storage/VMTemplateVO.java index fcfdd0067e1..e643d75bf1e 100755 --- a/core/src/com/cloud/storage/VMTemplateVO.java +++ b/core/src/com/cloud/storage/VMTemplateVO.java @@ -31,17 +31,18 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; -import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; + import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.utils.fsm.StateObject; @Entity @Table(name="vm_template") -public class VMTemplateVO implements VirtualMachineTemplate { +public class VMTemplateVO implements VirtualMachineTemplate, StateObject { @Id @TableGenerator(name="vm_template_sq", table="sequence", pkColumnName="name", valueColumnName="value", pkColumnValue="vm_template_seq", allocationSize=1) @Column(name="id", nullable = false) @@ -127,6 +128,22 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name="enable_sshkey") private boolean enableSshKey; 
+ + @Column(name = "image_data_store_id") + private long imageDataStoreId; + + @Column(name = "size") + private Long size; + + @Column(name = "state") + private TemplateState state; + + @Column(name="update_count", updatable = true) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; @Transient Map details; @@ -140,8 +157,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { this.uniqueName = uniqueName; } - protected VMTemplateVO() { + public VMTemplateVO() { this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } /** @@ -150,12 +168,14 @@ public class VMTemplateVO implements VirtualMachineTemplate { public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType, Map details) { this(id, generateUniqueName(id, accountId, name), name, format, isPublic, featured, isExtractable, type, url, null, requiresHvm, bits, accountId, cksum, displayText, enablePassword, guestOSId, bootable, hyperType, details); this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled) { this(id, name, format, isPublic, featured, isExtractable, type, url, requiresHvm, bits, accountId, cksum, displayText, enablePassword, guestOSId, bootable, hyperType, details); this.templateTag = templateTag; this.uuid = UUID.randomUUID().toString(); + this.state = 
TemplateState.Allocated; this.enableSshKey = sshKeyEnabled; } @@ -179,6 +199,7 @@ public class VMTemplateVO implements VirtualMachineTemplate { this.bootable = bootable; this.hypervisorType = hyperType; this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } // Has an extra attribute - isExtractable @@ -468,5 +489,46 @@ public class VMTemplateVO implements VirtualMachineTemplate { public void setEnableSshKey(boolean enable) { enableSshKey = enable; } + + public Long getImageDataStoreId() { + return this.imageDataStoreId; + } + + public void setImageDataStoreId(long dataStoreId) { + this.imageDataStoreId = dataStoreId; + } + + public void setSize(Long size) { + this.size = size; + } + + public Long getSize() { + return this.size; + } + + public TemplateState getState() { + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + public void setUpdated(Date updated) { + this.updated = updated; + } + } diff --git a/core/src/com/cloud/storage/VolumeHostVO.java b/core/src/com/cloud/storage/VolumeHostVO.java index f4fc7abc4ee..40bae499122 100755 --- a/core/src/com/cloud/storage/VolumeHostVO.java +++ b/core/src/com/cloud/storage/VolumeHostVO.java @@ -29,11 +29,13 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; -//import com.cloud.storage.VMVolumeStorageResourceAssoc.Status; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDaoBase; -import 
org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage hosts and volumes @@ -41,7 +43,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="volume_host_ref") -public class VolumeHostVO implements InternalIdentity { +public class VolumeHostVO implements InternalIdentity, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) Long id; @@ -99,6 +101,16 @@ public class VolumeHostVO implements InternalIdentity { @Column(name="destroyed") boolean destroyed = false; + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; public String getInstallPath() { return installPath; @@ -187,6 +199,7 @@ public class VolumeHostVO implements InternalIdentity { super(); this.hostId = hostId; this.volumeId = volumeId; + this.state = ObjectInDataStoreStateMachine.State.Allocated; } public VolumeHostVO(long hostId, long volumeId, long zoneId, Date lastUpdated, @@ -308,5 +321,27 @@ public class VolumeHostVO implements InternalIdentity { public String toString() { return new StringBuilder("VolumeHost[").append(id).append("-").append(volumeId).append("-").append(hostId).append(installPath).append("]").toString(); } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + @Override + public ObjectInDataStoreStateMachine.State getState() { + // TODO Auto-generated method stub + return this.state; + } } diff --git a/core/src/com/cloud/storage/VolumeVO.java b/core/src/com/cloud/storage/VolumeVO.java index defc841e1e3..a287c26348b 100755 --- a/core/src/com/cloud/storage/VolumeVO.java +++ 
b/core/src/com/cloud/storage/VolumeVO.java @@ -32,11 +32,9 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; -import org.apache.cloudstack.api.Identity; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; @Entity @Table(name = "volumes") @@ -69,7 +67,7 @@ public class VolumeVO implements Volume { Long deviceId = null; @Column(name = "size") - long size; + Long size; @Column(name = "folder") String folder; @@ -257,11 +255,11 @@ public class VolumeVO implements Volume { } @Override - public long getSize() { + public Long getSize() { return size; } - public void setSize(long size) { + public void setSize(Long size) { this.size = size; } diff --git a/engine/api/pom.xml b/engine/api/pom.xml index ca03e590286..7fc612f5d6c 100644 --- a/engine/api/pom.xml +++ b/engine/api/pom.xml @@ -32,7 +32,7 @@ org.apache.cloudstack - cloud-framework-ipc + cloud-framework-api ${project.version} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java index 812db48cf8c..0827cf6b674 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java @@ -28,4 +28,5 @@ public interface DataObject { public DataObjectType getType(); public DiskFormat getFormat(); public String getUuid(); + public void processEvent(ObjectInDataStoreStateMachine.Event event); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java similarity index 51% rename from engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java rename to 
engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java index f27753dd2d7..60dfb9fb71f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java @@ -16,30 +16,11 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.storage.volume; +package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile; -import org.apache.cloudstack.storage.volume.db.VolumeVO; +import com.cloud.utils.fsm.StateObject; -import com.cloud.storage.Volume; -import com.cloud.storage.Volume.Event; -import com.cloud.storage.Volume.State; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -public interface VolumeManager { - VolumeVO allocateDuplicateVolume(VolumeVO oldVol); - - VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException; - - VolumeProfile getProfile(long volumeId); - - VolumeVO getVolume(long volumeId); - - VolumeVO updateVolume(VolumeVO volume); - - /** - * @return - */ - StateMachine2 getStateMachine(); +public interface DataObjectInStore extends StateObject { + public String getInstallPath(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java index 03f2b0408ae..f101f243047 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStore.java @@ -20,6 +20,9 @@ public interface DataStore { DataStoreDriver getDriver(); DataStoreRole getRole(); long getId(); + String getUuid(); String getUri(); Scope getScope(); + DataObject create(DataObject obj); + boolean delete(DataObject obj); } diff 
--git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java index ef578a7b0d8..95e3d0b2ef8 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java @@ -20,23 +20,23 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.Map; +import com.cloud.agent.api.StoragePoolInfo; + public interface DataStoreLifeCycle { - public DataStore initialize(Map dsInfos); + public DataStore initialize(Map dsInfos); public boolean attachCluster(DataStore store, ClusterScope scope); - + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo); boolean attachZone(DataStore dataStore, ZoneScope scope); public boolean dettach(); public boolean unmanaged(); - public boolean maintain(); - - public boolean cancelMaintain(); - - public boolean deleteDataStore(); + public boolean maintain(long storeId); + public boolean cancelMaintain(long storeId); + public boolean deleteDataStore(long storeId); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java similarity index 79% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java index 829be506ccc..15e49e133fb 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java @@ -16,14 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore; +package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; import java.util.Map; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; public interface DataStoreManager { public DataStore getDataStore(long storeId, DataStoreRole role); + public DataStore getPrimaryDataStore(long storeId); + public DataStore getDataStore(String uuid, DataStoreRole role); + public List getImageStores(Scope scope); public DataStore registerDataStore(Map params, String providerUuid); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java similarity index 83% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java index 0d38f34f1c7..d29c4828713 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProvider.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProvider.java @@ -16,12 +16,10 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.Map; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; public interface DataStoreProvider { public DataStoreLifeCycle getLifeCycle(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java similarity index 90% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java index cbe045c5bc8..94998133196 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreProviderManager.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.datastore.provider; +package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.List; @@ -26,5 +26,6 @@ public interface DataStoreProviderManager extends Manager { public DataStoreProvider getDataStoreProviderByUuid(String uuid); public DataStoreProvider getDataStoreProviderById(long id); public DataStoreProvider getDataStoreProvider(String name); + public DataStoreProvider getDefaultPrimaryDataStoreProvider(); public List getDataStoreProviders(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreStatus.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreStatus.java similarity index 94% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreStatus.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreStatus.java index 23551e4d0ac..2388795410c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreStatus.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreStatus.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.cloudstack.storage.datastore; +package org.apache.cloudstack.engine.subsystem.api.storage; public enum DataStoreStatus { Initial, Initialized, Creating, Attaching, Up, PrepareForMaintenance, ErrorInMaintenance, CancelMaintenance, Maintenance, Removed; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java new file mode 100644 index 00000000000..3ac17598bb0 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HypervisorHostListener.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +public interface HypervisorHostListener { + boolean hostConnect(long hostId, long poolId); + boolean hostDisconnected(long hostId, long poolId); +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/ImageDataFactory.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataFactory.java similarity index 84% rename from engine/storage/src/org/apache/cloudstack/storage/image/ImageDataFactory.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataFactory.java index 7c7c2a8c530..f0d69887c7d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/ImageDataFactory.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageDataFactory.java @@ -16,10 +16,11 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.image; +package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; public interface ImageDataFactory { TemplateInfo getTemplate(long templateId, DataStore store); + TemplateInfo getTemplate(DataObject obj, DataStore store); + TemplateInfo getTemplate(long templateId); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/ImageService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageService.java similarity index 77% rename from engine/storage/src/org/apache/cloudstack/storage/image/ImageService.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageService.java index 319406d5001..119f3b1d32f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/ImageService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ImageService.java @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.image; +package org.apache.cloudstack.engine.subsystem.api.storage; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.framework.async.AsyncCallFuture; public interface ImageService { AsyncCallFuture createTemplateAsync(TemplateInfo template, DataStore store); + AsyncCallFuture createTemplateFromSnapshotAsync(SnapshotInfo snapshot, TemplateInfo template, DataStore store); + AsyncCallFuture createTemplateFromVolumeAsync(VolumeInfo volume, TemplateInfo template, DataStore store); AsyncCallFuture deleteTemplateAsync(TemplateInfo template); + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java similarity index 95% rename from engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java index d0530d1934a..af9974e1118 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/ObjectInDataStoreStateMachine.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.apache.cloudstack.storage.volume; +package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.utils.fsm.StateObject; @@ -49,6 +49,7 @@ public interface ObjectInDataStoreStateMachine extends StateObject createVolumeAsync(VolumeInfo volume, long dataStoreId); + AsyncCallFuture createVolumeAsync(VolumeInfo volume, DataStore store); /** * Delete volume @@ -61,7 +53,7 @@ public interface VolumeService { * @return * @throws ConcurrentOperationException */ - AsyncCallFuture deleteVolumeAsync(VolumeInfo volume); + AsyncCallFuture expungeVolumeAsync(VolumeInfo volume); /** * @@ -71,21 +63,16 @@ public interface VolumeService { /** * */ - boolean createVolumeFromSnapshot(long volumeId, long snapshotId); + AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot); - /** - * - */ - String grantAccess(VolumeInfo volume, EndPoint endpointId); - - TemplateOnPrimaryDataStoreInfo grantAccess(TemplateOnPrimaryDataStoreInfo template, EndPoint endPoint); - - /** - * - */ - boolean rokeAccess(long volumeId, long endpointId); VolumeEntity getVolumeEntity(long volumeId); AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template); + AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataStore destStore); + + boolean destroyVolume(long volumeId) throws ConcurrentOperationException; + + AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store); + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDao.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java 
b/engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderDaoImpl.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/DataStoreProviderVO.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java similarity index 65% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index 24a5c790688..1530ced30fd 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -21,23 +21,23 @@ package org.apache.cloudstack.storage.datastore.db; import java.util.List; import java.util.Map; -import org.apache.cloudstack.storage.datastore.DataStoreStatus; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; import com.cloud.utils.db.GenericDao; -public interface PrimaryDataStoreDao extends GenericDao { +public interface PrimaryDataStoreDao extends GenericDao { /** * @param datacenterId * -- the id of the datacenter (availability zone) */ - List listByDataCenterId(long datacenterId); + List listByDataCenterId(long datacenterId); /** * @param datacenterId * -- the id of the datacenter (availability zone) */ - 
List listBy(long datacenterId, long podId, Long clusterId); + List listBy(long datacenterId, long podId, Long clusterId); /** * Set capacity of storage pool in bytes @@ -59,7 +59,7 @@ public interface PrimaryDataStoreDao extends GenericDao details); + StoragePoolVO persist(StoragePoolVO pool, Map details); /** * Find pool by name. @@ -68,7 +68,7 @@ public interface PrimaryDataStoreDao extends GenericDao findPoolByName(String name); + List findPoolByName(String name); /** * Find pools by the pod that matches the details. @@ -79,9 +79,9 @@ public interface PrimaryDataStoreDao extends GenericDao findPoolsByDetails(long dcId, long podId, Long clusterId, Map details); + List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details); - List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared); + List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared); /** * Find pool by UUID. @@ -90,13 +90,13 @@ public interface PrimaryDataStoreDao extends GenericDao listByStorageHost(String hostFqdnOrIp); + List listByStorageHost(String hostFqdnOrIp); - PrimaryDataStoreVO findPoolByHostPath(long dcId, Long podId, String host, String path, String uuid); + StoragePoolVO findPoolByHostPath(long dcId, Long podId, String host, String path, String uuid); - List listPoolByHostPath(String host, String path); + List listPoolByHostPath(String host, String path); void updateDetails(long poolId, Map details); @@ -104,13 +104,13 @@ public interface PrimaryDataStoreDao extends GenericDao searchForStoragePoolDetails(long poolId, String value); - List findIfDuplicatePoolsExistByUUID(String uuid); + List findIfDuplicatePoolsExistByUUID(String uuid); - List listByStatus(DataStoreStatus status); + List listByStatus(DataStoreStatus status); long countPoolsByStatus(DataStoreStatus... 
statuses); - List listByStatusInZone(long dcId, DataStoreStatus status); + List listByStatusInZone(long dcId, DataStoreStatus status); - List listPoolsByCluster(long clusterId); + List listPoolsByCluster(long clusterId); } \ No newline at end of file diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java similarity index 77% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index faca54b569a..023b42bda9d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -29,7 +29,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.datastore.DataStoreStatus; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -43,12 +43,12 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @Component -public class PrimaryDataStoreDaoImpl extends GenericDaoBase implements PrimaryDataStoreDao { - protected final SearchBuilder AllFieldSearch; - protected final SearchBuilder DcPodSearch; - protected final SearchBuilder DcPodAnyClusterSearch; - protected final SearchBuilder DeleteLvmSearch; - protected final GenericSearchBuilder StatusCountSearch; +public class PrimaryDataStoreDaoImpl extends GenericDaoBase implements PrimaryDataStoreDao { + protected final SearchBuilder AllFieldSearch; + protected final SearchBuilder DcPodSearch; + protected final SearchBuilder DcPodAnyClusterSearch; + protected final SearchBuilder DeleteLvmSearch; + 
protected final GenericSearchBuilder StatusCountSearch; @Inject protected PrimaryDataStoreDetailsDao _detailsDao; @@ -99,73 +99,73 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase findPoolByName(String name) { - SearchCriteria sc = AllFieldSearch.create(); + public List findPoolByName(String name) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("name", name); return listIncludingRemovedBy(sc); } @Override - public PrimaryDataStoreVO findPoolByUUID(String uuid) { - SearchCriteria sc = AllFieldSearch.create(); + public StoragePoolVO findPoolByUUID(String uuid) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("uuid", uuid); return findOneIncludingRemovedBy(sc); } @Override - public List findIfDuplicatePoolsExistByUUID(String uuid) { - SearchCriteria sc = AllFieldSearch.create(); + public List findIfDuplicatePoolsExistByUUID(String uuid) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("uuid", uuid); return listBy(sc); } @Override - public List listByDataCenterId(long datacenterId) { - SearchCriteria sc = AllFieldSearch.create(); + public List listByDataCenterId(long datacenterId) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("datacenterId", datacenterId); return listBy(sc); } @Override public void updateAvailable(long id, long available) { - PrimaryDataStoreVO pool = createForUpdate(id); + StoragePoolVO pool = createForUpdate(id); pool.setAvailableBytes(available); update(id, pool); } @Override public void updateCapacity(long id, long capacity) { - PrimaryDataStoreVO pool = createForUpdate(id); + StoragePoolVO pool = createForUpdate(id); pool.setCapacityBytes(capacity); update(id, pool); } @Override - public List listByStorageHost(String hostFqdnOrIp) { - SearchCriteria sc = AllFieldSearch.create(); + public List listByStorageHost(String hostFqdnOrIp) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("hostAddress", hostFqdnOrIp); return listIncludingRemovedBy(sc); } 
@Override - public List listByStatus(DataStoreStatus status) { - SearchCriteria sc = AllFieldSearch.create(); + public List listByStatus(DataStoreStatus status) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("status", status); return listBy(sc); } @Override - public List listByStatusInZone(long dcId, DataStoreStatus status) { - SearchCriteria sc = AllFieldSearch.create(); + public List listByStatusInZone(long dcId, DataStoreStatus status) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("status", status); sc.setParameters("datacenterId", dcId); return listBy(sc); } @Override - public PrimaryDataStoreVO findPoolByHostPath(long datacenterId, Long podId, String host, String path, String uuid) { - SearchCriteria sc = AllFieldSearch.create(); + public StoragePoolVO findPoolByHostPath(long datacenterId, Long podId, String host, String path, String uuid) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("hostAddress", host); sc.setParameters("path", path); sc.setParameters("datacenterId", datacenterId); @@ -176,16 +176,16 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase listBy(long datacenterId, long podId, Long clusterId) { + public List listBy(long datacenterId, long podId, Long clusterId) { if (clusterId != null) { - SearchCriteria sc = DcPodSearch.create(); + SearchCriteria sc = DcPodSearch.create(); sc.setParameters("datacenterId", datacenterId); sc.setParameters("podId", podId); sc.setParameters("cluster", clusterId); return listBy(sc); } else { - SearchCriteria sc = DcPodAnyClusterSearch.create(); + SearchCriteria sc = DcPodAnyClusterSearch.create(); sc.setParameters("datacenterId", datacenterId); sc.setParameters("podId", podId); return listBy(sc); @@ -193,16 +193,16 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase listPoolByHostPath(String host, String path) { - SearchCriteria sc = AllFieldSearch.create(); + public List listPoolByHostPath(String host, String path) { + SearchCriteria 
sc = AllFieldSearch.create(); sc.setParameters("hostAddress", host); sc.setParameters("path", path); return listBy(sc); } - public PrimaryDataStoreVO listById(Integer id) { - SearchCriteria sc = AllFieldSearch.create(); + public StoragePoolVO listById(Integer id) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("id", id); return findOneIncludingRemovedBy(sc); @@ -210,7 +210,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase details) { + public StoragePoolVO persist(StoragePoolVO pool, Map details) { Transaction txn = Transaction.currentTxn(); txn.start(); pool = super.persist(pool); @@ -226,7 +226,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase findPoolsByDetails(long dcId, long podId, Long clusterId, Map details) { + public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details) { StringBuilder sql = new StringBuilder(DetailsSqlPrefix); if (clusterId != null) { sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND ("); @@ -248,7 +248,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase pools = new ArrayList(); + List pools = new ArrayList(); while (rs.next()) { pools.add(toEntityBean(rs, false)); } @@ -267,8 +267,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) { - List storagePools = null; + public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) { + List storagePools = null; if (tags == null || tags.length == 0) { storagePools = listBy(dcId, podId, clusterId); } else { @@ -279,8 +279,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase filteredStoragePools = new ArrayList(storagePools); - for (PrimaryDataStoreVO pool : storagePools) { + List filteredStoragePools = new ArrayList(storagePools); + for (StoragePoolVO pool : storagePools) { /* * if (shared != pool.isShared()) { * 
filteredStoragePools.remove(pool); } @@ -351,8 +351,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase listPoolsByCluster(long clusterId) { - SearchCriteria sc = AllFieldSearch.create(); + public List listPoolsByCluster(long clusterId) { + SearchCriteria sc = AllFieldSearch.create(); sc.setParameters("clusterId", clusterId); return listBy(sc); diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java similarity index 100% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java similarity index 95% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java index 906742bb3f0..c2b109a959e 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.storage.datastore.db; import java.util.Map; -import com.cloud.storage.StoragePoolDetailVO; import com.cloud.utils.db.GenericDao; public interface PrimaryDataStoreDetailsDao extends GenericDao { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java similarity index 100% rename from 
engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDaoImpl.java diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java similarity index 64% rename from engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java rename to engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index 3e37ec7abe8..1782f16a4c1 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreVO.java +++ b/engine/api/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -1,24 +1,23 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package org.apache.cloudstack.storage.datastore.db; import java.util.Date; +import java.util.UUID; import javax.persistence.Column; import javax.persistence.Entity; @@ -30,15 +29,15 @@ import javax.persistence.TableGenerator; import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; -import org.apache.cloudstack.storage.datastore.DataStoreStatus; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; import com.cloud.utils.db.GenericDao; @Entity -@Table(name = "storage_pool") -public class PrimaryDataStoreVO implements Identity { +@Table(name="storage_pool") +public class StoragePoolVO { @Id @TableGenerator(name = "storage_pool_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "storage_pool_seq", allocationSize = 1) @Column(name = "id", updatable = false, nullable = false) @@ -51,7 +50,8 @@ public class PrimaryDataStoreVO implements Identity { private String uuid = null; @Column(name = "pool_type", updatable = false, nullable = false, length = 32) - private String poolType; + @Enumerated(value = EnumType.STRING) + private StoragePoolType poolType; @Column(name = GenericDao.CREATED_COLUMN) Date created; @@ -77,7 +77,7 @@ public class PrimaryDataStoreVO implements Identity { 
@Column(name = "status", updatable = true, nullable = false) @Enumerated(value = EnumType.STRING) - private DataStoreStatus status; + private StoragePoolStatus status; @Column(name = "storage_provider_id", updatable = true, nullable = false) private Long storageProviderId; @@ -105,28 +105,57 @@ public class PrimaryDataStoreVO implements Identity { return id; } - public DataStoreStatus getStatus() { + public StoragePoolStatus getStatus() { return status; } - public PrimaryDataStoreVO() { - this.status = DataStoreStatus.Initial; + public StoragePoolVO() { + this.status = StoragePoolStatus.Initial; } + + public StoragePoolVO(long poolId, String name, String uuid, StoragePoolType type, + long dataCenterId, Long podId, long availableBytes, long capacityBytes, String hostAddress, int port, String hostPath) { + this.name = name; + this.id = poolId; + this.uuid = uuid; + this.poolType = type; + this.dataCenterId = dataCenterId; + this.availableBytes = availableBytes; + this.capacityBytes = capacityBytes; + this.hostAddress = hostAddress; + this.path = hostPath; + this.port = port; + this.podId = podId; + this.setStatus(StoragePoolStatus.Initial); + } + + public StoragePoolVO(StoragePoolVO that) { + this(that.id, that.name, that.uuid, that.poolType, that.dataCenterId, that.podId, that.availableBytes, that.capacityBytes, that.hostAddress, that.port, that.path); + } + + public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path) { + this.poolType = type; + this.hostAddress = hostAddress; + this.port = port; + this.path = path; + this.setStatus(StoragePoolStatus.Initial); + this.uuid = UUID.randomUUID().toString(); + } + public String getName() { return name; } - @Override public String getUuid() { return uuid; } - public String getPoolType() { + public StoragePoolType getPoolType() { return poolType; } - public void setPoolType(String protocol) { + public void setPoolType(StoragePoolType protocol) { this.poolType = protocol; } @@ -194,7 +223,7 @@ 
public class PrimaryDataStoreVO implements Identity { return userInfo; } - public void setStatus(DataStoreStatus status) { + public void setStatus(StoragePoolStatus status) { this.status = status; } @@ -248,10 +277,10 @@ public class PrimaryDataStoreVO implements Identity { @Override public boolean equals(Object obj) { - if (!(obj instanceof PrimaryDataStoreVO) || obj == null) { + if (!(obj instanceof StoragePoolVO) || obj == null) { return false; } - PrimaryDataStoreVO that = (PrimaryDataStoreVO) obj; + StoragePoolVO that = (StoragePoolVO) obj; return this.id == that.id; } @@ -264,4 +293,12 @@ public class PrimaryDataStoreVO implements Identity { public String toString() { return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); } -} \ No newline at end of file + + public boolean isShared() { + return this.scope == ScopeType.HOST ? false : true; + } + + public boolean isLocal() { + return !isShared(); + } +} diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java index b91a2cabd62..ee08cf3b0c7 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; import javax.inject.Inject; @@ -28,6 +27,7 @@ import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO; import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMEntityDao; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import 
org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -45,22 +45,18 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.dao.NetworkDao; import com.cloud.org.Cluster; import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.StoragePoolVO; +import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.Account; -import com.cloud.user.User; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VirtualMachineProfileImpl; import com.cloud.vm.dao.VMInstanceDao; @@ -104,6 +100,8 @@ public class VMEntityManagerImpl implements VMEntityManager { @Inject protected StoragePoolDao _storagePoolDao; + @Inject + DataStoreManager dataStoreMgr; @Override public VMEntityVO loadVirtualMachine(String vmId) { @@ -134,7 +132,8 @@ public class VMEntityManagerImpl implements VMEntityManager { List vols = _volsDao.findReadyRootVolumesByInstance(vm.getId()); if(!vols.isEmpty()){ VolumeVO vol = vols.get(0); - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); + StoragePool pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); + if (!pool.isInMaintenance()) { long rootVolDcId = pool.getDataCenterId(); Long rootVolPodId = pool.getPodId(); diff --git a/engine/storage/backup/src/org/apache/cloudstack/storage/backup/BackupService.java b/engine/storage/backup/src/org/apache/cloudstack/storage/backup/BackupService.java index e4cb0c7031e..67924d2ce73 100644 --- 
a/engine/storage/backup/src/org/apache/cloudstack/storage/backup/BackupService.java +++ b/engine/storage/backup/src/org/apache/cloudstack/storage/backup/BackupService.java @@ -18,7 +18,7 @@ */ package org.apache.cloudstack.storage.backup; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; public interface BackupService { public boolean backupSnapshot(SnapshotInfo snapshot, long backupStoreId); diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java index b6a45b5f6bb..616e4789a27 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageDataFactoryImpl.java @@ -20,38 +20,74 @@ package org.apache.cloudstack.storage.image; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.storage.datastore.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; import org.apache.cloudstack.storage.image.store.TemplateObject; +import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; + @Component public class ImageDataFactoryImpl implements ImageDataFactory { + private static final Logger s_logger = Logger + .getLogger(ImageDataFactoryImpl.class); @Inject - ImageDataDao imageDataDao; + VMTemplateDao imageDataDao; @Inject ObjectInDataStoreManager objMap; @Inject DataStoreManager storeMgr; + @Inject + VMTemplatePoolDao templatePoolDao; @Override public TemplateInfo getTemplate(long templateId, DataStore store) { - ImageDataVO templ = imageDataDao.findById(templateId); + VMTemplateVO templ = imageDataDao.findById(templateId); if (store == null) { TemplateObject tmpl = TemplateObject.getTemplate(templ, null); return tmpl; } - ObjectInDataStoreVO obj = objMap.findObject(templateId, DataObjectType.TEMPLATE, store.getId(), store.getRole()); - if (obj == null) { - TemplateObject tmpl = TemplateObject.getTemplate(templ, null); - return tmpl; + boolean found = false; + if (store.getRole() == DataStoreRole.Primary) { + VMTemplateStoragePoolVO templatePoolVO = templatePoolDao.findByPoolTemplate(store.getId(), templateId); + if (templatePoolVO != null) { + found = true; + } + } else { + DataObjectInStore obj = objMap.findObject(templ.getUuid(), DataObjectType.TEMPLATE, store.getUuid(), store.getRole()); + if (obj != null) { + found = true; + } + } + + if (!found) { + s_logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); } TemplateObject tmpl = TemplateObject.getTemplate(templ, store); return tmpl; } + @Override + public TemplateInfo getTemplate(long templateId) { + VMTemplateVO templ = imageDataDao.findById(templateId); + if (templ.getImageDataStoreId() == null) { + return this.getTemplate(templateId, null); + } + DataStore store = 
this.storeMgr.getDataStore(templ.getImageDataStoreId(), DataStoreRole.Image); + return this.getTemplate(templateId, store); + } + @Override + public TemplateInfo getTemplate(DataObject obj, DataStore store) { + return this.getTemplate(obj.getId(), store); + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java index 82d0d71db9c..5898b1b0794 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/ImageServiceImpl.java @@ -22,15 +22,21 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.image.store.TemplateObject; -import 
org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -41,21 +47,25 @@ public class ImageServiceImpl implements ImageService { private static final Logger s_logger = Logger.getLogger(ImageServiceImpl.class); @Inject ObjectInDataStoreManager objectInDataStoreMgr; + @Inject + DataObjectManager dataObjectMgr; class CreateTemplateContext extends AsyncRpcConext { final TemplateInfo srcTemplate; - final TemplateInfo templateOnStore; + final DataStore store; final AsyncCallFuture future; - final ObjectInDataStoreVO obj; + final DataObject templateOnStore; + public CreateTemplateContext(AsyncCompletionCallback callback, TemplateInfo srcTemplate, - TemplateInfo templateOnStore, AsyncCallFuture future, - ObjectInDataStoreVO obj) { + DataStore store, + DataObject templateOnStore + ) { super(callback); this.srcTemplate = srcTemplate; - this.templateOnStore = templateOnStore; this.future = future; - this.obj = obj; + this.store = store; + this.templateOnStore = templateOnStore; } } @@ -74,31 +84,15 @@ public class ImageServiceImpl implements ImageService { return future; } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject(template.getId(), template.getType(), store.getId(), store.getRole()); - TemplateInfo templateOnStore = null; - if (obj == null) { - templateOnStore = (TemplateInfo)objectInDataStoreMgr.create(template, store); - obj = objectInDataStoreMgr.findObject(template.getId(), template.getType(), store.getId(), store.getRole()); - } else { - CommandResult result = new CommandResult(); - result.setResult("duplicate template on the storage"); - future.complete(result); - return future; - } + DataObject templateOnStore = store.create(template); + templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested); - try { - objectInDataStoreMgr.update(obj, Event.CreateOnlyRequested); - } catch (NoTransitionException e) { - 
s_logger.debug("failed to transit", e); - CommandResult result = new CommandResult(); - result.setResult(e.toString()); - future.complete(result); - return future; - } CreateTemplateContext context = new CreateTemplateContext(null, - template, templateOnStore, + template, future, - obj); + store, + templateOnStore + ); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createTemplateCallback(null, null)) .setContext(context); @@ -108,42 +102,25 @@ public class ImageServiceImpl implements ImageService { protected Void createTemplateCallback(AsyncCallbackDispatcher callback, CreateTemplateContext context) { - - TemplateInfo templateOnStore = context.templateOnStore; TemplateObject template = (TemplateObject)context.srcTemplate; AsyncCallFuture future = context.future; CommandResult result = new CommandResult(); - + DataObject templateOnStore = context.templateOnStore; CreateCmdResult callbackResult = callback.getResult(); if (callbackResult.isFailed()) { try { - objectInDataStoreMgr.update(templateOnStore, Event.OperationFailed); + templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + template.stateTransit(TemplateEvent.OperationFailed); } catch (NoTransitionException e) { - s_logger.debug("failed to transit state", e); + s_logger.debug("Failed to update template state", e); } result.setResult(callbackResult.getResult()); future.complete(result); return null; } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject(templateOnStore.getId(), templateOnStore.getType(), templateOnStore.getDataStore().getId(), templateOnStore.getDataStore().getRole()); - obj.setInstallPath(callbackResult.getPath()); - - if (callbackResult.getSize() != null) { - obj.setSize(callbackResult.getSize()); - } try { - objectInDataStoreMgr.update(obj, Event.OperationSuccessed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit state", e); - result.setResult(e.toString()); - 
future.complete(result); - return null; - } - - template.setImageStoreId(templateOnStore.getDataStore().getId()); - template.setSize(callbackResult.getSize()); - try { + templateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed); template.stateTransit(TemplateEvent.OperationSucceeded); } catch (NoTransitionException e) { s_logger.debug("Failed to transit state", e); @@ -162,4 +139,18 @@ public class ImageServiceImpl implements ImageService { // TODO Auto-generated method stub return null; } + + @Override + public AsyncCallFuture createTemplateFromSnapshotAsync( + SnapshotInfo snapshot, TemplateInfo template, DataStore store) { + // TODO Auto-generated method stub + return null; + } + + @Override + public AsyncCallFuture createTemplateFromVolumeAsync( + VolumeInfo volume, TemplateInfo template, DataStore store) { + // TODO Auto-generated method stub + return null; + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/downloader/ImageDownloader.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/downloader/ImageDownloader.java index adb247afd0f..af572d49a5e 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/downloader/ImageDownloader.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/downloader/ImageDownloader.java @@ -18,7 +18,7 @@ */ package org.apache.cloudstack.storage.image.downloader; -import org.apache.cloudstack.storage.image.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; public interface ImageDownloader { public void downloadImage(TemplateInfo template); diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java new file mode 100644 index 00000000000..2c19c7fc039 --- /dev/null +++ 
b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java @@ -0,0 +1,187 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.image.driver; + +import java.util.List; +import java.util.Set; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.image.ImageDataStoreDriver; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import 
com.cloud.agent.api.storage.DeleteVolumeCommand; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.RegisterVolumePayload; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; +import com.cloud.storage.download.DownloadMonitor; +import com.cloud.utils.exception.CloudRuntimeException; + +public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { + private static final Logger s_logger = Logger + .getLogger(AncientImageDataStoreDriverImpl.class); + @Inject + VMTemplateZoneDao templateZoneDao; + @Inject + VMTemplateDao templateDao; + @Inject DownloadMonitor _downloadMonitor; + @Inject + VMTemplateHostDao _vmTemplateHostDao; + @Inject VolumeDao volumeDao; + @Inject VolumeHostDao volumeHostDao; + @Inject HostDao hostDao; + @Inject AgentManager agentMgr; + @Override + public String grantAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public Set listObjects(DataStore store) { + // TODO Auto-generated method stub + return null; + } + + class CreateContext extends AsyncRpcConext { + final DataObject data; + public CreateContext(AsyncCompletionCallback callback, DataObject data) { + super(callback); + this.data = data; + } + } + + @Override + public void createAsync(DataObject data, + AsyncCompletionCallback callback) { + if (data.getType() == DataObjectType.TEMPLATE) { + List templateZones 
= this.templateZoneDao.listByTemplateId(data.getId()); + for (VMTemplateZoneVO templateZone : templateZones) { + VMTemplateVO template = this.templateDao.findById(data.getId()); + _downloadMonitor.downloadTemplateToStorage(template, templateZone.getZoneId()); + } + } else if (data.getType() == DataObjectType.VOLUME) { + VolumeVO vol = this.volumeDao.findById(data.getId()); + VolumeInfo volInfo = (VolumeInfo)data; + RegisterVolumePayload payload = (RegisterVolumePayload)volInfo.getpayload(); + _downloadMonitor.downloadVolumeToStorage(vol, vol.getDataCenterId(), payload.getUrl(), + payload.getChecksum(), ImageFormat.valueOf(payload.getFormat().toUpperCase())); + } + + CreateCmdResult result = new CreateCmdResult(null, null); + callback.complete(result); + } + + private void deleteVolume(DataObject data, AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + VolumeVO vol = volumeDao.findById(data.getId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Expunging " + vol); + } + + // Find out if the volume is present on secondary storage + VolumeHostVO volumeHost = volumeHostDao.findByVolumeId(vol.getId()); + if (volumeHost != null) { + if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + HostVO ssHost = hostDao.findById(volumeHost.getHostId()); + DeleteVolumeCommand dtCommand = new DeleteVolumeCommand( + ssHost.getStorageUrl(), volumeHost.getInstallPath()); + Answer answer = agentMgr.sendToSecStorage(ssHost, dtCommand); + if (answer == null || !answer.getResult()) { + s_logger.debug("Failed to delete " + + volumeHost + + " due to " + + ((answer == null) ? 
"answer is null" : answer + .getDetails())); + return; + } + } else if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { + s_logger.debug("Volume: " + vol.getName() + + " is currently being uploaded; cant' delete it."); + throw new CloudRuntimeException( + "Please specify a volume that is not currently being uploaded."); + } + volumeHostDao.remove(volumeHost.getId()); + volumeDao.remove(vol.getId()); + CommandResult result = new CommandResult(); + callback.complete(result); + return; + } + } + + private void deleteTemplate(DataObject data, AsyncCompletionCallback callback) { + + } + + @Override + public void deleteAsync(DataObject data, + AsyncCompletionCallback callback) { + if (data.getType() == DataObjectType.VOLUME) { + deleteVolume(data, callback); + } else if (data.getType() == DataObjectType.TEMPLATE) { + deleteTemplate(data, callback); + } + + + + } + + @Override + public void copyAsync(DataObject srcdata, DataObject destData, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java index dce5a939413..1a506fa782b 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java @@ -34,15 +34,15 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.endpoint.EndPointSelector; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; -import 
org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; + +import com.cloud.storage.dao.VMTemplateDao; //http-read-only based image store public class DefaultImageDataStoreDriverImpl implements ImageDataStoreDriver { @Inject EndPointSelector selector; @Inject - ImageDataDao imageDataDao; + VMTemplateDao imageDataDao; public DefaultImageDataStoreDriverImpl() { } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java index e5a6863a58b..e1fd46b76df 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManager.java @@ -18,13 +18,13 @@ */ package org.apache.cloudstack.storage.image.manager; -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; +import com.cloud.storage.VMTemplateVO; import com.cloud.utils.fsm.StateMachine2; public interface ImageDataManager { - StateMachine2 getStateMachine(); + StateMachine2 getStateMachine(); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java index d90f2b64e24..83e98878158 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataManagerImpl.java @@ -18,17 +18,17 @@ */ package org.apache.cloudstack.storage.image.manager; -import 
org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.db.ImageDataVO; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.springframework.stereotype.Component; +import com.cloud.storage.VMTemplateVO; import com.cloud.utils.fsm.StateMachine2; @Component public class ImageDataManagerImpl implements ImageDataManager { - private final StateMachine2 - stateMachine = new StateMachine2(); + private final StateMachine2 + stateMachine = new StateMachine2(); public ImageDataManagerImpl() { stateMachine.addTransition(TemplateState.Allocated, TemplateEvent.CreateRequested, TemplateState.Creating); @@ -44,7 +44,7 @@ public class ImageDataManagerImpl implements ImageDataManager { } @Override - public StateMachine2 getStateMachine() { + public StateMachine2 getStateMachine() { return stateMachine; } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java index 68a2770c549..2771f78e381 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageDataStoreManagerImpl.java @@ -18,38 +18,48 @@ */ package org.apache.cloudstack.storage.image.manager; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import 
org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageDataStore; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; -import org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; -import org.apache.cloudstack.storage.image.store.HttpDataStoreImpl; +import org.apache.cloudstack.storage.image.store.DefaultImageDataStoreImpl; import org.springframework.stereotype.Component; +import com.cloud.storage.dao.VMTemplateDao; + @Component public class ImageDataStoreManagerImpl implements ImageDataStoreManager { @Inject ImageDataStoreDao dataStoreDao; @Inject - ImageDataDao imageDataDao; + VMTemplateDao imageDataDao; @Inject DataStoreProviderManager providerManager; - Map driverMaps = new HashMap(); + Map driverMaps; + @PostConstruct + public void config() { + driverMaps = new HashMap(); + } + @Override public ImageDataStore getImageDataStore(long dataStoreId) { ImageDataStoreVO dataStore = dataStoreDao.findById(dataStoreId); long providerId = dataStore.getProvider(); ImageDataStoreProvider provider = (ImageDataStoreProvider)providerManager.getDataStoreProviderById(providerId); - ImageDataStore imgStore = HttpDataStoreImpl.getDataStore(dataStore, + ImageDataStore imgStore = DefaultImageDataStoreImpl.getDataStore(dataStore, driverMaps.get(provider.getUuid()), provider ); // TODO Auto-generated method stub @@ -65,4 +75,20 @@ public class ImageDataStoreManagerImpl implements ImageDataStoreManager { return true; } + @Override + public ImageDataStore getImageDataStore(String uuid) { + ImageDataStoreVO dataStore = dataStoreDao.findByUuid(uuid); + return getImageDataStore(dataStore.getId()); + } + + @Override + public List getList() { + List stores = dataStoreDao.listAll(); + List imageStores = 
new ArrayList(); + for (ImageDataStoreVO store : stores) { + imageStores.add(getImageDataStore(store.getId())); + } + return imageStores; + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java new file mode 100644 index 00000000000..b2ee9ab853d --- /dev/null +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/AncientImageDataStoreProvider.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.image.store; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; +import org.apache.cloudstack.storage.image.ImageDataStoreDriver; +import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; +import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; +import org.apache.cloudstack.storage.image.driver.AncientImageDataStoreDriverImpl; +import org.apache.cloudstack.storage.image.store.lifecycle.DefaultImageDataStoreLifeCycle; +import org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +@Component +public class AncientImageDataStoreProvider implements ImageDataStoreProvider { + + private final String name = "ancient image data store"; + protected ImageDataStoreLifeCycle lifeCycle; + protected ImageDataStoreDriver driver; + @Inject + ImageDataStoreManager storeMgr; + @Inject + ImageDataStoreHelper helper; + long id; + String uuid; + @Override + public DataStoreLifeCycle getLifeCycle() { + return lifeCycle; + } + + @Override + public String getName() { + return this.name; + } + + @Override + public String getUuid() { + return this.uuid; + } + + @Override + public long getId() { + return this.id; + } + + @Override + public boolean configure(Map params) { + lifeCycle = ComponentContext.inject(DefaultImageDataStoreLifeCycle.class); + driver = ComponentContext.inject(AncientImageDataStoreDriverImpl.class); + uuid = (String)params.get("uuid"); + id = (Long)params.get("id"); + storeMgr.registerDriver(uuid, driver); + + Map infos = new HashMap(); + String dataStoreName = 
UUID.nameUUIDFromBytes(this.name.getBytes()).toString(); + infos.put("name", dataStoreName); + infos.put("uuid", dataStoreName); + infos.put("protocol", "http"); + infos.put("scope", ScopeType.GLOBAL); + infos.put("provider", this.getId()); + DataStoreLifeCycle lifeCycle = this.getLifeCycle(); + lifeCycle.initialize(infos); + return true; + } + +} diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/HttpDataStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java similarity index 79% rename from engine/storage/image/src/org/apache/cloudstack/storage/image/store/HttpDataStoreImpl.java rename to engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java index 34b4ff27f1a..d159f741584 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/HttpDataStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreImpl.java @@ -26,24 +26,24 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.provider.ImageDataStoreProvider; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.image.datastore.ImageDataStore; -import 
org.apache.cloudstack.storage.image.db.ImageDataDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.storage.encoding.EncodingType; -public class HttpDataStoreImpl implements ImageDataStore { +public class DefaultImageDataStoreImpl implements ImageDataStore { @Inject - ImageDataDao imageDao; + VMTemplateDao imageDao; @Inject private ObjectInDataStoreManager objectInStoreMgr; protected ImageDataStoreDriver driver; @@ -51,7 +51,7 @@ public class HttpDataStoreImpl implements ImageDataStore { protected ImageDataStoreProvider provider; boolean needDownloadToCacheStorage = false; - protected HttpDataStoreImpl() { + protected DefaultImageDataStoreImpl() { } @@ -62,9 +62,9 @@ public class HttpDataStoreImpl implements ImageDataStore { this.provider = provider; } - public static HttpDataStoreImpl getDataStore(ImageDataStoreVO dataStoreVO, ImageDataStoreDriver imageDataStoreDriver, + public static ImageDataStore getDataStore(ImageDataStoreVO dataStoreVO, ImageDataStoreDriver imageDataStoreDriver, ImageDataStoreProvider provider) { - HttpDataStoreImpl instance = (HttpDataStoreImpl)ComponentContext.inject(HttpDataStoreImpl.class); + DefaultImageDataStoreImpl instance = (DefaultImageDataStoreImpl)ComponentContext.inject(DefaultImageDataStoreImpl.class); instance.configure(dataStoreVO, imageDataStoreDriver, provider); return instance; } @@ -81,24 +81,17 @@ public class HttpDataStoreImpl implements ImageDataStore { return this.driver; } - - @Override public DataStoreRole getRole() { // TODO Auto-generated method stub return DataStoreRole.Image; } - - - @Override public long getId() { // TODO Auto-generated method stub return this.imageDataStoreVO.getId(); } - - @Override public String getUri() { return this.imageDataStoreVO.getProtocol() + "://" + "?" 
+ EncodingType.ROLE + "=" + this.getRole(); @@ -106,39 +99,47 @@ public class HttpDataStoreImpl implements ImageDataStore { @Override public Scope getScope() { - // TODO Auto-generated method stub return new ZoneScope(imageDataStoreVO.getDcId()); } - - @Override public TemplateInfo getTemplate(long templateId) { // TODO Auto-generated method stub return null; } - - @Override public VolumeInfo getVolume(long volumeId) { // TODO Auto-generated method stub return null; } - - @Override public SnapshotInfo getSnapshot(long snapshotId) { // TODO Auto-generated method stub return null; } - - @Override public boolean exists(DataObject object) { - return (objectInStoreMgr.findObject(object.getId(), object.getType(), - this.getId(), this.getRole()) != null) ? true : false; + return (objectInStoreMgr.findObject(object, + this) != null) ? true : false; + } + + @Override + public String getUuid() { + return this.imageDataStoreVO.getUuid(); + } + + @Override + public DataObject create(DataObject obj) { + DataObject object = objectInStoreMgr.create(obj, this); + return object; + } + + @Override + public boolean delete(DataObject obj) { + // TODO Auto-generated method stub + return false; } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java index 3569fe803d5..efbb999bdcf 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/DefaultImageDataStoreProvider.java @@ -29,11 +29,9 @@ import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.apache.cloudstack.storage.image.driver.DefaultImageDataStoreDriverImpl; import org.apache.cloudstack.storage.image.store.lifecycle.DefaultImageDataStoreLifeCycle; import 
org.apache.cloudstack.storage.image.store.lifecycle.ImageDataStoreLifeCycle; -import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; -@Component public class DefaultImageDataStoreProvider implements ImageDataStoreProvider { private final String name = "default image data store"; protected ImageDataStoreLifeCycle lifeCycle; diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java index 1b0661c7691..85bc0c118a0 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -20,44 +20,48 @@ package org.apache.cloudstack.storage.image.store; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; import org.apache.cloudstack.storage.image.manager.ImageDataManager; -import 
org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; import org.apache.log4j.Logger; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.storage.encoding.EncodingType; public class TemplateObject implements TemplateInfo { private static final Logger s_logger = Logger .getLogger(TemplateObject.class); - private ImageDataVO imageVO; + private VMTemplateVO imageVO; private DataStore dataStore; @Inject ImageDataManager imageMgr; @Inject - ImageDataDao imageDao; + VMTemplateDao imageDao; @Inject ObjectInDataStoreManager ojbectInStoreMgr; + @Inject VMTemplatePoolDao templatePoolDao; protected TemplateObject() { } - protected void configure(ImageDataVO template, DataStore dataStore) { + protected void configure(VMTemplateVO template, DataStore dataStore) { this.imageVO = template; this.dataStore = dataStore; } - public static TemplateObject getTemplate(ImageDataVO vo, DataStore store) { + public static TemplateObject getTemplate(VMTemplateVO vo, DataStore store) { TemplateObject to = ComponentContext.inject(TemplateObject.class); to.configure(vo, store); return to; @@ -66,12 +70,12 @@ public class TemplateObject implements TemplateInfo { public void setImageStoreId(long id) { this.imageVO.setImageDataStoreId(id); } - + public void setSize(Long size) { this.imageVO.setSize(size); } - public ImageDataVO getImage() { + public VMTemplateVO getImage() { return this.imageVO; } @@ -87,23 +91,20 @@ public class TemplateObject implements TemplateInfo { @Override public String getUuid() { - // TODO Auto-generated method stub - return null; + return this.imageVO.getUuid(); } @Override public String getUri() { - ImageDataVO image = imageDao.findById(this.imageVO.getId()); + VMTemplateVO image = 
imageDao.findById(this.imageVO.getId()); if (this.dataStore == null) { return image.getUrl(); } else { - ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject( - this.imageVO.getId(), DataObjectType.TEMPLATE, - this.dataStore.getId(), this.dataStore.getRole()); + DataObjectInStore obj = ojbectInStoreMgr.findObject(this, this.dataStore); StringBuilder builder = new StringBuilder(); if (obj.getState() == ObjectInDataStoreStateMachine.State.Ready || obj.getState() == ObjectInDataStoreStateMachine.State.Copying) { - + builder.append(this.dataStore.getUri()); builder.append("&" + EncodingType.OBJTYPE + "=" + DataObjectType.TEMPLATE); builder.append("&" + EncodingType.PATH + "=" + obj.getInstallPath()); @@ -124,10 +125,33 @@ public class TemplateObject implements TemplateInfo { if (this.dataStore == null) { return this.imageVO.getSize(); } - ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject( - this.imageVO.getId(), DataObjectType.TEMPLATE, - this.dataStore.getId(), this.dataStore.getRole()); - return obj.getSize(); + + /* + +// If the template that was passed into this allocator is not installed in the storage pool, + // add 3 * (template size on secondary storage) to the running total + VMTemplateHostVO templateHostVO = _storageMgr.findVmTemplateHost(templateForVmCreation.getId(), null); + + if (templateHostVO == null) { + VMTemplateSwiftVO templateSwiftVO = _swiftMgr.findByTmpltId(templateForVmCreation.getId()); + if (templateSwiftVO != null) { + long templateSize = templateSwiftVO.getPhysicalSize(); + if (templateSize == 0) { + templateSize = templateSwiftVO.getSize(); + } + totalAllocatedSize += (templateSize + _extraBytesPerVolume); + } + } else { + long templateSize = templateHostVO.getPhysicalSize(); + if ( templateSize == 0 ){ + templateSize = templateHostVO.getSize(); + } + totalAllocatedSize += (templateSize + _extraBytesPerVolume); + } + + */ + VMTemplateVO image = imageDao.findById(this.imageVO.getId()); + return image.getSize(); } @Override @@ -137,7 
+161,7 @@ public class TemplateObject implements TemplateInfo { @Override public DiskFormat getFormat() { - return DiskFormat.getFormat(this.imageVO.getFormat()); + return DiskFormat.valueOf(this.imageVO.getFormat().toString()); } public boolean stateTransit(TemplateEvent e) throws NoTransitionException { @@ -146,4 +170,14 @@ public class TemplateObject implements TemplateInfo { this.imageVO = imageDao.findById(this.imageVO.getId()); return result; } + + @Override + public void processEvent(Event event) { + try { + ojbectInStoreMgr.update(this, event); + } catch (NoTransitionException e) { + s_logger.debug("failed to update state", e); + throw new CloudRuntimeException("Failed to update state" + e.toString()); + } + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java index 07d52b40682..17aabca3921 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/lifecycle/DefaultImageDataStoreLifeCycle.java @@ -22,12 +22,15 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.apache.cloudstack.storage.image.db.ImageDataStoreDao; import org.apache.cloudstack.storage.image.db.ImageDataStoreVO; +import com.cloud.agent.api.StoragePoolInfo; + public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { @Inject 
protected ImageDataStoreDao imageStoreDao; @@ -40,7 +43,7 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { @Override - public DataStore initialize(Map dsInfos) { + public DataStore initialize(Map dsInfos) { ImageDataStoreVO ids = imageStoreHelper.createImageDataStore(dsInfos); return imageStoreMgr.getImageDataStore(ids.getId()); } @@ -53,6 +56,14 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { } + @Override + public boolean attachHost(DataStore store, HostScope scope, + StoragePoolInfo existingInfo) { + // TODO Auto-generated method stub + return false; + } + + @Override public boolean attachZone(DataStore dataStore, ZoneScope scope) { // TODO Auto-generated method stub @@ -75,23 +86,27 @@ public class DefaultImageDataStoreLifeCycle implements ImageDataStoreLifeCycle { @Override - public boolean maintain() { + public boolean maintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean cancelMaintain() { + public boolean cancelMaintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean deleteDataStore() { + public boolean deleteDataStore(long storeId) { // TODO Auto-generated method stub return false; } + + + + } diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java index 390b0fd7e34..561c1cb288f 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -32,12 +31,11 @@ import org.apache.cloudstack.storage.command.CopyCmd; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.endpoint.EndPointSelector; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; -import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; //At least one of datastore is coming from image store or image cache store -@Component + public class DefaultImageMotionStrategy implements ImageMotionStrategy { @Inject EndPointSelector selector; @@ -86,14 +84,15 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy { @Override public boolean canHandle(DataObject srcData, DataObject destData) { + /* DataStore destStore = destData.getDataStore(); DataStore srcStore = srcData.getDataStore(); if (destStore.getRole() == DataStoreRole.Image || destStore.getRole() == DataStoreRole.ImageCache || srcStore.getRole() == DataStoreRole.Image || srcStore.getRole() == DataStoreRole.ImageCache) { return true; - } - return true; + }*/ + return false; } @Override diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java index 0e3636e3886..93ba4a5ad64 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/ImageMotionServiceImpl.java @@ -23,18 +23,12 @@ import java.util.List; import javax.inject.Inject; import 
org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageService; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; -import org.apache.cloudstack.storage.volume.VolumeService; -import org.springframework.stereotype.Component; -import com.cloud.utils.exception.CloudRuntimeException; -@Component public class ImageMotionServiceImpl implements ImageMotionService { @Inject List motionStrategies; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java index 9c30a2e8269..2ad52159afc 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java @@ -16,14 +16,25 @@ // under the License. 
package org.apache.cloudstack.storage.test; +import java.io.IOException; + import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.engine.service.api.OrchestrationService; import org.apache.cloudstack.storage.HostEndpointRpcServer; import org.apache.cloudstack.storage.endpoint.EndPointSelector; +import org.apache.cloudstack.storage.test.ChildTestConfiguration.Library; import org.mockito.Mockito; import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; import com.cloud.agent.AgentManager; +import com.cloud.alert.AlertManager; import com.cloud.cluster.ClusteredAgentRebalanceService; import com.cloud.cluster.agentlb.dao.HostTransferMapDao; import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; @@ -50,11 +61,35 @@ import com.cloud.host.dao.HostTagsDaoImpl; import com.cloud.server.auth.UserAuthenticator; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolHostDaoImpl; +import com.cloud.storage.dao.VMTemplateDaoImpl; import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplateDetailsDaoImpl; +import com.cloud.storage.dao.VMTemplateHostDaoImpl; +import com.cloud.storage.dao.VMTemplatePoolDaoImpl; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VMTemplateZoneDaoImpl; - +import com.cloud.storage.dao.VolumeDaoImpl; +import com.cloud.storage.dao.VolumeHostDaoImpl; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.utils.component.SpringComponentScanUtils; 
+import com.cloud.vm.dao.NicDaoImpl; +import com.cloud.vm.dao.VMInstanceDaoImpl; +@Configuration +@ComponentScan(basePackageClasses={ + NicDaoImpl.class, + VMInstanceDaoImpl.class, + VMTemplateHostDaoImpl.class, + VolumeHostDaoImpl.class, + VolumeDaoImpl.class, + VMTemplatePoolDaoImpl.class, + ResourceTagsDaoImpl.class, + VMTemplateDaoImpl.class, + MockStorageMotionStrategy.class +}, +includeFilters={@Filter(value=Library.class, type=FilterType.CUSTOM)}, +useDefaultFilters=false +) public class ChildTestConfiguration extends TestConfiguration { @Override @@ -148,6 +183,27 @@ public class ChildTestConfiguration extends TestConfiguration { public APIChecker apiChecker() { return Mockito.mock(APIChecker.class); } + + @Bean + public SnapshotManager snapshotMgr() { + return Mockito.mock(SnapshotManager.class); + } + + @Bean + public AlertManager alertMgr() { + return Mockito.mock(AlertManager.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + mdr.getClassMetadata().getClassName(); + ComponentScan cs = ChildTestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringComponentScanUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + + } /* @Override @Bean public PrimaryDataStoreDao primaryDataStoreDao() { diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java new file mode 100644 index 00000000000..e2e8f9439c5 --- /dev/null +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.test; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.motion.DataMotionStrategy; + +public class MockStorageMotionStrategy implements DataMotionStrategy { + + @Override + public boolean canHandle(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return true; + } + + @Override + public Void copyAsync(DataObject srcData, DataObject destData, + AsyncCompletionCallback callback) { + CopyCommandResult result = new CopyCommandResult("something"); + callback.complete(result); + return null; + } + +} diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java index 0e88f733e08..85421a53b56 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/volumeServiceTest.java @@ -28,33 +28,30 @@ import java.util.UUID; import 
java.util.concurrent.ExecutionException; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; import org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.storage.HypervisorHostEndPoint; -import org.apache.cloudstack.storage.datastore.VolumeDataFactory; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.ImageService; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.db.ImageDataDao; -import org.apache.cloudstack.storage.image.db.ImageDataVO; -import org.apache.cloudstack.storage.volume.VolumeService; -import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult; import org.apache.cloudstack.storage.volume.db.VolumeDao2; import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.mockito.Mockito; @@ -78,7 +75,11 @@ import com.cloud.org.Cluster.ClusterType; import com.cloud.org.Managed.ManagedState; import com.cloud.resource.ResourceState; import com.cloud.storage.Storage; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.component.ComponentContext; @ContextConfiguration(locations={"classpath:/storageContext.xml"}) public class volumeServiceTest extends CloudStackTestNGBase { @@ -89,7 +90,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { @Inject VolumeService volumeService; @Inject - ImageDataDao imageDataDao; + VMTemplateDao imageDataDao; @Inject VolumeDao2 volumeDao; @Inject @@ -121,12 +122,13 @@ public class volumeServiceTest extends CloudStackTestNGBase { @Test(priority = -1) public void setUp() { - try { + ComponentContext.initComponentsLifeCycle(); + /* try { dataStoreProviderMgr.configure(null, new HashMap()); } catch (ConfigurationException e) { // TODO Auto-generated catch block e.printStackTrace(); - } + }*/ host = hostDao.findByGuid(this.getHostGuid()); if (host != null) { dcId = host.getDataCenterId(); @@ -205,18 +207,17 @@ public class volumeServiceTest extends CloudStackTestNGBase { 
Mockito.when(selector.select(Mockito.any(DataObject.class), Mockito.any(DataObject.class))).thenReturn(eps.get(0)); } - private ImageDataVO createImageData() { - ImageDataVO image = new ImageDataVO(); + private VMTemplateVO createImageData() { + VMTemplateVO image = new VMTemplateVO(); image.setTemplateType(TemplateType.USER); image.setUrl(this.getTemplateUrl()); image.setUniqueName(UUID.randomUUID().toString()); image.setName(UUID.randomUUID().toString()); image.setPublicTemplate(true); image.setFeatured(true); - image.setRequireHvm(true); + image.setRequiresHvm(true); image.setBits(64); - image.setFormat(Storage.ImageFormat.VHD.toString()); - image.setAccountId(1); + image.setFormat(Storage.ImageFormat.VHD); image.setEnablePassword(true); image.setEnableSshKey(true); image.setGuestOSId(1); @@ -234,13 +235,13 @@ public class volumeServiceTest extends CloudStackTestNGBase { private TemplateInfo createTemplate() { try { DataStore store = createImageStore(); - ImageDataVO image = createImageData(); + VMTemplateVO image = createImageData(); TemplateInfo template = imageDataFactory.getTemplate(image.getId(), store); AsyncCallFuture future = imageService.createTemplateAsync(template, store); future.get(); template = imageDataFactory.getTemplate(image.getId(), store); /*imageProviderMgr.configure("image Provider", new HashMap()); - ImageDataVO image = createImageData(); + VMTemplateVO image = createImageData(); ImageDataStoreProvider defaultProvider = imageProviderMgr.getProvider("DefaultProvider"); ImageDataStoreLifeCycle lifeCycle = defaultProvider.getLifeCycle(); ImageDataStore store = lifeCycle.registerDataStore("defaultHttpStore", new HashMap()); @@ -262,7 +263,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { @Test public void testCreatePrimaryStorage() { DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider("default primary data store provider"); - Map params = new HashMap(); + Map params = new HashMap(); URI uri = null; try { uri 
= new URI(this.getPrimaryStorageUrl()); @@ -273,7 +274,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { params.put("url", this.getPrimaryStorageUrl()); params.put("server", uri.getHost()); params.put("path", uri.getPath()); - params.put("protocol", uri.getScheme()); + params.put("protocol", StoragePoolType.NetworkFilesystem); params.put("dcId", dcId.toString()); params.put("clusterId", clusterId.toString()); params.put("name", this.primaryName); @@ -290,7 +291,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { private DataStore createImageStore() { DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider("default image data store"); - Map params = new HashMap(); + Map params = new HashMap(); String name = UUID.randomUUID().toString(); params.put("name", name); params.put("uuid", name); @@ -310,12 +311,12 @@ public class volumeServiceTest extends CloudStackTestNGBase { public DataStore createPrimaryDataStore() { try { DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider("default primary data store provider"); - Map params = new HashMap(); + Map params = new HashMap(); URI uri = new URI(this.getPrimaryStorageUrl()); params.put("url", this.getPrimaryStorageUrl()); params.put("server", uri.getHost()); params.put("path", uri.getPath()); - params.put("protocol", uri.getScheme()); + params.put("protocol", Storage.StoragePoolType.NetworkFilesystem); params.put("dcId", dcId.toString()); params.put("clusterId", clusterId.toString()); params.put("name", this.primaryName); @@ -390,7 +391,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { DataStore primaryStore = this.primaryStore; VolumeVO volume = createVolume(null, primaryStore.getId()); VolumeInfo vol = volumeFactory.getVolume(volume.getId(), primaryStore); - AsyncCallFuture future = volumeService.createVolumeAsync(vol, primaryStore.getId()); + AsyncCallFuture future = volumeService.createVolumeAsync(vol, primaryStore); try { future.get(); } 
catch (InterruptedException e) { @@ -407,7 +408,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { DataStore primaryStore = this.primaryStore; VolumeVO volume = createVolume(null, primaryStore.getId()); VolumeInfo vol = volumeFactory.getVolume(volume.getId(), primaryStore); - AsyncCallFuture future = volumeService.createVolumeAsync(vol, primaryStore.getId()); + AsyncCallFuture future = volumeService.createVolumeAsync(vol, primaryStore); try { future.get(); } catch (InterruptedException e) { @@ -420,7 +421,7 @@ public class volumeServiceTest extends CloudStackTestNGBase { //delete the volume vol = volumeFactory.getVolume(volume.getId(), primaryStore); - future = volumeService.deleteVolumeAsync(vol); + future = volumeService.expungeVolumeAsync(vol); try { future.get(); } catch (InterruptedException e) { @@ -434,9 +435,9 @@ public class volumeServiceTest extends CloudStackTestNGBase { //@Test(priority=3) public void tearDown() { - List ds = primaryStoreDao.findPoolByName(this.primaryName); + List ds = primaryStoreDao.findPoolByName(this.primaryName); for (int i = 0; i < ds.size(); i++) { - PrimaryDataStoreVO store = ds.get(i); + StoragePoolVO store = ds.get(i); store.setUuid(null); primaryStoreDao.remove(ds.get(i).getId()); primaryStoreDao.expunge(ds.get(i).getId()); diff --git a/engine/storage/integration-test/test/resource/component.xml b/engine/storage/integration-test/test/resource/component.xml new file mode 100644 index 00000000000..0368ad41425 --- /dev/null +++ b/engine/storage/integration-test/test/resource/component.xml @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/integration-test/test/resource/storageContext.xml 
b/engine/storage/integration-test/test/resource/storageContext.xml index 0127c96a734..4f55e243bac 100644 --- a/engine/storage/integration-test/test/resource/storageContext.xml +++ b/engine/storage/integration-test/test/resource/storageContext.xml @@ -45,6 +45,7 @@ + diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index 487e2d53eff..095320907c7 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -20,11 +20,14 @@ package org.apache.cloudstack.storage.snapshot; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.storage.datastore.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.snapshot.db.SnapshotDao2; import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; import org.springframework.stereotype.Component; @@ -40,8 +43,21 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory { @Override public SnapshotInfo getSnapshot(long snapshotId, DataStore store) { SnapshotVO snapshot = snapshotDao.findById(snapshotId); - ObjectInDataStoreVO obj = objMap.findObject(snapshotId, 
DataObjectType.SNAPSHOT, store.getId(), store.getRole()); + DataObjectInStore obj = objMap.findObject(snapshot.getUuid(), DataObjectType.SNAPSHOT, store.getUuid(), store.getRole()); + if (obj == null) { + return null; + } SnapshotObject so = new SnapshotObject(snapshot, store); return so; } + @Override + public SnapshotInfo getSnapshot(long snapshotId) { + // TODO Auto-generated method stub + return null; + } + @Override + public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { + // TODO Auto-generated method stub + return null; + } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 6ce17973375..d9fc8aabfe8 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -20,6 +20,8 @@ package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; @@ -97,4 +99,10 @@ public class SnapshotObject implements SnapshotInfo { return null; } + @Override + public void processEvent(Event event) { + // TODO Auto-generated method stub + + } + } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 80b1918665d..bd3caf4c0bc 100644 --- 
a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.springframework.stereotype.Component; @Component diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java index 7f18200cd3d..8ef09275bcd 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java @@ -16,7 +16,7 @@ // under the License. package org.apache.cloudstack.storage.snapshot.strategy; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; import org.springframework.stereotype.Component; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java index fa9c5aeaa08..7af395acb96 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java @@ -16,7 +16,7 @@ // under the License. 
package org.apache.cloudstack.storage.snapshot.strategy; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; public class StorageBasedSnapshot implements SnapshotStrategy { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java index 657d32c7877..218f9013a17 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java @@ -24,14 +24,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.motion.DataMotionService; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -52,7 +52,7 @@ public class DataObjectManagerImpl 
implements DataObjectManager { protected DataObject waitingForCreated(DataObject dataObj, DataStore dataStore) { long retries = this.waitingRetries; - ObjectInDataStoreVO obj = null; + DataObjectInStore obj = null; do { try { Thread.sleep(waitingTime); @@ -61,8 +61,8 @@ public class DataObjectManagerImpl implements DataObjectManager { throw new CloudRuntimeException("sleep interrupted", e); } - obj = objectInDataStoreMgr.findObject(dataObj.getId(), - dataObj.getType(), dataStore.getId(), dataStore.getRole()); + obj = objectInDataStoreMgr.findObject(dataObj, + dataStore); if (obj == null) { s_logger.debug("can't find object in db, maybe it's cleaned up already, exit waiting"); break; @@ -92,11 +92,10 @@ public class DataObjectManagerImpl implements DataObjectManager { } @Override - public void createAsync(DataObject data, DataStore store, + public void createAsync(DataObject data, DataStore store, AsyncCompletionCallback callback, boolean noCopy) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), store.getId(), - store.getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + data, store); DataObject objInStore = null; boolean freshNewTemplate = false; if (obj == null) { @@ -105,8 +104,8 @@ public class DataObjectManagerImpl implements DataObjectManager { data, store); freshNewTemplate = true; } catch (Throwable e) { - obj = objectInDataStoreMgr.findObject(data.getId(), - data.getType(), store.getId(), store.getRole()); + obj = objectInDataStoreMgr.findObject(data, + store); if (obj == null) { CreateCmdResult result = new CreateCmdResult( null, null); @@ -184,20 +183,12 @@ public class DataObjectManagerImpl implements DataObjectManager { return null; } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - objInStrore.getId(), objInStrore - .getType(), objInStrore.getDataStore() - .getId(), objInStrore.getDataStore() - .getRole()); - - obj.setInstallPath(result.getPath()); - 
obj.setSize(result.getSize()); try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(objInStrore, ObjectInDataStoreStateMachine.Event.OperationSuccessed); } catch (NoTransitionException e) { try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(objInStrore, ObjectInDataStoreStateMachine.Event.OperationFailed); } catch (NoTransitionException e1) { s_logger.debug("failed to change state", e1); @@ -259,14 +250,10 @@ public class DataObjectManagerImpl implements DataObjectManager { CopyContext context) { CopyCommandResult result = callback.getResult(); DataObject destObj = context.destObj; - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - destObj.getId(), destObj - .getType(), destObj.getDataStore() - .getId(), destObj.getDataStore() - .getRole()); + if (result.isFailed()) { try { - objectInDataStoreMgr.update(obj, Event.OperationFailed); + objectInDataStoreMgr.update(destObj, Event.OperationFailed); } catch (NoTransitionException e) { s_logger.debug("Failed to update copying state", e); } @@ -276,10 +263,8 @@ public class DataObjectManagerImpl implements DataObjectManager { context.getParentCallback().complete(res); } - obj.setInstallPath(result.getPath()); - try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(destObj, ObjectInDataStoreStateMachine.Event.OperationSuccessed); } catch (NoTransitionException e) { s_logger.debug("Failed to update copying state: ", e); @@ -311,11 +296,8 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public void deleteAsync(DataObject data, AsyncCompletionCallback callback) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), data.getDataStore().getId(), - data.getDataStore().getRole()); try { - objectInDataStoreMgr.update(obj, Event.DestroyRequested); + objectInDataStoreMgr.update(data, Event.DestroyRequested); } catch (NoTransitionException e) { s_logger.debug("destroy failed", e); CreateCmdResult res 
= new CreateCmdResult( @@ -338,23 +320,18 @@ public class DataObjectManagerImpl implements DataObjectManager { protected Void deleteAsynCallback(AsyncCallbackDispatcher callback, DeleteContext context) { DataObject destObj = context.obj; - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - destObj.getId(), destObj - .getType(), destObj.getDataStore() - .getId(), destObj.getDataStore() - .getRole()); - + CommandResult res = callback.getResult(); if (res.isFailed()) { try { - objectInDataStoreMgr.update(obj, Event.OperationFailed); + objectInDataStoreMgr.update(destObj, Event.OperationFailed); } catch (NoTransitionException e) { s_logger.debug("delete failed", e); } } else { try { - objectInDataStoreMgr.update(obj, Event.OperationSuccessed); + objectInDataStoreMgr.update(destObj, Event.OperationSuccessed); } catch (NoTransitionException e) { s_logger.debug("delete failed", e); } @@ -366,9 +343,8 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public DataObject createInternalStateOnly(DataObject data, DataStore store) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), store.getId(), - store.getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + data, store); DataObject objInStore = null; if (obj == null) { objInStore = objectInDataStoreMgr.create( @@ -391,12 +367,6 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public void update(DataObject data, String path, Long size) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), data.getDataStore().getId(), - data.getDataStore().getRole()); - - obj.setInstallPath(path); - obj.setSize(size); - objectInDataStoreMgr.update(obj); + throw new CloudRuntimeException("not implemented"); } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java 
b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index f857ac5db1a..a2fd08d1e8f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -18,12 +18,15 @@ */ package org.apache.cloudstack.storage.datastore; +import java.util.List; import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.springframework.stereotype.Component; @@ -50,5 +53,22 @@ public class DataStoreManagerImpl implements DataStoreManager { String providerUuid) { return null; } + @Override + public DataStore getDataStore(String uuid, DataStoreRole role) { + if (role == DataStoreRole.Primary) { + return primaryStorMgr.getPrimaryDataStore(uuid); + } else if (role == DataStoreRole.Image) { + return imageDataStoreMgr.getImageDataStore(uuid); + } + throw new CloudRuntimeException("un recognized type" + role); + } + @Override + public List getImageStores(Scope scope) { + return imageDataStoreMgr.getList(); + } + @Override + public DataStore getPrimaryDataStore(long storeId) { + return primaryStorMgr.getPrimaryDataStore(storeId); + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java index e707de6b8bd..d170f5c707a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java @@ -17,26 +17,20 @@ package 
org.apache.cloudstack.storage.datastore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import com.cloud.utils.fsm.NoTransitionException; public interface ObjectInDataStoreManager { public DataObject create(DataObject template, DataStore dataStore); - public VolumeInfo create(VolumeInfo volume, DataStore dataStore); - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore); - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, - long dataStoreId, DataStoreRole role); public DataObject get(DataObject dataObj, DataStore store); public boolean update(DataObject vo, Event event) throws NoTransitionException; - boolean update(ObjectInDataStoreVO obj, Event event) - throws NoTransitionException; - - boolean update(ObjectInDataStoreVO obj); + DataObjectInStore findObject(String uuid, DataObjectType type, + String dataStoreUuid, DataStoreRole role); + DataObjectInStore findObject(DataObject obj, DataStore store); + DataStore findStore(String objUuid, DataObjectType type, DataStoreRole role); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java index 7eb4932348f..87ba1d216c5 100644 --- 
a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java @@ -19,19 +19,25 @@ package org.apache.cloudstack.storage.datastore; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.storage.db.ObjectInDataStoreDao; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeHostDao; import 
com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.SearchCriteria2; import com.cloud.utils.db.SearchCriteriaService; @@ -41,16 +47,28 @@ import com.cloud.utils.fsm.StateMachine2; @Component public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { + private static final Logger s_logger = Logger + .getLogger(ObjectInDataStoreManagerImpl.class); @Inject ImageDataFactory imageFactory; @Inject + DataStoreManager storeMgr; + @Inject VolumeDataFactory volumeFactory; @Inject ObjectInDataStoreDao objectDataStoreDao; - protected StateMachine2 stateMachines; + @Inject + VolumeHostDao volumeHostDao; + @Inject + VMTemplateHostDao templateHostDao; + @Inject + VMTemplatePoolDao templatePoolDao; + @Inject + SnapshotDataFactory snapshotFactory; + protected StateMachine2 stateMachines; public ObjectInDataStoreManagerImpl() { - stateMachines = new StateMachine2(); + stateMachines = new StateMachine2(); stateMachines.addTransition(State.Allocated, Event.CreateRequested, State.Creating); stateMachines.addTransition(State.Creating, Event.OperationSuccessed, @@ -76,101 +94,122 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating2); stateMachines.addTransition(State.Creating2, Event.OperationFailed, - State.Failed); + State.Allocated); stateMachines.addTransition(State.Creating2, Event.OperationSuccessed, State.Ready); } @Override public DataObject create(DataObject obj, DataStore dataStore) { - - ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); - vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); - vo.setObjectId(obj.getId()); - vo.setSize(obj.getSize()); - - vo.setObjectType(obj.getType()); - vo = objectDataStoreDao.persist(vo); + if (obj.getType() == DataObjectType.TEMPLATE && dataStore.getRole() == DataStoreRole.Primary) { + VMTemplateStoragePoolVO vo = new VMTemplateStoragePoolVO(dataStore.getId(), 
obj.getId()); + vo = templatePoolDao.persist(vo); + } else { + ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); + vo.setDataStoreRole(dataStore.getRole()); + vo.setDataStoreUuid(dataStore.getUuid()); + vo.setObjectType(obj.getType()); + vo.setObjectUuid(obj.getUuid()); + vo = objectDataStoreDao.persist(vo); + } if (obj.getType() == DataObjectType.TEMPLATE) { - return imageFactory.getTemplate(obj.getId(), dataStore); + return imageFactory.getTemplate(obj, dataStore); } else if (obj.getType() == DataObjectType.VOLUME) { - return volumeFactory.getVolume(obj.getId(), dataStore); + return volumeFactory.getVolume(obj, dataStore); + } else if (obj.getType() == DataObjectType.SNAPSHOT) { + return snapshotFactory.getSnapshot(obj, dataStore); } throw new CloudRuntimeException("unknown type"); } - - @Override - public VolumeInfo create(VolumeInfo volume, DataStore dataStore) { - ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); - vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); - vo.setObjectId(volume.getId()); - vo.setObjectType(volume.getType()); - vo = objectDataStoreDao.persist(vo); - - return volumeFactory.getVolume(volume.getId(), dataStore); - } - - @Override - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore) { - // TODO Auto-generated method stub - return null; - } - - @Override - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, - long dataStoreId, DataStoreRole role) { - SearchCriteriaService sc = SearchCriteria2 - .create(ObjectInDataStoreVO.class); - sc.addAnd(sc.getEntity().getObjectId(), Op.EQ, objectId); - sc.addAnd(sc.getEntity().getDataStoreId(), Op.EQ, dataStoreId); - sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); - sc.addAnd(sc.getEntity().getState(), Op.NIN, - ObjectInDataStoreStateMachine.State.Destroyed, - ObjectInDataStoreStateMachine.State.Failed); - ObjectInDataStoreVO objectStoreVO = sc.find(); - 
return objectStoreVO; - - } - + @Override public boolean update(DataObject data, Event event) throws NoTransitionException { - ObjectInDataStoreVO obj = this.findObject(data.getId(), data.getType(), - data.getDataStore().getId(), data.getDataStore().getRole()); + DataObjectInStore obj = this.findObject(data, data.getDataStore()); if (obj == null) { throw new CloudRuntimeException( "can't find mapping in ObjectInDataStore table for: " + data); } - return this.stateMachines.transitTo(obj, event, null, - objectDataStoreDao); - - } - - @Override - public boolean update(ObjectInDataStoreVO obj, Event event) - throws NoTransitionException { - return this.stateMachines.transitTo(obj, event, null, - objectDataStoreDao); - + + if (data.getType() == DataObjectType.TEMPLATE && data.getDataStore().getRole() == DataStoreRole.Primary) { + try { + this.stateMachines.transitTo(obj, event, null, + templatePoolDao); + } catch (NoTransitionException e) { + if (event == Event.CreateOnlyRequested || event == Event.OperationSuccessed) { + s_logger.debug("allow muliple create requests"); + } else { + throw e; + } + } + } else { + this.stateMachines.transitTo(obj, event, null, objectDataStoreDao); + } + return true; } @Override public DataObject get(DataObject dataObj, DataStore store) { if (dataObj.getType() == DataObjectType.TEMPLATE) { - return imageFactory.getTemplate(dataObj.getId(), store); + return imageFactory.getTemplate(dataObj, store); } else if (dataObj.getType() == DataObjectType.VOLUME) { - return volumeFactory.getVolume(dataObj.getId(), store); + return volumeFactory.getVolume(dataObj, store); } throw new CloudRuntimeException("unknown type"); } @Override - public boolean update(ObjectInDataStoreVO obj) { - return objectDataStoreDao.update(obj.getId(), obj); + public DataObjectInStore findObject(DataObject obj, DataStore store) { + DataObjectInStore vo = null; + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + + if (store.getRole() == 
DataStoreRole.Image) { + sc.addAnd(sc.getEntity().getDataStoreUuid(), Op.EQ, store.getUuid()); + sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, store.getRole()); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, obj.getUuid()); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, obj.getType()); + vo = sc.find(); + } else if (obj.getType() == DataObjectType.TEMPLATE && store.getRole() == DataStoreRole.Primary) { + vo = templatePoolDao.findByPoolTemplate(store.getId(), obj.getId()); + } else { + s_logger.debug("unknown type: " + obj.getType() + " " + store.getRole()); + throw new CloudRuntimeException("unknown type"); + } + return vo; } + + @Override + public DataObjectInStore findObject(String uuid, DataObjectType type, + String dataStoreUuid, DataStoreRole role) { + DataObjectInStore vo = null; + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + + if (role == DataStoreRole.Image) { + sc.addAnd(sc.getEntity().getDataStoreUuid(), Op.EQ, dataStoreUuid); + sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, uuid); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); + vo = sc.find(); + } + return vo; + } + + @Override + public DataStore findStore(String objUuid, DataObjectType type, DataStoreRole role) { + DataStore store = null; + if (role == DataStoreRole.Image) { + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, objUuid); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); + ObjectInDataStoreVO vo = sc.find(); + if (vo != null) { + store = this.storeMgr.getDataStore(vo.getDataStoreUuid(), vo.getDataStoreRole()); + } + } + return store; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java index 
a6ba9bc1f60..fdaaace49d7 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java @@ -20,31 +20,19 @@ package org.apache.cloudstack.storage.datastore; import java.util.List; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; -import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { VolumeInfo getVolume(long id); List getVolumes(); -/* void deleteVolumeAsync(VolumeInfo volume, AsyncCompletionCallback callback); - - void createVolumeAsync(VolumeInfo vo, VolumeDiskType diskType, AsyncCompletionCallback callback); - - void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore, AsyncCompletionCallback callback); - */ - boolean exists(DataObject data); TemplateInfo getTemplate(long templateId); @@ -53,13 +41,4 @@ public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { DiskFormat getDefaultDiskType(); - -/* void takeSnapshot(SnapshotInfo snapshot, - 
AsyncCompletionCallback callback); - - void revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback); - - void deleteSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback);*/ } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java index 0ac57f445aa..e70f803ee81 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java @@ -26,8 +26,8 @@ import java.util.Map; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; public class PrimaryDataStoreEntityImpl implements StorageEntity { private PrimaryDataStoreInfo dataStore; @@ -132,7 +132,8 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { @Override public State getState() { - return this.dataStore.getManagedState(); + //return this.dataStore.getManagedState(); + return null; } @Override @@ -229,13 +230,7 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { return null; } - @Override - public String getStorageProvider() { - // TODO Auto-generated method stub - return null; - } - @Override public String getStorageType() { // TODO Auto-generated method stub return null; @@ -247,4 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { } + @Override + public Long getStorageProviderId() { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean isInMaintenance() { + // TODO Auto-generated method stub + return false; + } + } diff --git 
a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java index a60ec7a6e65..664c2d1c216 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java @@ -18,11 +18,14 @@ */ package org.apache.cloudstack.storage.datastore; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; public interface PrimaryDataStoreProviderManager { public PrimaryDataStore getPrimaryDataStore(long dataStoreId); + public PrimaryDataStore getPrimaryDataStore(String uuid); boolean registerDriver(String uuid, PrimaryDataStoreDriver driver); + boolean registerHostListener(String uuid, HypervisorHostListener listener); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 3634b52908a..96d2da357f5 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -26,14 +26,19 @@ import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import 
com.cloud.utils.component.ManagerBase; @Component public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager { + private static final Logger s_logger = Logger + .getLogger(DataStoreProviderManagerImpl.class); @Inject List providers; @Inject @@ -59,8 +64,8 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Override public boolean configure(String name, Map params) throws ConfigurationException { - -/* + Map copyParams = new HashMap(params); + //TODO: hold global lock List providerVos = providerDao.listAll(); for (DataStoreProvider provider : providers) { @@ -83,12 +88,20 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto } else { uuid = providerVO.getUuid(); } - params.put("uuid", uuid); - params.put("id", providerVO.getId()); - provider.configure(params); + copyParams.put("uuid", uuid); + copyParams.put("id", providerVO.getId()); providerMap.put(uuid, provider); + try { + boolean registrationResult = provider.configure(copyParams); + if (!registrationResult) { + providerMap.remove(uuid); + } + } catch(Exception e) { + s_logger.debug("configure provider failed", e); + providerMap.remove(uuid); + } } - */ + return true; } @@ -97,4 +110,9 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto DataStoreProviderVO provider = providerDao.findById(id); return providerMap.get(provider.getUuid()); } + + @Override + public DataStoreProvider getDefaultPrimaryDataStoreProvider() { + return this.getDataStoreProvider("ancient primary data store provider"); + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java index 502158cdaaa..d44a40e971f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java +++ 
b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; + public interface ImageDataStoreProvider extends DataStoreProvider { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java index dbca549212c..fdf5958f1ab 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.datastore.provider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; + public interface PrimaryDataStoreProvider extends DataStoreProvider { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java index 08f9182f237..fb7dec0fa41 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java @@ -16,10 +16,12 @@ // under the License. 
package org.apache.cloudstack.storage.db; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; -public interface ObjectInDataStoreDao extends GenericDao, StateDao { +public interface ObjectInDataStoreDao extends GenericDao, StateDao { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java index 4a5a913adca..50dc984d49b 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java @@ -20,17 +20,17 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.storage.VolumeVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.UpdateBuilder; @Component public class ObjectInDataStoreDaoImpl extends GenericDaoBase implements ObjectInDataStoreDao { @@ -47,7 +47,8 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase { +public class 
ObjectInDataStoreVO implements StateObject, DataObjectInStore { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) long id; - @Column(name = "datastore_id") - private long dataStoreId; + @Column(name = "datastore_uuid") + private String dataStoreUuid; @Column(name = "datastore_role") @Enumerated(EnumType.STRING) private DataStoreRole dataStoreRole; - @Column(name = "object_id") - long objectId; + @Column(name = "object_uuid") + String objectUuid; @Column(name = "object_type") @Enumerated(EnumType.STRING) @@ -74,6 +76,15 @@ public class ObjectInDataStoreVO implements StateObject params) { - ImageDataStoreVO store = imageStoreDao.findByUuid(params.get("uuid")); + public ImageDataStoreVO createImageDataStore(Map params) { + ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid")); if (store != null) { - throw new CloudRuntimeException("duplicate uuid"); + return store; } store = new ImageDataStoreVO(); - store.setName(params.get("name")); - store.setProtocol(params.get("protocol")); - store.setProvider(Long.parseLong(params.get("provider"))); - store.setScope(Enum.valueOf(ScopeType.class, params.get("scope"))); - store.setUuid(params.get("uuid")); + store.setName((String)params.get("name")); + store.setProtocol((String)params.get("protocol")); + store.setProvider((Long)params.get("provider")); + store.setScope((ScopeType)params.get("scope")); + store.setUuid((String)params.get("uuid")); store = imageStoreDao.persist(store); return store; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java index 2bd361f05e9..b6d84cdcef2 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java @@ -18,9 +18,14 @@ */ package org.apache.cloudstack.storage.image.datastore; 
+import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; public interface ImageDataStoreManager { ImageDataStore getImageDataStore(long dataStoreId); + ImageDataStore getImageDataStore(String uuid); + List getList(); boolean registerDriver(String uuid, ImageDataStoreDriver driver); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java deleted file mode 100644 index b5db164055d..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.db; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; - -import com.cloud.domain.DomainVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.template.VirtualMachineTemplate.TemplateFilter; -import com.cloud.user.Account; -import com.cloud.utils.Pair; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateDao; - -public interface ImageDataDao extends GenericDao, StateDao { - public List listByPublic(); - - public ImageDataVO findByName(String templateName); - - public ImageDataVO findByTemplateName(String templateName); - - // public void update(ImageDataVO template); - - public List listAllSystemVMTemplates(); - - public List listDefaultBuiltinTemplates(); - - public String getRoutingTemplateUniqueName(); - - public List findIsosByIdAndPath(Long domainId, Long accountId, String path); - - public List listReadyTemplates(); - - public List listByAccountId(long accountId); - - public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, - ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags); - - public Set> searchSwiftTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, Map tags); - - public long addTemplateToZone(ImageDataVO tmplt, long zoneId); - - public List listAllInZone(long dataCenterId); - - 
public List listByHypervisorType(List hyperTypes); - - public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags); - - public List userIsoSearch(boolean listRemoved); - - ImageDataVO findSystemVMTemplate(long zoneId); - - ImageDataVO findSystemVMTemplate(long zoneId, HypervisorType hType); - - ImageDataVO findRoutingTemplate(HypervisorType type); - - List listPrivateTemplatesByHost(Long hostId); - - public Long countTemplatesForAccount(long accountId); - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java deleted file mode 100644 index 301b5861f8c..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java +++ /dev/null @@ -1,975 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.db; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.format.ISO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.VMTemplateZoneVO; -import com.cloud.storage.dao.VMTemplateDaoImpl; -import com.cloud.storage.dao.VMTemplateDetailsDao; -import com.cloud.storage.dao.VMTemplateZoneDao; -import com.cloud.tags.ResourceTagVO; -import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.template.VirtualMachineTemplate.TemplateFilter; -import com.cloud.user.Account; -import com.cloud.utils.Pair; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import 
com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.UpdateBuilder; -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class ImageDataDaoImpl extends GenericDaoBase implements ImageDataDao { - private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); - - @Inject - VMTemplateZoneDao templateZoneDao; - @Inject - VMTemplateDetailsDao templateDetailsDao; - - @Inject - ConfigurationDao configDao; - @Inject - HostDao hostDao; - @Inject - DomainDao domainDao; - @Inject - DataCenterDao dcDao; - - private final String SELECT_TEMPLATE_HOST_REF = "SELECT t.id, h.data_center_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; - - private final String SELECT_TEMPLATE_ZONE_REF = "SELECT t.id, tzr.zone_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t INNER JOIN template_zone_ref tzr on (t.id = tzr.template_id) "; - - private final String SELECT_TEMPLATE_SWIFT_REF = "SELECT t.id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; - protected SearchBuilder TemplateNameSearch; - protected SearchBuilder UniqueNameSearch; - protected SearchBuilder tmpltTypeSearch; - protected SearchBuilder tmpltTypeHyperSearch; - protected SearchBuilder tmpltTypeHyperSearch2; - - protected SearchBuilder AccountIdSearch; - 
protected SearchBuilder NameSearch; - protected SearchBuilder TmpltsInZoneSearch; - private SearchBuilder PublicSearch; - private SearchBuilder NameAccountIdSearch; - private SearchBuilder PublicIsoSearch; - private SearchBuilder UserIsoSearch; - private GenericSearchBuilder CountTemplatesByAccount; - private SearchBuilder updateStateSearch; - - //ResourceTagsDaoImpl _tagsDao = ComponentInject.inject(ResourceTagsDaoImpl.class); - @Inject - ResourceTagsDaoImpl _tagsDao = null; - private String routerTmpltName; - private String consoleProxyTmpltName; - - protected ImageDataDaoImpl() { - } - - @Override - public List listByPublic() { - SearchCriteria sc = PublicSearch.create(); - sc.setParameters("public", 1); - return listBy(sc); - } - - @Override - public ImageDataVO findByName(String templateName) { - SearchCriteria sc = UniqueNameSearch.create(); - sc.setParameters("uniqueName", templateName); - return findOneIncludingRemovedBy(sc); - } - - @Override - public ImageDataVO findByTemplateName(String templateName) { - SearchCriteria sc = NameSearch.create(); - sc.setParameters("name", templateName); - return findOneIncludingRemovedBy(sc); - } - - @Override - public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags) { - - SearchBuilder sb = null; - if (tags == null || tags.isEmpty()) { - sb = PublicIsoSearch; - } else { - sb = createSearchBuilder(); - sb.and("public", sb.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - sb.and("format", sb.entity().getFormat(), SearchCriteria.Op.EQ); - sb.and("type", sb.entity().getTemplateType(), SearchCriteria.Op.EQ); - sb.and("bootable", sb.entity().isBootable(), SearchCriteria.Op.EQ); - sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.EQ); - - SearchBuilder tagSearch = _tagsDao.createSearchBuilder(); - for (int count = 0; count < tags.size(); count++) { - tagSearch.or().op("key" + String.valueOf(count), tagSearch.entity().getKey(), SearchCriteria.Op.EQ); - tagSearch.and("value" + 
String.valueOf(count), tagSearch.entity().getValue(), SearchCriteria.Op.EQ); - tagSearch.cp(); - } - tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); - sb.groupBy(sb.entity().getId()); - sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); - } - - SearchCriteria sc = sb.create(); - - sc.setParameters("public", 1); - sc.setParameters("format", "ISO"); - sc.setParameters("type", TemplateType.PERHOST.toString()); - if (bootable != null) { - sc.setParameters("bootable", bootable); - } - - if (!listRemoved) { - sc.setParameters("removed", (Object) null); - } - - if (tags != null && !tags.isEmpty()) { - int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.ISO.toString()); - for (String key : tags.keySet()) { - sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); - sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); - count++; - } - } - - return listBy(sc); - } - - @Override - public List userIsoSearch(boolean listRemoved) { - - SearchBuilder sb = null; - sb = UserIsoSearch; - SearchCriteria sc = sb.create(); - - sc.setParameters("format", Storage.ImageFormat.ISO); - sc.setParameters("type", TemplateType.USER.toString()); - - if (!listRemoved) { - sc.setParameters("removed", (Object) null); - } - - return listBy(sc); - } - - @Override - public List listAllSystemVMTemplates() { - SearchCriteria sc = tmpltTypeSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - - Filter filter = new Filter(ImageDataVO.class, "id", false, null, null); - return listBy(sc, filter); - } - - @Override - public List listPrivateTemplatesByHost(Long hostId) { - - String sql = "select * from template_host_ref as thr INNER JOIN vm_template as t ON t.id=thr.template_id " - + "where thr.host_id=? 
and t.public=0 and t.featured=0 and t.type='USER' and t.removed is NULL"; - - List l = new ArrayList(); - - Transaction txn = Transaction.currentTxn(); - - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setLong(1, hostId); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - } catch (Throwable e) { - } - return l; - } - - @Override - public List listReadyTemplates() { - SearchCriteria sc = createSearchCriteria(); - sc.addAnd("ready", SearchCriteria.Op.EQ, true); - sc.addAnd("format", SearchCriteria.Op.NEQ, Storage.ImageFormat.ISO); - return listIncludingRemovedBy(sc); - } - - @Override - public List findIsosByIdAndPath(Long domainId, Long accountId, String path) { - SearchCriteria sc = createSearchCriteria(); - sc.addAnd("iso", SearchCriteria.Op.EQ, true); - if (domainId != null) { - sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - } - if (accountId != null) { - sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); - } - if (path != null) { - sc.addAnd("path", SearchCriteria.Op.EQ, path); - } - return listIncludingRemovedBy(sc); - } - - @Override - public List listByAccountId(long accountId) { - SearchCriteria sc = AccountIdSearch.create(); - sc.setParameters("accountId", accountId); - return listBy(sc); - } - - @Override - public List listByHypervisorType(List hyperTypes) { - SearchCriteria sc = createSearchCriteria(); - hyperTypes.add(HypervisorType.None); - sc.addAnd("hypervisorType", SearchCriteria.Op.IN, hyperTypes.toArray()); - return listBy(sc); - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - boolean result = super.configure(name, params); - - PublicSearch = createSearchBuilder(); - PublicSearch.and("public", PublicSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - - routerTmpltName = (String) params.get("routing.uniquename"); - - s_logger.debug("Found parameter routing 
unique name " + routerTmpltName); - if (routerTmpltName == null) { - routerTmpltName = "routing"; - } - - consoleProxyTmpltName = (String) params.get("consoleproxy.uniquename"); - if (consoleProxyTmpltName == null) { - consoleProxyTmpltName = "routing"; - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Use console proxy template : " + consoleProxyTmpltName); - } - - UniqueNameSearch = createSearchBuilder(); - UniqueNameSearch.and("uniqueName", UniqueNameSearch.entity().getUniqueName(), SearchCriteria.Op.EQ); - NameSearch = createSearchBuilder(); - NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ); - - NameAccountIdSearch = createSearchBuilder(); - NameAccountIdSearch.and("name", NameAccountIdSearch.entity().getName(), SearchCriteria.Op.EQ); - NameAccountIdSearch.and("accountId", NameAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - - PublicIsoSearch = createSearchBuilder(); - PublicIsoSearch.and("public", PublicIsoSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("format", PublicIsoSearch.entity().getFormat(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("type", PublicIsoSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("bootable", PublicIsoSearch.entity().isBootable(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("removed", PublicIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ); - - UserIsoSearch = createSearchBuilder(); - UserIsoSearch.and("format", UserIsoSearch.entity().getFormat(), SearchCriteria.Op.EQ); - UserIsoSearch.and("type", UserIsoSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - UserIsoSearch.and("removed", UserIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ); - - tmpltTypeHyperSearch = createSearchBuilder(); - tmpltTypeHyperSearch.and("templateType", tmpltTypeHyperSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - SearchBuilder hostHyperSearch = hostDao.createSearchBuilder(); - hostHyperSearch.and("type", 
hostHyperSearch.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch.and("zoneId", hostHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch.groupBy(hostHyperSearch.entity().getHypervisorType()); - - tmpltTypeHyperSearch.join("tmplHyper", hostHyperSearch, hostHyperSearch.entity().getHypervisorType(), tmpltTypeHyperSearch.entity().getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch.done(); - tmpltTypeHyperSearch.done(); - - tmpltTypeHyperSearch2 = createSearchBuilder(); - tmpltTypeHyperSearch2.and("templateType", tmpltTypeHyperSearch2.entity().getTemplateType(), SearchCriteria.Op.EQ); - tmpltTypeHyperSearch2.and("hypervisorType", tmpltTypeHyperSearch2.entity().getHypervisorType(), SearchCriteria.Op.EQ); - - tmpltTypeSearch = createSearchBuilder(); - tmpltTypeSearch.and("removed", tmpltTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - tmpltTypeSearch.and("templateType", tmpltTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - - AccountIdSearch = createSearchBuilder(); - AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - AccountIdSearch.and("publicTemplate", AccountIdSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - AccountIdSearch.done(); - - SearchBuilder tmpltZoneSearch = templateZoneDao.createSearchBuilder(); - tmpltZoneSearch.and("removed", tmpltZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - tmpltZoneSearch.and("zoneId", tmpltZoneSearch.entity().getZoneId(), SearchCriteria.Op.EQ); - - TmpltsInZoneSearch = createSearchBuilder(); - TmpltsInZoneSearch.and("removed", TmpltsInZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - TmpltsInZoneSearch.and().op("avoidtype", TmpltsInZoneSearch.entity().getTemplateType(), SearchCriteria.Op.NEQ); - TmpltsInZoneSearch.or("templateType", TmpltsInZoneSearch.entity().getTemplateType(), SearchCriteria.Op.NULL); - TmpltsInZoneSearch.cp(); - TmpltsInZoneSearch.join("tmpltzone", 
tmpltZoneSearch, tmpltZoneSearch.entity().getTemplateId(), TmpltsInZoneSearch.entity().getId(), JoinBuilder.JoinType.INNER); - tmpltZoneSearch.done(); - TmpltsInZoneSearch.done(); - - CountTemplatesByAccount = createSearchBuilder(Long.class); - CountTemplatesByAccount.select(null, Func.COUNT, null); - CountTemplatesByAccount.and("account", CountTemplatesByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); - CountTemplatesByAccount.and("removed", CountTemplatesByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); - CountTemplatesByAccount.done(); - - updateStateSearch = this.createSearchBuilder(); - updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); - updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); - updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); - updateStateSearch.done(); - return result; - } - - @Override - public String getRoutingTemplateUniqueName() { - return routerTmpltName; - } - - @Override - public Set> searchSwiftTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, Map tags) { - - StringBuilder builder = new StringBuilder(); - if (!permittedAccounts.isEmpty()) { - for (Account permittedAccount : permittedAccounts) { - builder.append(permittedAccount.getAccountId() + ","); - } - } - - String permittedAccountsStr = builder.toString(); - - if (permittedAccountsStr.length() > 0) { - // chop the "," off - permittedAccountsStr = permittedAccountsStr.substring(0, permittedAccountsStr.length() - 1); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - Set> templateZonePairList = new HashSet>(); - PreparedStatement pstmt = null; - ResultSet rs = null; - String sql = SELECT_TEMPLATE_SWIFT_REF; - try { - String 
joinClause = ""; - String whereClause = " WHERE t.removed IS NULL"; - - if (isIso) { - whereClause += " AND t.format = 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - joinClause = " INNER JOIN guest_os guestOS on (guestOS.id = t.guest_os_id) INNER JOIN guest_os_hypervisor goh on ( goh.guest_os_id = guestOS.id) "; - whereClause += " AND goh.hypervisor_type = '" + hyperType.toString() + "'"; - } - } else { - whereClause += " AND t.format <> 'ISO'"; - if (hypers.isEmpty()) { - return templateZonePairList; - } else { - StringBuilder relatedHypers = new StringBuilder(); - for (HypervisorType hyper : hypers) { - relatedHypers.append("'"); - relatedHypers.append(hyper.toString()); - relatedHypers.append("'"); - relatedHypers.append(","); - } - relatedHypers.setLength(relatedHypers.length() - 1); - whereClause += " AND t.hypervisor_type IN (" + relatedHypers + ")"; - } - } - joinClause += " INNER JOIN template_swift_ref tsr on (t.id = tsr.template_id)"; - if (keyword != null) { - whereClause += " AND t.name LIKE \"%" + keyword + "%\""; - } else if (name != null) { - whereClause += " AND t.name LIKE \"%" + name + "%\""; - } - - if (bootable != null) { - whereClause += " AND t.bootable = " + bootable; - } - - if (!showDomr) { - whereClause += " AND t.type != '" + Storage.TemplateType.SYSTEM.toString() + "'"; - } - - if (templateFilter == TemplateFilter.featured) { - whereClause += " AND t.public = 1 AND t.featured = 1"; - } else if ((templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - if (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { - joinClause += " INNER JOIN account a on (t.account_id = a.id) INNER JOIN domain d on (a.domain_id = d.id)"; - whereClause += " AND d.path LIKE '" + domain.getPath() + "%'"; - } else { - whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; - } - } 
else if (templateFilter == TemplateFilter.sharedexecutable && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) { - joinClause += " LEFT JOIN launch_permission lp ON t.id = lp.template_id WHERE" + " (t.account_id IN (" + permittedAccountsStr + ") OR" + " lp.account_id IN (" - + permittedAccountsStr + "))"; - } else { - joinClause += " INNER JOIN account a on (t.account_id = a.id) "; - } - } else if (templateFilter == TemplateFilter.executable && !permittedAccounts.isEmpty()) { - whereClause += " AND (t.public = 1 OR t.account_id IN (" + permittedAccountsStr + "))"; - } else if (templateFilter == TemplateFilter.community) { - whereClause += " AND t.public = 1 AND t.featured = 0"; - } else if (templateFilter == TemplateFilter.all && caller.getType() == Account.ACCOUNT_TYPE_ADMIN) { - } else if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - return templateZonePairList; - } - - sql += joinClause + whereClause + getOrderByLimit(pageSize, startIndex); - pstmt = txn.prepareStatement(sql); - rs = pstmt.executeQuery(); - while (rs.next()) { - Pair templateZonePair = new Pair(rs.getLong(1), -1L); - templateZonePairList.add(templateZonePair); - } - - } catch (Exception e) { - s_logger.warn("Error listing templates", e); - } finally { - try { - if (rs != null) { - rs.close(); - } - if (pstmt != null) { - pstmt.close(); - } - txn.commit(); - } catch (SQLException sqle) { - s_logger.warn("Error in cleaning up", sqle); - } - } - - return templateZonePairList; - } - - @Override - public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, - ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags) { - StringBuilder builder = new StringBuilder(); - if 
(!permittedAccounts.isEmpty()) { - for (Account permittedAccount : permittedAccounts) { - builder.append(permittedAccount.getAccountId() + ","); - } - } - - String permittedAccountsStr = builder.toString(); - - if (permittedAccountsStr.length() > 0) { - // chop the "," off - permittedAccountsStr = permittedAccountsStr.substring(0, permittedAccountsStr.length() - 1); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - /* Use LinkedHashSet here to guarantee iteration order */ - Set> templateZonePairList = new LinkedHashSet>(); - PreparedStatement pstmt = null; - ResultSet rs = null; - StringBuilder relatedDomainIds = new StringBuilder(); - String sql = SELECT_TEMPLATE_ZONE_REF; - String groupByClause = ""; - try { - // short accountType; - // String accountId = null; - String guestOSJoin = ""; - StringBuilder templateHostRefJoin = new StringBuilder(); - String dataCenterJoin = "", lpjoin = ""; - String tagsJoin = ""; - - if (isIso && !hyperType.equals(HypervisorType.None)) { - guestOSJoin = " INNER JOIN guest_os guestOS on (guestOS.id = t.guest_os_id) INNER JOIN guest_os_hypervisor goh on ( goh.guest_os_id = guestOS.id) "; - } - if (onlyReady) { - templateHostRefJoin.append(" INNER JOIN template_host_ref thr on (t.id = thr.template_id) INNER JOIN host h on (thr.host_id = h.id)"); - sql = SELECT_TEMPLATE_HOST_REF; - groupByClause = " GROUP BY t.id, h.data_center_id "; - } - if ((templateFilter == TemplateFilter.featured) || (templateFilter == TemplateFilter.community)) { - dataCenterJoin = " INNER JOIN data_center dc on (h.data_center_id = dc.id)"; - } - - if (templateFilter == TemplateFilter.sharedexecutable) { - lpjoin = " INNER JOIN launch_permission lp ON t.id = lp.template_id "; - } - - if (tags != null && !tags.isEmpty()) { - tagsJoin = " INNER JOIN resource_tags r ON t.id = r.resource_id "; - } - - sql += guestOSJoin + templateHostRefJoin + dataCenterJoin + lpjoin + tagsJoin; - String whereClause = ""; - - // All joins have to be made before 
we start setting the condition - // settings - if ((listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources || (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.featured))) - && !(caller.getType() != Account.ACCOUNT_TYPE_NORMAL && templateFilter == TemplateFilter.all)) { - whereClause += " INNER JOIN account a on (t.account_id = a.id)"; - if ((templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) - && (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { - whereClause += " INNER JOIN domain d on (a.domain_id = d.id) WHERE d.path LIKE '" + domain.getPath() + "%'"; - if (listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { - whereClause += " AND a.type != " + Account.ACCOUNT_TYPE_PROJECT; - } - } else if (listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { - whereClause += " WHERE a.type != " + Account.ACCOUNT_TYPE_PROJECT; - } - } - - if (!permittedAccounts.isEmpty()) { - for (Account account : permittedAccounts) { - // accountType = account.getType(); - // accountId = Long.toString(account.getId()); - DomainVO accountDomain = domainDao.findById(account.getDomainId()); - - // get all parent domain ID's all the way till root domain - DomainVO domainTreeNode = accountDomain; - while (true) { - relatedDomainIds.append(domainTreeNode.getId()); - relatedDomainIds.append(","); - if (domainTreeNode.getParent() != null) { - domainTreeNode = domainDao.findById(domainTreeNode.getParent()); - } else { - break; - } - } - - // get all child domain ID's - if (isAdmin(account.getType())) { - List allChildDomains = domainDao.findAllChildren(accountDomain.getPath(), accountDomain.getId()); - for (DomainVO childDomain : allChildDomains) { - relatedDomainIds.append(childDomain.getId()); - relatedDomainIds.append(","); - } - } - 
relatedDomainIds.setLength(relatedDomainIds.length() - 1); - } - } - - String attr = " AND "; - if (whereClause.endsWith(" WHERE ")) { - attr += " WHERE "; - } - - if (!isIso) { - if (hypers.isEmpty()) { - return templateZonePairList; - } else { - StringBuilder relatedHypers = new StringBuilder(); - for (HypervisorType hyper : hypers) { - relatedHypers.append("'"); - relatedHypers.append(hyper.toString()); - relatedHypers.append("'"); - relatedHypers.append(","); - } - relatedHypers.setLength(relatedHypers.length() - 1); - whereClause += attr + " t.hypervisor_type IN (" + relatedHypers + ")"; - } - } - - if (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.featured || templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.executable) - && !isAdmin(caller.getType())) { - whereClause += attr + "t.account_id IN (" + permittedAccountsStr + ")"; - } - - if (templateFilter == TemplateFilter.featured) { - whereClause += attr + "t.public = 1 AND t.featured = 1"; - if (!permittedAccounts.isEmpty()) { - whereClause += attr + "(dc.domain_id IN (" + relatedDomainIds + ") OR dc.domain_id is NULL)"; - } - } else if (templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) { - whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; - } else if (templateFilter == TemplateFilter.sharedexecutable) { - whereClause += " AND " + " (t.account_id IN (" + permittedAccountsStr + ") OR" + " lp.account_id IN (" + permittedAccountsStr + "))"; - } else if (templateFilter == TemplateFilter.executable && !permittedAccounts.isEmpty()) { - whereClause += attr + "(t.public = 1 OR t.account_id IN (" + permittedAccountsStr + "))"; - } else if (templateFilter == TemplateFilter.community) { - whereClause += attr + "t.public = 1 AND t.featured = 0"; - if (!permittedAccounts.isEmpty()) { - whereClause += attr + "(dc.domain_id IN (" + relatedDomainIds + ") OR dc.domain_id is NULL)"; - } - } else if (caller.getType() 
!= Account.ACCOUNT_TYPE_ADMIN && !isIso) { - return templateZonePairList; - } - - if (tags != null && !tags.isEmpty()) { - whereClause += " AND ("; - boolean first = true; - for (String key : tags.keySet()) { - if (!first) { - whereClause += " OR "; - } - whereClause += "(r.key=\"" + key + "\" and r.value=\"" + tags.get(key) + "\")"; - first = false; - } - whereClause += ")"; - } - - if (whereClause.equals("")) { - whereClause += " WHERE "; - } else if (!whereClause.equals(" WHERE ")) { - whereClause += " AND "; - } - - sql += whereClause + getExtrasWhere(templateFilter, name, keyword, isIso, bootable, hyperType, zoneId, onlyReady, showDomr) + groupByClause + getOrderByLimit(pageSize, startIndex); - - pstmt = txn.prepareStatement(sql); - rs = pstmt.executeQuery(); - - while (rs.next()) { - Pair templateZonePair = new Pair(rs.getLong(1), rs.getLong(2)); - templateZonePairList.add(templateZonePair); - } - // for now, defaulting pageSize to a large val if null; may need to - // revisit post 2.2RC2 - if (isIso && - templateZonePairList.size() < (pageSize != null ? pageSize : 500) && - templateFilter != TemplateFilter.community && - !(templateFilter == TemplateFilter.self) /* TODO: Fix this! && !BaseCmd.isRootAdmin(caller.getType())*/) { // evaluates - // to - // true - // If - // root - // admin - // and - // filter=self - - List publicIsos = publicIsoSearch(bootable, false, tags); - List userIsos = userIsoSearch(false); - - // Listing the ISOs according to the page size.Restricting the - // total no. 
of ISOs on a page - // to be less than or equal to the pageSize parameter - - int i = 0; - - if (startIndex > userIsos.size()) { - i = (int) (startIndex - userIsos.size()); - } - - for (; i < publicIsos.size(); i++) { - if (templateZonePairList.size() >= pageSize) { - break; - } else { - if (keyword != null && publicIsos.get(i).getName().contains(keyword)) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - continue; - } else if (name != null && publicIsos.get(i).getName().contains(name)) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - continue; - } else if (keyword == null && name == null) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - } - } - } - } - } catch (Exception e) { - s_logger.warn("Error listing templates", e); - } finally { - try { - if (rs != null) { - rs.close(); - } - if (pstmt != null) { - pstmt.close(); - } - txn.commit(); - } catch (SQLException sqle) { - s_logger.warn("Error in cleaning up", sqle); - } - } - - return templateZonePairList; - } - - private String getExtrasWhere(TemplateFilter templateFilter, String name, String keyword, boolean isIso, Boolean bootable, HypervisorType hyperType, Long zoneId, boolean onlyReady, - boolean showDomr) { - String sql = ""; - if (keyword != null) { - sql += " t.name LIKE \"%" + keyword + "%\" AND"; - } else if (name != null) { - sql += " t.name LIKE \"%" + name + "%\" AND"; - } - - if (isIso) { - sql += " t.format = 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - sql += " AND goh.hypervisor_type = '" + hyperType.toString() + "'"; - } - } else { - sql += " t.format <> 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - sql += " AND t.hypervisor_type = '" + hyperType.toString() + "'"; - } - } - - if (bootable != null) { - sql += " AND t.bootable = " + bootable; - } - - if (onlyReady) { - sql += " AND thr.download_state = '" + Status.DOWNLOADED.toString() + "'" + " AND thr.destroyed=0 "; - if (zoneId != null) { - sql 
+= " AND h.data_center_id = " + zoneId; - } - } else if (zoneId != null) { - sql += " AND tzr.zone_id = " + zoneId + " AND tzr.removed is null"; - } else { - sql += " AND tzr.removed is null "; - } - if (!showDomr) { - sql += " AND t.type != '" + Storage.TemplateType.SYSTEM.toString() + "'"; - } - - sql += " AND t.removed IS NULL"; - - return sql; - } - - private String getOrderByLimit(Long pageSize, Long startIndex) { - Boolean isAscending = Boolean.parseBoolean(configDao.getValue("sortkey.algorithm")); - isAscending = (isAscending == null ? true : isAscending); - - String sql; - if (isAscending) { - sql = " ORDER BY t.sort_key ASC"; - } else { - sql = " ORDER BY t.sort_key DESC"; - } - - if ((pageSize != null) && (startIndex != null)) { - sql += " LIMIT " + startIndex.toString() + "," + pageSize.toString(); - } - return sql; - } - - @Override - @DB - public long addTemplateToZone(ImageDataVO tmplt, long zoneId) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - ImageDataVO tmplt2 = findById(tmplt.getId()); - if (tmplt2 == null) { - if (persist(tmplt) == null) { - throw new CloudRuntimeException("Failed to persist the template " + tmplt); - } - if (tmplt.getDetails() != null) { - templateDetailsDao.persist(tmplt.getId(), tmplt.getDetails()); - } - } - VMTemplateZoneVO tmpltZoneVO = templateZoneDao.findByZoneTemplate(zoneId, tmplt.getId()); - if (tmpltZoneVO == null) { - tmpltZoneVO = new VMTemplateZoneVO(zoneId, tmplt.getId(), new Date()); - templateZoneDao.persist(tmpltZoneVO); - } else { - tmpltZoneVO.setRemoved(null); - tmpltZoneVO.setLastUpdated(new Date()); - templateZoneDao.update(tmpltZoneVO.getId(), tmpltZoneVO); - } - txn.commit(); - - return tmplt.getId(); - } - - @Override - @DB - public List listAllInZone(long dataCenterId) { - SearchCriteria sc = TmpltsInZoneSearch.create(); - sc.setParameters("avoidtype", TemplateType.PERHOST.toString()); - sc.setJoinParameters("tmpltzone", "zoneId", dataCenterId); - return listBy(sc); - } - - @Override 
- public List listDefaultBuiltinTemplates() { - SearchCriteria sc = tmpltTypeSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.BUILTIN); - return listBy(sc); - } - - @Override - public ImageDataVO findSystemVMTemplate(long zoneId) { - SearchCriteria sc = tmpltTypeHyperSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - - // order by descending order of id and select the first (this is going - // to be the latest) - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, 1l)); - - if (tmplts.size() > 0) { - return tmplts.get(0); - } else { - return null; - } - } - - @Override - public ImageDataVO findSystemVMTemplate(long zoneId, HypervisorType hType) { - SearchCriteria sc = tmpltTypeHyperSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - - // order by descending order of id - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, null)); - - for (ImageDataVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hType) { - return tmplt; - } - } - if (tmplts.size() > 0 && hType == HypervisorType.Any) { - return tmplts.get(0); - } - return null; - } - - @Override - public ImageDataVO findRoutingTemplate(HypervisorType hType) { - SearchCriteria sc = tmpltTypeHyperSearch2.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setParameters("hypervisorType", hType); - - // order by descending order of id and select the first (this is going - // to be the latest) - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, 1l)); - - if (tmplts.size() > 0) { - return tmplts.get(0); - } else { - return null; - } - } - - @Override - public Long countTemplatesForAccount(long 
accountId) { - SearchCriteria sc = CountTemplatesByAccount.create(); - sc.setParameters("account", accountId); - return customSearch(sc, null).get(0); - } - - @Override - @DB - public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - ImageDataVO template = createForUpdate(); - template.setRemoved(new Date()); - - ImageDataVO vo = findById(id); - if (vo != null) { - if (vo.getFormat().equalsIgnoreCase(new ISO().toString())) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.ISO); - } else { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Template); - } - } - - boolean result = update(id, template); - txn.commit(); - return result; - } - - private boolean isAdmin(short accountType) { - return ((accountType == Account.ACCOUNT_TYPE_ADMIN) || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); - } - - @Override - public boolean updateState(TemplateState currentState, TemplateEvent event, - TemplateState nextState, ImageDataVO vo, Object data) { - Long oldUpdated = vo.getUpdatedCount(); - Date oldUpdatedTime = vo.getUpdated(); - - - SearchCriteria sc = updateStateSearch.create(); - sc.setParameters("id", vo.getId()); - sc.setParameters("state", currentState); - sc.setParameters("updatedCount", vo.getUpdatedCount()); - - vo.incrUpdatedCount(); - - UpdateBuilder builder = getUpdateBuilder(vo); - builder.set(vo, "state", nextState); - builder.set(vo, "updated", new Date()); - - int rows = update((ImageDataVO) vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { - ImageDataVO dbVol = findByIdIncludingRemoved(vo.getId()); - if (dbVol != null) { - StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); - str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") - 
.append(dbVol.getUpdated()); - str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount()) - .append("; updatedTime=").append(vo.getUpdated()); - str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) - .append("; updatedTime=").append(oldUpdatedTime); - } else { - s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); - } - } - return rows > 0; - } -} \ No newline at end of file diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java deleted file mode 100644 index e3ddaed721a..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java +++ /dev/null @@ -1,450 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.db; - -import java.util.Date; -import java.util.Map; -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.Id; -import javax.persistence.Table; -import javax.persistence.TableGenerator; -import javax.persistence.Temporal; -import javax.persistence.TemporalType; -import javax.persistence.Transient; - -import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.storage.image.TemplateState; - -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateObject; - -@Entity -@Table(name = "vm_template") -public class ImageDataVO implements Identity, StateObject { - @Id - @TableGenerator(name = "vm_template_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_template_seq", allocationSize = 1) - @Column(name = "id", nullable = false) - private long id; - - @Column(name = "format") - private String format; - - @Column(name = "unique_name") - private String uniqueName; - - @Column(name = "name") - private String name = null; - - @Column(name = "public") - private boolean publicTemplate = true; - - @Column(name = "featured") - private boolean featured; - - @Column(name = "type") - private Storage.TemplateType templateType; - - @Column(name = "url") - private String url = null; - - @Column(name = "hvm") - private boolean requiresHvm; - - @Column(name = "bits") - private int bits; - - @Temporal(value = TemporalType.TIMESTAMP) - @Column(name = GenericDao.CREATED_COLUMN) - private Date created = null; - - @Column(name = GenericDao.REMOVED) - @Temporal(TemporalType.TIMESTAMP) - private Date removed; - - @Column(name = "account_id") - private long 
accountId; - - @Column(name = "checksum") - private String checksum; - - @Column(name = "display_text", length = 4096) - private String displayText; - - @Column(name = "enable_password") - private boolean enablePassword; - - @Column(name = "guest_os_id") - private long guestOSId; - - @Column(name = "bootable") - private boolean bootable = true; - - @Column(name = "prepopulate") - private boolean prepopulate = false; - - @Column(name = "cross_zones") - private boolean crossZones = false; - - @Column(name = "hypervisor_type") - @Enumerated(value = EnumType.STRING) - private HypervisorType hypervisorType; - - @Column(name = "extractable") - private boolean extractable = true; - - @Column(name = "source_template_id") - private Long sourceTemplateId; - - @Column(name = "template_tag") - private String templateTag; - - @Column(name = "uuid") - private String uuid; - - @Column(name = "sort_key") - private int sortKey; - - @Column(name = "enable_sshkey") - private boolean enableSshKey; - - @Column(name = "image_data_store_id") - private long imageDataStoreId; - - @Column(name = "size") - private Long size; - - @Column(name = "state") - private TemplateState state; - - @Column(name="update_count", updatable = true) - protected long updatedCount; - - @Column(name = "updated") - @Temporal(value = TemporalType.TIMESTAMP) - Date updated; - - @Transient - Map details; - - public String getUniqueName() { - return uniqueName; - } - - public void setUniqueName(String uniqueName) { - this.uniqueName = uniqueName; - } - - public ImageDataVO() { - this.uuid = UUID.randomUUID().toString(); - this.state = TemplateState.Allocated; - this.created = new Date(); - } - - public boolean getEnablePassword() { - return enablePassword; - } - - public String getFormat() { - return format; - } - - public void setEnablePassword(boolean enablePassword) { - this.enablePassword = enablePassword; - } - - public void setFormat(String format) { - this.format = format; - } - - public long getId() { - 
return id; - } - - public TemplateType getTemplateType() { - return templateType; - } - - public void setTemplateType(TemplateType type) { - this.templateType = type; - } - - public boolean requiresHvm() { - return requiresHvm; - } - - public void setRequireHvm(boolean hvm) { - this.requiresHvm = hvm; - } - - public int getBits() { - return bits; - } - - public void setBits(int bits) { - this.bits = bits; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Date getRemoved() { - return removed; - } - - public boolean isPublicTemplate() { - return publicTemplate; - } - - public void setPublicTemplate(boolean publicTemplate) { - this.publicTemplate = publicTemplate; - } - - public boolean isFeatured() { - return featured; - } - - public void setFeatured(boolean featured) { - this.featured = featured; - } - - public Date getCreated() { - return created; - } - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public long getAccountId() { - return accountId; - } - - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - public String getChecksum() { - return checksum; - } - - public void setChecksum(String checksum) { - this.checksum = checksum; - } - - public String getDisplayText() { - return displayText; - } - - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - - public long getGuestOSId() { - return guestOSId; - } - - public void setGuestOSId(long guestOSId) { - this.guestOSId = guestOSId; - } - - public boolean isBootable() { - return bootable; - } - - public void setBootable(boolean bootable) { - this.bootable = bootable; - } - - public void setPrepopulate(boolean prepopulate) { - this.prepopulate = prepopulate; - } - - public boolean isPrepopulate() { - return prepopulate; - } - - public void setCrossZones(boolean crossZones) { - this.crossZones = crossZones; - } - - 
public boolean isCrossZones() { - return crossZones; - } - - public HypervisorType getHypervisorType() { - return hypervisorType; - } - - public void setHypervisorType(HypervisorType hyperType) { - hypervisorType = hyperType; - } - - public boolean isExtractable() { - return extractable; - } - - public void setExtractable(boolean extractable) { - this.extractable = extractable; - } - - public Long getSourceTemplateId() { - return sourceTemplateId; - } - - public void setSourceTemplateId(Long sourceTemplateId) { - this.sourceTemplateId = sourceTemplateId; - } - - public String getTemplateTag() { - return templateTag; - } - - public void setTemplateTag(String templateTag) { - this.templateTag = templateTag; - } - - public long getDomainId() { - return -1; - } - - @Override - public String getUuid() { - return this.uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public Map getDetails() { - return this.details; - } - - public void setDetails(Map details) { - this.details = details; - } - - @Override - public boolean equals(Object that) { - if (this == that) { - return true; - } - if (!(that instanceof VMTemplateVO)) { - return false; - } - VMTemplateVO other = (VMTemplateVO) that; - - return ((this.getUniqueName().equals(other.getUniqueName()))); - } - - @Override - public int hashCode() { - return uniqueName.hashCode(); - } - - @Transient - String toString; - - @Override - public String toString() { - if (toString == null) { - toString = new StringBuilder("Tmpl[").append(id).append("-").append(format).append("-").append(uniqueName).toString(); - } - return toString; - } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public void setSortKey(int key) { - sortKey = key; - } - - public int getSortKey() { - return sortKey; - } - - public boolean getEnableSshKey() { - return enableSshKey; - } - - public void setEnableSshKey(boolean enable) { - enableSshKey = enable; - } - - public Long getImageDataStoreId() { - 
return this.imageDataStoreId; - } - - public void setImageDataStoreId(long dataStoreId) { - this.imageDataStoreId = dataStoreId; - } - - public void setSize(Long size) { - this.size = size; - } - - public Long getSize() { - return this.size; - } - - public TemplateState getState() { - return this.state; - } - - public long getUpdatedCount() { - return this.updatedCount; - } - - public void incrUpdatedCount() { - this.updatedCount++; - } - - public void decrUpdatedCount() { - this.updatedCount--; - } - - public Date getUpdated() { - return updated; - } - - public void setUpdated(Date updated) { - this.updated = updated; - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java index 422bc066211..908d6d52c20 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java @@ -19,9 +19,9 @@ package org.apache.cloudstack.storage.image.motion; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public interface ImageMotionService { diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java new file mode 100644 index 00000000000..d686336a7d7 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -0,0 +1,581 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + 
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.motion; + +import java.util.Date; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; +import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; +import com.cloud.agent.api.CreateVolumeFromSnapshotAnswer; +import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; +import 
com.cloud.agent.api.UpgradeSnapshotCommand; +import com.cloud.agent.api.storage.CopyVolumeAnswer; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.template.TemplateManager; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.DiskProfile; + +@Component +public class AncientDataMotionStrategy implements DataMotionStrategy { + private static final Logger s_logger = Logger + .getLogger(AncientDataMotionStrategy.class); + @Inject + TemplateManager templateMgr; + @Inject + VolumeHostDao volumeHostDao; + @Inject + 
HostDao hostDao; + @Inject + ConfigurationDao configDao; + @Inject + StorageManager storagMgr; + @Inject + VolumeDao volDao; + @Inject + VMTemplateDao templateDao; + @Inject + SnapshotManager snapshotMgr; + @Inject + SnapshotDao snapshotDao; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + DataStoreManager dataStoreMgr; + @Inject + VMTemplateHostDao templateHostDao; + @Inject DiskOfferingDao diskOfferingDao; + @Inject VMTemplatePoolDao templatePoolDao; + @Inject + VolumeManager volumeMgr; + + @Override + public boolean canHandle(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return true; + } + + @DB + protected String copyVolumeFromImage(DataObject srcData, DataObject destData) { + String value = configDao.getValue(Config.RecreateSystemVmEnabled.key()); + int _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + VolumeHostVO volumeHostVO = volumeHostDao.findByVolumeId(srcData + .getId()); + HostVO secStorage = hostDao.findById(volumeHostVO.getHostId()); + String secondaryStorageURL = secStorage.getStorageUrl(); + String[] volumePath = volumeHostVO.getInstallPath().split("/"); + String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; + StoragePool destPool = (StoragePool) destData.getDataStore(); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(srcData.getId(), + volumeUUID, destPool, secondaryStorageURL, false, + _copyvolumewait); + CopyVolumeAnswer cvAnswer = null; + String errMsg = null; + try { + cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool, + cvCmd); + } catch (StorageUnavailableException e1) { + s_logger.debug("Failed to copy volume " + srcData.getId() + " to " + + destData.getId(), e1); + errMsg = e1.toString(); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + errMsg = cvAnswer.getDetails(); + } + + VolumeVO vol = this.volDao.findById(destData.getId()); + Transaction txn = Transaction.currentTxn(); + 
txn.start(); + vol.setPath(cvAnswer.getVolumePath()); + vol.setFolder(destPool.getPath()); + vol.setPodId(destPool.getPodId()); + vol.setPoolId(destPool.getId()); + vol.setPodId(destPool.getPodId()); + + this.volDao.update(vol.getId(), vol); + volumeHostDao.remove(volumeHostVO.getId()); + txn.commit(); + return errMsg; + } + + private void copyTemplate(DataObject srcData, DataObject destData) { + VMTemplateVO template = this.templateDao.findById(srcData.getId()); + templateMgr.prepareTemplateForCreate(template, + (StoragePool) destData.getDataStore()); + } + + protected String copyFromSnapshot(DataObject snapObj, DataObject volObj) { + SnapshotVO snapshot = this.snapshotDao.findById(snapObj.getId()); + StoragePool pool = (StoragePool) volObj.getDataStore(); + String vdiUUID = null; + Long snapshotId = snapshot.getId(); + Long volumeId = snapshot.getVolumeId(); + Long dcId = snapshot.getDataCenterId(); + String secondaryStoragePoolUrl = this.snapshotMgr + .getSecondaryStorageURL(snapshot); + long accountId = snapshot.getAccountId(); + + String backedUpSnapshotUuid = snapshot.getBackupSnapshotId(); + snapshot = snapshotDao.findById(snapshotId); + if (snapshot.getVersion().trim().equals("2.1")) { + VolumeVO volume = this.volDao.findByIdIncludingRemoved(volumeId); + if (volume == null) { + throw new CloudRuntimeException("failed to upgrade snapshot " + + snapshotId + " due to unable to find orignal volume:" + + volumeId + ", try it later "); + } + if (volume.getTemplateId() == null) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + VMTemplateVO template = templateDao + .findByIdIncludingRemoved(volume.getTemplateId()); + if (template == null) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to unalbe to find orignal template :" + + volume.getTemplateId() + + ", try it later "); + } + Long templateId = template.getId(); + Long tmpltAccountId = template.getAccountId(); + if 
(!snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to this snapshot is being used, try it later "); + } + UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, + secondaryStoragePoolUrl, dcId, accountId, volumeId, + templateId, tmpltAccountId, null, + snapshot.getBackupSnapshotId(), snapshot.getName(), + "2.1"); + Answer answer = null; + try { + answer = this.storagMgr.sendToPool(pool, cmd); + } catch (StorageUnavailableException e) { + } finally { + snapshotDao.unlockFromLockTable(snapshotId.toString()); + } + if ((answer != null) && answer.getResult()) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + return "Unable to upgrade snapshot from 2.1 to 2.2 for " + + snapshot.getId(); + } + } + } + String basicErrMsg = "Failed to create volume from " + + snapshot.getName() + " on pool " + pool; + + try { + if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { + snapshotMgr.downloadSnapshotsFromSwift(snapshot); + } else if (snapshot.getS3Id() != null && snapshot.getS3Id() != 0) { + snapshotMgr.downloadSnapshotsFromS3(snapshot); + } + String value = configDao + .getValue(Config.CreateVolumeFromSnapshotWait.toString()); + int _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CreateVolumeFromSnapshotWait + .getDefaultValue())); + CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new CreateVolumeFromSnapshotCommand( + pool, secondaryStoragePoolUrl, dcId, accountId, volumeId, + backedUpSnapshotUuid, snapshot.getName(), + _createVolumeFromSnapshotWait); + CreateVolumeFromSnapshotAnswer answer; + if (!snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { + throw new CloudRuntimeException("failed to create volume from " + + snapshotId + + " due to this snapshot is being used, try it later "); + } + answer = (CreateVolumeFromSnapshotAnswer) this.storagMgr + .sendToPool(pool, 
createVolumeFromSnapshotCommand); + if (answer != null && answer.getResult()) { + vdiUUID = answer.getVdi(); + VolumeVO vol = this.volDao.findById(volObj.getId()); + vol.setPath(vdiUUID); + this.volDao.update(vol.getId(), vol); + return null; + } else { + s_logger.error(basicErrMsg + " due to " + + ((answer == null) ? "null" : answer.getDetails())); + throw new CloudRuntimeException(basicErrMsg); + } + } catch (StorageUnavailableException e) { + s_logger.error(basicErrMsg, e); + throw new CloudRuntimeException(basicErrMsg); + } finally { + if (snapshot.getSwiftId() != null) { + snapshotMgr.deleteSnapshotsDirForVolume( + secondaryStoragePoolUrl, dcId, accountId, volumeId); + } + snapshotDao.unlockFromLockTable(snapshotId.toString()); + } + } + + protected String cloneVolume(DataObject template, DataObject volume) { + + DiskOfferingVO offering = diskOfferingDao.findById(volume.getId()); + VMTemplateStoragePoolVO tmpltStoredOn = templatePoolDao.findByPoolTemplate(template.getDataStore().getId(), template.getId()); + VolumeInfo volInfo = (VolumeInfo)volume; + DiskProfile diskProfile = new DiskProfile(volInfo, offering, + null); + CreateCommand cmd = new CreateCommand(diskProfile, + tmpltStoredOn.getLocalDownloadPath(), + new StorageFilerTO((StoragePool)template.getDataStore())); + Answer answer = null; + StoragePool pool = (StoragePool)volume.getDataStore(); + String errMsg = null; + try { + answer = storagMgr.sendToPool(pool, null, cmd); + } catch (StorageUnavailableException e) { + s_logger.debug("Failed to send to storage pool", e); + errMsg = e.toString(); + return errMsg; + } + + if (answer.getResult()) { + VolumeVO vol = this.volDao.findById(volume.getId()); + CreateAnswer createAnswer = (CreateAnswer) answer; + vol.setFolder(pool.getPath()); + vol.setPath(createAnswer.getVolume().getPath()); + vol.setSize(createAnswer.getVolume().getSize()); + vol.setPoolType(pool.getPoolType()); + vol.setPoolId(pool.getId()); + vol.setPodId(pool.getPodId()); + 
this.volDao.update(vol.getId(), vol); + + } else { + if (tmpltStoredOn != null + && (answer instanceof CreateAnswer) + && ((CreateAnswer) answer) + .templateReloadRequested()) { + if (!templateMgr + .resetTemplateDownloadStateOnPool(tmpltStoredOn + .getId())) { + + } + } + errMsg = answer.getDetails(); + } + + return errMsg; + } + + @Override + public Void copyAsync(DataObject srcData, DataObject destData, + AsyncCompletionCallback callback) { + String errMsg = null; + try { + if (destData.getType() == DataObjectType.VOLUME + && srcData.getType() == DataObjectType.VOLUME) { + errMsg = copyVolumeFromImage(srcData, destData); + } else if (destData.getType() == DataObjectType.TEMPLATE + && srcData.getType() == DataObjectType.TEMPLATE) { + copyTemplate(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT + && destData.getType() == DataObjectType.VOLUME) { + errMsg = copyFromSnapshot(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT + && destData.getType() == DataObjectType.TEMPLATE) { + errMsg = createTemplateFromSnashot(srcData, destData); + } else if (srcData.getType() == DataObjectType.VOLUME + && destData.getType() == DataObjectType.TEMPLATE) { + errMsg = createTemplateFromVolume(srcData, destData); + } else if (srcData.getType() == DataObjectType.TEMPLATE + && destData.getType() == DataObjectType.VOLUME) { + errMsg = cloneVolume(srcData, destData); + } + } catch (Exception e) { + s_logger.debug("copy failed", e); + errMsg = e.toString(); + } + CopyCommandResult result = new CopyCommandResult(null); + result.setResult(errMsg); + callback.complete(result); + + return null; + } + + @DB + protected String createTemplateFromSnashot(DataObject srcData, + DataObject destData) { + long snapshotId = srcData.getId(); + SnapshotVO snapshot = snapshotDao.findById(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Unable to find Snapshot for Id " + + srcData.getId()); + } + Long zoneId = 
snapshot.getDataCenterId(); + HostVO secondaryStorageHost = this.templateMgr + .getSecondaryStorageHost(zoneId); + String secondaryStorageURL = snapshotMgr + .getSecondaryStorageURL(snapshot); + VMTemplateVO template = this.templateDao.findById(destData.getId()); + String name = template.getName(); + String backupSnapshotUUID = snapshot.getBackupSnapshotId(); + if (backupSnapshotUUID == null) { + throw new CloudRuntimeException( + "Unable to create private template from snapshot " + + snapshotId + + " due to there is no backupSnapshotUUID for this snapshot"); + } + + Long dcId = snapshot.getDataCenterId(); + Long accountId = snapshot.getAccountId(); + Long volumeId = snapshot.getVolumeId(); + + String origTemplateInstallPath = null; + List pools = this.storagMgr + .ListByDataCenterHypervisor(zoneId, + snapshot.getHypervisorType()); + if (pools == null || pools.size() == 0) { + throw new CloudRuntimeException( + "Unable to find storage pools in zone " + zoneId); + } + StoragePoolVO poolvo = pools.get(0); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + poolvo.getId(), DataStoreRole.Primary); + if (snapshot.getVersion() != null + && snapshot.getVersion().equalsIgnoreCase("2.1")) { + VolumeVO volume = this.volDao.findByIdIncludingRemoved(volumeId); + if (volume == null) { + throw new CloudRuntimeException("failed to upgrade snapshot " + + snapshotId + " due to unable to find orignal volume:" + + volumeId + ", try it later "); + } + if (volume.getTemplateId() == null) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + template = templateDao.findByIdIncludingRemoved(volume + .getTemplateId()); + if (template == null) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to unalbe to find orignal template :" + + volume.getTemplateId() + + ", try it later "); + } + Long origTemplateId = template.getId(); + Long origTmpltAccountId = template.getAccountId(); + if 
(!this.volDao.lockInLockTable(volumeId.toString(), 10)) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + snapshotId + + " due to volume:" + volumeId + + " is being used, try it later "); + } + UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, + secondaryStorageURL, dcId, accountId, volumeId, + origTemplateId, origTmpltAccountId, null, + snapshot.getBackupSnapshotId(), snapshot.getName(), + "2.1"); + if (!this.volDao.lockInLockTable(volumeId.toString(), 10)) { + throw new CloudRuntimeException( + "Creating template failed due to volume:" + + volumeId + + " is being used, try it later "); + } + Answer answer = null; + try { + answer = this.storagMgr.sendToPool(pool, cmd); + cmd = null; + } catch (StorageUnavailableException e) { + } finally { + this.volDao.unlockFromLockTable(volumeId.toString()); + } + if ((answer != null) && answer.getResult()) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + throw new CloudRuntimeException( + "Unable to upgrade snapshot"); + } + } + } + if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { + snapshotMgr.downloadSnapshotsFromSwift(snapshot); + } + String value = configDao + .getValue(Config.CreatePrivateTemplateFromSnapshotWait + .toString()); + int _createprivatetemplatefromsnapshotwait = NumbersUtil.parseInt( + value, Integer + .parseInt(Config.CreatePrivateTemplateFromSnapshotWait + .getDefaultValue())); + + CreatePrivateTemplateFromSnapshotCommand cmd = new CreatePrivateTemplateFromSnapshotCommand( + pool, secondaryStorageURL, dcId, accountId, + snapshot.getVolumeId(), backupSnapshotUUID, snapshot.getName(), + origTemplateInstallPath, template.getId(), name, + _createprivatetemplatefromsnapshotwait); + + return sendCommand(cmd, pool, template.getId(), dcId, + secondaryStorageHost.getId()); + } + + @DB + protected String sendCommand(Command cmd, StoragePool pool, + long templateId, long zoneId, long hostId) { + + CreatePrivateTemplateAnswer answer = null; 
+ try { + answer = (CreatePrivateTemplateAnswer) this.storagMgr.sendToPool( + pool, cmd); + } catch (StorageUnavailableException e) { + throw new CloudRuntimeException( + "Failed to execute CreatePrivateTemplateFromSnapshotCommand", + e); + } + + if (answer == null) { + return "Failed to execute CreatePrivateTemplateFromSnapshotCommand"; + } else if (!answer.getResult()) { + return "Failed to execute CreatePrivateTemplateFromSnapshotCommand" + + answer.getDetails(); + } + + VMTemplateVO privateTemplate = templateDao.findById(templateId); + String answerUniqueName = answer.getUniqueName(); + if (answerUniqueName != null) { + privateTemplate.setUniqueName(answerUniqueName); + } + ImageFormat format = answer.getImageFormat(); + if (format != null) { + privateTemplate.setFormat(format); + } else { + // This never occurs. + // Specify RAW format makes it unusable for snapshots. + privateTemplate.setFormat(ImageFormat.RAW); + } + + String checkSum = this.templateMgr + .getChecksum(hostId, answer.getPath()); + + Transaction txn = Transaction.currentTxn(); + + txn.start(); + + privateTemplate.setChecksum(checkSum); + templateDao.update(privateTemplate.getId(), privateTemplate); + + // add template zone ref for this template + templateDao.addTemplateToZone(privateTemplate, zoneId); + VMTemplateHostVO templateHostVO = new VMTemplateHostVO(hostId, + privateTemplate.getId()); + templateHostVO.setDownloadPercent(100); + templateHostVO.setDownloadState(Status.DOWNLOADED); + templateHostVO.setInstallPath(answer.getPath()); + templateHostVO.setLastUpdated(new Date()); + templateHostVO.setSize(answer.getVirtualSize()); + templateHostVO.setPhysicalSize(answer.getphysicalSize()); + templateHostDao.persist(templateHostVO); + txn.close(); + return null; + } + + private String createTemplateFromVolume(DataObject srcObj, + DataObject destObj) { + long volumeId = srcObj.getId(); + VolumeVO volume = this.volDao.findById(volumeId); + if (volume == null) { + throw new 
CloudRuntimeException("Unable to find volume for Id " + + volumeId); + } + long accountId = volume.getAccountId(); + + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + Long zoneId = volume.getDataCenterId(); + HostVO secondaryStorageHost = this.templateMgr + .getSecondaryStorageHost(zoneId); + if (secondaryStorageHost == null) { + throw new CloudRuntimeException( + "Can not find the secondary storage for zoneId " + zoneId); + } + String secondaryStorageURL = secondaryStorageHost.getStorageUrl(); + VMTemplateVO template = this.templateDao.findById(destObj.getId()); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + volume.getPoolId(), DataStoreRole.Primary); + String value = configDao + .getValue(Config.CreatePrivateTemplateFromVolumeWait.toString()); + int _createprivatetemplatefromvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CreatePrivateTemplateFromVolumeWait + .getDefaultValue())); + + CreatePrivateTemplateFromVolumeCommand cmd = new CreatePrivateTemplateFromVolumeCommand( + pool, secondaryStorageURL, destObj.getId(), accountId, + template.getName(), template.getUniqueName(), volume.getPath(), + vmName, _createprivatetemplatefromvolumewait); + + return sendCommand(cmd, pool, template.getId(), zoneId, + secondaryStorageHost.getId()); + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java index d50c9a0c8f3..f3e5c4aea50 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; public interface SnapshotService { public SnapshotEntity getSnapshotEntity(long snapshotId); 
diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java index 4e311862e50..8c4c815eb7d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.snapshot; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; + public interface SnapshotStrategy { public boolean takeSnapshot(SnapshotInfo snapshot); public boolean revertSnapshot(SnapshotInfo snapshot); diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index cd67b97b02c..aa47e8f4977 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -21,12 +21,12 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; public class PrimaryDataStoreTO { private final String uuid; private final String name; - private final String type; + private String type; private final long id; public PrimaryDataStoreTO(PrimaryDataStoreInfo dataStore) { this.uuid = dataStore.getUuid(); this.name = dataStore.getName(); - this.type = dataStore.getType(); + // this.type = dataStore.getType(); this.id = dataStore.getId(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java index ed5990986e5..bc55ea8c3ea 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java @@ -16,8 +16,8 @@ // under the License. 
package org.apache.cloudstack.storage.to; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreInfo; public class TemplateTO { diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/PrimaryDataStoreDriver.java b/engine/storage/src/org/apache/cloudstack/storage/volume/PrimaryDataStoreDriver.java index 60db60b2364..2b49eee4b86 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/PrimaryDataStoreDriver.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/PrimaryDataStoreDriver.java @@ -20,8 +20,8 @@ package org.apache.cloudstack.storage.volume; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; public interface PrimaryDataStoreDriver extends DataStoreDriver { public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java b/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java index 368c33a32bf..b8d0857d495 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java @@ -18,8 +18,8 @@ */ package org.apache.cloudstack.storage.volume; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import 
org.apache.cloudstack.storage.image.TemplateInfo; public interface TemplateOnPrimaryDataStoreInfo { public String getPath(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 20ceaa303fc..c6ca90d1641 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -23,40 +23,38 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; @Component public class PrimaryDataStoreHelper { @Inject private PrimaryDataStoreDao dataStoreDao; - public PrimaryDataStoreVO createPrimaryDataStore(Map params) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findPoolByUUID(params.get("uuid")); + public StoragePoolVO createPrimaryDataStore(Map params) { + StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid")); if (dataStoreVO != null) { throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid")); } - dataStoreVO = new PrimaryDataStoreVO(); - dataStoreVO.setStorageProviderId(Long.parseLong(params.get("providerId"))); - 
dataStoreVO.setHostAddress(params.get("server")); - dataStoreVO.setPath(params.get("path")); - dataStoreVO.setPoolType(params.get("protocol")); - dataStoreVO.setPort(Integer.parseInt(params.get("port"))); - dataStoreVO.setName(params.get("name")); - dataStoreVO.setUuid(params.get("uuid")); + dataStoreVO = new StoragePoolVO(); + dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId"))); + dataStoreVO.setHostAddress((String)params.get("server")); + dataStoreVO.setPath((String)params.get("path")); + dataStoreVO.setPoolType((StoragePoolType)params.get("protocol")); + dataStoreVO.setPort(Integer.parseInt((String)params.get("port"))); + dataStoreVO.setName((String)params.get("name")); + dataStoreVO.setUuid((String)params.get("uuid")); dataStoreVO = dataStoreDao.persist(dataStoreVO); return dataStoreVO; } public boolean deletePrimaryDataStore(long id) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(id); + StoragePoolVO dataStoreVO = dataStoreDao.findById(id); if (dataStoreVO == null) { throw new CloudRuntimeException("can't find store: " + id); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java index 45ff1ec2258..63cdb16c596 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java @@ -18,7 +18,7 @@ */ package org.apache.cloudstack.storage.volume.db; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java 
b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java index b47f08881e1..ad561502266 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java @@ -20,9 +20,9 @@ package org.apache.cloudstack.storage.volume.db; import java.util.Date; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java index 2d355df7e2a..48a9f334a19 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java @@ -32,7 +32,9 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; + +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.fsm.StateObject; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index 9c009c95623..72c1843da42 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -18,45 +18,50 @@ package org.apache.cloudstack.storage.datastore; import java.io.File; import java.util.ArrayList; +import java.util.Date; import java.util.List; import javax.inject.Inject; -import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotDataFactory; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.storage.encoding.EncodingType; public class DefaultPrimaryDataStore implements PrimaryDataStore { private static final Logger s_logger = Logger .getLogger(DefaultPrimaryDataStore.class); protected PrimaryDataStoreDriver driver; - protected PrimaryDataStoreVO pdsv; + protected StoragePoolVO pdsv; @Inject protected PrimaryDataStoreDao dataStoreDao; protected PrimaryDataStoreLifeCycle lifeCycle; @@ -67,15 +72,16 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Inject SnapshotDataFactory snapshotFactory; protected DataStoreProvider provider; - @Inject - private VolumeDao2 volumeDao; + VMTemplatePoolDao templatePoolDao; + + private VolumeDao volumeDao; protected DefaultPrimaryDataStore() { } - public void 
configure(PrimaryDataStoreVO pdsv, + public void configure(StoragePoolVO pdsv, PrimaryDataStoreDriver driver, DataStoreProvider provider) { this.pdsv = pdsv; this.driver = driver; @@ -83,7 +89,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { } public static DefaultPrimaryDataStore createDataStore( - PrimaryDataStoreVO pdsv, PrimaryDataStoreDriver driver, + StoragePoolVO pdsv, PrimaryDataStoreDriver driver, DataStoreProvider provider) { DefaultPrimaryDataStore dataStore = (DefaultPrimaryDataStore)ComponentContext.inject(DefaultPrimaryDataStore.class); dataStore.configure(pdsv, driver, provider); @@ -109,19 +115,16 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public DataStoreDriver getDriver() { - // TODO Auto-generated method stub return this.driver; } @Override public DataStoreRole getRole() { - // TODO Auto-generated method stub return DataStoreRole.Primary; } @Override public long getId() { - // TODO Auto-generated method stub return this.pdsv.getId(); } @@ -143,7 +146,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public Scope getScope() { - PrimaryDataStoreVO vo = dataStoreDao.findById(this.pdsv.getId()); + StoragePoolVO vo = dataStoreDao.findById(this.pdsv.getId()); if (vo.getScope() == ScopeType.CLUSTER) { return new ClusterScope(vo.getClusterId(), vo.getPodId(), vo.getDataCenterId()); @@ -156,7 +159,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public boolean isHypervisorSupported(HypervisorType hypervisor) { // TODO Auto-generated method stub - return false; + return true; } @Override @@ -171,28 +174,10 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return false; } - @Override - public long getCapacity() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public long getAvailableCapacity() { - // TODO Auto-generated method stub - return 0; - } @Override public String getUuid() { - // TODO 
Auto-generated method stub - return null; - } - - @Override - public State getManagedState() { - // TODO Auto-generated method stub - return null; + return this.pdsv.getUuid(); } @Override @@ -201,12 +186,6 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return null; } - @Override - public String getType() { - // TODO Auto-generated method stub - return null; - } - @Override public PrimaryDataStoreLifeCycle getLifeCycle() { return this.lifeCycle; @@ -214,14 +193,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public boolean exists(DataObject data) { - return (objectInStoreMgr.findObject(data.getId(), data.getType(), - this.getId(), this.getRole()) != null) ? true : false; + return (objectInStoreMgr.findObject(data, data.getDataStore()) != null) ? true : false; } @Override public TemplateInfo getTemplate(long templateId) { - ObjectInDataStoreVO obj = objectInStoreMgr.findObject(templateId, DataObjectType.TEMPLATE, this.getId(), this.getRole()); - if (obj == null) { + VMTemplateStoragePoolVO template = templatePoolDao.findByPoolTemplate(this.getId(), templateId); + if (template == null || template.getState() != ObjectInDataStoreStateMachine.State.Ready) { return null; } return imageDataFactory.getTemplate(templateId, this); @@ -238,4 +216,117 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { // TODO Auto-generated method stub return null; } + + @Override + public DataObject create(DataObject obj) { + //create template on primary storage + if (obj.getType() == DataObjectType.TEMPLATE) { + VMTemplateStoragePoolVO templateStoragePoolRef = templatePoolDao.findByPoolTemplate(this.getId(), obj.getId()); + if (templateStoragePoolRef == null) { + try { + templateStoragePoolRef = new VMTemplateStoragePoolVO(this.getId(), obj.getId()); + templateStoragePoolRef = templatePoolDao.persist(templateStoragePoolRef); + } catch (Throwable t) { + templateStoragePoolRef = 
templatePoolDao.findByPoolTemplate(this.getId(), obj.getId()); + if (templateStoragePoolRef == null) { + throw new CloudRuntimeException("Failed to create template storage pool entry"); + } + } + } + + } + + return objectInStoreMgr.get(obj, this); + } + + @Override + public boolean delete(DataObject obj) { + // TODO Auto-generated method stub + return false; + } + + @Override + public long getDataCenterId() { + return this.pdsv.getDataCenterId(); + } + + @Override + public String getPath() { + return this.pdsv.getPath(); + } + + @Override + public StoragePoolType getPoolType() { + return this.pdsv.getPoolType(); + } + + @Override + public Date getCreated() { + return this.pdsv.getCreated(); + } + + @Override + public Date getUpdateTime() { + return this.pdsv.getUpdateTime(); + } + + @Override + public long getCapacityBytes() { + return this.pdsv.getCapacityBytes(); + } + + @Override + public long getAvailableBytes() { + return this.pdsv.getAvailableBytes(); + } + + @Override + public Long getClusterId() { + return this.pdsv.getClusterId(); + } + + @Override + public String getHostAddress() { + return this.pdsv.getHostAddress(); + } + + @Override + public String getUserInfo() { + return this.pdsv.getUserInfo(); + } + + @Override + public boolean isShared() { + return this.pdsv.getScope() == ScopeType.HOST ? false : true; + } + + @Override + public boolean isLocal() { + return !this.isShared(); + } + + @Override + public StoragePoolStatus getStatus() { + return this.pdsv.getStatus(); + } + + @Override + public int getPort() { + return this.pdsv.getPort(); + } + + @Override + public Long getPodId() { + return this.pdsv.getPodId(); + } + + @Override + public Long getStorageProviderId() { + return this.pdsv.getStorageProviderId(); + } + + @Override + public boolean isInMaintenance() { + return this.getStatus() == StoragePoolStatus.Maintenance ? 
true : false; + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java new file mode 100644 index 00000000000..9946fba5f63 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java @@ -0,0 +1,289 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.driver; + +import java.util.Set; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.template.TemplateManager; +import com.cloud.utils.exception.CloudRuntimeException; 
+import com.cloud.vm.DiskProfile; +import com.cloud.vm.dao.VMInstanceDao; + +public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { + private static final Logger s_logger = Logger + .getLogger(AncientPrimaryDataStoreDriverImpl.class); + @Inject DiskOfferingDao diskOfferingDao; + @Inject VMTemplateDao templateDao; + @Inject VolumeDao volumeDao; + @Inject TemplateManager templateMgr; + @Inject HostDao hostDao; + @Inject StorageManager storageMgr; + @Inject VMInstanceDao vmDao; + @Inject PrimaryDataStoreDao primaryStoreDao; + @Override + public String grantAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public Set listObjects(DataStore store) { + // TODO Auto-generated method stub + return null; + } + + public boolean createVolume( + VolumeInfo volume) throws StorageUnavailableException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating volume: " + volume); + } + + DiskOfferingVO offering = diskOfferingDao.findById(volume.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(volume, offering, + null); + + VMTemplateVO template = null; + if (volume.getTemplateId() != null) { + template = templateDao.findById(volume.getTemplateId()); + } + + StoragePool pool = (StoragePool)volume.getDataStore(); + VolumeVO vol = volumeDao.findById(volume.getId()); + if (pool != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to create in " + pool); + } + vol.setPoolId(pool.getId()); + + CreateCommand cmd = null; + VMTemplateStoragePoolVO tmpltStoredOn = null; + + for (int i = 0; i < 2; i++) { + if (template != null + && template.getFormat() != Storage.ImageFormat.ISO) { + if (pool.getPoolType() == StoragePoolType.CLVM) { + // prepareISOForCreate does what we need, which is to + // tell us where the template is + 
VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + s_logger.debug("cannot find template " + + template.getId() + " " + + template.getName()); + throw new CloudRuntimeException("cannot find template" + + template.getId() + + template.getName()); + } + HostVO secondaryStorageHost = hostDao + .findById(tmpltHostOn.getHostId()); + String tmpltHostUrl = secondaryStorageHost + .getStorageUrl(); + String fullTmpltUrl = tmpltHostUrl + "/" + + tmpltHostOn.getInstallPath(); + cmd = new CreateCommand(diskProfile, fullTmpltUrl, + new StorageFilerTO(pool)); + } else { + tmpltStoredOn = templateMgr.prepareTemplateForCreate( + template, pool); + if (tmpltStoredOn == null) { + s_logger.debug("Cannot use this pool " + pool + + " because we can't propagate template " + + template); + throw new CloudRuntimeException("Cannot use this pool " + pool + + " because we can't propagate template " + + template); + } + cmd = new CreateCommand(diskProfile, + tmpltStoredOn.getLocalDownloadPath(), + new StorageFilerTO(pool)); + } + } else { + if (template != null + && Storage.ImageFormat.ISO == template.getFormat()) { + VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + throw new CloudRuntimeException( + "Did not find ISO in secondry storage in zone " + + pool.getDataCenterId()); + } + } + cmd = new CreateCommand(diskProfile, new StorageFilerTO( + pool)); + } + + Answer answer = storageMgr.sendToPool(pool, null, cmd); + if (answer.getResult()) { + CreateAnswer createAnswer = (CreateAnswer) answer; + vol.setFolder(pool.getPath()); + vol.setPath(createAnswer.getVolume().getPath()); + vol.setSize(createAnswer.getVolume().getSize()); + vol.setPoolType(pool.getPoolType()); + vol.setPoolId(pool.getId()); + vol.setPodId(pool.getPodId()); + this.volumeDao.update(vol.getId(), vol); + return true; + } else { + if (tmpltStoredOn != null + && (answer instanceof CreateAnswer) + && 
((CreateAnswer) answer) + .templateReloadRequested()) { + if (!templateMgr + .resetTemplateDownloadStateOnPool(tmpltStoredOn + .getId())) { + break; // break out of template-redeploy retry loop + } + } else { + break; + } + } + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Unable to create volume " + volume.getId()); + } + return false; + } + + @Override + public void createAsync(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + String errMsg = null; + if (data.getType() == DataObjectType.VOLUME) { + try { + createVolume((VolumeInfo)data); + } catch (StorageUnavailableException e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } catch (Exception e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } + } + CreateCmdResult result = new CreateCmdResult(null, null); + if (errMsg != null) { + result.setResult(errMsg); + } + + callback.complete(result); + + } + + @Override + public void deleteAsync(DataObject data, + AsyncCompletionCallback callback) { + + String vmName = null; + VolumeVO vol = this.volumeDao.findById(data.getId()); + + + StoragePool pool = (StoragePool)data.getDataStore(); + + DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); + + CommandResult result = new CommandResult(); + try { + Answer answer = this.storageMgr.sendToPool(pool, cmd); + if (answer != null && !answer.getResult()) { + result.setResult(answer.getDetails()); + s_logger.info("Will retry delete of " + vol + " from " + pool.getId()); + } + } catch (StorageUnavailableException e) { + s_logger.error("Storage is unavailable currently. 
Will retry delete of " + + vol + " from " + pool.getId(), e); + result.setResult(e.toString()); + } catch (Exception ex) { + s_logger.debug("Unable to destoy volume" + vol + " from " + pool.getId(), ex); + result.setResult(ex.toString()); + } + callback.complete(result); + } + + @Override + public void copyAsync(DataObject srcdata, DataObject destData, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return false; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java index dfe4518edab..efd04d18294 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; @@ -35,7 +36,6 @@ import 
org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.apache.log4j.Logger; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java new file mode 100644 index 00000000000..3ce14ee8b48 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java @@ -0,0 +1,952 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.OCFS2Manager; +import com.cloud.storage.Storage.StoragePoolType; 
+import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolDiscoverer; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.StoragePoolWorkVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +public class AncientPrimaryDataStoreLifeCyclImpl implements + PrimaryDataStoreLifeCycle { + private static final Logger s_logger = Logger + .getLogger(AncientPrimaryDataStoreLifeCyclImpl.class); + @Inject + protected ResourceManager _resourceMgr; + protected List _discoverers; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + AgentManager agentMgr; + @Inject + StorageManager storageMgr; + @Inject + protected CapacityDao _capacityDao; + + @Inject + VolumeDao volumeDao; + @Inject + VMInstanceDao vmDao; + @Inject + ManagementServer server; + 
@Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + + + + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + + @Override + public DataStore initialize(Map dsInfos) { + Long clusterId = (Long) dsInfos.get("clusterId"); + Long podId = (Long) dsInfos.get("podId"); + Long zoneId = (Long) dsInfos.get("zoneId"); + String url = (String) dsInfos.get("url"); + Long providerId = (Long)dsInfos.get("providerId"); + if (clusterId != null && podId == null) { + throw new InvalidParameterValueException( + "Cluster id requires pod id"); + } + + URI uri = null; + try { + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null) { + throw new InvalidParameterValueException("scheme is null " + + url + ", add nfs:// as a prefix"); + } else if (uri.getScheme().equalsIgnoreCase("nfs")) { + String uriHost = uri.getHost(); + String uriPath = uri.getPath(); + if (uriHost == null || uriPath == null + || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { + throw new InvalidParameterValueException( + "host or path is null, should be nfs://hostname/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be sharedmountpoint://localhost/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("rbd")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be rbd://hostname/pool"); + } + } + } catch (URISyntaxException e) { + throw new InvalidParameterValueException(url + + " is not a 
valid uri"); + } + + String tags = (String) dsInfos.get("tags"); + Map details = (Map) dsInfos + .get("details"); + if (tags != null) { + String[] tokens = tags.split(","); + + for (String tag : tokens) { + tag = tag.trim(); + if (tag.length() == 0) { + continue; + } + details.put(tag, "true"); + } + } + + String scheme = uri.getScheme(); + String storageHost = uri.getHost(); + String hostPath = uri.getPath().replaceFirst("/", ""); + String userInfo = uri.getUserInfo(); + int port = uri.getPort(); + StoragePoolVO pool = null; + if (s_logger.isDebugEnabled()) { + s_logger.debug("createPool Params @ scheme - " + scheme + + " storageHost - " + storageHost + " hostPath - " + + hostPath + " port - " + port); + } + if (scheme.equalsIgnoreCase("nfs")) { + if (port == -1) { + port = 2049; + } + pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, + storageHost, port, hostPath); + if (clusterId == null) { + throw new IllegalArgumentException( + "NFS need to have clusters specified for XenServers"); + } + } else if (scheme.equalsIgnoreCase("file")) { + if (port == -1) { + port = 0; + } + pool = new StoragePoolVO(StoragePoolType.Filesystem, + "localhost", 0, hostPath); + } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { + pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, + storageHost, 0, hostPath); + } else if (scheme.equalsIgnoreCase("clvm")) { + pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, + hostPath.replaceFirst("/", "")); + } else if (scheme.equalsIgnoreCase("rbd")) { + if (port == -1) { + port = 6789; + } + pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, + port, hostPath.replaceFirst("/", "")); + pool.setUserInfo(userInfo); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + pool = new StoragePoolVO(StoragePoolType.PreSetup, + storageHost, 0, hostPath); + } else if (scheme.equalsIgnoreCase("iscsi")) { + String[] tokens = hostPath.split("/"); + int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); + if (port 
== -1) { + port = 3260; + } + if (lun != -1) { + if (clusterId == null) { + throw new IllegalArgumentException( + "IscsiLUN need to have clusters specified"); + } + hostPath.replaceFirst("/", ""); + pool = new StoragePoolVO(StoragePoolType.IscsiLUN, + storageHost, port, hostPath); + } else { + for (StoragePoolDiscoverer discoverer : _discoverers) { + Map> pools; + try { + pools = discoverer.find(zoneId, podId, uri, details); + } catch (DiscoveryException e) { + throw new IllegalArgumentException( + "Not enough information for discovery " + uri, + e); + } + if (pools != null) { + Map.Entry> entry = pools + .entrySet().iterator().next(); + pool = entry.getKey(); + details = entry.getValue(); + break; + } + } + } + } else if (scheme.equalsIgnoreCase("iso")) { + if (port == -1) { + port = 2049; + } + pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, + port, hostPath); + } else if (scheme.equalsIgnoreCase("vmfs")) { + pool = new StoragePoolVO(StoragePoolType.VMFS, + "VMFS datastore: " + hostPath, 0, hostPath); + } else if (scheme.equalsIgnoreCase("ocfs2")) { + port = 7777; + pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", + port, hostPath); + } else { + StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); + + if (type != null) { + pool = new StoragePoolVO(type, storageHost, + 0, hostPath); + } else { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + } + + if (pool == null) { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + + Object localStorage = dsInfos.get("localStorage"); + if (localStorage == null) { + List pools = primaryDataStoreDao + .listPoolByHostPath(storageHost, hostPath); + if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { + Long oldPodId = pools.get(0).getPodId(); + 
throw new CloudRuntimeException("Storage pool " + uri + + " already in use by another pod (id=" + oldPodId + ")"); + } + } + + long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id"); + Object existingUuid = dsInfos.get("uuid"); + String uuid = null; + + if (existingUuid != null) { + uuid = (String)existingUuid; + } else if (scheme.equalsIgnoreCase("sharedmountpoint") + || scheme.equalsIgnoreCase("clvm")) { + uuid = UUID.randomUUID().toString(); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + uuid = hostPath.replace("/", ""); + } else { + uuid = UUID.nameUUIDFromBytes( + new String(storageHost + hostPath).getBytes()).toString(); + } + + List spHandles = primaryDataStoreDao + .findIfDuplicatePoolsExistByUUID(uuid); + if ((spHandles != null) && (spHandles.size() > 0)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Another active pool with the same uuid already exists"); + } + throw new CloudRuntimeException( + "Another active pool with the same uuid already exists"); + } + + String poolName = (String) dsInfos.get("name"); + if (s_logger.isDebugEnabled()) { + s_logger.debug("In createPool Setting poolId - " + poolId + + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " + + podId + " poolName - " + poolName); + } + + pool.setId(poolId); + pool.setUuid(uuid); + pool.setDataCenterId(zoneId); + pool.setPodId(podId); + pool.setName(poolName); + pool.setClusterId(clusterId); + pool.setStorageProviderId(providerId); + pool.setStatus(StoragePoolStatus.Initialized); + pool = primaryDataStoreDao.persist(pool, details); + + return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + protected boolean createStoragePool(long hostId, StoragePool pool) { + s_logger.debug("creating pool " + pool.getName() + " on host " + + hostId); + if (pool.getPoolType() != StoragePoolType.NetworkFilesystem + && pool.getPoolType() != StoragePoolType.Filesystem + && pool.getPoolType() != StoragePoolType.IscsiLUN + && pool.getPoolType() != 
StoragePoolType.Iscsi + && pool.getPoolType() != StoragePoolType.VMFS + && pool.getPoolType() != StoragePoolType.SharedMountPoint + && pool.getPoolType() != StoragePoolType.PreSetup + && pool.getPoolType() != StoragePoolType.OCFS2 + && pool.getPoolType() != StoragePoolType.RBD + && pool.getPoolType() != StoragePoolType.CLVM) { + s_logger.warn(" Doesn't support storage pool type " + + pool.getPoolType()); + return false; + } + CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + if (answer != null && answer.getResult()) { + return true; + } else { + primaryDataStoreDao.expunge(pool.getId()); + String msg = ""; + if (answer != null) { + msg = "Can not create storage pool through host " + hostId + + " due to " + answer.getDetails(); + s_logger.warn(msg); + } else { + msg = "Can not create storage pool through host " + hostId + + " due to CreateStoragePoolCommand returns null"; + s_logger.warn(msg); + } + throw new CloudRuntimeException(msg); + } + } + + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; + // Check if there is host up in this cluster + List allHosts = _resourceMgr.listAllUpAndEnabledHosts( + Host.Type.Routing, primarystore.getClusterId(), + primarystore.getPodId(), primarystore.getDataCenterId()); + if (allHosts.isEmpty()) { + throw new CloudRuntimeException( + "No host up to associate a storage pool with in cluster " + + primarystore.getClusterId()); + } + + if (primarystore.getPoolType() == StoragePoolType.OCFS2 + && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { + s_logger.warn("Can not create storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + boolean success = false; + for (HostVO h : allHosts) { + success = createStoragePool(h.getId(), primarystore); + if (success) { 
+ break; + } + } + + s_logger.debug("In createPool Adding the pool to each of the hosts"); + List poolHosts = new ArrayList(); + for (HostVO h : allHosts) { + try { + this.storageMgr.connectHostToSharedPool(h.getId(), + primarystore.getId()); + poolHosts.add(h); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + h + + " and " + primarystore, e); + } + } + + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } else { + storageMgr.createCapacityEntry(primarystore.getId()); + } + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + pool.setScope(ScopeType.CLUSTER); + pool.setStatus(StoragePoolStatus.Up); + this.primaryDataStoreDao.update(pool.getId(), pool); + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean dettach() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean unmanaged() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean maintain(long storeId) { + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId); + try { + StoragePool storagePool = (StoragePool) this.dataStoreMgr + .getDataStore(storeId, DataStoreRole.Primary); + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + pool.setStatus(StoragePoolStatus.Maintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return true; + } else { + // set the pool state to prepare for maintenance + 
pool.setStatus(StoragePoolStatus.PrepareForMaintenance); + primaryDataStoreDao.update(pool.getId(), pool); + } + // remove heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( + false, storagePool); + final Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false secceeded"); + } + } + } + // check to see if other ps exist + // if they do, then we can migrate over the system vms to them + // if they dont, then just stop all vms on this one + List upPools = primaryDataStoreDao + .listByStatusInZone(pool.getDataCenterId(), + DataStoreStatus.Up); + boolean restart = true; + if (upPools == null || upPools.size() == 0) { + restart = false; + } + + // 2. Get a list of all the ROOT volumes within this storage pool + List allVolumes = this.volumeDao.findByPoolId(pool + .getId()); + + // 3. 
Enqueue to the work queue + for (VolumeVO volume : allVolumes) { + VMInstanceVO vmInstance = vmDao + .findById(volume.getInstanceId()); + + if (vmInstance == null) { + continue; + } + + // enqueue sp work + if (vmInstance.getState().equals(State.Running) + || vmInstance.getState().equals(State.Starting) + || vmInstance.getState().equals(State.Stopping)) { + + try { + StoragePoolWorkVO work = new StoragePoolWorkVO( + vmInstance.getId(), pool.getId(), false, false, + server.getId()); + _storagePoolWorkDao.persist(work); + } catch (Exception e) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Work record already exists, re-using by re-setting values"); + } + StoragePoolWorkVO work = _storagePoolWorkDao + .findByPoolIdAndVmId(pool.getId(), + vmInstance.getId()); + work.setStartedAfterMaintenance(false); + work.setStoppedForMaintenance(false); + work.setManagementServerId(server.getId()); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // 4. Process the queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForPrepareForMaintenanceByPoolId(pool + .getId()); + + for (StoragePoolWorkVO work : pendingWork) { + // shut down the running vms + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + // call the consoleproxymanager + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { + String errorMsg = "There was an error stopping the console proxy id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + + if 
(this.vmMgr.advanceStart(consoleProxy, null, user, + account) == null) { + String errorMsg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type uservm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(userVm, true, user, account)) { + String errorMsg = "There was an error stopping the user vm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type secondary storage vm, call the + // secondary storage vm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO secStrgVm = _secStrgDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { + String errorMsg = "There was an error stopping the ssvm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { + String errorMsg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + 
_storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type domain router vm, call the network + // manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(domR, true, user, account)) { + String errorMsg = "There was an error stopping the domain router id: " + + vmInstance.getId() + + " ,cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String errorMsg = "There was an error starting the domain router id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + } + + // 5. 
Update the status + pool.setStatus(StoragePoolStatus.Maintenance); + this.primaryDataStoreDao.update(pool.getId(), pool); + + return true; + } catch (Exception e) { + s_logger.error( + "Exception in enabling primary storage maintenance:", e); + setPoolStateToError(pool); + throw new CloudRuntimeException(e.getMessage()); + } + } + + private void setPoolStateToError(StoragePoolVO primaryStorage) { + primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); + this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage); + } + + @Override + public boolean cancelMaintain(long storageId) { + // Change the storage state back to up + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO poolVO = this.primaryDataStoreDao + .findById(storageId); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + storageId, DataStoreRole.Primary); + poolVO.setStatus(StoragePoolStatus.Up); + primaryDataStoreDao.update(storageId, poolVO); + + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + return true; + } + // add heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand( + true, pool); + final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add secceeded"); + } + } + } + + // 2. Get a list of pending work for this queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); + + // 3. 
work through the queue + for (StoragePoolWorkVO work : pendingWork) { + try { + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { + String msg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance + .getId()); + if (vmMgr.advanceStart(ssVm, null, user, account) == null) { + String msg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String msg = "There was an error starting the domR id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + 
work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type user vm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + + if (vmMgr.advanceStart(userVm, null, user, account) == null) { + + String msg = "There was an error starting the user vm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } catch (Exception e) { + s_logger.debug("Failed start vm", e); + throw new CloudRuntimeException(e.toString()); + } + } + return true; + } + + @DB + @Override + public boolean deleteDataStore(long storeId) { + // for the given pool id, find all records in the storage_pool_host_ref + List hostPoolRecords = this._storagePoolHostDao + .listByPoolId(storeId); + StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId); + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary); + boolean deleteFlag = false; + Transaction txn = Transaction.currentTxn(); + try { + // if not records exist, delete the given pool (base case) + if (hostPoolRecords.size() == 0) { + + txn.start(); + poolVO.setUuid(null); + this.primaryDataStoreDao.update(poolVO.getId(), poolVO); + primaryDataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + txn.commit(); + + deleteFlag = true; + return true; + } else { + // Remove the SR associated with the Xenserver + for (StoragePoolHostVO host : hostPoolRecords) { + DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( + pool); + final Answer answer = agentMgr.easySend(host.getHostId(), + deleteCmd); + + if (answer != null && answer.getResult()) { + deleteFlag = true; + break; + } + } + 
} + } finally { + if (deleteFlag) { + // now delete the storage_pool_host_ref and storage_pool records + txn.start(); + for (StoragePoolHostVO host : hostPoolRecords) { + _storagePoolHostDao.deleteStoragePoolHostDetails( + host.getHostId(), host.getPoolId()); + } + poolVO.setUuid(null); + this.primaryDataStoreDao.update(poolVO.getId(), poolVO); + primaryDataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + // Delete op_host_capacity entries + this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, + null, null, null, poolVO.getId()); + txn.commit(); + + s_logger.debug("Storage pool id=" + poolVO.getId() + + " is removed successfully"); + return true; + } else { + // alert that the storage cleanup is required + s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId()); + _alertMgr + .sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, + poolVO.getDataCenterId(), poolVO.getPodId(), + "Unable to delete storage pool id= " + poolVO.getId(), + "Delete storage pool command failed. 
Please check logs."); + } + } + return false; + } + + @DB + private boolean deletePoolStats(Long poolId) { + CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE); + CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); + Transaction txn = Transaction.currentTxn(); + txn.start(); + if (capacity1 != null) { + _capacityDao.remove(capacity1.getId()); + } + + if (capacity2 != null) { + _capacityDao.remove(capacity2.getId()); + } + + txn.commit(); + return true; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); + _storagePoolHostDao.persist(poolHost); + } + + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + pool.setScope(scope.getScopeType()); + pool.setAvailableBytes(existingInfo.getAvailableBytes()); + pool.setCapacityBytes(existingInfo.getCapacityBytes()); + pool.setStatus(StoragePoolStatus.Up); + this.primaryDataStoreDao.update(pool.getId(), pool); + this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); + + return true; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java index ffe7efdcda7..5e8727a316a 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java @@ -26,22 +26,22 @@ import 
javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd; -import org.apache.cloudstack.storage.datastore.DataStoreStatus; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.StoragePoolStatus; public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -58,9 +58,9 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif } @Override - public DataStore initialize(Map dsInfos) { + public DataStore initialize(Map dsInfos) { - PrimaryDataStoreVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); + StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); return providerMgr.getPrimaryDataStore(storeVO.getId()); } @@ -83,11 +83,11 @@ public class 
DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId()); + StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStore.getId()); dataStoreVO.setDataCenterId(scope.getZoneId()); dataStoreVO.setPodId(scope.getPodId()); dataStoreVO.setClusterId(scope.getScopeId()); - dataStoreVO.setStatus(DataStoreStatus.Attaching); + dataStoreVO.setStatus(StoragePoolStatus.Attaching); dataStoreVO.setScope(scope.getScopeType()); dataStoreDao.update(dataStoreVO.getId(), dataStoreVO); @@ -95,7 +95,7 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif attachCluster(dataStore); dataStoreVO = dataStoreDao.findById(dataStore.getId()); - dataStoreVO.setStatus(DataStoreStatus.Up); + dataStoreVO.setStatus(StoragePoolStatus.Up); dataStoreDao.update(dataStoreVO.getId(), dataStoreVO); return true; @@ -114,19 +114,19 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif } @Override - public boolean maintain() { + public boolean maintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean cancelMaintain() { + public boolean cancelMaintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean deleteDataStore() { + public boolean deleteDataStore(long storeId) { // TODO Auto-generated method stub return false; } @@ -139,4 +139,11 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } + @Override + public boolean attachHost(DataStore store, HostScope scope, + StoragePoolInfo existingInfo) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java index 1a24d87346e..fdbe4b47c1e 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java @@ -21,21 +21,21 @@ package org.apache.cloudstack.storage.datastore.manager; import java.util.HashMap; import java.util.Map; +import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.springframework.stereotype.Component; -import com.cloud.utils.component.ComponentContext; - @Component public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProviderManager { @Inject @@ -44,16 +44,18 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt DataStoreProviderManager providerManager; @Inject PrimaryDataStoreDao dataStoreDao; - Map 
driverMaps = new HashMap(); + Map driverMaps; + @PostConstruct + public void config() { + driverMaps = new HashMap(); + } + @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStoreId); + StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId); long providerId = dataStoreVO.getStorageProviderId(); DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - /*DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, - driverMaps.get(provider.getUuid()), - provider);*/ DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider); return dataStore; } @@ -66,4 +68,16 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt driverMaps.put(uuid, driver); return true; } + + @Override + public PrimaryDataStore getPrimaryDataStore(String uuid) { + StoragePoolVO dataStoreVO = dataStoreDao.findByUuid(uuid); + return getPrimaryDataStore(dataStoreVO.getId()); + } + + @Override + public boolean registerHostListener(String uuid, HypervisorHostListener listener) { + // TODO Auto-generated method stub + return false; + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls index 9386454efb3..f1590397b8f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls @@ -1,21 +1,3 @@ - - + - + @@ -72,4 +54,4 @@ - + \ No newline at end of file diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java new file mode 100644 index 00000000000..702ab238ba8 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl; +import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +@Component +public class AncientPrimaryDataStoreProviderImpl implements + PrimaryDataStoreProvider { + + private final String providerName = "ancient primary data store provider"; + protected PrimaryDataStoreDriver driver; + @Inject + PrimaryDataStoreProviderManager storeMgr; + protected DataStoreLifeCycle lifecyle; + protected String uuid; + protected long id; + @Override + public String getName() { + return providerName; + } + + @Override + public DataStoreLifeCycle getLifeCycle() { + return this.lifecyle; + } + + @Override + public boolean configure(Map params) { + lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCyclImpl.class); + driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class); + uuid = (String)params.get("uuid"); + id = (Long)params.get("id"); + storeMgr.registerDriver(uuid, this.driver); + return true; + } + + @Override + public String getUuid() { + return this.uuid; + } + + @Override + public long getId() { + return this.id; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java new file mode 100644 index 00000000000..f2cb1c45c82 --- /dev/null +++ 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.provider; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +public class DefaultHostListener implements HypervisorHostListener { + private static final Logger s_logger = Logger + 
.getLogger(DefaultHostListener.class); + @Inject AgentManager agentMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject AlertManager alertMgr; + @Inject StoragePoolHostDao storagePoolHostDao; + @Inject PrimaryDataStoreDao primaryStoreDao; + @Override + public boolean hostConnect(long hostId, long poolId) { + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + } + + if (!answer.getResult()) { + String msg = "Add host failed due to ModifyStoragePoolCommand failed" + answer.getDetails(); + alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); + throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId()); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=" + pool.getId() + "Host=" + hostId; + ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; + + StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + storagePoolHostDao.persist(poolHost); + } else { + poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + } + + StoragePoolVO poolVO = this.primaryStoreDao.findById(poolId); + poolVO.setAvailableBytes(mspAnswer.getPoolInfo().getAvailableBytes()); + poolVO.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes()); + primaryStoreDao.update(pool.getId(), poolVO); + + s_logger.info("Connection established between " + pool + " host + " + hostId); + return true; + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index 540ea6381fa..85a5d0226d7 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -21,6 +21,7 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; import 
org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; @@ -35,6 +36,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv protected PrimaryDataStoreDriver driver; @Inject PrimaryDataStoreProviderManager storeMgr; + protected DataStoreLifeCycle lifecyle; protected String uuid; protected long id; @@ -52,9 +54,11 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv public boolean configure(Map params) { lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class); driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class); + HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); uuid = (String)params.get("uuid"); id = (Long)params.get("id"); storeMgr.registerDriver(uuid, this.driver); + storeMgr.registerHostListener(uuid, listener); return true; } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java index 7679bb3e729..99b34cbcf18 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java @@ -18,9 +18,9 @@ */ package org.apache.cloudstack.storage.volume; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; public interface TemplateInstallStrategy { diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java index 80e098d769a..5f1735c180a 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java @@ -20,24 +20,16 @@ package org.apache.cloudstack.storage.volume; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcConext; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.motion.DataMotionService; import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; - @Component public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { private static final Logger s_logger = Logger @@ -50,7 +42,7 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { ImageDataFactory imageFactory; protected long waitingTime = 1800; // half an hour protected long waitingRetries = 10; - +/* protected TemplateInfo 
waitingForTemplateDownload(TemplateInfo template, PrimaryDataStore dataStore) { long retries = this.waitingRetries; @@ -106,8 +98,8 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { boolean freshNewTemplate = false; if (obj == null) { try { - /*templateOnPrimaryStoreObj = objectInDataStoreMgr.create( - template, store);*/ + templateOnPrimaryStoreObj = objectInDataStoreMgr.create( + template, store); freshNewTemplate = true; } catch (Throwable e) { obj = objectInDataStoreMgr.findObject(template.getId(), @@ -264,13 +256,10 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { res.setResult(result.getResult()); context.getParentCallback().complete(res); } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - templateOnPrimaryStoreObj.getId(), templateOnPrimaryStoreObj - .getType(), templateOnPrimaryStoreObj.getDataStore() - .getId(), templateOnPrimaryStoreObj.getDataStore() - .getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + templateOnPrimaryStoreObj, templateOnPrimaryStoreObj.getDataStore()); + - obj.setInstallPath(result.getPath()); CreateBaseImageResult res = new CreateBaseImageResult( templateOnPrimaryStoreObj); try { @@ -289,6 +278,12 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { } context.getParentCallback().complete(res); return null; + }*/ + @Override + public Void installAsync(TemplateInfo template, PrimaryDataStore store, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + return null; } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java index 64af097bb32..e0ecd165d7f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java @@ 
-20,21 +20,23 @@ package org.apache.cloudstack.storage.volume; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.storage.datastore.DataStoreManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.VolumeDataFactory; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.springframework.stereotype.Component; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; + @Component public class VolumeDataFactoryImpl implements VolumeDataFactory { @Inject - VolumeDao2 volumeDao; + VolumeDao volumeDao; @Inject ObjectInDataStoreManager objMap; @Inject @@ -42,12 +44,30 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory { @Override public VolumeInfo getVolume(long volumeId, DataStore store) { VolumeVO volumeVO = volumeDao.findById(volumeId); - ObjectInDataStoreVO obj = objMap.findObject(volumeId, DataObjectType.VOLUME, store.getId(), store.getRole()); - if (obj == null) { - VolumeObject vol = VolumeObject.getVolumeObject(null, volumeVO); - return vol; - } + VolumeObject vol = VolumeObject.getVolumeObject(store, volumeVO); + + return vol; + } + + @Override + public VolumeInfo getVolume(long volumeId) { + VolumeVO volumeVO = volumeDao.findById(volumeId); + VolumeObject vol = null; + if (volumeVO.getPoolId() == null) { + 
DataStore store = objMap.findStore(volumeVO.getUuid(), DataObjectType.VOLUME, DataStoreRole.Image); + vol = VolumeObject.getVolumeObject(store, volumeVO); + } else { + DataStore store = this.storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); + vol = VolumeObject.getVolumeObject(store, volumeVO); + } + return vol; + } + + @Override + public VolumeInfo getVolume(DataObject volume, DataStore store) { + VolumeInfo vol = (VolumeObject)getVolume(volume.getId(), store); + vol.addPayload(((VolumeInfo)volume).getpayload()); return vol; } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java index 14d741707b5..f8d50437d14 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java @@ -22,20 +22,17 @@ import java.lang.reflect.Method; import java.util.Date; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; -import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreEntityImpl; -import 
org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult; - -import com.cloud.utils.exception.CloudRuntimeException; public class VolumeEntityImpl implements VolumeEntity { private VolumeInfo volumeInfo; @@ -167,7 +164,7 @@ public class VolumeEntityImpl implements VolumeEntity { @Override public void destroy() { - AsyncCallFuture future = vs.deleteVolumeAsync(volumeInfo); + /*AsyncCallFuture future = vs.deleteVolumeAsync(volumeInfo); try { result = future.get(); if (!result.isSuccess()) { @@ -177,7 +174,7 @@ public class VolumeEntityImpl implements VolumeEntity { throw new CloudRuntimeException("wait to delete volume info failed", e); } catch (ExecutionException e) { throw new CloudRuntimeException("wait to delete volume failed", e); - } + }*/ } @Override diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java deleted file mode 100644 index bcff312626f..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.volume; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; -import org.springframework.stereotype.Component; - -import com.cloud.storage.Volume; -import com.cloud.storage.Volume.Event; -import com.cloud.storage.Volume.State; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; - -@Component -public class VolumeManagerImpl implements VolumeManager { - @Inject - protected VolumeDao2 _volumeDao; - private final StateMachine2 s_fsm = new StateMachine2(); - public VolumeManagerImpl() { - initStateMachine(); - } - - @Override - public VolumeVO allocateDuplicateVolume(VolumeVO oldVol) { - /* - VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize()); - newVol.setTemplateId(oldVol.getTemplateId()); - newVol.setDeviceId(oldVol.getDeviceId()); - newVol.setInstanceId(oldVol.getInstanceId()); - newVol.setRecreatable(oldVol.isRecreatable()); - newVol.setReservationId(oldVol.getReservationId()); - */ - return null; - // return _volumeDao.persist(newVol); - } - - private void initStateMachine() { - s_fsm.addTransition(Volume.State.Allocated, Event.CreateRequested, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Allocated, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, Event.OperationRetry, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Creating, Event.OperationFailed, Volume.State.Allocated); - s_fsm.addTransition(Volume.State.Creating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, 
Event.CreateRequested, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Allocated, Event.UploadRequested, Volume.State.UploadOp); - s_fsm.addTransition(Volume.State.UploadOp, Event.CopyRequested, Volume.State.Creating);// CopyRequested for volume from sec to primary storage - s_fsm.addTransition(Volume.State.Creating, Event.CopySucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.CopyFailed, Volume.State.UploadOp);// Copying volume from sec to primary failed. - s_fsm.addTransition(Volume.State.UploadOp, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Ready, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroy, Event.ExpungingRequested, Volume.State.Expunging); - s_fsm.addTransition(Volume.State.Ready, Event.SnapshotRequested, Volume.State.Snapshotting); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Ready, Event.MigrationRequested, Volume.State.Migrating); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Destroy, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationFailed, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroying, Event.DestroyRequested, Volume.State.Destroying); - } - - @Override - public StateMachine2 getStateMachine() { - return s_fsm; - } - - @Override - public VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException { - // _volStateMachine.transitTo(vol, event, null, _volumeDao); - return 
_volumeDao.findById(vol.getId()); - } - - @Override - public VolumeProfile getProfile(long volumeId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public VolumeVO getVolume(long volumeId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public VolumeVO updateVolume(VolumeVO volume) { - // TODO Auto-generated method stub - return null; - } -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java index 9e04909135e..87951ceeb64 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -16,19 +16,23 @@ // under the License. package org.apache.cloudstack.storage.volume; +import java.util.Date; + import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.apache.log4j.Logger; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.utils.fsm.NoTransitionException; @@ -38,17 +42,16 @@ import com.cloud.utils.storage.encoding.EncodingType; public class VolumeObject implements VolumeInfo { private static final Logger s_logger = Logger.getLogger(VolumeObject.class); protected VolumeVO volumeVO; - private StateMachine2 _volStateMachine; + private StateMachine2 _volStateMachine; protected DataStore dataStore; @Inject - VolumeDao2 volumeDao; - @Inject - VolumeManager volumeMgr; + VolumeDao volumeDao; @Inject ObjectInDataStoreManager ojbectInStoreMgr; + private Object payload; protected VolumeObject() { - + _volStateMachine = Volume.State.getStateMachine(); } protected void configure(DataStore dataStore, VolumeVO volumeVO) { @@ -88,12 +91,11 @@ public class VolumeObject implements VolumeInfo { public long getVolumeId() { return volumeVO.getId(); } - public boolean stateTransit(Volume.Event event) { boolean result = false; - _volStateMachine = volumeMgr.getStateMachine(); try { result = _volStateMachine.transitTo(volumeVO, event, null, volumeDao); + volumeVO = volumeDao.findById(volumeVO.getId()); } catch (NoTransitionException e) { String errorMessage = "Failed to transit volume: " + this.getVolumeId() + ", due to: " + e.toString(); s_logger.debug(errorMessage); @@ -122,7 +124,7 @@ public class VolumeObject implements VolumeInfo { if (this.dataStore == null) { throw new CloudRuntimeException("datastore must be set before using this object"); } - ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject(this.volumeVO.getId(), DataObjectType.VOLUME, this.dataStore.getId(), this.dataStore.getRole()); + DataObjectInStore obj = ojbectInStoreMgr.findObject(this.volumeVO.getUuid(), DataObjectType.VOLUME, this.dataStore.getUuid(), this.dataStore.getRole()); if (obj.getState() != ObjectInDataStoreStateMachine.State.Ready) { return this.dataStore.getUri() + "&" + EncodingType.OBJTYPE + "=" + DataObjectType.VOLUME + @@ -145,4 +147,167 @@ public class VolumeObject implements VolumeInfo { // TODO 
Auto-generated method stub return null; } + + @Override + public void processEvent( + ObjectInDataStoreStateMachine.Event event) { + if (this.dataStore == null) { + return; + } + try { + Volume.Event volEvent = null; + if (this.dataStore.getRole() == DataStoreRole.Image) { + ojbectInStoreMgr.update(this, event); + if (event == ObjectInDataStoreStateMachine.Event.CreateRequested) { + volEvent = Volume.Event.UploadRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationSuccessed) { + volEvent = Volume.Event.CopySucceeded; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationFailed) { + volEvent = Volume.Event.CopyFailed; + } + } else { + if (event == ObjectInDataStoreStateMachine.Event.CreateRequested || + event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { + volEvent = Volume.Event.CreateRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.CopyingRequested) { + volEvent = Volume.Event.CopyRequested; + } + } + + if (event == ObjectInDataStoreStateMachine.Event.DestroyRequested) { + volEvent = Volume.Event.DestroyRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.ExpungeRequested) { + volEvent = Volume.Event.ExpungingRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationSuccessed) { + volEvent = Volume.Event.OperationSucceeded; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationFailed) { + volEvent = Volume.Event.OperationFailed; + } + this.stateTransit(volEvent); + } catch (Exception e) { + s_logger.debug("Failed to update state", e); + throw new CloudRuntimeException("Failed to update state:" + e.toString()); + } + + } + + @Override + public String getName() { + return this.volumeVO.getName(); + } + + @Override + public Long getInstanceId() { + return this.volumeVO.getInstanceId(); + } + + @Override + public String getFolder() { + return this.volumeVO.getFolder(); + } + + @Override + public String getPath() { + return this.volumeVO.getPath(); 
+ } + + @Override + public Long getPodId() { + return this.volumeVO.getPodId(); + } + + @Override + public long getDataCenterId() { + return this.volumeVO.getDataCenterId(); + } + + @Override + public Type getVolumeType() { + return this.volumeVO.getVolumeType(); + } + + @Override + public Long getPoolId() { + return this.volumeVO.getPoolId(); + } + + @Override + public Date getAttached() { + return this.volumeVO.getAttached(); + } + + @Override + public Long getDeviceId() { + return this.volumeVO.getDeviceId(); + } + + @Override + public Date getCreated() { + return this.volumeVO.getCreated(); + } + + @Override + public long getDiskOfferingId() { + return this.volumeVO.getDiskOfferingId(); + } + + @Override + public String getChainInfo() { + return this.volumeVO.getChainInfo(); + } + + @Override + public boolean isRecreatable() { + return this.volumeVO.isRecreatable(); + } + + @Override + public long getUpdatedCount() { + return this.volumeVO.getUpdatedCount(); + } + + @Override + public void incrUpdatedCount() { + this.volumeVO.incrUpdatedCount(); + } + + @Override + public Date getUpdated() { + return this.volumeVO.getUpdated(); + } + + @Override + public String getReservationId() { + return this.volumeVO.getReservationId(); + } + + @Override + public void setReservationId(String reserv) { + this.volumeVO.setReservationId(reserv); + } + + @Override + public long getAccountId() { + return this.volumeVO.getAccountId(); + } + + @Override + public long getDomainId() { + return this.volumeVO.getDomainId(); + } + + @Override + public Long getTemplateId() { + return this.volumeVO.getTemplateId(); + } + + @Override + public void addPayload(Object data) { + this.payload = data; + } + + @Override + public Object getpayload() { + return this.payload; + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 8cfbae455e7..891ad1249df 
100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -22,12 +22,16 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -36,22 +40,29 @@ import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.motion.ImageMotionService; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import 
org.apache.cloudstack.storage.volume.db.VolumeVO; +import org.apache.cloudstack.storage.motion.DataMotionService; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.exception.ConcurrentOperationException; import com.cloud.storage.Volume; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.db.DB; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; //1. change volume state //2. orchestrator of volume, control most of the information of volume, storage pool id, voluem state, scope etc. @Component public class VolumeServiceImpl implements VolumeService { + private static final Logger s_logger = Logger + .getLogger(VolumeServiceImpl.class); @Inject - VolumeDao2 volDao; + VolumeDao volDao; @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject @@ -59,27 +70,31 @@ public class VolumeServiceImpl implements VolumeService { @Inject DataObjectManager dataObjectMgr; @Inject - ImageMotionService imageMotion; + DataMotionService motionSrv; @Inject TemplateInstallStrategy templateInstallStrategy; + @Inject + VolumeDataFactory volFactory; + @Inject SnapshotManager snapshotMgr; + @Inject VMInstanceDao vmDao; public VolumeServiceImpl() { } private class CreateVolumeContext extends AsyncRpcConext { - private VolumeObject volume; + private DataObject volume; private AsyncCallFuture future; /** * @param callback */ - public CreateVolumeContext(AsyncCompletionCallback callback, VolumeObject volume, AsyncCallFuture future) { + public CreateVolumeContext(AsyncCompletionCallback callback, DataObject volume, AsyncCallFuture future) { super(callback); this.volume = volume; this.future = future; } - public VolumeObject getVolume() { + public DataObject getVolume() { return this.volume; } @@ -89,49 +104,35 @@ public class VolumeServiceImpl implements VolumeService { } - - 
@Override - public AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId) { - PrimaryDataStore dataStore = dataStoreMgr.getPrimaryDataStore(dataStoreId); + public AsyncCallFuture createVolumeAsync(VolumeInfo volume, DataStore dataStore) { AsyncCallFuture future = new AsyncCallFuture(); - VolumeApiResult result = new VolumeApiResult(volume); - - if (dataStore == null) { - result.setResult("Can't find dataStoreId: " + dataStoreId); - future.complete(result); - return future; - } + DataObject volumeOnStore = dataStore.create(volume); + volumeOnStore.processEvent(Event.CreateOnlyRequested); - if (dataStore.exists(volume)) { - result.setResult("Volume: " + volume.getId() + " already exists on primary data store: " + dataStoreId); - future.complete(result); - return future; - } - - VolumeObject vo = (VolumeObject) volume; - vo.stateTransit(Volume.Event.CreateRequested); - - CreateVolumeContext context = new CreateVolumeContext(null, vo, future); + CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeCallback(null, null)) .setContext(context); - dataObjectMgr.createAsync(volume, dataStore, caller, true); + dataStore.getDriver().createAsync(volumeOnStore, caller); return future; } protected Void createVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { CreateCmdResult result = callback.getResult(); - VolumeObject vo = context.getVolume(); - VolumeApiResult volResult = new VolumeApiResult(vo); + DataObject vo = context.getVolume(); + String errMsg = null; if (result.isSuccess()) { - vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); } else { - vo.stateTransit(Volume.Event.OperationFailed); - volResult.setResult(result.getResult()); + vo.processEvent(Event.OperationFailed); + errMsg = result.getResult(); + } + VolumeApiResult volResult = 
new VolumeApiResult((VolumeObject)vo); + if (errMsg != null) { + volResult.setResult(errMsg); } - context.getFuture().complete(volResult); return null; } @@ -159,26 +160,47 @@ public class VolumeServiceImpl implements VolumeService { @DB @Override - public AsyncCallFuture deleteVolumeAsync(VolumeInfo volume) { - VolumeObject vo = (VolumeObject)volume; + public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); - - DataStore dataStore = vo.getDataStore(); - vo.stateTransit(Volume.Event.DestroyRequested); - if (dataStore == null) { - vo.stateTransit(Volume.Event.OperationSucceeded); - volDao.remove(vo.getId()); + if (volume.getDataStore() == null) { + this.volDao.remove(volume.getId()); future.complete(result); return future; } + String vmName = null; + VolumeVO vol = this.volDao.findById(volume.getId()); + if (vol.getVolumeType() == Type.ROOT && vol.getInstanceId() != null) { + VirtualMachine vm = vmDao.findByIdIncludingRemoved(vol + .getInstanceId()); + if (vm != null) { + vmName = vm.getInstanceName(); + } + } + + String volumePath = vol.getPath(); + Long poolId = vol.getPoolId(); + if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Marking volume that was never created as destroyed: " + + vol); + } + this.volDao.remove(vol.getId()); + future.complete(result); + return future; + } + VolumeObject vo = (VolumeObject)volume; + + volume.processEvent(Event.ExpungeRequested); + + DeleteVolumeContext context = new DeleteVolumeContext(null, vo, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)) .setContext(context); - dataObjectMgr.deleteAsync(volume, caller); + volume.getDataStore().getDriver().deleteAsync(volume, caller); return future; } @@ -187,10 +209,10 @@ public class VolumeServiceImpl 
implements VolumeService { VolumeObject vo = context.getVolume(); VolumeApiResult apiResult = new VolumeApiResult(vo); if (result.isSuccess()) { - vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); volDao.remove(vo.getId()); } else { - vo.stateTransit(Volume.Event.OperationFailed); + vo.processEvent(Event.OperationFailed); apiResult.setResult(result.getResult()); } context.getFuture().complete(apiResult); @@ -203,24 +225,6 @@ public class VolumeServiceImpl implements VolumeService { return false; } - @Override - public boolean createVolumeFromSnapshot(long volumeId, long snapshotId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean rokeAccess(long volumeId, long endpointId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public VolumeEntity allocateVolumeInDb(long size, VolumeType type, String volName, Long templateId) { - VolumeVO vo = volDao.allocVolume(size, type, volName, templateId); - return new VolumeEntityImpl(VolumeObject.getVolumeObject(null, vo), this); - } - @Override public VolumeEntity getVolumeEntity(long volumeId) { VolumeVO vo = volDao.findById(volumeId); @@ -236,25 +240,21 @@ public class VolumeServiceImpl implements VolumeService { } } - @Override - public String grantAccess(VolumeInfo volume, EndPoint endpointId) { - // TODO Auto-generated method stub - return null; - } - class CreateBaseImageContext extends AsyncRpcConext { private final VolumeInfo volume; private final PrimaryDataStore dataStore; private final TemplateInfo srcTemplate; private final AsyncCallFuture future; + final DataObject destObj; public CreateBaseImageContext(AsyncCompletionCallback callback, VolumeInfo volume, PrimaryDataStore datastore, TemplateInfo srcTemplate, - AsyncCallFuture future) { + AsyncCallFuture future, DataObject destObj) { super(callback); this.volume = volume; this.dataStore = datastore; this.future = future; this.srcTemplate = srcTemplate; + 
this.destObj = destObj; } public VolumeInfo getVolume() { @@ -285,33 +285,45 @@ public class VolumeServiceImpl implements VolumeService { @DB protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStore, TemplateInfo template, AsyncCallFuture future) { + + DataObject templateOnPrimaryStoreObj = dataStore.create(template); CreateBaseImageContext context = new CreateBaseImageContext(null, volume, dataStore, template, - future); - - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + future, templateOnPrimaryStoreObj); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)) .setContext(context); - DataObject templateOnPrimaryStoreObj = dataObjectMgr.createInternalStateOnly(template, dataStore); + + templateOnPrimaryStoreObj.processEvent(Event.CreateOnlyRequested); - dataObjectMgr.copyAsync(context.srcTemplate, templateOnPrimaryStoreObj, caller); + try { + motionSrv.copyAsync(template, templateOnPrimaryStoreObj, caller); + } catch (Exception e) { + s_logger.debug("failed to create template on storage", e); + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); + VolumeApiResult result = new VolumeApiResult(volume); + result.setResult(e.toString()); + caller.complete(result); + } return; } @DB - protected Void copyBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { - CreateCmdResult result = callback.getResult(); + protected Void copyBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { + CopyCommandResult result = callback.getResult(); VolumeApiResult res = new VolumeApiResult(context.getVolume()); AsyncCallFuture future = context.getFuture(); + DataObject templateOnPrimaryStoreObj = context.destObj; if (!result.isSuccess()) { + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); res.setResult(result.getResult()); future.complete(res); return null; } - 
DataObject templateOnPrimaryStoreObj = objectInDataStoreMgr.get(context.srcTemplate, context.dataStore); - + + templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed); createVolumeFromBaseImageAsync(context.volume, templateOnPrimaryStoreObj, context.dataStore, future); return null; } @@ -332,10 +344,7 @@ public class VolumeServiceImpl implements VolumeService { this.templateOnStore = templateOnStore; } - public VolumeObject getVolumeObject() { - return this.vo; - } - + public AsyncCallFuture getFuture() { return this.future; } @@ -343,39 +352,32 @@ public class VolumeServiceImpl implements VolumeService { @DB protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject templateOnPrimaryStore, PrimaryDataStore pd, AsyncCallFuture future) { - VolumeObject vo = (VolumeObject) volume; - try { - vo.stateTransit(Volume.Event.CreateRequested); - } catch (Exception e) { - VolumeApiResult result = new VolumeApiResult(volume); - result.setResult(e.toString()); - future.complete(result); - return; - } - + VolumeObject vo = (VolumeObject)volume; CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, vo, pd, templateOnPrimaryStore, future); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallBack(null, null)) .setContext(context); - DataObject volumeOnPrimaryStorage = dataObjectMgr.createInternalStateOnly(volume, pd); - dataObjectMgr.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); + DataObject volumeOnPrimaryStorage = pd.create(volume); + volume.processEvent(Event.CreateOnlyRequested); + + motionSrv.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); return; } @DB - public Void copyBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { - VolumeObject vo = context.getVolumeObject(); - CreateCmdResult 
result = callback.getResult(); + public Void copyBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { + VolumeObject vo = context.vo; + CopyCommandResult result = callback.getResult(); VolumeApiResult volResult = new VolumeApiResult(vo); if (result.isSuccess()) { if (result.getPath() != null) { vo.setPath(result.getPath()); } - vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); } else { - vo.stateTransit(Volume.Event.OperationFailed); + vo.processEvent(Event.OperationFailed); volResult.setResult(result.getResult()); } @@ -397,13 +399,65 @@ public class VolumeServiceImpl implements VolumeService { return future; } - createVolumeFromBaseImageAsync(volume, template, pd, future); + createVolumeFromBaseImageAsync(volume, templateOnPrimaryStore, pd, future); return future; } @Override - public TemplateOnPrimaryDataStoreInfo grantAccess(TemplateOnPrimaryDataStoreInfo template, EndPoint endPoint) { + @DB + public boolean destroyVolume(long volumeId) + throws ConcurrentOperationException { + + VolumeInfo vol = this.volFactory.getVolume(volumeId); + vol.processEvent(Event.DestroyRequested); + this.snapshotMgr.deletePoliciesForVolume(volumeId); + + vol.processEvent(Event.OperationSuccessed); + + return true; + } + + @Override + public AsyncCallFuture createVolumeFromSnapshot( + VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { // TODO Auto-generated method stub return null; } + + @Override + public AsyncCallFuture copyVolume(VolumeInfo srcVolume, + DataStore destStore) { + // TODO Auto-generated method stub + return null; + } + + @Override + public AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store) { + + AsyncCallFuture future = new AsyncCallFuture(); + VolumeObject vo = (VolumeObject) volume; + vo.stateTransit(Volume.Event.UploadRequested); + + CreateVolumeContext context = new CreateVolumeContext(null, vo, future); + AsyncCallbackDispatcher caller = 
AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)) + .setContext(context); + + dataObjectMgr.createAsync(volume, store, caller, true); + return future; + } + + protected Void registerVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + CreateCmdResult result = callback.getResult(); + VolumeObject vo = (VolumeObject)context.volume; + /*if (result.isFailed()) { + vo.stateTransit(Volume.Event.OperationFailed); + } else { + vo.stateTransit(Volume.Event.OperationSucceeded); + }*/ + VolumeApiResult res = new VolumeApiResult(vo); + context.future.complete(res); + return null; + } + } diff --git a/framework/api/pom.xml b/framework/api/pom.xml new file mode 100644 index 00000000000..3212d7c2644 --- /dev/null +++ b/framework/api/pom.xml @@ -0,0 +1,42 @@ + + + 4.0.0 + cloud-framework-api + + org.apache.cloudstack + cloudstack-framework + 4.1.0-SNAPSHOT + ../pom.xml + + + + + org.apache.cloudstack + cloud-utils + 4.1.0-SNAPSHOT + + + + + + install + src + ${project.basedir}/test + + + ${project.basedir}/test/resources + + + + diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java b/framework/api/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java similarity index 100% rename from framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java rename to framework/api/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java b/framework/api/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java similarity index 100% rename from framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java rename to framework/api/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java diff --git a/framework/pom.xml b/framework/pom.xml index 4dfb409f04e..4633dab2b30 100644 --- 
a/framework/pom.xml +++ b/framework/pom.xml @@ -33,5 +33,6 @@ ipc rest events + api diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 84c37473a4a..5d7edce12ef 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -60,9 +60,9 @@ import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.utils.UriUtils; diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index a0540637e95..c2f4923e7e6 100755 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -79,9 +79,9 @@ import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.user.Account; diff --git 
a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java index 70660d2bb69..9c291491114 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java @@ -144,6 +144,7 @@ public class XenServerStorageResource { try { obj = Decoder.decode(uriString); + DecodedDataStore store = obj.getStore(); if (obj.getObjType().equalsIgnoreCase("template") && store.getRole().equalsIgnoreCase("image")) { return getTemplateSize(cmd, obj.getPath()); @@ -224,6 +225,7 @@ public class XenServerStorageResource { } protected SR getNfsSR(Connection conn, DecodedDataStore store) { + Map deviceConfig = new HashMap(); String uuid = store.getUuid(); @@ -410,6 +412,7 @@ public class XenServerStorageResource { try { DecodedDataObject obj = Decoder.decode(storeUrl); DecodedDataStore store = obj.getStore(); + if (store.getScheme().equalsIgnoreCase("nfs")) { SR sr = getNfsSR(conn, store); } else if (store.getScheme().equalsIgnoreCase("iscsi")) { @@ -570,7 +573,9 @@ public class XenServerStorageResource { Connection conn = hypervisorResource.getConnection(); try { DecodedDataObject obj = Decoder.decode(dataStoreUri); + DecodedDataStore store = obj.getStore(); + SR sr = hypervisorResource.getStorageRepository(conn, store.getUuid()); hypervisorResource.setupHeartbeatSr(conn, sr, false); long capacity = sr.getPhysicalSize(conn); diff --git a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java index 812867ee69d..af21f50cc6f 100644 --- a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java +++ 
b/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java @@ -21,6 +21,7 @@ import java.util.List; import javax.ejb.Local; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -28,7 +29,6 @@ import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.server.StatsCollector; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; @@ -77,7 +77,8 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { break; } if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) { - suitablePools.add(pool); + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + suitablePools.add(pol); } } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index 3244c7aa4ed..88c53740f32 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -24,8 +24,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import 
org.apache.cloudstack.storage.snapshot.SnapshotInfo; import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 4545f0a5e99..f8a8fd8b1b9 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -38,6 +38,8 @@ import javax.mail.URLName; import javax.mail.internet.InternetAddress; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -69,8 +71,6 @@ import com.cloud.network.dao.IPAddressDao; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; @@ -102,7 +102,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { @Inject private VolumeDao _volumeDao; @Inject private IPAddressDao _publicIPAddressDao; @Inject private DataCenterIpAddressDao _privateIPAddressDao; - @Inject private StoragePoolDao _storagePoolDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private ConfigurationDao _configDao; @Inject private ResourceManager _resourceMgr; @Inject private ConfigurationManager _configMgr; diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index e6b1bf16a03..0a203528f85 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -45,7 +45,7 @@ import 
org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; - +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; @@ -182,10 +182,13 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.*; import com.cloud.storage.Storage.ImageFormat; + import com.cloud.storage.Volume.Type; import com.cloud.storage.dao.*; import com.cloud.storage.snapshot.SnapshotPolicy; +import com.cloud.template.TemplateManager; import com.cloud.user.*; + import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; @@ -218,9 +221,12 @@ public class ApiDBUtils { static AsyncJobManager _asyncMgr; static SecurityGroupManager _securityGroupMgr; static StorageManager _storageMgr; + static VolumeManager _volumeMgr; static UserVmManager _userVmMgr; static NetworkModel _networkModel; static NetworkManager _networkMgr; + static TemplateManager _templateMgr; + static StatsCollector _statsCollector; static AccountDao _accountDao; @@ -321,6 +327,8 @@ public class ApiDBUtils { @Inject private NetworkModel networkModel; @Inject private NetworkManager networkMgr; @Inject private StatsCollector statsCollector; + @Inject private TemplateManager templateMgr; + @Inject private VolumeManager volumeMgr; @Inject private AccountDao accountDao; @Inject private AccountVlanMapDao accountVlanMapDao; @@ -421,6 +429,7 @@ public class ApiDBUtils { _networkModel = networkModel; _networkMgr = networkMgr; _configMgr = configMgr; + _templateMgr = templateMgr; _accountDao = accountDao; _accountVlanMapDao = accountVlanMapDao; @@ -784,7 +793,7 @@ public class ApiDBUtils { List res = _templateHostDao.listByTemplateId(templateId); return res.size() 
== 0 ? null : res.get(0); } else { - return _storageMgr.getTemplateHostRef(zoneId, templateId, readyOnly); + return _templateMgr.getTemplateHostRef(zoneId, templateId, readyOnly); } } @@ -886,7 +895,7 @@ public class ApiDBUtils { throw new InvalidParameterValueException("Please specify a valid volume ID."); } - return _storageMgr.volumeOnSharedStoragePool(volume); + return _volumeMgr.volumeOnSharedStoragePool(volume); } public static List getNics(VirtualMachine vm) { diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index a94e93568e2..3da31689d1d 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -94,6 +94,7 @@ import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.api.response.S3Response; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; import com.cloud.async.AsyncJob; @@ -165,6 +166,7 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; + import com.cloud.storage.snapshot.SnapshotPolicy; import com.cloud.storage.snapshot.SnapshotSchedule; import com.cloud.template.VirtualMachineTemplate; diff --git a/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java b/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java new file mode 100644 index 00000000000..36d66d9dc96 --- /dev/null +++ b/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java @@ -0,0 +1,372 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.api.commands; + +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.List; +import java.util.TimeZone; + +import org.apache.cloudstack.api.response.AccountResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.ApiConstants; +import com.cloud.api.ApiDBUtils; +import com.cloud.dc.DataCenter; +import com.cloud.domain.Domain; + +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.ListResponse; +import com.cloud.projects.Project; +import com.cloud.server.ManagementServerExt; +import com.cloud.storage.VMTemplateVO; + +import org.apache.cloudstack.api.response.UsageRecordResponse; + +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.user.Account; +import com.cloud.uuididentity.dao.IdentityDao; +import com.cloud.uuididentity.dao.IdentityDaoImpl; +import com.cloud.vm.VMInstanceVO; + +@APICommand(name = "listUsageRecords", description="Lists usage records for accounts", responseObject=UsageRecordResponse.class) +public class GetUsageRecordsCmd extends BaseListCmd { + public static final Logger 
s_logger = Logger.getLogger(GetUsageRecordsCmd.class.getName()); + + private static final String s_name = "listusagerecordsresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, description="List usage records for the specified user.") + private String accountName; + + @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType = DomainResponse.class, + description="List usage records for the specified domain.") + private Long domainId; + + @Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.") + private Date endDate; + + @Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. 
startDate=2009-06-01.") + private Date startDate; + + @Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.UUID, entityType = AccountResponse.class, + description="List usage records for the specified account") + private Long accountId; + + @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, + description="List usage records for specified project") + private Long projectId; + + @Parameter(name=ApiConstants.TYPE, type=CommandType.LONG, description="List usage records for the specified usage type") + private Long usageType; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getAccountName() { + return accountName; + } + + public Long getDomainId() { + return domainId; + } + + public Date getEndDate() { + return endDate; + } + + public Date getStartDate() { + return startDate; + } + + public Long getAccountId() { + return accountId; + } + + public Long getUsageType() { + return usageType; + } + + public Long getProjectId() { + return projectId; + } + + ///////////////////////////////////////////////////// + /////////////// Misc parameters /////////////////// + ///////////////////////////////////////////////////// + + private TimeZone usageTimezone; + + public TimeZone getUsageTimezone() { + return usageTimezone; + } + + public void setUsageTimezone(TimeZone tz) { + this.usageTimezone = tz; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + public String getDateStringInternal(Date inputDate) { + if (inputDate == null) return null; + + TimeZone tz = getUsageTimezone(); + Calendar cal = Calendar.getInstance(tz); + cal.setTime(inputDate); + + StringBuffer sb = new StringBuffer(); + 
sb.append(cal.get(Calendar.YEAR)+"-"); + + int month = cal.get(Calendar.MONTH) + 1; + if (month < 10) { + sb.append("0" + month + "-"); + } else { + sb.append(month+"-"); + } + + int day = cal.get(Calendar.DAY_OF_MONTH); + if (day < 10) { + sb.append("0" + day); + } else { + sb.append(""+day); + } + + sb.append("'T'"); + + int hour = cal.get(Calendar.HOUR_OF_DAY); + if (hour < 10) { + sb.append("0" + hour + ":"); + } else { + sb.append(hour+":"); + } + + int minute = cal.get(Calendar.MINUTE); + if (minute < 10) { + sb.append("0" + minute + ":"); + } else { + sb.append(minute+":"); + } + + int seconds = cal.get(Calendar.SECOND); + if (seconds < 10) { + sb.append("0" + seconds); + } else { + sb.append(""+seconds); + } + + double offset = cal.get(Calendar.ZONE_OFFSET); + if (tz.inDaylightTime(inputDate)) { + offset += (1.0*tz.getDSTSavings()); // add the timezone's DST value (typically 1 hour expressed in milliseconds) + } + + offset = offset / (1000d*60d*60d); + int hourOffset = (int)offset; + double decimalVal = Math.abs(offset) - Math.abs(hourOffset); + int minuteOffset = (int)(decimalVal * 60); + + if (hourOffset < 0) { + if (hourOffset > -10) { + sb.append("-0"+Math.abs(hourOffset)); + } else { + sb.append("-"+Math.abs(hourOffset)); + } + } else { + if (hourOffset < 10) { + sb.append("+0" + hourOffset); + } else { + sb.append("+" + hourOffset); + } + } + + sb.append(":"); + + if (minuteOffset == 0) { + sb.append("00"); + } else if (minuteOffset < 10) { + sb.append("0" + minuteOffset); + } else { + sb.append("" + minuteOffset); + } + + return sb.toString(); + } + + @Override + public void execute(){ + ManagementServerExt _mgrExt = (ManagementServerExt)_mgr; + List usageRecords = _mgrExt.getUsageRecords(this); + IdentityDao identityDao = new IdentityDaoImpl(); + ListResponse response = new ListResponse(); + List usageResponses = new ArrayList(); + for (Object usageRecordGeneric : usageRecords) { + UsageRecordResponse usageRecResponse = new UsageRecordResponse(); + 
if (usageRecordGeneric instanceof UsageVO) { + UsageVO usageRecord = (UsageVO)usageRecordGeneric; + + Account account = ApiDBUtils.findAccountByIdIncludingRemoved(usageRecord.getAccountId()); + if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) { + //find the project + Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); + usageRecResponse.setProjectId(project.getUuid()); + usageRecResponse.setProjectName(project.getName()); + } else { + usageRecResponse.setAccountId(account.getUuid()); + usageRecResponse.setAccountName(account.getAccountName()); + } + + Domain domain = ApiDBUtils.findDomainById(usageRecord.getDomainId()); + if (domain != null) { + usageRecResponse.setDomainId(domain.getUuid()); + } + + if (usageRecord.getZoneId() != null) { + DataCenter zone = ApiDBUtils.findZoneById(usageRecord.getZoneId()); + if (zone != null) { + usageRecResponse.setZoneId(zone.getUuid()); + } + } + usageRecResponse.setDescription(usageRecord.getDescription()); + usageRecResponse.setUsage(usageRecord.getUsageDisplay()); + usageRecResponse.setUsageType(usageRecord.getUsageType()); + if (usageRecord.getVmInstanceId() != null) { + VMInstanceVO vm = ApiDBUtils.findVMInstanceById(usageRecord.getVmInstanceId()); + if (vm != null) { + usageRecResponse.setVirtualMachineId(vm.getUuid()); + } + } + usageRecResponse.setVmName(usageRecord.getVmName()); + if (usageRecord.getTemplateId() != null) { + VMTemplateVO template = ApiDBUtils.findTemplateById(usageRecord.getTemplateId()); + if (template != null) { + usageRecResponse.setTemplateId(template.getUuid()); + } + } + + if(usageRecord.getUsageType() == UsageTypes.RUNNING_VM || usageRecord.getUsageType() == UsageTypes.ALLOCATED_VM){ + //Service Offering Id + usageRecResponse.setOfferingId(identityDao.getIdentityUuid("disk_offering", usageRecord.getOfferingId().toString())); + //VM Instance ID + usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_instance", usageRecord.getUsageId().toString())); + 
//Hypervisor Type + usageRecResponse.setType(usageRecord.getType()); + + } else if(usageRecord.getUsageType() == UsageTypes.IP_ADDRESS){ + //isSourceNAT + usageRecResponse.setSourceNat((usageRecord.getType().equals("SourceNat"))?true:false); + //isSystem + usageRecResponse.setSystem((usageRecord.getSize() == 1)?true:false); + //IP Address ID + usageRecResponse.setUsageId(identityDao.getIdentityUuid("user_ip_address", usageRecord.getUsageId().toString())); + + } else if(usageRecord.getUsageType() == UsageTypes.NETWORK_BYTES_SENT || usageRecord.getUsageType() == UsageTypes.NETWORK_BYTES_RECEIVED){ + //Device Type + usageRecResponse.setType(usageRecord.getType()); + if(usageRecord.getType().equals("DomainRouter")){ + //Domain Router Id + usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_instance", usageRecord.getUsageId().toString())); + } else { + //External Device Host Id + usageRecResponse.setUsageId(identityDao.getIdentityUuid("host", usageRecord.getUsageId().toString())); + } + //Network ID + usageRecResponse.setNetworkId(identityDao.getIdentityUuid("networks", usageRecord.getNetworkId().toString())); + + } else if(usageRecord.getUsageType() == UsageTypes.VOLUME){ + //Volume ID + usageRecResponse.setUsageId(identityDao.getIdentityUuid("volumes", usageRecord.getUsageId().toString())); + //Volume Size + usageRecResponse.setSize(usageRecord.getSize()); + //Disk Offering Id + if(usageRecord.getOfferingId() != null){ + usageRecResponse.setOfferingId(identityDao.getIdentityUuid("disk_offering", usageRecord.getOfferingId().toString())); + } + + } else if(usageRecord.getUsageType() == UsageTypes.TEMPLATE || usageRecord.getUsageType() == UsageTypes.ISO){ + //Template/ISO ID + usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_template", usageRecord.getUsageId().toString())); + //Template/ISO Size + usageRecResponse.setSize(usageRecord.getSize()); + + } else if(usageRecord.getUsageType() == UsageTypes.SNAPSHOT){ + //Snapshot ID + 
usageRecResponse.setUsageId(identityDao.getIdentityUuid("snapshots", usageRecord.getUsageId().toString())); + //Snapshot Size + usageRecResponse.setSize(usageRecord.getSize()); + + } else if(usageRecord.getUsageType() == UsageTypes.LOAD_BALANCER_POLICY){ + //Load Balancer Policy ID + usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); + + } else if(usageRecord.getUsageType() == UsageTypes.PORT_FORWARDING_RULE){ + //Port Forwarding Rule ID + usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); + + } else if(usageRecord.getUsageType() == UsageTypes.NETWORK_OFFERING){ + //Network Offering Id + usageRecResponse.setOfferingId(identityDao.getIdentityUuid("network_offerings", usageRecord.getOfferingId().toString())); + //is Default + usageRecResponse.setDefault((usageRecord.getUsageId() == 1)? true:false); + + } else if(usageRecord.getUsageType() == UsageTypes.VPN_USERS){ + //VPN User ID + usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); + + } else if(usageRecord.getUsageType() == UsageTypes.SECURITY_GROUP){ + //Security Group Id + usageRecResponse.setUsageId(identityDao.getIdentityUuid("security_group", usageRecord.getUsageId().toString())); + } + + if (usageRecord.getRawUsage() != null) { + DecimalFormat decimalFormat = new DecimalFormat("###########.######"); + usageRecResponse.setRawUsage(decimalFormat.format(usageRecord.getRawUsage())); + } + + if (usageRecord.getStartDate() != null) { + usageRecResponse.setStartDate(getDateStringInternal(usageRecord.getStartDate())); + } + if (usageRecord.getEndDate() != null) { + usageRecResponse.setEndDate(getDateStringInternal(usageRecord.getEndDate())); + } + } + + usageRecResponse.setObjectName("usagerecord"); + usageResponses.add(usageRecResponse); + } + + response.setResponses(usageResponses); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } +} diff --git a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java 
b/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java index 4440b7a3a10..0cf19fbdfff 100755 --- a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java +++ b/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java @@ -37,10 +37,10 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.resource.ResourceManager; -import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.TemplateProfile; +import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.template.TemplateAdapter; import com.cloud.template.TemplateAdapterBase; diff --git a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java b/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java index 5de5ccdd059..2817fcc32c2 100755 --- a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java +++ b/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java @@ -27,19 +27,12 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; -import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.vm.StartVMCmd; import org.apache.log4j.Logger; -import com.cloud.agent.api.Answer; import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; -import com.cloud.agent.api.baremetal.IpmiBootorResetCommand; import com.cloud.agent.manager.Commands; -import org.apache.cloudstack.api.command.user.vm.StartVMCmd; import 
com.cloud.baremetal.PxeServerManager.PxeServerType; import com.cloud.configuration.Resource.ResourceType; @@ -62,25 +55,18 @@ import com.cloud.org.Grouping; import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume; +import com.cloud.storage.Storage.TemplateType; import com.cloud.template.TemplateAdapter; -import com.cloud.template.TemplateAdapter.TemplateAdapterType; import com.cloud.user.Account; -import com.cloud.user.AccountVO; import com.cloud.user.SSHKeyPair; -import com.cloud.user.User; import com.cloud.user.UserContext; import com.cloud.user.*; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; import com.cloud.utils.net.NetUtils; @@ -103,7 +89,7 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet @PostConstruct public void init() { } - + /* @Override public boolean attachISOToVM(long vmId, long isoId, boolean attach) { s_logger.warn("attachISOToVM is not supported by Bare Metal, just fake a true"); @@ -131,6 +117,7 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet @Override public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException { /*Baremetal creates record after host rebooting for imaging, in createPrivateTemplate*/ + /* return null; } @@ -164,10 +151,12 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet } HostVO pxe = pxes.get(0); + */ /* * prepare() 
will check if current account has right for creating * template */ + /* TemplateAdapter adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName()); Long userId = UserContext.current().getCallerUserId(); userId = (userId == null ? User.UID_SYSTEM : userId); @@ -202,6 +191,7 @@ public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMet throw new CloudRuntimeException(e.getMessage()); } } + */ @Override public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, diff --git a/server/src/com/cloud/capacity/CapacityManager.java b/server/src/com/cloud/capacity/CapacityManager.java index fffb41f87e6..656e744ecf7 100755 --- a/server/src/com/cloud/capacity/CapacityManager.java +++ b/server/src/com/cloud/capacity/CapacityManager.java @@ -16,8 +16,9 @@ // under the License. package com.cloud.capacity; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.host.HostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.component.Manager; import com.cloud.vm.VirtualMachine; diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 4787c7bb37f..74152ff9c39 100755 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -57,10 +58,7 @@ import com.cloud.resource.ServerResource; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; -import 
com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateSwiftVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; @@ -499,28 +497,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } // Add the size for the templateForVmCreation if its not already present - if ((templateForVmCreation != null) && !tmpinstalled) { - // If the template that was passed into this allocator is not installed in the storage pool, - // add 3 * (template size on secondary storage) to the running total - VMTemplateHostVO templateHostVO = _storageMgr.findVmTemplateHost(templateForVmCreation.getId(), pool); - - if (templateHostVO == null) { - VMTemplateSwiftVO templateSwiftVO = _swiftMgr.findByTmpltId(templateForVmCreation.getId()); - if (templateSwiftVO != null) { - long templateSize = templateSwiftVO.getPhysicalSize(); - if (templateSize == 0) { - templateSize = templateSwiftVO.getSize(); - } - totalAllocatedSize += (templateSize + _extraBytesPerVolume); - } - } else { - long templateSize = templateHostVO.getPhysicalSize(); - if ( templateSize == 0 ){ - templateSize = templateHostVO.getSize(); - } - totalAllocatedSize += (templateSize + _extraBytesPerVolume); - } - } + /*if ((templateForVmCreation != null) && !tmpinstalled) { + + }*/ return totalAllocatedSize; } diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java index c33bfafc3af..358261470e3 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -27,13 +27,13 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.capacity.Capacity; import 
com.cloud.capacity.CapacityVO; import com.cloud.storage.Storage; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.utils.Pair; import com.cloud.utils.db.Filter; diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 168ac0e43cb..69f70e521d8 100755 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -29,14 +29,10 @@ import java.util.UUID; import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import javax.persistence.Table; import org.apache.cloudstack.api.ServerApiException; -import com.cloud.offering.DiskOffering; -import com.cloud.storage.dao.DiskOfferingDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; -import org.springframework.context.annotation.Primary; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.AgentControlAnswer; @@ -102,6 +98,7 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.rules.RulesManager; +import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; import com.cloud.offerings.dao.NetworkOfferingDao; @@ -114,13 +111,14 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.servlet.ConsoleProxyServlet; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.dao.DiskOfferingDao; 
import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.User; @@ -233,6 +231,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Inject RulesManager _rulesMgr; @Inject + TemplateManager templateMgr; + @Inject IPAddressDao _ipAddressDao; private ConsoleProxyListener _listener; @@ -1175,7 +1175,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { VMTemplateVO template = _templateDao.findSystemVMTemplate(dataCenterId); - HostVO secondaryStorageHost = _storageMgr.getSecondaryStorageHost(dataCenterId); + HostVO secondaryStorageHost = this.templateMgr.getSecondaryStorageHost(dataCenterId); boolean templateReady = false; if (template != null && secondaryStorageHost != null) { diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index 66a24ac0e43..b452da06807 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.log4j.Logger; import com.cloud.agent.manager.allocator.HostAllocator; @@ -59,7 +60,6 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.allocator.StoragePoolAllocator; @@ -99,6 +99,7 @@ public 
class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Inject protected CapacityDao _capacityDao; @Inject protected AccountManager _accountMgr; @Inject protected StorageManager _storageMgr; + @Inject DataStoreManager dataStoreMgr; //@com.cloud.utils.component.Inject(adapter=StoragePoolAllocator.class) @Inject protected List _storagePoolAllocators; @@ -736,11 +737,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if(plan.getPoolId() != null){ s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId()); List suitablePools = new ArrayList(); - StoragePoolVO pool; + StoragePool pool = null; if(toBeCreated.getPoolId() != null){ - pool = _storagePoolDao.findById(toBeCreated.getPoolId()); + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); }else{ - pool = _storagePoolDao.findById(plan.getPoolId()); + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); } if(!pool.isInMaintenance()){ diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java index eb27fda1fe8..bba8be5c649 100755 --- a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -31,7 +31,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.apache.log4j.NDC; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -60,6 +59,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.server.ManagementServer; import com.cloud.storage.StorageManager; +import com.cloud.storage.VolumeManager; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.user.AccountManager; @@ -140,6 +140,8 @@ public 
class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai ManagementServer _msServer; @Inject ConfigurationDao _configDao; + @Inject + VolumeManager volumeMgr; String _instance; ScheduledExecutorService _executor; @@ -499,7 +501,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai return null; // VM doesn't require HA } - if (!_storageMgr.canVmRestartOnAnotherServer(vm.getId())) { + if (!this.volumeMgr.canVmRestartOnAnotherServer(vm.getId())) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM can not restart on another server."); } diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 79ccdb30198..98044fb5968 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -116,7 +117,6 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.StorageService; import com.cloud.storage.Swift; import com.cloud.storage.SwiftVO; @@ -2223,20 +2223,22 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, User caller = _accountMgr.getActiveUser(UserContext.current() .getCallerUserId()); - if (forceDestroyStorage) { - // put local storage into mainenance mode, will set all the VMs on - // this local storage into stopped state - StoragePool storagePool = 
_storageMgr.findLocalStorageOnHost(host + + if (forceDestroyStorage) { + // put local storage into mainenance mode, will set all the VMs on + // this local storage into stopped state + StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host .getId()); if (storagePool != null) { if (storagePool.getStatus() == StoragePoolStatus.Up || storagePool.getStatus() == StoragePoolStatus.ErrorInMaintenance) { - try { - storagePool = _storageSvr + try { + StoragePool pool = _storageSvr .preparePrimaryStorageForMaintenance(storagePool .getId()); - if (storagePool == null) { - s_logger.debug("Failed to set primary storage into maintenance mode"); + if (pool == null) { + s_logger.debug("Failed to set primary storage into maintenance mode"); + throw new UnableDeleteHostException( "Failed to set primary storage into maintenance mode"); } diff --git a/server/src/com/cloud/server/ManagementServer.java b/server/src/com/cloud/server/ManagementServer.java index 5c34deea53b..6773725f361 100755 --- a/server/src/com/cloud/server/ManagementServer.java +++ b/server/src/com/cloud/server/ManagementServer.java @@ -19,11 +19,12 @@ package com.cloud.server; import java.util.Date; import java.util.List; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.event.EventVO; import com.cloud.host.HostVO; import com.cloud.info.ConsoleProxyInfo; import com.cloud.storage.GuestOSVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.utils.Pair; import com.cloud.utils.component.PluggableService; import com.cloud.vm.VirtualMachine; diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index d3812067118..e80d48c6512 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -47,12 +47,12 @@ import javax.management.MalformedObjectNameException; import javax.management.NotCompliantMBeanException; import 
javax.naming.ConfigurationException; -import com.cloud.storage.dao.*; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; import com.cloud.event.ActionEventUtils; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoCmd; + import org.apache.cloudstack.api.command.admin.account.*; import org.apache.cloudstack.api.command.admin.autoscale.*; import org.apache.cloudstack.api.command.admin.cluster.*; @@ -108,6 +108,8 @@ import org.apache.cloudstack.api.command.user.vpc.*; import org.apache.cloudstack.api.command.user.vpn.*; import org.apache.cloudstack.api.command.user.zone.*; import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; @@ -206,15 +208,22 @@ import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.GuestOsCategory; import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; +import com.cloud.storage.StoragePool; import com.cloud.storage.Upload; import com.cloud.storage.Upload.Mode; import com.cloud.storage.UploadVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolDao; +import com.cloud.storage.dao.UploadDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.snapshot.SnapshotManager; @@ -222,6 +231,7 
@@ import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.upload.UploadMonitor; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -390,6 +400,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject HighAvailabilityManager _haMgr; @Inject + TemplateManager templateMgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject HostTagsDao _hostTagsDao; @Inject @@ -2635,8 +2649,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } long accountId = volume.getAccountId(); - StoragePoolVO srcPool = _poolDao.findById(volume.getPoolId()); - HostVO sserver = _storageMgr.getSecondaryStorageHost(zoneId); + StoragePool srcPool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); + HostVO sserver = this.templateMgr.getSecondaryStorageHost(zoneId); String secondaryStorageURL = sserver.getStorageUrl(); List extractURLList = _uploadDao.listByTypeUploadStatus(volumeId, Upload.Type.VOLUME, UploadVO.Status.DOWNLOAD_URL_CREATED); @@ -2713,7 +2727,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - private String getFormatForPool(StoragePoolVO pool) { + private String getFormatForPool(StoragePool pool) { ClusterVO cluster = ApiDBUtils.findClusterById(pool.getClusterId()); if (cluster.getHypervisorType() == HypervisorType.XenServer) { diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index be83c188f8b..76bae5b4aca 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -29,8 +29,8 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; -import com.cloud.resource.ResourceManager; - +import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -49,14 +49,13 @@ import com.cloud.host.HostStats; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.StorageStats; import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.secondary.SecondaryStorageVmManager; @@ -84,7 +83,7 @@ public class StatsCollector { @Inject private HostDao _hostDao; @Inject private UserVmDao _userVmDao; @Inject private VolumeDao _volsDao; - @Inject private StoragePoolDao _storagePoolDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private StorageManager _storageManager; @Inject private StoragePoolHostDao _storagePoolHostDao; @Inject private SecondaryStorageVmManager _ssvmMgr; @@ -301,7 +300,7 @@ public class StatsCollector { GetStorageStatsCommand command = new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath()); long poolId = pool.getId(); try { - Answer answer = _storageManager.sendToPool(pool, command); + Answer answer = _storageManager.sendToPool(pool.getId(), command); if (answer != null && answer.getResult()) { storagePoolStats.put(pool.getId(), (StorageStats)answer); diff --git a/server/src/com/cloud/storage/LocalStoragePoolListener.java b/server/src/com/cloud/storage/LocalStoragePoolListener.java index 8d5875e9d76..a04c79cf435 100755 --- a/server/src/com/cloud/storage/LocalStoragePoolListener.java +++ 
b/server/src/com/cloud/storage/LocalStoragePoolListener.java @@ -16,8 +16,6 @@ // under the License. package com.cloud.storage; -import java.util.List; - import javax.inject.Inject; import org.apache.log4j.Logger; @@ -30,20 +28,14 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupStorageCommand; import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.capacity.Capacity; -import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; -import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.ConnectionException; import com.cloud.host.HostVO; import com.cloud.host.Status; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; public class LocalStoragePoolListener implements Listener { private final static Logger s_logger = Logger.getLogger(LocalStoragePoolListener.class); @@ -91,63 +83,7 @@ public class LocalStoragePoolListener implements Listener { return; } - DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); - if (dc == null || !dc.isLocalStorageEnabled()) { - return; - } - - try { - StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(), pInfo.getUuid()); - if(pool == null && host.getHypervisorType() == HypervisorType.VMware) { - // perform run-time upgrade. 
In versions prior to 2.2.12, there is a bug that we don't save local datastore info (host path is empty), this will cause us - // not able to distinguish multiple local datastores that may be available on the host, to support smooth migration, we - // need to perform runtime upgrade here - if(pInfo.getHostPath().length() > 0) { - pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid()); - } - } - - if (pool == null) { - - long poolId = _storagePoolDao.getNextInSequence(Long.class, "id"); - String name = cmd.getName() == null ? (host.getName() + " Local Storage") : cmd.getName(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - pool = new StoragePoolVO(poolId, name, pInfo.getUuid(), pInfo.getPoolType(), host.getDataCenterId(), - host.getPodId(), pInfo.getAvailableBytes(), pInfo.getCapacityBytes(), pInfo.getHost(), 0, - pInfo.getHostPath()); - pool.setClusterId(host.getClusterId()); - pool.setStatus(StoragePoolStatus.Up); - _storagePoolDao.persist(pool, pInfo.getDetails()); - StoragePoolHostVO poolHost = new StoragePoolHostVO(pool.getId(), host.getId(), pInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - txn.commit(); - } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); - pool.setPath(pInfo.getHostPath()); - pool.setAvailableBytes(pInfo.getAvailableBytes()); - pool.setCapacityBytes(pInfo.getCapacityBytes()); - _storagePoolDao.update(pool.getId(), pool); - if (pInfo.getDetails() != null) { - _storagePoolDao.updateDetails(pool.getId(), pInfo.getDetails()); - } - StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(pool.getId(), host.getId(), pInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - } - - 
_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - txn.commit(); - } - } catch (Exception e) { - s_logger.warn("Unable to setup the local storage pool for " + host, e); - throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); - } + this._storageMgr.createLocalStorage(host, pInfo); } diff --git a/server/src/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/com/cloud/storage/OCFS2ManagerImpl.java index 6bbeec40551..5c526a69e4f 100755 --- a/server/src/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/com/cloud/storage/OCFS2ManagerImpl.java @@ -25,6 +25,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; diff --git a/server/src/com/cloud/storage/RegisterVolumePayload.java b/server/src/com/cloud/storage/RegisterVolumePayload.java new file mode 100644 index 00000000000..142de186e25 --- /dev/null +++ b/server/src/com/cloud/storage/RegisterVolumePayload.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +public class RegisterVolumePayload { + private final String url; + private final String checksum; + private final String format; + + public RegisterVolumePayload(String url, String checksum, String format) { + this.url = url; + this.checksum = checksum; + this.format = format; + } + + public String getUrl() { + return this.url; + } + + public String getChecksum() { + return this.checksum; + } + + public String getFormat() { + return this.format; + } +} diff --git a/server/src/com/cloud/storage/StorageManager.java b/server/src/com/cloud/storage/StorageManager.java index 97853ac76de..9213b4bf486 100755 --- a/server/src/com/cloud/storage/StorageManager.java +++ b/server/src/com/cloud/storage/StorageManager.java @@ -17,50 +17,29 @@ package com.cloud.storage; import java.util.List; +import java.util.Set; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.manager.Commands; import com.cloud.capacity.CapacityVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.ConnectionException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; -import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.Volume.Event; -import com.cloud.storage.Volume.Type; -import 
com.cloud.user.Account; import com.cloud.utils.Pair; -import com.cloud.utils.component.Manager; -import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -public interface StorageManager extends StorageService, Manager { - boolean canVmRestartOnAnotherServer(long vmId); - - /** Returns the absolute path of the specified ISO - * @param templateId - the ID of the template that represents the ISO - * @param datacenterId - * @return absolute ISO path - */ - public Pair getAbsoluteIsoPath(long templateId, long dataCenterId); - - /** - * Returns the URL of the secondary storage host - * @param zoneId - * @return URL - */ - public String getSecondaryStorageURL(long zoneId); +public interface StorageManager extends StorageService { /** * Returns a comma separated list of tags for the specified storage pool * @param poolId @@ -68,67 +47,9 @@ public interface StorageManager extends StorageService, Manager { */ public String getStoragePoolTags(long poolId); - /** - * Returns the secondary storage host - * @param zoneId - * @return secondary storage host - */ - public HostVO getSecondaryStorageHost(long zoneId); + - /** - * Returns the secondary storage host - * @param zoneId - * @return secondary storage host - */ - public VMTemplateHostVO findVmTemplateHost(long templateId, StoragePool pool); - /** - * Moves a volume from its current storage pool to a storage pool with enough capacity in the specified zone, pod, or cluster - * @param volume - * @param destPoolDcId - * @param destPoolPodId - * @param destPoolClusterId - * @return VolumeVO - * @throws ConcurrentOperationException - */ - VolumeVO moveVolume(VolumeVO volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) throws ConcurrentOperationException; - - /** - * Create a volume based on the given criteria - * @param volume - * 
@param vm - * @param template - * @param dc - * @param pod - * @param clusterId - * @param offering - * @param diskOffering - * @param avoids - * @param size - * @param hyperType - * @return volume VO if success, null otherwise - */ - VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, - ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids, long size, HypervisorType hyperType); - - /** - * Marks the specified volume as destroyed in the management server database. The expunge thread will delete the volume from its storage pool. - * @param volume - * @return - */ - boolean destroyVolume(VolumeVO volume) throws ConcurrentOperationException; - - /** Create capacity entries in the op capacity table - * @param storagePool - */ - public void createCapacityEntry(StoragePoolVO storagePool); - - /** - * Checks that the volume is stored on a shared storage pool - * @param volume - * @return true if the volume is on a shared storage pool, false otherwise - */ - boolean volumeOnSharedStoragePool(VolumeVO volume); Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException; Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException; @@ -137,17 +58,6 @@ public interface StorageManager extends StorageService, Manager { Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Commands cmds) throws StorageUnavailableException; Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException; - /** - * Checks that one of the following is true: - * 1. The volume is not attached to any VM - * 2. The volume is attached to a VM that is running on a host with the KVM hypervisor, and the VM is stopped - * 3. 
The volume is attached to a VM that is running on a host with the XenServer hypervisor (the VM can be stopped or running) - * @return true if one of the above conditions is true - */ - boolean volumeInactive(VolumeVO volume); - - String getVmNameOnVolume(VolumeVO volume); - /** * Checks if a host has running VMs that are using its local storage pool. * @return true if local storage is active on the host @@ -162,31 +72,10 @@ public interface StorageManager extends StorageService, Manager { String getPrimaryStorageNameLabel(VolumeVO volume); - /** - * Allocates one volume. - * @param - * @param type - * @param offering - * @param name - * @param size - * @param template - * @param vm - * @param account - * @return VolumeVO a persisted volume. - */ - DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, T vm, Account owner); - DiskProfile allocateTemplatedVolume(Type type, String name, DiskOfferingVO offering, VMTemplateVO template, T vm, Account owner); void createCapacityEntry(StoragePoolVO storagePool, short capacityType, long allocated); - void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException; - - void release(VirtualMachineProfile profile); - - void cleanupVolumes(long vmId) throws ConcurrentOperationException; - - void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException; @@ -194,14 +83,6 @@ public interface StorageManager extends StorageService, Manager { CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, Long podId, Long zoneId); - boolean createStoragePool(long hostId, StoragePoolVO pool); - - boolean delPoolFromHost(long hostId); - - HostVO getSecondaryStorageHost(long zoneId, long tmpltId); - - List getSecondaryStorageHosts(long zoneId); - List 
ListByDataCenterHypervisor(long datacenterId, HypervisorType type); @@ -209,34 +90,34 @@ public interface StorageManager extends StorageService, Manager { StoragePoolVO findLocalStorageOnHost(long hostId); - VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, boolean readyOnly); - - boolean StorageMigration( - VirtualMachineProfile vm, - StoragePool destPool) throws ConcurrentOperationException; - - boolean stateTransitTo(Volume vol, Event event) - throws NoTransitionException; - - VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId); - Host updateSecondaryStorage(long secStorageId, String newUrl); List getUpHostsInPool(long poolId); void cleanupSecondaryStorage(boolean recurring); - VolumeVO copyVolumeFromSecToPrimary(VolumeVO volume, VMInstanceVO vm, - VMTemplateVO template, DataCenterVO dc, HostPodVO pod, - Long clusterId, ServiceOfferingVO offering, - DiskOfferingVO diskOffering, List avoids, long size, - HypervisorType hyperType) throws NoTransitionException; - - String getSupportedImageFormatForCluster(Long clusterId); HypervisorType getHypervisorTypeFromFormat(ImageFormat format); boolean storagePoolHasEnoughSpace(List volume, StoragePool pool); - boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; + + boolean registerHostListener(String providerUuid, HypervisorHostListener listener); + + StoragePool findStoragePool(DiskProfile dskCh, DataCenterVO dc, + HostPodVO pod, Long clusterId, Long hostId, VMInstanceVO vm, + Set avoid); + + + void connectHostToSharedPool(long hostId, long poolId) + throws StorageUnavailableException; + + void createCapacityEntry(long poolId); + + + + + + DataStore createLocalStorage(Host host, StoragePoolInfo poolInfo) throws ConnectionException; + } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 05e0cfe9869..f2d92e590d2 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ 
b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -17,8 +17,6 @@ package com.cloud.storage; import java.math.BigDecimal; -import java.net.Inet6Address; -import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; @@ -27,15 +25,13 @@ import java.sql.ResultSet; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.UUID; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -48,17 +44,41 @@ import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaint import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; -import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; +import 
org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.*; -import com.cloud.agent.api.storage.*; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.agent.api.to.VolumeTO; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotCommand; +import com.cloud.agent.api.CleanupSnapshotBackupCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.ManageSnapshotCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import com.cloud.agent.api.storage.DeleteVolumeCommand; + import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -72,46 +92,61 @@ import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; -import 
com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeployDestination; -import com.cloud.domain.Domain; import com.cloud.domain.dao.DomainDao; -import com.cloud.event.ActionEvent; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; + import com.cloud.event.dao.EventDao; -import com.cloud.exception.*; +import com.cloud.event.dao.UsageEventDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.ConnectionException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageUnavailableException; + import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.network.NetworkModel; -import com.cloud.offering.ServiceOffering; import com.cloud.org.Grouping; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.server.ManagementServer; import com.cloud.server.StatsCollector; -import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage.ImageFormat; import 
com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.Type; import com.cloud.storage.allocator.StoragePoolAllocator; -import com.cloud.storage.dao.*; + +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; + import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.listener.StoragePoolMonitor; import com.cloud.storage.listener.VolumeStateListener; @@ -124,23 +159,24 @@ import com.cloud.template.TemplateManager; import com.cloud.user.*; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; -import com.cloud.uservm.UserVm; -import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.*; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.*; + +import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachineManager; +import 
com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachineProfileImpl; + import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.*; @@ -173,6 +209,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject protected NetworkModel _networkMgr; @Inject + protected ServiceOfferingDao _serviceOfferingDao; + @Inject protected VolumeDao _volsDao; @Inject protected HostDao _hostDao; @@ -209,7 +247,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject protected VMInstanceDao _vmInstanceDao; @Inject - protected StoragePoolDao _storagePoolDao = null; + protected PrimaryDataStoreDao _storagePoolDao = null; @Inject protected CapacityDao _capacityDao; @Inject @@ -262,14 +300,30 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C protected ResourceTagDao _resourceTagDao; @Inject protected List _storagePoolAllocators; - @Inject ConfigurationDao _configDao; - @Inject ManagementServer _msServer; + @Inject + ConfigurationDao _configDao; + @Inject + ManagementServer _msServer; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataStoreProviderManager dataStoreProviderMgr; + @Inject + VolumeService volService; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + protected HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; - // TODO : we don't have any instantiated pool discover, disable injection temporarily + // TODO : we don't have any instantiated pool discover, disable injection + // temporarily // @Inject protected List _discoverers; - protected SearchBuilder HostTemplateStatesSearch; protected GenericSearchBuilder UpHostsInPoolSearch; protected SearchBuilder StoragePoolSearch; @@ -288,32 +342,39 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C protected BigDecimal _overProvisioningFactor = new BigDecimal(1); private 
long _maxVolumeSizeInGb; private long _serverId; - private final StateMachine2 _volStateMachine; + private int _customDiskOfferingMinSize = 1; private int _customDiskOfferingMaxSize = 1024; private double _storageUsedThreshold = 1.0d; private double _storageAllocatedThreshold = 1.0d; protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1); + private Map hostListeners = new HashMap(); private boolean _recreateSystemVmEnabled; - public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { + public boolean share(VMInstanceVO vm, List vols, HostVO host, + boolean cancelPreviousShare) throws StorageUnavailableException { // if pool is in maintenance and it is the ONLY pool available; reject - List rootVolForGivenVm = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT); + List rootVolForGivenVm = _volsDao.findByInstanceAndType( + vm.getId(), Type.ROOT); if (rootVolForGivenVm != null && rootVolForGivenVm.size() > 0) { - boolean isPoolAvailable = isPoolAvailable(rootVolForGivenVm.get(0).getPoolId()); + boolean isPoolAvailable = isPoolAvailable(rootVolForGivenVm.get(0) + .getPoolId()); if (!isPoolAvailable) { - throw new StorageUnavailableException("Can not share " + vm, rootVolForGivenVm.get(0).getPoolId()); + throw new StorageUnavailableException("Can not share " + vm, + rootVolForGivenVm.get(0).getPoolId()); } } // this check is done for maintenance mode for primary storage // if any one of the volume is unusable, we return false - // if we return false, the allocator will try to switch to another PS if available + // if we return false, the allocator will try to switch to another PS if + // available for (VolumeVO vol : vols) { if (vol.getRemoved() != null) { - s_logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + s_logger.warn("Volume id:" + vol.getId() + + " is removed, cannot share on this instance"); // not ok to share return false; } @@ -323,26 
+384,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } - @Override - public VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId) { - VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize()); - if (templateId != null) { - newVol.setTemplateId(templateId); - } else { - newVol.setTemplateId(oldVol.getTemplateId()); - } - newVol.setDeviceId(oldVol.getDeviceId()); - newVol.setInstanceId(oldVol.getInstanceId()); - newVol.setRecreatable(oldVol.isRecreatable()); - return _volsDao.persist(newVol); - } - private boolean isPoolAvailable(Long poolId) { // get list of all pools List pools = _storagePoolDao.listAll(); // if no pools or 1 pool which is in maintenance - if (pools == null || pools.size() == 0 || (pools.size() == 1 && pools.get(0).getStatus().equals(StoragePoolStatus.Maintenance))) { + if (pools == null + || pools.size() == 0 + || (pools.size() == 1 && pools.get(0).getStatus() + .equals(DataStoreStatus.Maintenance))) { return false; } else { return true; @@ -350,8 +400,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public List ListByDataCenterHypervisor(long datacenterId, HypervisorType type) { - List pools = _storagePoolDao.listByDataCenterId(datacenterId); + public List ListByDataCenterHypervisor( + long datacenterId, HypervisorType type) { + List pools = _storagePoolDao + .listByDataCenterId(datacenterId); List retPools = new ArrayList(); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { @@ -368,21 +420,33 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean isLocalStorageActiveOnHost(Long hostId) { - List storagePoolHostRefs = _storagePoolHostDao.listByHostId(hostId); + List storagePoolHostRefs = _storagePoolHostDao + 
.listByHostId(hostId); for (StoragePoolHostVO storagePoolHostRef : storagePoolHostRefs) { - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolHostRef.getPoolId()); - if (storagePool.getPoolType() == StoragePoolType.LVM || storagePool.getPoolType() == StoragePoolType.EXT) { - SearchBuilder volumeSB = _volsDao.createSearchBuilder(); - volumeSB.and("poolId", volumeSB.entity().getPoolId(), SearchCriteria.Op.EQ); - volumeSB.and("removed", volumeSB.entity().getRemoved(), SearchCriteria.Op.NULL); + StoragePoolVO PrimaryDataStoreVO = _storagePoolDao + .findById(storagePoolHostRef.getPoolId()); + if (PrimaryDataStoreVO.getPoolType() == StoragePoolType.LVM + || PrimaryDataStoreVO.getPoolType() == StoragePoolType.EXT) { + SearchBuilder volumeSB = _volsDao + .createSearchBuilder(); + volumeSB.and("poolId", volumeSB.entity().getPoolId(), + SearchCriteria.Op.EQ); + volumeSB.and("removed", volumeSB.entity().getRemoved(), + SearchCriteria.Op.NULL); - SearchBuilder activeVmSB = _vmInstanceDao.createSearchBuilder(); - activeVmSB.and("state", activeVmSB.entity().getState(), SearchCriteria.Op.IN); - volumeSB.join("activeVmSB", activeVmSB, volumeSB.entity().getInstanceId(), activeVmSB.entity().getId(), JoinBuilder.JoinType.INNER); + SearchBuilder activeVmSB = _vmInstanceDao + .createSearchBuilder(); + activeVmSB.and("state", activeVmSB.entity().getState(), + SearchCriteria.Op.IN); + volumeSB.join("activeVmSB", activeVmSB, volumeSB.entity() + .getInstanceId(), activeVmSB.entity().getId(), + JoinBuilder.JoinType.INNER); SearchCriteria volumeSC = volumeSB.create(); - volumeSC.setParameters("poolId", storagePool.getId()); - volumeSC.setJoinParameters("activeVmSB", "state", State.Starting, State.Running, State.Stopping, State.Migrating); + volumeSC.setParameters("poolId", PrimaryDataStoreVO.getId()); + volumeSC.setJoinParameters("activeVmSB", "state", + State.Starting, State.Running, State.Stopping, + State.Migrating); List volumes = _volsDao.search(volumeSC, null); if 
(volumes.size() > 0) { @@ -394,26 +458,35 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } - protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long clusterId, Long hostId, VMInstanceVO vm, final Set avoid) { + @Override + public StoragePool findStoragePool(DiskProfile dskCh, + final DataCenterVO dc, HostPodVO pod, Long clusterId, Long hostId, + VMInstanceVO vm, final Set avoid) { - VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + VirtualMachineProfile profile = new VirtualMachineProfileImpl( + vm); for (StoragePoolAllocator allocator : _storagePoolAllocators) { - final List poolList = allocator.allocateToPool(dskCh, profile, dc.getId(), pod.getId(), clusterId, hostId, avoid, 1); + final List poolList = allocator.allocateToPool( + dskCh, profile, dc.getId(), pod.getId(), clusterId, hostId, + avoid, 1); if (poolList != null && !poolList.isEmpty()) { - return (StoragePoolVO) poolList.get(0); + return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); } } return null; } @Override - public Answer[] sendToPool(StoragePool pool, Commands cmds) throws StorageUnavailableException { + public Answer[] sendToPool(StoragePool pool, Commands cmds) + throws StorageUnavailableException { return sendToPool(pool, null, null, cmds).second(); } @Override - public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException { - Answer[] answers = sendToPool(pool, hostIdsToTryFirst, null, new Commands(cmd)).second(); + public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, + Command cmd) throws StorageUnavailableException { + Answer[] answers = sendToPool(pool, hostIdsToTryFirst, null, + new Commands(cmd)).second(); if (answers == null) { return null; } @@ -421,7 +494,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - 
public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException { + public Answer sendToPool(StoragePool pool, Command cmd) + throws StorageUnavailableException { Answer[] answers = sendToPool(pool, new Commands(cmd)); if (answers == null) { return null; @@ -429,439 +503,27 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return answers[0]; } - @Override - public Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException { - StoragePool pool = _storagePoolDao.findById(poolId); - return sendToPool(pool, cmd); - } - - @Override - public Answer[] sendToPool(long poolId, Commands cmds) throws StorageUnavailableException { - StoragePool pool = _storagePoolDao.findById(poolId); - return sendToPool(pool, cmds); - } - - protected DiskProfile createDiskCharacteristics(VolumeVO volume, VMTemplateVO template, DataCenterVO dc, DiskOfferingVO diskOffering) { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - SearchCriteria sc = HostTemplateStatesSearch.create(); - sc.setParameters("id", template.getId()); - sc.setParameters("state", com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - sc.setJoinParameters("host", "dcId", dc.getId()); - - List sss = _vmTemplateHostDao.search(sc, null); - if (sss.size() == 0) { - throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + dc.getId()); - } - VMTemplateHostVO ss = sss.get(0); - - return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(), - diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? 
template.getId() : null); - } else { - return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); - } - } - - @Override - public boolean canVmRestartOnAnotherServer(long vmId) { - List vols = _volsDao.findCreatedByInstance(vmId); - for (VolumeVO vol : vols) { - if (!vol.isRecreatable() && !vol.getPoolType().isShared()) { - return false; - } - } - return true; - } - - @DB - protected Pair createVolumeFromSnapshot(VolumeVO volume, SnapshotVO snapshot) { - VolumeVO createdVolume = null; - Long volumeId = volume.getId(); - - String volumeFolder = null; - - try { - stateTransitTo(volume, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - s_logger.debug(e.toString()); - return null; - } - // Create the Volume object and save it so that we can return it to the user - Account account = _accountDao.findById(volume.getAccountId()); - - final HashSet poolsToAvoid = new HashSet(); - StoragePoolVO pool = null; - boolean success = false; - Set podsToAvoid = new HashSet(); - Pair pod = null; - String volumeUUID = null; - String details = null; - - DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId()); - DataCenterVO dc = _dcDao.findById(volume.getDataCenterId()); - DiskProfile dskCh = new DiskProfile(volume, diskOffering, snapshot.getHypervisorType()); - - int retry = 0; - // Determine what pod to store the volume in - while ((pod = _resourceMgr.findPod(null, null, dc, account.getId(), podsToAvoid)) != null) { - podsToAvoid.add(pod.first().getId()); - // Determine what storage pool to store the volume in - while ((pool = findStoragePool(dskCh, dc, pod.first(), null, null, null, poolsToAvoid)) != null) { - poolsToAvoid.add(pool); - volumeFolder = pool.getPath(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to create volume 
from snapshotId: " + snapshot.getId() + " on storage pool " + pool.getName()); - } - - // Get the newly created VDI from the snapshot. - // This will return a null volumePath if it could not be created - Pair volumeDetails = createVDIFromSnapshot(UserContext.current().getCallerUserId(), snapshot, pool); - - volumeUUID = volumeDetails.first(); - details = volumeDetails.second(); - - if (volumeUUID != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume with UUID " + volumeUUID + " was created on storage pool " + pool.getName()); - } - success = true; - break; // break out of the "find storage pool" loop - } else { - retry++; - if (retry >= 3) { - _volsDao.expunge(volumeId); - String msg = "Unable to create volume from snapshot " + snapshot.getId() + " after retrying 3 times, due to " + details; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - - } - } - s_logger.warn("Unable to create volume on pool " + pool.getName() + ", reason: " + details); - } - - if (success) { - break; // break out of the "find pod" loop - } - } - - if (!success) { - _volsDao.expunge(volumeId); - String msg = "Unable to create volume from snapshot " + snapshot.getId() + " due to " + details; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - - } - - createdVolume = _volsDao.findById(volumeId); - - try { - if (success) { - createdVolume.setPodId(pod.first().getId()); - createdVolume.setPoolId(pool.getId()); - createdVolume.setPoolType(pool.getPoolType()); - createdVolume.setFolder(volumeFolder); - createdVolume.setPath(volumeUUID); - createdVolume.setDomainId(account.getDomainId()); - stateTransitTo(createdVolume, Volume.Event.OperationSucceeded); - } - } catch (NoTransitionException e) { - s_logger.debug("Failed to update volume state: " + e.toString()); - return null; - } - - return new Pair(createdVolume, details); - } - - @Override - public boolean stateTransitTo(Volume vol, Volume.Event event) throws NoTransitionException { - return 
_volStateMachine.transitTo(vol, event, null, _volsDao); - } - - protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) { - - // By default, assume failure. - VolumeVO createdVolume = null; - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); // Precondition: snapshot is not null and not removed. - - Pair volumeDetails = createVolumeFromSnapshot(volume, snapshot); - if (volumeDetails != null) { - createdVolume = volumeDetails.first(); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), - createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(), createdVolume.getDiskOfferingId(), - null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid()); - } - return createdVolume; - } - - protected Pair createVDIFromSnapshot(long userId, SnapshotVO snapshot, StoragePoolVO pool) { - String vdiUUID = null; - Long snapshotId = snapshot.getId(); - Long volumeId = snapshot.getVolumeId(); - Long dcId = snapshot.getDataCenterId(); - String secondaryStoragePoolUrl = _snapMgr.getSecondaryStorageURL(snapshot); - long accountId = snapshot.getAccountId(); - - String backedUpSnapshotUuid = snapshot.getBackupSnapshotId(); - snapshot = _snapshotDao.findById(snapshotId); - if (snapshot.getVersion().trim().equals("2.1")) { - VolumeVO volume = _volsDao.findByIdIncludingRemoved(volumeId); - if (volume == null) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to unable to find orignal volume:" + volumeId + ", try it later "); - } - if (volume.getTemplateId() == null) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); - } else { - VMTemplateVO template = _templateDao.findByIdIncludingRemoved(volume.getTemplateId()); - if (template == null) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to unalbe to find orignal template :" + volume.getTemplateId() + ", try it later "); - } - Long templateId = 
template.getId(); - Long tmpltAccountId = template.getAccountId(); - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to this snapshot is being used, try it later "); - } - UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, secondaryStoragePoolUrl, dcId, accountId, volumeId, templateId, tmpltAccountId, null, snapshot.getBackupSnapshotId(), - snapshot.getName(), "2.1"); - Answer answer = null; - try { - answer = sendToPool(pool, cmd); - } catch (StorageUnavailableException e) { - } finally { - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } - if ((answer != null) && answer.getResult()) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); - } else { - return new Pair(null, "Unable to upgrade snapshot from 2.1 to 2.2 for " + snapshot.getId()); - } - } - } - String basicErrMsg = "Failed to create volume from " + snapshot.getName() + " on pool " + pool; - try { - if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { - _snapshotMgr.downloadSnapshotsFromSwift(snapshot); - } else if (snapshot.getS3Id() != null && snapshot.getS3Id() != 0) { - _snapshotMgr.downloadSnapshotsFromS3(snapshot); - } - CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new CreateVolumeFromSnapshotCommand(pool, secondaryStoragePoolUrl, dcId, accountId, volumeId, - backedUpSnapshotUuid, snapshot.getName(), _createVolumeFromSnapshotWait); - CreateVolumeFromSnapshotAnswer answer; - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException("failed to create volume from " + snapshotId + " due to this snapshot is being used, try it later "); - } - answer = (CreateVolumeFromSnapshotAnswer) sendToPool(pool, createVolumeFromSnapshotCommand); - if (answer != null && answer.getResult()) { - vdiUUID = answer.getVdi(); - } else { - s_logger.error(basicErrMsg + " due to " + ((answer == null) ? 
"null" : answer.getDetails())); - throw new CloudRuntimeException(basicErrMsg); - } - } catch (StorageUnavailableException e) { - s_logger.error(basicErrMsg); - } finally { - if (snapshot.getSwiftId() != null) { - _snapshotMgr.deleteSnapshotsDirForVolume(secondaryStoragePoolUrl, dcId, accountId, volumeId); - } - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } - return new Pair(vdiUUID, basicErrMsg); - } - - - @Override - @DB - public VolumeVO copyVolumeFromSecToPrimary(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids, long size, HypervisorType hyperType) throws NoTransitionException { - - final HashSet avoidPools = new HashSet(avoids); - DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); - dskCh.setHyperType(vm.getHypervisorType()); - // Find a suitable storage to create volume on - StoragePoolVO destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools); - - // Copy the volume from secondary storage to the destination storage pool - stateTransitTo(volume, Event.CopyRequested); - VolumeHostVO volumeHostVO = _volumeHostDao.findByVolumeId(volume.getId()); - HostVO secStorage = _hostDao.findById(volumeHostVO.getHostId()); - String secondaryStorageURL = secStorage.getStorageUrl(); - String[] volumePath = volumeHostVO.getInstallPath().split("/"); - String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; - - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volumeUUID, destPool, secondaryStorageURL, false, _copyvolumewait); - CopyVolumeAnswer cvAnswer; - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); - } catch (StorageUnavailableException e1) { - stateTransitTo(volume, Event.CopyFailed); - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - if (cvAnswer == 
null || !cvAnswer.getResult()) { - stateTransitTo(volume, Event.CopyFailed); - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - Transaction txn = Transaction.currentTxn(); - txn.start(); - volume.setPath(cvAnswer.getVolumePath()); - volume.setFolder(destPool.getPath()); - volume.setPodId(destPool.getPodId()); - volume.setPoolId(destPool.getId()); - volume.setPodId(destPool.getPodId()); - stateTransitTo(volume, Event.CopySucceeded); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), - null, volume.getSize(), Volume.class.getName(), volume.getUuid()); - _volumeHostDao.remove(volumeHostVO.getId()); - txn.commit(); - return volume; - - } - - @Override - @DB - public VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids, long size, HypervisorType hyperType) { - StoragePoolVO pool = null; - final HashSet avoidPools = new HashSet(avoids); - - try { - stateTransitTo(volume, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - s_logger.debug("Unable to update volume state: " + e.toString()); - return null; - } - - if (diskOffering != null && diskOffering.isCustomized()) { - diskOffering.setDiskSize(size); - } - DiskProfile dskCh = null; - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - dskCh = createDiskCharacteristics(volume, template, dc, offering); - } else { - dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); - } - - dskCh.setHyperType(hyperType); - - VolumeTO created = null; - int retry = _retry; - while (--retry >= 0) { - created = null; - - long podId = pod.getId(); - pod = _podDao.findById(podId); - if (pod == null) { - 
s_logger.warn("Unable to find pod " + podId + " when create volume " + volume.getName()); - break; - } - - pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools); - if (pool == null) { - s_logger.warn("Unable to find storage poll when create volume " + volume.getName()); - break; - } - - avoidPools.add(pool); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create " + volume + " on " + pool); - } - - CreateCommand cmd = null; - VMTemplateStoragePoolVO tmpltStoredOn = null; - - for (int i = 0; i < 2; i++) { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - if (pool.getPoolType() == StoragePoolType.CLVM) { - //prepareISOForCreate does what we need, which is to tell us where the template is - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - continue; - } - HostVO secondaryStorageHost = _hostDao.findById(tmpltHostOn.getHostId()); - String tmpltHostUrl = secondaryStorageHost.getStorageUrl(); - String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); - cmd = new CreateCommand(dskCh, fullTmpltUrl, new StorageFilerTO(pool)); - } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - continue; - } - cmd = new CreateCommand(dskCh, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); - } - } else { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO == template.getFormat()) { - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - throw new CloudRuntimeException("Did not find ISO in secondry storage in zone " + pool.getDataCenterId()); - } - } - cmd = new CreateCommand(dskCh, new StorageFilerTO(pool)); - } - - try { - Answer answer = sendToPool(pool, cmd); - if (answer != null && answer.getResult()) { - created = ((CreateAnswer) answer).getVolume(); - break; - } - - if 
(tmpltStoredOn != null && answer != null && (answer instanceof CreateAnswer) && ((CreateAnswer) answer).templateReloadRequested()) { - if (!_tmpltMgr.resetTemplateDownloadStateOnPool(tmpltStoredOn.getId())) { - break; // break out of template-redeploy retry loop - } - } else { - break; - } - } catch (StorageUnavailableException e) { - s_logger.debug("Storage unavailable for " + pool.getId()); - break; // break out of template-redeploy retry loop - } - } - - if (created != null) { - break; - } - - s_logger.debug("Retrying the create because it failed on pool " + pool); - } - - if (created == null) { - return null; - } else { - volume.setFolder(pool.getPath()); - volume.setPath(created.getPath()); - volume.setSize(created.getSize()); - volume.setPoolType(pool.getPoolType()); - volume.setPoolId(pool.getId()); - volume.setPodId(pod.getId()); - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Unable to update volume state: " + e.toString()); - return null; - } - return volume; - } - } - - public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { + public Long chooseHostForStoragePool(StoragePoolVO poolVO, + List avoidHosts, boolean sendToVmResidesOn, Long vmId) { if (sendToVmResidesOn) { if (vmId != null) { VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); if (vmInstance != null) { Long hostId = vmInstance.getHostId(); - if (hostId != null && !avoidHosts.contains(vmInstance.getHostId())) { + if (hostId != null + && !avoidHosts.contains(vmInstance.getHostId())) { return hostId; } } } /* - * Can't find the vm where host resides on(vm is destroyed? or volume is detached from vm), randomly choose - * a host - * to send the cmd + * Can't find the vm where host resides on(vm is destroyed? 
or + * volume is detached from vm), randomly choose a host to send the + * cmd */ } - List poolHosts = _poolHostDao.listByHostStatus(poolVO.getId(), Status.Up); + List poolHosts = _poolHostDao.listByHostStatus( + poolVO.getId(), Status.Up); Collections.shuffle(poolHosts); if (poolHosts != null && poolHosts.size() > 0) { for (StoragePoolHostVO sphvo : poolHosts) { @@ -876,9 +538,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean configure(String name, Map params) throws ConfigurationException { - Map configs = _configDao.getConfiguration("management-server", params); - String overProvisioningFactorStr = configs.get("storage.overprovisioning.factor"); + Map configs = _configDao.getConfiguration( + "management-server", params); + + String overProvisioningFactorStr = configs + .get("storage.overprovisioning.factor"); if (overProvisioningFactorStr != null) { _overProvisioningFactor = new BigDecimal(overProvisioningFactorStr); } @@ -886,94 +551,128 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _retry = NumbersUtil.parseInt(configs.get(Config.StartRetry.key()), 10); _pingInterval = NumbersUtil.parseInt(configs.get("ping.interval"), 60); _hostRetry = NumbersUtil.parseInt(configs.get("host.retry"), 2); - _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); - s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); + _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt( + configs.get("pool.acquisition.wait.seconds"), 1800); + s_logger.info("pool.acquisition.wait.seconds is configured as " + + _storagePoolAcquisitionWaitSeconds + " seconds"); - _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao), true, false, true); + _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, + _storagePoolDao), true, false, true); String 
storageCleanupEnabled = configs.get("storage.cleanup.enabled"); - _storageCleanupEnabled = (storageCleanupEnabled == null) ? true : Boolean.parseBoolean(storageCleanupEnabled); + _storageCleanupEnabled = (storageCleanupEnabled == null) ? true + : Boolean.parseBoolean(storageCleanupEnabled); - String value = _configDao.getValue(Config.CreateVolumeFromSnapshotWait.toString()); - _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue())); + String value = _configDao.getValue(Config.CreateVolumeFromSnapshotWait + .toString()); + _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CreateVolumeFromSnapshotWait + .getDefaultValue())); value = _configDao.getValue(Config.CopyVolumeWait.toString()); - _copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); value = _configDao.getValue(Config.RecreateSystemVmEnabled.key()); _recreateSystemVmEnabled = Boolean.parseBoolean(value); value = _configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); - _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value)); + _templateCleanupEnabled = (value == null ? 
true : Boolean + .parseBoolean(value)); String time = configs.get("storage.cleanup.interval"); _storageCleanupInterval = NumbersUtil.parseInt(time, 86400); - String storageUsedThreshold = _configDao.getValue(Config.StorageCapacityDisableThreshold.key()); + String storageUsedThreshold = _configDao + .getValue(Config.StorageCapacityDisableThreshold.key()); if (storageUsedThreshold != null) { _storageUsedThreshold = Double.parseDouble(storageUsedThreshold); } - String storageAllocatedThreshold = _configDao.getValue(Config.StorageAllocatedCapacityDisableThreshold.key()); + String storageAllocatedThreshold = _configDao + .getValue(Config.StorageAllocatedCapacityDisableThreshold.key()); if (storageAllocatedThreshold != null) { - _storageAllocatedThreshold = Double.parseDouble(storageAllocatedThreshold); + _storageAllocatedThreshold = Double + .parseDouble(storageAllocatedThreshold); } - String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor"); - _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f)); + String globalStorageOverprovisioningFactor = configs + .get("storage.overprovisioning.factor"); + _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat( + globalStorageOverprovisioningFactor, 2.0f)); - s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + ", interval: " + _storageCleanupInterval + ", template cleanup enabled: " + _templateCleanupEnabled); + s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + + ", interval: " + _storageCleanupInterval + + ", template cleanup enabled: " + _templateCleanupEnabled); String workers = configs.get("expunge.workers"); int wrks = NumbersUtil.parseInt(workers, 10); - _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("StorageManager-Scavenger")); + _executor = Executors.newScheduledThreadPool(wrks, + new NamedThreadFactory("StorageManager-Scavenger")); - 
_agentMgr.registerForHostEvents(ComponentContext.inject(LocalStoragePoolListener.class), true, false, false); + _agentMgr.registerForHostEvents( + ComponentContext.inject(LocalStoragePoolListener.class), true, + false, false); - String maxVolumeSizeInGbString = _configDao.getValue("storage.max.volume.size"); - _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, 2000); + String maxVolumeSizeInGbString = _configDao + .getValue("storage.max.volume.size"); + _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, + 2000); - String _customDiskOfferingMinSizeStr = _configDao.getValue(Config.CustomDiskOfferingMinSize.toString()); - _customDiskOfferingMinSize = NumbersUtil.parseInt(_customDiskOfferingMinSizeStr, Integer.parseInt(Config.CustomDiskOfferingMinSize.getDefaultValue())); + String _customDiskOfferingMinSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMinSize.toString()); + _customDiskOfferingMinSize = NumbersUtil.parseInt( + _customDiskOfferingMinSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMinSize + .getDefaultValue())); - String _customDiskOfferingMaxSizeStr = _configDao.getValue(Config.CustomDiskOfferingMaxSize.toString()); - _customDiskOfferingMaxSize = NumbersUtil.parseInt(_customDiskOfferingMaxSizeStr, Integer.parseInt(Config.CustomDiskOfferingMaxSize.getDefaultValue())); + String _customDiskOfferingMaxSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMaxSize.toString()); + _customDiskOfferingMaxSize = NumbersUtil.parseInt( + _customDiskOfferingMaxSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMaxSize + .getDefaultValue())); - HostTemplateStatesSearch = _vmTemplateHostDao.createSearchBuilder(); - HostTemplateStatesSearch.and("id", HostTemplateStatesSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); - HostTemplateStatesSearch.and("state", HostTemplateStatesSearch.entity().getDownloadState(), SearchCriteria.Op.EQ); - - SearchBuilder HostSearch = _hostDao.createSearchBuilder(); - 
HostSearch.and("dcId", HostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - - HostTemplateStatesSearch.join("host", HostSearch, HostSearch.entity().getId(), HostTemplateStatesSearch.entity().getHostId(), JoinBuilder.JoinType.INNER); - HostSearch.done(); - HostTemplateStatesSearch.done(); _serverId = _msServer.getId(); - UpHostsInPoolSearch = _storagePoolHostDao.createSearchBuilder(Long.class); - UpHostsInPoolSearch.selectField(UpHostsInPoolSearch.entity().getHostId()); + UpHostsInPoolSearch = _storagePoolHostDao + .createSearchBuilder(Long.class); + UpHostsInPoolSearch.selectField(UpHostsInPoolSearch.entity() + .getHostId()); SearchBuilder hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ); - hostSearch.and("resourceState", hostSearch.entity().getResourceState(), Op.EQ); - UpHostsInPoolSearch.join("hosts", hostSearch, hostSearch.entity().getId(), UpHostsInPoolSearch.entity().getHostId(), JoinType.INNER); - UpHostsInPoolSearch.and("pool", UpHostsInPoolSearch.entity().getPoolId(), Op.EQ); + hostSearch.and("resourceState", hostSearch.entity().getResourceState(), + Op.EQ); + UpHostsInPoolSearch.join("hosts", hostSearch, hostSearch.entity() + .getId(), UpHostsInPoolSearch.entity().getHostId(), + JoinType.INNER); + UpHostsInPoolSearch.and("pool", UpHostsInPoolSearch.entity() + .getPoolId(), Op.EQ); UpHostsInPoolSearch.done(); StoragePoolSearch = _vmInstanceDao.createSearchBuilder(); SearchBuilder volumeSearch = _volumeDao.createSearchBuilder(); - volumeSearch.and("volumeType", volumeSearch.entity().getVolumeType(), SearchCriteria.Op.EQ); - volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), SearchCriteria.Op.EQ); - StoragePoolSearch.join("vmVolume", volumeSearch, volumeSearch.entity().getInstanceId(), StoragePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + volumeSearch.and("volumeType", volumeSearch.entity().getVolumeType(), + SearchCriteria.Op.EQ); + volumeSearch.and("poolId", 
volumeSearch.entity().getPoolId(), + SearchCriteria.Op.EQ); + StoragePoolSearch.join("vmVolume", volumeSearch, volumeSearch.entity() + .getInstanceId(), StoragePoolSearch.entity().getId(), + JoinBuilder.JoinType.INNER); StoragePoolSearch.done(); LocalStorageSearch = _storagePoolDao.createSearchBuilder(); - SearchBuilder storageHostSearch = _storagePoolHostDao.createSearchBuilder(); - storageHostSearch.and("hostId", storageHostSearch.entity().getHostId(), SearchCriteria.Op.EQ); - LocalStorageSearch.join("poolHost", storageHostSearch, storageHostSearch.entity().getPoolId(), LocalStorageSearch.entity().getId(), JoinBuilder.JoinType.INNER); - LocalStorageSearch.and("type", LocalStorageSearch.entity().getPoolType(), SearchCriteria.Op.IN); + SearchBuilder storageHostSearch = _storagePoolHostDao + .createSearchBuilder(); + storageHostSearch.and("hostId", storageHostSearch.entity().getHostId(), + SearchCriteria.Op.EQ); + LocalStorageSearch.join("poolHost", storageHostSearch, + storageHostSearch.entity().getPoolId(), LocalStorageSearch + .entity().getId(), JoinBuilder.JoinType.INNER); + LocalStorageSearch.and("type", LocalStorageSearch.entity() + .getPoolType(), SearchCriteria.Op.IN); LocalStorageSearch.done(); Volume.State.getStateMachine().registerListener( new VolumeStateListener()); @@ -981,159 +680,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } - public String getRandomVolumeName() { - return UUID.randomUUID().toString(); - } - - @Override - public boolean volumeOnSharedStoragePool(VolumeVO volume) { - Long poolId = volume.getPoolId(); - if (poolId == null) { - return false; - } else { - StoragePoolVO pool = _storagePoolDao.findById(poolId); - - if (pool == null) { - return false; - } else { - return pool.isShared(); - } - } - } - - @Override - public boolean volumeInactive(VolumeVO volume) { - Long vmId = volume.getInstanceId(); - if (vmId != null) { - UserVm vm = _userVmDao.findById(vmId); - if (vm == null) { - 
return true; - } - State state = vm.getState(); - if (state.equals(State.Stopped) || state.equals(State.Destroyed)) { - return true; - } - } - return false; - } - - @Override - public String getVmNameOnVolume(VolumeVO volume) { - Long vmId = volume.getInstanceId(); - if (vmId != null) { - VMInstanceVO vm = _vmInstanceDao.findById(vmId); - - if (vm == null) { - return null; - } - return vm.getInstanceName(); - } - return null; - } - - @Override - public Pair getAbsoluteIsoPath(long templateId, long dataCenterId) { - String isoPath = null; - - List storageHosts = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.SecondaryStorage, dataCenterId); - if (storageHosts != null) { - for (HostVO storageHost : storageHosts) { - List templateHostVOs = _vmTemplateHostDao.listByTemplateHostStatus(templateId, storageHost.getId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED ); - if (templateHostVOs != null && !templateHostVOs.isEmpty()) { - VMTemplateHostVO tmpHostVO = templateHostVOs.get(0); - isoPath = storageHost.getStorageUrl() + "/" + tmpHostVO.getInstallPath(); - return new Pair(isoPath, storageHost.getStorageUrl()); - } - } - } - s_logger.warn("Unable to find secondary storage in zone id=" + dataCenterId); - return null; - } - - @Override - public String getSecondaryStorageURL(long zoneId) { - // Determine the secondary storage URL - HostVO secondaryStorageHost = getSecondaryStorageHost(zoneId); - - if (secondaryStorageHost == null) { - return null; - } - - return secondaryStorageHost.getStorageUrl(); - } - - @Override - public HostVO getSecondaryStorageHost(long zoneId, long tmpltId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - return null; - } - for (HostVO host : hosts) { - VMTemplateHostVO tmpltHost = _vmTemplateHostDao.findByHostTemplate(host.getId(), tmpltId); - if (tmpltHost != null && !tmpltHost.getDestroyed() && tmpltHost.getDownloadState() == 
VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - return host; - } - } - return null; - } - - @Override - public VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, boolean readyOnly) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - return null; - } - VMTemplateHostVO inProgress = null; - VMTemplateHostVO other = null; - for (HostVO host : hosts) { - VMTemplateHostVO tmpltHost = _vmTemplateHostDao.findByHostTemplate(host.getId(), tmpltId); - if (tmpltHost != null && !tmpltHost.getDestroyed()) { - if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - return tmpltHost; - } else if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { - inProgress = tmpltHost; - } else { - other = tmpltHost; - } - } - } - if (inProgress != null) { - return inProgress; - } - return other; - } - - @Override - public HostVO getSecondaryStorageHost(long zoneId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); - if (hosts.isEmpty()) { - return null; - } - } - - int size = hosts.size(); - Random rn = new Random(); - int index = rn.nextInt(size); - return hosts.get(index); - } - - @Override - public List getSecondaryStorageHosts(long zoneId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); - if (hosts.isEmpty()) { - return new ArrayList(); - } - } - return hosts; - } - + @Override public String getStoragePoolTags(long poolId) { - return _configMgr.listToCsvTags(_storagePoolDao.searchForStoragePoolDetails(poolId, "true")); + return _configMgr.listToCsvTags(_storagePoolDao + .searchForStoragePoolDetails(poolId, "true")); } @Override @@ -1141,7 +692,8 @@ public class 
StorageManagerImpl extends ManagerBase implements StorageManager, C if (_storageCleanupEnabled) { Random generator = new Random(); int initialDelay = generator.nextInt(_storageCleanupInterval); - _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), initialDelay, _storageCleanupInterval, TimeUnit.SECONDS); + _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), + initialDelay, _storageCleanupInterval, TimeUnit.SECONDS); } else { s_logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); } @@ -1157,22 +709,97 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } + + @DB + @Override + public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws ConnectionException { - protected StorageManagerImpl() { - _volStateMachine = Volume.State.getStateMachine(); + DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); + if (dc == null || !dc.isLocalStorageEnabled()) { + return null; + } + DataStore store = null; + try { + StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(), pInfo.getUuid()); + if(pool == null && host.getHypervisorType() == HypervisorType.VMware) { + // perform run-time upgrade. 
In versions prior to 2.2.12, there is a bug that we don't save local datastore info (host path is empty), this will cause us + // not able to distinguish multiple local datastores that may be available on the host, to support smooth migration, we + // need to perform runtime upgrade here + if(pInfo.getHostPath().length() > 0) { + pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid()); + } + } + DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + if (pool == null) { + Map params = new HashMap(); + String name = (host.getName() + " Local Storage"); + params.put("zoneId", host.getDataCenterId()); + params.put("clusterId", host.getClusterId()); + params.put("podId", host.getPodId()); + params.put("url", pInfo.getPoolType().toString() + "://" + pInfo.getHost() + "/" + pInfo.getHostPath()); + params.put("name", name); + params.put("localStorage", true); + params.put("details", pInfo.getDetails()); + params.put("uuid", pInfo.getUuid()); + params.put("providerId", provider.getId()); + + store = lifeCycle.initialize(params); + } else { + store = (DataStore) dataStoreMgr.getDataStore(pool.getId(), + DataStoreRole.Primary); + } + + HostScope scope = new HostScope(host.getId()); + lifeCycle.attachHost(store, scope, pInfo); + } catch (Exception e) { + s_logger.warn("Unable to setup the local storage pool for " + host, e); + throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); + } + + return (DataStore) dataStoreMgr.getDataStore(store.getId(), + DataStoreRole.Primary); } @Override @SuppressWarnings("rawtypes") - public StoragePoolVO createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { - Long clusterId = cmd.getClusterId(); - Long podId = cmd.getPodId(); - Map ds = cmd.getDetails(); 
+ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) + throws ResourceInUseException, IllegalArgumentException, + UnknownHostException, ResourceUnavailableException { + String providerUuid = cmd.getStorageProviderUuid(); + DataStoreProvider storeProvider = dataStoreProviderMgr + .getDataStoreProviderByUuid(providerUuid); - if (clusterId != null && podId == null) { - throw new InvalidParameterValueException("Cluster id requires pod id"); + if (storeProvider == null) { + storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); + throw new InvalidParameterValueException( + "invalid storage provider uuid" + providerUuid); } + Long clusterId = cmd.getClusterId(); + Long podId = cmd.getPodId(); + Long zoneId = cmd.getZoneId(); + + ScopeType scopeType = ScopeType.CLUSTER; + String scope = cmd.getScope(); + if (scope != null) { + try { + scopeType = Enum.valueOf(ScopeType.class, scope); + } catch (Exception e) { + throw new InvalidParameterValueException("invalid scope" + + scope); + } + } + + if (scopeType == ScopeType.CLUSTER && clusterId == null) { + throw new InvalidParameterValueException( + "cluster id can't be null, if scope is cluster"); + } else if (scopeType == ScopeType.ZONE && zoneId == null) { + throw new InvalidParameterValueException( + "zone id can't be null, if scope is zone"); + } + + Map ds = cmd.getDetails(); Map details = new HashMap(); if (ds != null) { Collection detailsCollection = ds.values(); @@ -1182,233 +809,69 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C Iterator it2 = d.entrySet().iterator(); while (it2.hasNext()) { Map.Entry entry = (Map.Entry) it2.next(); - details.put((String) entry.getKey(), (String) entry.getValue()); + details.put((String) entry.getKey(), + (String) entry.getValue()); } } } - // verify input parameters - Long zoneId = cmd.getZoneId(); DataCenterVO zone = _dcDao.findById(cmd.getZoneId()); if (zone == null) { - throw new 
InvalidParameterValueException("unable to find zone by id " + zoneId); + throw new InvalidParameterValueException( + "unable to find zone by id " + zoneId); } // Check if zone is disabled Account account = UserContext.current().getCaller(); - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && !_accountMgr.isRootAdmin(account.getType())) { + throw new PermissionDeniedException( + "Cannot perform this operation, Zone is currently disabled: " + + zoneId); } - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, clusterId, podId, zoneId); - if (allHosts.isEmpty()) { - throw new ResourceUnavailableException("No host up to associate a storage pool with in cluster " + clusterId, Pod.class, podId); - } - URI uri = null; + Map params = new HashMap(); + params.put("zoneId", zone.getId()); + params.put("clusterId", clusterId); + params.put("podId", podId); + params.put("url", cmd.getUrl()); + params.put("tags", cmd.getTags()); + params.put("name", cmd.getStoragePoolName()); + params.put("details", details); + params.put("providerId", storeProvider.getId()); + + DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + DataStore store = null; try { - uri = new URI(UriUtils.encodeURIComponent(cmd.getUrl())); - if (uri.getScheme() == null) { - throw new InvalidParameterValueException("scheme is null " + cmd.getUrl() + ", add nfs:// as a prefix"); - } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - String uriHost = uri.getHost(); - String uriPath = uri.getPath(); - if (uriHost == null || uriPath == null || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { - throw new InvalidParameterValueException("host or path is null, should be 
nfs://hostname/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException("host or path is null, should be sharedmountpoint://localhost/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("rbd")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException("host or path is null, should be rbd://hostname/pool"); - } + store = lifeCycle.initialize(params); + + if (scopeType == ScopeType.CLUSTER) { + ClusterScope clusterScope = new ClusterScope(clusterId, podId, + zoneId); + lifeCycle.attachCluster(store, clusterScope); + } else if (scopeType == ScopeType.ZONE) { + ZoneScope zoneScope = new ZoneScope(zoneId); + lifeCycle.attachZone(store, zoneScope); } - } catch (URISyntaxException e) { - throw new InvalidParameterValueException(cmd.getUrl() + " is not a valid uri"); + } catch (Exception e) { + s_logger.debug("Failed to add data store", e); + throw new CloudRuntimeException("Failed to add data store", e); } - String tags = cmd.getTags(); - if (tags != null) { - String[] tokens = tags.split(","); - - for (String tag : tokens) { - tag = tag.trim(); - if (tag.length() == 0) { - continue; - } - details.put(tag, "true"); - } - } - - String scheme = uri.getScheme(); - String storageHost = uri.getHost(); - String hostPath = uri.getPath(); - String userInfo = uri.getUserInfo(); - int port = uri.getPort(); - StoragePoolVO pool = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("createPool Params @ scheme - " + scheme + " storageHost - " + storageHost + " hostPath - " + hostPath + " port - " + port); - } - if (scheme.equalsIgnoreCase("nfs")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, storageHost, port, hostPath); - if (clusterId == null) { - throw new IllegalArgumentException("NFS need to have clusters specified for XenServers"); - } - } 
else if (scheme.equalsIgnoreCase("file")) { - if (port == -1) { - port = 0; - } - pool = new StoragePoolVO(StoragePoolType.Filesystem, "localhost", 0, hostPath); - } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { - pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("clvm")) { - pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, hostPath.replaceFirst("/", "")); - } else if (scheme.equalsIgnoreCase("rbd")) { - if (port == -1) { - port = 6789; - } - pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, port, hostPath.replaceFirst("/", ""), userInfo); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - pool = new StoragePoolVO(StoragePoolType.PreSetup, storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("iscsi")) { - String[] tokens = hostPath.split("/"); - int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); - if (port == -1) { - port = 3260; - } - if (lun != -1) { - if (clusterId == null) { - throw new IllegalArgumentException("IscsiLUN need to have clusters specified"); - } - hostPath.replaceFirst("/", ""); - pool = new StoragePoolVO(StoragePoolType.IscsiLUN, storageHost, port, hostPath); - } else { - for (StoragePoolDiscoverer discoverer : _discoverers) { - Map> pools; - try { - pools = discoverer.find(cmd.getZoneId(), podId, uri, details); - } catch (DiscoveryException e) { - throw new IllegalArgumentException("Not enough information for discovery " + uri, e); - } - if (pools != null) { - Map.Entry> entry = pools.entrySet().iterator().next(); - pool = entry.getKey(); - details = entry.getValue(); - break; - } - } - } - } else if (scheme.equalsIgnoreCase("iso")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, port, hostPath); - } else if (scheme.equalsIgnoreCase("vmfs")) { - pool = new StoragePoolVO(StoragePoolType.VMFS, "VMFS datastore: " + hostPath, 0, hostPath); - } else if 
(scheme.equalsIgnoreCase("ocfs2")) { - port = 7777; - pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", port, hostPath); - } else { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + uri); - } - - if (pool == null) { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + uri); - } - - List pools = _storagePoolDao.listPoolByHostPath(storageHost, hostPath); - if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { - Long oldPodId = pools.get(0).getPodId(); - throw new ResourceInUseException("Storage pool " + uri + " already in use by another pod (id=" + oldPodId + ")", "StoragePool", uri.toASCIIString()); - } - - long poolId = _storagePoolDao.getNextInSequence(Long.class, "id"); - String uuid = null; - if (scheme.equalsIgnoreCase("sharedmountpoint") || scheme.equalsIgnoreCase("clvm")) { - uuid = UUID.randomUUID().toString(); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - uuid = hostPath.replace("/", ""); - } else { - uuid = UUID.nameUUIDFromBytes(new String(storageHost + hostPath).getBytes()).toString(); - } - - List spHandles = _storagePoolDao.findIfDuplicatePoolsExistByUUID(uuid); - if ((spHandles != null) && (spHandles.size() > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another active pool with the same uuid already exists"); - } - throw new ResourceInUseException("Another active pool with the same uuid already exists"); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("In createPool Setting poolId - " + poolId + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " + podId + " poolName - " + cmd.getStoragePoolName()); - } - - pool.setId(poolId); - pool.setUuid(uuid); - pool.setDataCenterId(cmd.getZoneId()); - pool.setPodId(podId); - pool.setName(cmd.getStoragePoolName()); - pool.setClusterId(clusterId); - 
pool.setStatus(StoragePoolStatus.Up); - pool = _storagePoolDao.persist(pool, details); - - if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, pool)) { - s_logger.warn("Can not create storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } - - boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), pool); - if (success) { - break; - } - } - if (!success) { - s_logger.warn("Can not create storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } - s_logger.debug("In createPool Adding the pool to each of the hosts"); - List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - connectHostToSharedPool(h.getId(), pool); - poolHosts.add(h); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h + " and " + pool, e); - } - } - - if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } else { - createCapacityEntry(pool); - } - return pool; + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(store.getId(), + DataStoreRole.Primary); } @Override - public StoragePoolVO updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException { + public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) + throws IllegalArgumentException { // Input validation Long id = cmd.getId(); List tags = cmd.getTags(); StoragePoolVO pool = _storagePoolDao.findById(id); if (pool == null) { - throw new IllegalArgumentException("Unable to find storage pool with ID: " + id); + throw new IllegalArgumentException( + "Unable to find storage pool with ID: " + id); } if (tags != null) { @@ -1423,835 +886,112 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolDao.updateDetails(id, 
details); } - return pool; + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(pool.getId(), + DataStoreRole.Primary); } @Override @DB public boolean deletePool(DeletePoolCmd cmd) { Long id = cmd.getId(); - boolean deleteFlag = false; boolean forced = cmd.isForced(); - // verify parameters StoragePoolVO sPool = _storagePoolDao.findById(id); if (sPool == null) { s_logger.warn("Unable to find pool:" + id); - throw new InvalidParameterValueException("Unable to find pool by id " + id); + throw new InvalidParameterValueException( + "Unable to find pool by id " + id); } - if(sPool.getStatus() != StoragePoolStatus.Maintenance){ - s_logger.warn("Unable to delete storage id: " + id +" due to it is not in Maintenance state"); - throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); + if (sPool.getStatus() != StoragePoolStatus.Maintenance) { + s_logger.warn("Unable to delete storage id: " + id + + " due to it is not in Maintenance state"); + throw new InvalidParameterValueException( + "Unable to delete storage due to it is not in Maintenance state, id: " + + id); } - if (sPool.getPoolType().equals(StoragePoolType.LVM) || sPool.getPoolType().equals(StoragePoolType.EXT)) { + if (sPool.isLocal()) { s_logger.warn("Unable to delete local storage id:" + id); - throw new InvalidParameterValueException("Unable to delete local storage id: " + id); + throw new InvalidParameterValueException( + "Unable to delete local storage id: " + id); } Pair vlms = _volsDao.getCountAndTotalByPool(id); if (forced) { if (vlms.first() > 0) { - Pair nonDstrdVlms = _volsDao.getNonDestroyedCountAndTotalByPool(id); + Pair nonDstrdVlms = _volsDao + .getNonDestroyedCountAndTotalByPool(id); if (nonDstrdVlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated " + - "non-destroyed vols for this pool"); + throw new CloudRuntimeException("Cannot delete pool " + + sPool.getName() + 
" as there are associated " + + "non-destroyed vols for this pool"); } - //force expunge non-destroyed volumes + // force expunge non-destroyed volumes List vols = _volsDao.listVolumesToBeDestroyed(); for (VolumeVO vol : vols) { - expungeVolume(vol, true); + AsyncCallFuture future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId())); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("expunge volume failed" + vol.getId(), e); + } catch (ExecutionException e) { + s_logger.debug("expunge volume failed" + vol.getId(), e); + } } } } else { // Check if the pool has associated volumes in the volumes table // If it does , then you cannot delete the pool if (vlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated vols" + - " for this pool"); + throw new CloudRuntimeException("Cannot delete pool " + + sPool.getName() + " as there are associated vols" + + " for this pool"); } } - // First get the host_id from storage_pool_host_ref for given pool id - StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool.getId()); + StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool + .getId()); if (lock == null) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to acquire lock when deleting StoragePool with ID: " + sPool.getId()); + s_logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + + sPool.getId()); } return false; } - // mark storage pool as removed (so it can't be used for new volumes creation), release the lock - boolean isLockReleased = false; - isLockReleased = _storagePoolDao.releaseFromLockTable(lock.getId()); + _storagePoolDao.releaseFromLockTable(lock.getId()); s_logger.trace("Released lock for storage pool " + id); - // for the given pool id, find all records in the storage_pool_host_ref - List hostPoolRecords = _storagePoolHostDao.listByPoolId(id); - Transaction txn = Transaction.currentTxn(); - try { - 
// if not records exist, delete the given pool (base case) - if (hostPoolRecords.size() == 0) { - - txn.start(); - sPool.setUuid(null); - _storagePoolDao.update(id, sPool); - _storagePoolDao.remove(id); - deletePoolStats(id); - txn.commit(); - - deleteFlag = true; - return true; - } else { - // Remove the SR associated with the Xenserver - for (StoragePoolHostVO host : hostPoolRecords) { - DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(sPool); - final Answer answer = _agentMgr.easySend(host.getHostId(), deleteCmd); - - if (answer != null && answer.getResult()) { - deleteFlag = true; - break; - } - } - } - } finally { - if (deleteFlag) { - // now delete the storage_pool_host_ref and storage_pool records - txn.start(); - for (StoragePoolHostVO host : hostPoolRecords) { - _storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId()); - } - sPool.setUuid(null); - _storagePoolDao.update(id, sPool); - _storagePoolDao.remove(id); - deletePoolStats(id); - // Delete op_host_capacity entries - _capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, id); - txn.commit(); - - s_logger.debug("Storage pool id=" + id + " is removed successfully"); - return true; - } else { - // alert that the storage cleanup is required - s_logger.warn("Failed to Delete storage pool id: " + id); - _alertMgr.sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, sPool.getDataCenterId(), sPool.getPodId(), "Unable to delete storage pool id= " + id, - "Delete storage pool command failed. 
Please check logs."); - } - - if (lock != null && !isLockReleased) { - _storagePoolDao.releaseFromLockTable(lock.getId()); - } - } + DataStoreProvider storeProvider = dataStoreProviderMgr + .getDataStoreProviderById(sPool.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + lifeCycle.deleteDataStore(id); return false; - - } - - @DB - private boolean deletePoolStats(Long poolId) { - CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, CapacityVO.CAPACITY_TYPE_STORAGE); - CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (capacity1 != null) { - _capacityDao.remove(capacity1.getId()); - } - - if (capacity2 != null) { - _capacityDao.remove(capacity2.getId()); - } - - txn.commit(); - return true; - } @Override - public boolean createStoragePool(long hostId, StoragePoolVO pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " + hostId); - if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN - && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint - && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM) { - s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); - return false; - } - CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); - if (answer != null && answer.getResult()) { - return true; - } else { - _storagePoolDao.expunge(pool.getId()); - String msg = ""; - if (answer != null) { - msg = "Can not create storage pool through host " + hostId + " due to " + 
answer.getDetails(); - s_logger.warn(msg); - } else { - msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); - } - throw new CloudRuntimeException(msg); - } - } - - @Override - public boolean delPoolFromHost(long hostId) { - List poolHosts = _poolHostDao.listByHostIdIncludingRemoved(hostId); - for (StoragePoolHostVO poolHost : poolHosts) { - s_logger.debug("Deleting pool " + poolHost.getPoolId() + " from host " + hostId); - _poolHostDao.remove(poolHost.getId()); - } - return true; - } - - public void connectHostToSharedPool(long hostId, StoragePoolVO pool) throws StorageUnavailableException { - assert (pool.getPoolType().isShared()) : "Now, did you actually read the name of this method?"; + public void connectHostToSharedPool(long hostId, long poolId) + throws StorageUnavailableException { + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + assert (pool.isShared()) : "Now, did you actually read the name of this method?"; s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); - - if (answer == null) { - throw new StorageUnavailableException("Unable to get an answer to the modify storage pool command", pool.getId()); - } - - if (!answer.getResult()) { - String msg = "Add host failed due to ModifyStoragePoolCommand failed" + answer.getDetails(); - _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new StorageUnavailableException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails(), pool.getId()); - } - - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=" + pool.getId() + "Host=" + hostId; - ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; - - StoragePoolHostVO poolHost = _poolHostDao.findByPoolHost(pool.getId(), hostId); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); - _poolHostDao.persist(poolHost); - } else { - poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); - } - pool.setAvailableBytes(mspAnswer.getPoolInfo().getAvailableBytes()); - pool.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes()); - _storagePoolDao.update(pool.getId(), pool); - - s_logger.info("Connection established between " + pool + " host + " + hostId); + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(pool.getStorageProviderId()); + HypervisorHostListener listener = hostListeners.get(provider.getUuid()); + listener.hostConnect(hostId, pool.getId()); } + + - @Override - public VolumeVO moveVolume(VolumeVO volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) throws ConcurrentOperationException { + - // Find a destination storage pool with the specified criteria - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); - dskCh.setHyperType(dataDiskHyperType); - DataCenterVO destPoolDataCenter = _dcDao.findById(destPoolDcId); - HostPodVO destPoolPod = _podDao.findById(destPoolPodId); - StoragePoolVO destPool = findStoragePool(dskCh, destPoolDataCenter, destPoolPod, destPoolClusterId, null, null, new HashSet()); - String secondaryStorageURL = getSecondaryStorageURL(volume.getDataCenterId()); + + - if (destPool == null) { - throw 
new CloudRuntimeException("Failed to find a storage pool with enough capacity to move the volume to."); - } - if (secondaryStorageURL == null) { - throw new CloudRuntimeException("Failed to find secondary storage."); - } - - List vols = new ArrayList(); - vols.add(volume); - migrateVolumes(vols, destPool); - return _volsDao.findById(volume.getId()); - } - - - /* - * Upload the volume to secondary storage. - * - */ - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume", async = true) - public VolumeVO uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException{ - Account caller = UserContext.current().getCaller(); - long ownerId = cmd.getEntityOwnerId(); - Long zoneId = cmd.getZoneId(); - String volumeName = cmd.getVolumeName(); - String url = cmd.getUrl(); - String format = cmd.getFormat(); - - validateVolume(caller, ownerId, zoneId, volumeName, url, format); - VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, url, cmd.getFormat()); - _downloadMonitor.downloadVolumeToStorage(volume, zoneId, url, cmd.getChecksum(), ImageFormat.valueOf(format.toUpperCase())); - return volume; - } - - private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) throws ResourceAllocationException{ - - // permission check - _accountMgr.checkAccess(caller, null, true, _accountMgr.getActiveAccountById(ownerId)); - - // Check that the resource limit for volumes won't be exceeded - _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.volume); - - - // Verify that zone exists - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); - } - - // Check if zone is disabled - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { - throw new 
PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); - } - - if (url.toLowerCase().contains("file://")) { - throw new InvalidParameterValueException("File:// type urls are currently unsupported"); - } - - ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase()); - if (imgfmt == null) { - throw new IllegalArgumentException("Image format is incorrect " + format + ". Supported formats are " + EnumUtils.listValues(ImageFormat.values())); - } - - String userSpecifiedName = volumeName; - if (userSpecifiedName == null) { - userSpecifiedName = getRandomVolumeName(); - } - if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) - &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) - &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) - &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) - &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) - &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz")) - &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ - throw new InvalidParameterValueException("Please specify a valid " + format.toLowerCase()); - } - - if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith(".vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) - || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith(".qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) - || (format.equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith(".ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) - || (format.equalsIgnoreCase("raw") && 
(!url.toLowerCase().endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { - throw new InvalidParameterValueException("Please specify a valid URL. URL:" + url + " is an invalid for the format " + format.toLowerCase()); - } - validateUrl(url); - - return false; - } - - private String validateUrl(String url){ - try { - URI uri = new URI(url); - if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") - && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { - throw new IllegalArgumentException("Unsupported scheme for url: " + url); - } - - int port = uri.getPort(); - if (!(port == 80 || port == 443 || port == -1)) { - throw new IllegalArgumentException("Only ports 80 and 443 are allowed"); - } - String host = uri.getHost(); - try { - InetAddress hostAddr = InetAddress.getByName(host); - if (hostAddr.isAnyLocalAddress() || hostAddr.isLinkLocalAddress() || hostAddr.isLoopbackAddress() || hostAddr.isMulticastAddress()) { - throw new IllegalArgumentException("Illegal host specified in url"); - } - if (hostAddr instanceof Inet6Address) { - throw new IllegalArgumentException("IPV6 addresses not supported (" + hostAddr.getHostAddress() + ")"); - } - } catch (UnknownHostException uhe) { - throw new IllegalArgumentException("Unable to resolve " + host); - } - - return uri.toString(); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Invalid URL " + url); - } - - } - - private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); - volume.setPoolId(null); - volume.setDataCenterId(zoneId); - volume.setPodId(null); - volume.setAccountId(ownerId); - volume.setDomainId(((caller == null) ? 
Domain.ROOT_DOMAIN : caller.getDomainId())); - long diskOfferingId = _diskOfferingDao.findByUniqueName("Cloud.com-Custom").getId(); - volume.setDiskOfferingId(diskOfferingId); - //volume.setSize(size); - volume.setInstanceId(null); - volume.setUpdated(new Date()); - volume.setDomainId((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId()); - - volume = _volsDao.persist(volume); - try { - stateTransitTo(volume, Event.UploadRequested); - } catch (NoTransitionException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - UserContext.current().setEventDetails("Volume Id: " + volume.getId()); - - // Increment resource count during allocation; if actual creation fails, decrement it - _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); - - txn.commit(); - return volume; - } - - - /* - * Just allocate a volume in the database, don't send the createvolume cmd to hypervisor. The volume will be finally - * created - * only when it's attached to a VM. - */ - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) - public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException { - // FIXME: some of the scheduled event stuff might be missing here... 
- Account caller = UserContext.current().getCaller(); - - long ownerId = cmd.getEntityOwnerId(); - - // permission check - _accountMgr.checkAccess(caller, null, true, _accountMgr.getActiveAccountById(ownerId)); - - // Check that the resource limit for volumes won't be exceeded - _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.volume); - - Long zoneId = cmd.getZoneId(); - Long diskOfferingId = null; - DiskOfferingVO diskOffering = null; - Long size = null; - - // validate input parameters before creating the volume - if ((cmd.getSnapshotId() == null && cmd.getDiskOfferingId() == null) || (cmd.getSnapshotId() != null && cmd.getDiskOfferingId() != null)) { - throw new InvalidParameterValueException("Either disk Offering Id or snapshot Id must be passed whilst creating volume"); - } - - if (cmd.getSnapshotId() == null) {// create a new volume - - diskOfferingId = cmd.getDiskOfferingId(); - size = cmd.getSize(); - Long sizeInGB = size; - if (size != null) { - if (size > 0) { - size = size * 1024 * 1024 * 1024; // user specify size in GB - } else { - throw new InvalidParameterValueException("Disk size must be larger than 0"); - } - } - - // Check that the the disk offering is specified - diskOffering = _diskOfferingDao.findById(diskOfferingId); - if ((diskOffering == null) || diskOffering.getRemoved() != null || !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) { - throw new InvalidParameterValueException("Please specify a valid disk offering."); - } - - if (diskOffering.isCustomized()) { - if (size == null) { - throw new InvalidParameterValueException("This disk offering requires a custom size specified"); - } - if ((sizeInGB < _customDiskOfferingMinSize) || (sizeInGB > _customDiskOfferingMaxSize)) { - throw new InvalidParameterValueException("Volume size: " + sizeInGB + "GB is out of allowed range. 
Max: " + _customDiskOfferingMaxSize + " Min:" + _customDiskOfferingMinSize); - } - } - - if (!diskOffering.isCustomized() && size != null) { - throw new InvalidParameterValueException("This disk offering does not allow custom size"); - } - - if (diskOffering.getDomainId() == null) { - // do nothing as offering is public - } else { - _configMgr.checkDiskOfferingAccess(caller, diskOffering); - } - - if (diskOffering.getDiskSize() > 0) { - size = diskOffering.getDiskSize(); - } - - if (!validateVolumeSizeRange(size)) {// convert size from mb to gb for validation - throw new InvalidParameterValueException("Invalid size for custom volume creation: " + size + " ,max volume size is:" + _maxVolumeSizeInGb); - } - } else { // create volume from snapshot - Long snapshotId = cmd.getSnapshotId(); - SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId); - if (snapshotCheck == null) { - throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); - } - - if (snapshotCheck.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); - } - - diskOfferingId = snapshotCheck.getDiskOfferingId(); - diskOffering = _diskOfferingDao.findById(diskOfferingId); - zoneId = snapshotCheck.getDataCenterId(); - size = snapshotCheck.getSize(); // ; disk offering is used for tags purposes - - // check snapshot permissions - _accountMgr.checkAccess(caller, null, true, snapshotCheck); - - /* - * // bug #11428. 
Operation not supported if vmware and snapshots parent volume = ROOT - * if(snapshotCheck.getHypervisorType() == HypervisorType.VMware - * && _volumeDao.findByIdIncludingRemoved(snapshotCheck.getVolumeId()).getVolumeType() == Type.ROOT){ - * throw new UnsupportedServiceException("operation not supported, snapshot with id " + snapshotId + - * " is created from ROOT volume"); - * } - * - */ - } - - // Verify that zone exists - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); - } - - // Check if zone is disabled - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); - } - - // If local storage is disabled then creation of volume with local disk offering not allowed - if (!zone.isLocalStorageEnabled() && diskOffering.getUseLocalStorage()) { - throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it"); - } - - // Check that there is appropriate primary storage pool in the specified zone - List storagePools = _storagePoolDao.listByDataCenterId(zoneId); - boolean appropriatePoolExists = false; - if (!diskOffering.getUseLocalStorage()) { - for (StoragePoolVO storagePool : storagePools) { - if (storagePool.isShared()) { - appropriatePoolExists = true; - break; - } - } - } else { - for (StoragePoolVO storagePool : storagePools) { - if (storagePool.isLocal()) { - appropriatePoolExists = true; - break; - } - } - } - - // Check that there is at least one host in the specified zone - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Host.Type.Routing, zoneId); - if (hosts.isEmpty()) { - throw new InvalidParameterValueException("There is no workable host in data center id " + zoneId + ", please check 
hosts' agent status and see if they are disabled"); - } - - if (!appropriatePoolExists) { - String storageType = diskOffering.getUseLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); - throw new InvalidParameterValueException("Volume's disk offering uses " + storageType + " storage, please specify a zone that has at least one " + storageType + " primary storage pool."); - } - - String userSpecifiedName = cmd.getVolumeName(); - if (userSpecifiedName == null) { - userSpecifiedName = getRandomVolumeName(); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); - volume.setPoolId(null); - volume.setDataCenterId(zoneId); - volume.setPodId(null); - volume.setAccountId(ownerId); - volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId())); - volume.setDiskOfferingId(diskOfferingId); - volume.setSize(size); - volume.setInstanceId(null); - volume.setUpdated(new Date()); - volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller.getDomainId()); - - volume = _volsDao.persist(volume); - if(cmd.getSnapshotId() == null){ - //for volume created from snapshot, create usage event after volume creation - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size, - Volume.class.getName(), volume.getUuid()); - } - - UserContext.current().setEventDetails("Volume Id: " + volume.getId()); - - // Increment resource count during allocation; if actual creation fails, decrement it - _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); - - txn.commit(); - - return volume; - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) - public VolumeVO createVolume(CreateVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); - boolean created = false; - - try { - if (cmd.getSnapshotId() != null) { - volume = createVolumeFromSnapshot(volume, cmd.getSnapshotId()); - if (volume.getState() == Volume.State.Ready) { - created = true; - } - return volume; - } else { - _volsDao.update(volume.getId(), volume); - created = true; - } - - return _volsDao.findById(volume.getId()); - } finally { - if (!created) { - s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume); - } - } - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) - public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); - Long newSize = null; - boolean shrinkOk = cmd.getShrinkOk(); - boolean success = false; - DiskOfferingVO diskOffering = 
_diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskOfferingVO newDiskOffering = null; - - newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); - - /* Volumes with no hypervisor have never been assigned, and can be resized by recreating. - perhaps in the future we can just update the db entry for the volume */ - if(_volsDao.getHypervisorType(volume.getId()) == HypervisorType.None){ - throw new InvalidParameterValueException("Can't resize a volume that has never been attached, not sure which hypervisor type. Recreate volume to resize."); - } - - /* Only works for KVM/Xen/VMware for now */ - if(_volsDao.getHypervisorType(volume.getId()) != HypervisorType.KVM - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.XenServer - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.VMware){ - throw new InvalidParameterValueException("Cloudstack currently only supports volumes marked as KVM, XenServer or VMware hypervisor for resize"); - } - - if (volume == null) { - throw new InvalidParameterValueException("No such volume"); - } - - if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("Volume should be in ready state before attempting a resize"); - } - - if (!volume.getVolumeType().equals(Volume.Type.DATADISK)) { - throw new InvalidParameterValueException("Can only resize DATA volumes"); - } - - /* figure out whether or not a new disk offering or size parameter is required, get the correct size value */ - if (newDiskOffering == null) { - if (diskOffering.isCustomized()) { - newSize = cmd.getSize(); - - if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); - } - - newSize = ( newSize << 30 ); - } else { - throw new InvalidParameterValueException("current offering" + volume.getDiskOfferingId() + " cannot be resized, need to specify a disk offering"); - } - } else { - - if (newDiskOffering.getRemoved() != null || 
!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) { - throw new InvalidParameterValueException("Disk offering ID is missing or invalid"); - } - - if(diskOffering.getTags() != null) { - if(!newDiskOffering.getTags().equals(diskOffering.getTags())){ - throw new InvalidParameterValueException("Tags on new and old disk offerings must match"); - } - } else if (newDiskOffering.getTags() != null ){ - throw new InvalidParameterValueException("There are no tags on current disk offering, new disk offering needs to have no tags"); - } - - if (newDiskOffering.getDomainId() == null) { - // do nothing as offering is public - } else { - _configMgr.checkDiskOfferingAccess(UserContext.current().getCaller(), newDiskOffering); - } - - if (newDiskOffering.isCustomized()) { - newSize = cmd.getSize(); - - if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); - } - - newSize = ( newSize << 30 ); - } else { - newSize = newDiskOffering.getDiskSize(); - } - } - - if (newSize == null) { - throw new InvalidParameterValueException("could not detect a size parameter or fetch one from the diskofferingid parameter"); - } - - if (!validateVolumeSizeRange(newSize)) { - throw new InvalidParameterValueException("Requested size out of range"); - } - - /* does the caller have the authority to act on this volume? */ - _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, volume); - - UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); - - StoragePool pool = _storagePoolDao.findById(volume.getPoolId()); - long currentSize = volume.getSize(); - - /* lets make certain they (think they) know what they're doing if they - want to shrink, by forcing them to provide the shrinkok parameter. 
This will - be checked again at the hypervisor level where we can see the actual disk size */ - if (currentSize > newSize && !shrinkOk) { - throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " - + newSize + " would shrink the volume, need to sign off by supplying the shrinkok parameter with value of true"); - } - - /* get a list of hosts to send the commands to, try the system the - associated vm is running on first, then the last known place it ran. - If not attached to a userVm, we pass 'none' and resizevolume.sh is - ok with that since it only needs the vm name to live resize */ - long[] hosts = null; - String instanceName = "none"; - if (userVm != null) { - instanceName = userVm.getInstanceName(); - if(userVm.getHostId() != null) { - hosts = new long[] { userVm.getHostId() }; - } else if(userVm.getLastHostId() != null) { - hosts = new long[] { userVm.getLastHostId() }; - } - - /*Xen only works offline, SR does not support VDI.resizeOnline*/ - if(_volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer - && ! userVm.getState().equals(State.Stopped)) { - throw new InvalidParameterValueException("VM must be stopped or disk detached in order to resize with the Xen HV"); - } - } - - try { - try { - stateTransitTo(volume, Volume.Event.ResizeRequested); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Unable to change volume state for resize: " + etrans.toString()); - } - - ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(pool), - currentSize, newSize, shrinkOk, instanceName); - ResizeVolumeAnswer answer = (ResizeVolumeAnswer) sendToPool(pool, hosts, resizeCmd); - - /* need to fetch/store new volume size in database. 
This value comes from - hypervisor rather than trusting that a success means we have a volume of the - size we requested */ - if (answer != null && answer.getResult()) { - long finalSize = answer.getNewSize(); - s_logger.debug("Resize: volume started at size " + currentSize + " and ended at size " + finalSize); - volume.setSize(finalSize); - if (newDiskOffering != null) { - volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); - } - _volsDao.update(volume.getId(), volume); - - success = true; - return volume; - } else if (answer != null) { - s_logger.debug("Resize: returned '" + answer.getDetails() + "'"); - } - } catch (StorageUnavailableException e) { - s_logger.debug("volume failed to resize: "+e); - return null; - } finally { - if(success) { - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Failed to change volume state: " + etrans.toString()); - } - } else { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Failed to change volume state: " + etrans.toString()); - } - } - } - return null; - } - - @Override - @DB - public boolean destroyVolume(VolumeVO volume) throws ConcurrentOperationException { - try { - if (!stateTransitTo(volume, Volume.Event.DestroyRequested)) { - throw new ConcurrentOperationException("Failed to transit to destroyed state"); - } - } catch (NoTransitionException e) { - s_logger.debug("Unable to destoy the volume: " + e.toString()); - return false; - } - - long volumeId = volume.getId(); - - // Delete the recurring snapshot policies for this volume. 
- _snapshotMgr.deletePoliciesForVolume(volumeId); - - Long instanceId = volume.getInstanceId(); - VMInstanceVO vmInstance = null; - if (instanceId != null) { - vmInstance = _vmInstanceDao.findById(instanceId); - } - - if (instanceId == null || (vmInstance.getType().equals(VirtualMachine.Type.User))) { - // Decrement the resource count for volumes belonging user VM's only - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume); - // Log usage event for volumes belonging user VM's only - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), - Volume.class.getName(), volume.getUuid()); - } - - try { - if (!stateTransitTo(volume, Volume.Event.OperationSucceeded)) { - throw new ConcurrentOperationException("Failed to transit state"); - - } - } catch (NoTransitionException e) { - s_logger.debug("Unable to change volume state: " + e.toString()); - return false; - } - - return true; - - } - - @Override - public void createCapacityEntry(StoragePoolVO storagePool) { - createCapacityEntry(storagePool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 0); - } + @Override public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, long allocated) { @@ -2384,17 +1124,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } + cleanupSecondaryStorage(recurring); List vols = _volsDao.listVolumesToBeDestroyed(); for (VolumeVO vol : vols) { try { - expungeVolume(vol, false); + + this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId())); + } catch (Exception e) { s_logger.warn("Unable to destroy " + vol.getId(), e); } } + // remove snapshots in Error state List snapshots = _snapshotDao.listAllByStatus(Snapshot.State.Error); for (SnapshotVO snapshotVO : snapshots) { @@ -2405,11 +1149,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - } finally { + }finally { 
scanLock.unlock(); } - } - } finally { + } + }finally { scanLock.releaseRef(); } } @@ -2430,7 +1174,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + hostId + " due to " + e.getMessage()); + s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + + hostId + " due to " + e.getMessage()); return null; } @@ -2451,7 +1196,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); + s_logger.debug("failed to get all snapshots for a volume " + + volumeId + " due to " + e.getMessage()); return null; } } @@ -2461,42 +1207,66 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public void cleanupSecondaryStorage(boolean recurring) { try { // Cleanup templates in secondary storage hosts - List secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInAllZones(); + List secondaryStorageHosts = _ssvmMgr + .listSecondaryStorageHostsInAllZones(); for (HostVO secondaryStorageHost : secondaryStorageHosts) { try { long hostId = secondaryStorageHost.getId(); - List destroyedTemplateHostVOs = _vmTemplateHostDao.listDestroyed(hostId); - s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateHostVOs.size() + " templates to cleanup on secondary storage host: " + List destroyedTemplateHostVOs = _vmTemplateHostDao + .listDestroyed(hostId); + s_logger.debug("Secondary storage garbage collector found " + + destroyedTemplateHostVOs.size() + + " templates to cleanup on secondary storage host: " + secondaryStorageHost.getName()); for (VMTemplateHostVO destroyedTemplateHostVO : destroyedTemplateHostVOs) { - if (!_tmpltMgr.templateIsDeleteable(destroyedTemplateHostVO)) { + if 
(!_tmpltMgr + .templateIsDeleteable(destroyedTemplateHostVO)) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Not deleting template at: " + destroyedTemplateHostVO); + s_logger.debug("Not deleting template at: " + + destroyedTemplateHostVO); } continue; } if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting template host: " + destroyedTemplateHostVO); + s_logger.debug("Deleting template host: " + + destroyedTemplateHostVO); } - String installPath = destroyedTemplateHostVO.getInstallPath(); + String installPath = destroyedTemplateHostVO + .getInstallPath(); if (installPath != null) { - Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, new DeleteTemplateCommand(secondaryStorageHost.getStorageUrl(), destroyedTemplateHostVO.getInstallPath())); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, + new DeleteTemplateCommand( + secondaryStorageHost + .getStorageUrl(), + destroyedTemplateHostVO + .getInstallPath())); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + destroyedTemplateHostVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); + s_logger.debug("Failed to delete " + + destroyedTemplateHostVO + + " due to " + + ((answer == null) ? 
"answer is null" + : answer.getDetails())); } else { - _vmTemplateHostDao.remove(destroyedTemplateHostVO.getId()); - s_logger.debug("Deleted template at: " + destroyedTemplateHostVO.getInstallPath()); + _vmTemplateHostDao + .remove(destroyedTemplateHostVO.getId()); + s_logger.debug("Deleted template at: " + + destroyedTemplateHostVO + .getInstallPath()); } } else { - _vmTemplateHostDao.remove(destroyedTemplateHostVO.getId()); + _vmTemplateHostDao.remove(destroyedTemplateHostVO + .getId()); } } } catch (Exception e) { - s_logger.warn("problem cleaning up templates in secondary storage " + secondaryStorageHost, e); + s_logger.warn( + "problem cleaning up templates in secondary storage " + + secondaryStorageHost, e); } } @@ -2511,9 +1281,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (Long volumeId : vIDs) { boolean lock = false; try { - VolumeVO volume = _volsDao.findByIdIncludingRemoved(volumeId); + VolumeVO volume = _volsDao + .findByIdIncludingRemoved(volumeId); if (volume.getRemoved() == null) { - volume = _volsDao.acquireInLockTable(volumeId, 10); + volume = _volsDao.acquireInLockTable(volumeId, + 10); if (volume == null) { continue; } @@ -2523,16 +1295,25 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (snapshots == null) { continue; } - CleanupSnapshotBackupCommand cmd = new CleanupSnapshotBackupCommand(secondaryStorageHost.getStorageUrl(), secondaryStorageHost.getDataCenterId(), volume.getAccountId(), - volumeId, snapshots); + CleanupSnapshotBackupCommand cmd = new CleanupSnapshotBackupCommand( + secondaryStorageHost.getStorageUrl(), + secondaryStorageHost.getDataCenterId(), + volume.getAccountId(), volumeId, snapshots); - Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, cmd); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, cmd); if ((answer == null) || !answer.getResult()) { - String details = "Failed to cleanup snapshots for volume " + 
volumeId + " due to " + (answer == null ? "null" : answer.getDetails()); + String details = "Failed to cleanup snapshots for volume " + + volumeId + + " due to " + + (answer == null ? "null" : answer + .getDetails()); s_logger.warn(details); } } catch (Exception e1) { - s_logger.warn("problem cleaning up snapshots in secondary storage " + secondaryStorageHost, e1); + s_logger.warn( + "problem cleaning up snapshots in secondary storage " + + secondaryStorageHost, e1); } finally { if (lock) { _volsDao.releaseFromLockTable(volumeId); @@ -2540,40 +1321,63 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } catch (Exception e2) { - s_logger.warn("problem cleaning up snapshots in secondary storage " + secondaryStorageHost, e2); + s_logger.warn( + "problem cleaning up snapshots in secondary storage " + + secondaryStorageHost, e2); } } - //CleanUp volumes on Secondary Storage. + // CleanUp volumes on Secondary Storage. for (HostVO secondaryStorageHost : secondaryStorageHosts) { try { long hostId = secondaryStorageHost.getId(); - List destroyedVolumeHostVOs = _volumeHostDao.listDestroyed(hostId); - s_logger.debug("Secondary storage garbage collector found " + destroyedVolumeHostVOs.size() + " templates to cleanup on secondary storage host: " + List destroyedVolumeHostVOs = _volumeHostDao + .listDestroyed(hostId); + s_logger.debug("Secondary storage garbage collector found " + + destroyedVolumeHostVOs.size() + + " templates to cleanup on secondary storage host: " + secondaryStorageHost.getName()); for (VolumeHostVO destroyedVolumeHostVO : destroyedVolumeHostVOs) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting volume host: " + destroyedVolumeHostVO); + s_logger.debug("Deleting volume host: " + + destroyedVolumeHostVO); } - String installPath = destroyedVolumeHostVO.getInstallPath(); + String installPath = destroyedVolumeHostVO + .getInstallPath(); if (installPath != null) { - Answer answer = 
_agentMgr.sendToSecStorage(secondaryStorageHost, new DeleteVolumeCommand(secondaryStorageHost.getStorageUrl(), destroyedVolumeHostVO.getInstallPath())); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, + new DeleteVolumeCommand( + secondaryStorageHost + .getStorageUrl(), + destroyedVolumeHostVO + .getInstallPath())); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + destroyedVolumeHostVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); + s_logger.debug("Failed to delete " + + destroyedVolumeHostVO + + " due to " + + ((answer == null) ? "answer is null" + : answer.getDetails())); } else { - _volumeHostDao.remove(destroyedVolumeHostVO.getId()); - s_logger.debug("Deleted volume at: " + destroyedVolumeHostVO.getInstallPath()); + _volumeHostDao.remove(destroyedVolumeHostVO + .getId()); + s_logger.debug("Deleted volume at: " + + destroyedVolumeHostVO + .getInstallPath()); } } else { - _volumeHostDao.remove(destroyedVolumeHostVO.getId()); + _volumeHostDao + .remove(destroyedVolumeHostVO.getId()); } } - }catch (Exception e2) { - s_logger.warn("problem cleaning up volumes in secondary storage " + secondaryStorageHost, e2); + } catch (Exception e2) { + s_logger.warn( + "problem cleaning up volumes in secondary storage " + + secondaryStorageHost, e2); } } } catch (Exception e3) { @@ -2585,235 +1389,63 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public String getPrimaryStorageNameLabel(VolumeVO volume) { Long poolId = volume.getPoolId(); - // poolId is null only if volume is destroyed, which has been checked before. + // poolId is null only if volume is destroyed, which has been checked + // before. 
assert poolId != null; - StoragePoolVO storagePoolVO = _storagePoolDao.findById(poolId); - assert storagePoolVO != null; - return storagePoolVO.getUuid(); + StoragePoolVO PrimaryDataStoreVO = _storagePoolDao + .findById(poolId); + assert PrimaryDataStoreVO != null; + return PrimaryDataStoreVO.getUuid(); } @Override @DB - public StoragePoolVO preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException { + public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance( + Long primaryStorageId) throws ResourceUnavailableException, + InsufficientCapacityException { Long userId = UserContext.current().getCallerUserId(); User user = _userDao.findById(userId); Account account = UserContext.current().getCaller(); + boolean restart = true; StoragePoolVO primaryStorage = null; - try { - // 1. Get the primary storage record and perform validation check - primaryStorage = _storagePoolDao.lockRow(primaryStorageId, true); - if (primaryStorage == null) { - String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; - s_logger.error(msg); - throw new ExecutionException(msg); - } - - List spes = _storagePoolDao.listBy(primaryStorage.getDataCenterId(), primaryStorage.getPodId(), primaryStorage.getClusterId()); - for (StoragePoolVO sp : spes) { - if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + " is already in PrepareForMaintenance mode "); - } - } - - if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString()); - } - - List hosts = 
_resourceMgr.listHostsInClusterByStatus(primaryStorage.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - primaryStorage.setStatus(StoragePoolStatus.Maintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - return _storagePoolDao.findById(primaryStorageId); - } else { - // set the pool state to prepare for maintenance - primaryStorage.setStatus(StoragePoolStatus.PrepareForMaintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - } - // remove heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, primaryStorage); - final Answer answer = _agentMgr.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false secceeded"); - } - } - } - // check to see if other ps exist - // if they do, then we can migrate over the system vms to them - // if they dont, then just stop all vms on this one - List upPools = _storagePoolDao.listByStatusInZone(primaryStorage.getDataCenterId(), StoragePoolStatus.Up); - - if (upPools == null || upPools.size() == 0) { - restart = false; - } - - // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = _volsDao.findByPoolId(primaryStorageId); - - // 3. 
Enqueue to the work queue - for (VolumeVO volume : allVolumes) { - VMInstanceVO vmInstance = _vmInstanceDao.findById(volume.getInstanceId()); - - if (vmInstance == null) { - continue; - } - - // enqueue sp work - if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) { - - try { - StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), primaryStorageId, false, false, _serverId); - _storagePoolWorkDao.persist(work); - } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work record already exists, re-using by re-setting values"); - } - StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(primaryStorageId, vmInstance.getId()); - work.setStartedAfterMaintenance(false); - work.setStoppedForMaintenance(false); - work.setManagementServerId(_serverId); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // 4. Process the queue - List pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(primaryStorageId); - - for (StoragePoolWorkVO work : pendingWork) { - // shut down the running vms - VMInstanceVO vmInstance = _vmInstanceDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - // call the consoleproxymanager - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(consoleProxy, true, user, account)) { - String errorMsg = "There was an error stopping the console proxy id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - - if 
(_vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String errorMsg = "There was an error starting the console proxy id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type uservm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = _userVmDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(userVm, true, user, account)) { - String errorMsg = "There was an error stopping the user vm id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type secondary storage vm, call the secondary storage vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO secStrgVm = _secStrgDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(secStrgVm, true, user, account)) { - String errorMsg = "There was an error stopping the ssvm id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (_vmMgr.advanceStart(secStrgVm, null, user, account) == null) { - String errorMsg = "There was an error starting the ssvm id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - 
_storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type domain router vm, call the network manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(domR, true, user, account)) { - String errorMsg = "There was an error stopping the domain router id: " + vmInstance.getId() + " ,cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (_vmMgr.advanceStart(domR, null, user, account) == null) { - String errorMsg = "There was an error starting the domain router id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - } - - // 5. 
Update the status - primaryStorage.setStatus(StoragePoolStatus.Maintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - - return _storagePoolDao.findById(primaryStorageId); - } catch (Exception e) { - if (e instanceof ExecutionException || e instanceof ResourceUnavailableException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (ResourceUnavailableException) e; - } - if (e instanceof InvalidParameterValueException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (InvalidParameterValueException) e; - } - if (e instanceof InsufficientCapacityException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (InsufficientCapacityException) e; - } - // for everything else - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw new CloudRuntimeException(e.getMessage()); + primaryStorage = _storagePoolDao.findById(primaryStorageId); + if (primaryStorage == null) { + String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); } + + List spes = _storagePoolDao.listBy( + primaryStorage.getDataCenterId(), primaryStorage.getPodId(), + primaryStorage.getClusterId()); + for (StoragePoolVO sp : spes) { + if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { + throw new CloudRuntimeException( + "Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + + sp.getId() + + " is already in PrepareForMaintenance mode "); + } + } + + if (!primaryStorage.getStatus().equals(DataStoreStatus.Up) + && !primaryStorage.getStatus().equals( + DataStoreStatus.ErrorInMaintenance)) { + throw new InvalidParameterValueException("Primary storage with id " 
+ + primaryStorageId + + " is not ready for migration, as the status is:" + + primaryStorage.getStatus().toString()); + } + + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(primaryStorage.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + lifeCycle.maintain(primaryStorage.getId()); + + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); } private void setPoolStateToError(StoragePoolVO primaryStorage) { @@ -2823,156 +1455,46 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override @DB - public StoragePoolVO cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException { + public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance( + CancelPrimaryStorageMaintenanceCmd cmd) + throws ResourceUnavailableException { Long primaryStorageId = cmd.getId(); Long userId = UserContext.current().getCallerUserId(); User user = _userDao.findById(userId); Account account = UserContext.current().getCaller(); StoragePoolVO primaryStorage = null; - try { - Transaction txn = Transaction.currentTxn(); - txn.start(); - // 1. 
Get the primary storage record and perform validation check - primaryStorage = _storagePoolDao.lockRow(primaryStorageId, true); - if (primaryStorage == null) { - String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; - s_logger.error(msg); - throw new ExecutionException(msg); - } + primaryStorage = _storagePoolDao.findById(primaryStorageId); - if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) { - throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), - primaryStorageId); - } - - // Change the storage state back to up - primaryStorage.setStatus(StoragePoolStatus.Up); - _storagePoolDao.update(primaryStorageId, primaryStorage); - txn.commit(); - List hosts = _resourceMgr.listHostsInClusterByStatus(primaryStorage.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - return _storagePoolDao.findById(primaryStorageId); - } - // add heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, primaryStorage); - final Answer answer = _agentMgr.easySend(host.getId(), msPoolCmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add secceeded"); - } - } - } - - // 2. Get a list of pending work for this queue - List pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(primaryStorageId); - - // 3. 
work through the queue - for (StoragePoolWorkVO work : pendingWork) { - - VMInstanceVO vmInstance = _vmInstanceDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String msg = "There was an error starting the console proxy id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(ssVm, null, user, account) == null) { - String msg = "There was an error starting the ssvm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(domR, null, user, account) == null) { - String msg = "There was an error starting the domR id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - 
_storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type user vm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = _userVmDao.findById(vmInstance.getId()); - try { - if (_vmMgr.advanceStart(userVm, null, user, account) == null) { - - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } catch (StorageUnavailableException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (InsufficientCapacityException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (ConcurrentOperationException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (ExecutionException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } - } - } - return primaryStorage; - } catch (Exception e) { - setPoolStateToError(primaryStorage); - if (e instanceof ExecutionException) { - throw (ResourceUnavailableException) e; - } else if (e instanceof InvalidParameterValueException) { - throw (InvalidParameterValueException) e; - } else {// all other 
exceptions - throw new CloudRuntimeException(e.getMessage()); - } + if (primaryStorage == null) { + String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); } + + if (primaryStorage.getStatus().equals(DataStoreStatus.Up) + || primaryStorage.getStatus().equals( + DataStoreStatus.PrepareForMaintenance)) { + throw new StorageUnavailableException("Primary storage with id " + + primaryStorageId + + " is not ready to complete migration, as the status is:" + + primaryStorage.getStatus().toString(), primaryStorageId); + } + + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(primaryStorage.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + lifeCycle.cancelMaintain(primaryStorage.getId()); + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); } - private boolean sendToVmResidesOn(StoragePoolVO storagePool, Command cmd) { - ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); - if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster.getHypervisorType() == HypervisorType.VMware) + private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO, + Command cmd) { + ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO + .getClusterId()); + if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster + .getHypervisorType() == HypervisorType.VMware) && ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) { return true; } else { @@ -2980,754 +1502,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DELETE, eventDescription = "deleting volume") - public boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException { - - // Check that the volume ID is 
valid - VolumeVO volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new InvalidParameterValueException("Unable to aquire volume with ID: " + volumeId); - } - - if (!_snapshotMgr.canOperateOnVolume(volume)) { - throw new InvalidParameterValueException("There are snapshot creating on it, Unable to delete the volume"); - } - - // permission check - _accountMgr.checkAccess(caller, null, true, volume); - - // Check that the volume is not currently attached to any VM - if (volume.getInstanceId() != null) { - throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM."); - } - - // Check that volume is completely Uploaded - if (volume.getState() == Volume.State.UploadOp){ - VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume.getId()); - if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - throw new InvalidParameterValueException("Please specify a volume that is not uploading"); - } - } - - // Check that the volume is not already destroyed - if (volume.getState() != Volume.State.Destroy) { - if (!destroyVolume(volume)) { - return false; - } - } - - try { - expungeVolume(volume, false); - } catch (Exception e) { - s_logger.warn("Failed to expunge volume:", e); - return false; - } - - return true; - } - - private boolean validateVolumeSizeRange(long size) { - if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { - throw new InvalidParameterValueException("Please specify a size of at least 1 Gb."); - } else if (size > (_maxVolumeSizeInGb * 1024 * 1024 * 1024)) { - throw new InvalidParameterValueException("volume size " + size + ", but the maximum size allowed is " + _maxVolumeSizeInGb + " Gb."); - } - - return true; - } - - protected DiskProfile toDiskProfile(VolumeVO vol, DiskOfferingVO offering) { - return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), 
offering.getUseLocalStorage(), offering.isRecreatable(), - vol.getTemplateId()); - } - - @Override - public DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, T vm, Account owner) { - if (size == null) { - size = offering.getDiskSize(); - } else { - size = (size * 1024 * 1024 * 1024); - } - VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size); - if (vm != null) { - vol.setInstanceId(vm.getId()); - } - - if (type.equals(Type.ROOT)) { - vol.setDeviceId(0l); - } else { - vol.setDeviceId(1l); - } - - vol = _volsDao.persist(vol); - - // Save usage event and update resource count for user vm volumes - if (vm instanceof UserVm) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), - vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size, - Volume.class.getName(), vol.getUuid()); - _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), ResourceType.volume); - } - return toDiskProfile(vol, offering); - } - - @Override - public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOfferingVO offering, VMTemplateVO template, T vm, Account owner) { - assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really...."; - - SearchCriteria sc = HostTemplateStatesSearch.create(); - sc.setParameters("id", template.getId()); - sc.setParameters("state", com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - sc.setJoinParameters("host", "dcId", vm.getDataCenterId()); - List tsvs = _vmTemplateSwiftDao.listByTemplateId(template.getId()); - Long size = null; - if (tsvs != null && tsvs.size() > 0) { - size = tsvs.get(0).getSize(); - } - - if (size == null && _s3Mgr.isS3Enabled()) { - VMTemplateS3VO vmTemplateS3VO = _vmTemplateS3Dao.findOneByTemplateId(template.getId()); - if (vmTemplateS3VO != null) { - size = vmTemplateS3VO.getSize(); - } - } - - if (size == null) { - List 
sss = _vmTemplateHostDao.search(sc, null); - if (sss == null || sss.size() == 0) { - throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + vm.getDataCenterId()); - } - size = sss.get(0).getSize(); - } - - VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size); - if (vm != null) { - vol.setInstanceId(vm.getId()); - } - vol.setTemplateId(template.getId()); - - if (type.equals(Type.ROOT)) { - vol.setDeviceId(0l); - if (!vm.getType().equals(VirtualMachine.Type.User)) { - vol.setRecreatable(true); - } - } else { - vol.setDeviceId(1l); - } - - vol = _volsDao.persist(vol); - - // Create event and update resource count for volumes if vm is a user vm - if (vm instanceof UserVm) { - - Long offeringId = null; - - if (offering.getType() == DiskOfferingVO.Type.Disk) { - offeringId = offering.getId(); - } - - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), - vol.getDataCenterId(), vol.getId(), vol.getName(), offeringId, template.getId(), - vol.getSize(), Volume.class.getName(), vol.getUuid()); - - _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), ResourceType.volume); - } - return toDiskProfile(vol, offering); - } - - @Override - public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing " + vols.size() + " volumes for " + vm); - } - - for (VolumeVO vol : vols) { - StoragePool pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - - if (vm.getType() == VirtualMachine.Type.User) { - UserVmVO userVM = (UserVmVO) vm.getVirtualMachine(); - if (userVM.getIsoId() != null) { - Pair isoPathPair = getAbsoluteIsoPath(userVM.getIsoId(), userVM.getDataCenterId()); - if (isoPathPair != null) { - String isoPath = 
isoPathPair.first(); - VolumeTO iso = new VolumeTO(vm.getId(), Volume.Type.ISO, StoragePoolType.ISO, null, null, null, isoPath, 0, null, null); - vm.addDisk(iso); - } - } - } - } - - @DB - @Override - public Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException { - VolumeVO vol = _volsDao.findById(volumeId); - if (vol == null) { - throw new InvalidParameterValueException("Failed to find the volume id: " + volumeId); - } - - if (vol.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("Volume must be in ready state"); - } - - if (vol.getInstanceId() != null) { - throw new InvalidParameterValueException("Volume needs to be dettached from VM"); - } - - StoragePool destPool = _storagePoolDao.findById(storagePoolId); - if (destPool == null) { - throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId); - } - - if (!volumeOnSharedStoragePool(vol)) { - throw new InvalidParameterValueException("Migration of volume from local storage pool is not supported"); - } - - List vols = new ArrayList(); - vols.add(vol); - - migrateVolumes(vols, destPool); - return vol; - } - - @DB - public boolean migrateVolumes(List volumes, StoragePool destPool) throws ConcurrentOperationException { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - boolean transitResult = false; - long checkPointTaskId = -1; - try { - List volIds = new ArrayList(); - for (Volume volume : volumes) { - if (!_snapshotMgr.canOperateOnVolume((VolumeVO) volume)) { - throw new CloudRuntimeException("There are snapshots creating on this volume, can not move this volume"); - } - - try { - if (!stateTransitTo(volume, Volume.Event.MigrationRequested)) { - throw new ConcurrentOperationException("Failed to transit volume state"); - } - } catch (NoTransitionException e) { - s_logger.debug("Failed to set state into migrate: " + e.toString()); - throw new CloudRuntimeException("Failed to set state into 
migrate: " + e.toString()); - } - volIds.add(volume.getId()); - } - - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); - } - } - - // At this stage, nobody can modify volumes. Send the copyvolume command - List> destroyCmds = new ArrayList>(); - List answers = new ArrayList(); - try { - for (Volume volume : volumes) { - String secondaryStorageURL = getSecondaryStorageURL(volume.getDataCenterId()); - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true, _copyvolumewait); - CopyVolumeAnswer cvAnswer; - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(srcPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException("Failed to copy the volume from the source primary storage pool to secondary storage.", e1); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new CloudRuntimeException("Failed to copy the volume from the source primary storage pool to secondary storage."); - } - - String secondaryStorageVolumePath = cvAnswer.getVolumePath(); - - // Copy the volume from secondary storage to the destination storage - // pool - cvCmd = new CopyVolumeCommand(volume.getId(), secondaryStorageVolumePath, destPool, secondaryStorageURL, false, _copyvolumewait); - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - answers.add(cvAnswer); - destroyCmds.add(new Pair(srcPool, new DestroyCommand(srcPool, volume, null))); - } - } finally { - if (answers.size() != volumes.size()) { 
- // this means one of copying volume failed - for (Volume volume : volumes) { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " + e.toString()); - } - } - } else { - // Need a transaction, make sure all the volumes get migrated to new storage pool - txn = Transaction.currentTxn(); - txn.start(); - - transitResult = false; - try { - for (int i = 0; i < volumes.size(); i++) { - CopyVolumeAnswer answer = answers.get(i); - VolumeVO volume = (VolumeVO) volumes.get(i); - Long oldPoolId = volume.getPoolId(); - volume.setPath(answer.getVolumePath()); - volume.setFolder(destPool.getPath()); - volume.setPodId(destPool.getPodId()); - volume.setPoolId(destPool.getId()); - volume.setLastPoolId(oldPoolId); - volume.setPodId(destPool.getPodId()); - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " + e.toString()); - throw new CloudRuntimeException("Failed to change volume state: " + e.toString()); - } - } - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); - } - } - - } - } - - // all the volumes get migrated to new storage pool, need to delete the copy on old storage pool - for (Pair cmd : destroyCmds) { - try { - Answer cvAnswer = sendToPool(cmd.first(), cmd.second()); - } catch (StorageUnavailableException e) { - s_logger.debug("Unable to delete the old copy on storage pool: " + e.toString()); - } - } - return true; - } - - @Override - public boolean StorageMigration(VirtualMachineProfile vm, StoragePool destPool) throws ConcurrentOperationException { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - List volumesNeedToMigrate = new ArrayList(); - - for (VolumeVO volume : vols) { - if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() 
+ " state"); - throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - } - - if (volume.getPoolId() == destPool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); - continue; - } - - volumesNeedToMigrate.add(volume); - } - - if (volumesNeedToMigrate.isEmpty()) { - s_logger.debug("No volume need to be migrated"); - return true; - } - - return migrateVolumes(volumesNeedToMigrate, destPool); - } - - @Override - public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException { - - if (dest == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm); - } - throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm); - } - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); - } - - boolean recreate = _recreateSystemVmEnabled; - - List recreateVols = new ArrayList(vols.size()); - - for (VolumeVO vol : vols) { - StoragePool assignedPool = null; - if (dest.getStorageForDisks() != null) { - assignedPool = dest.getStorageForDisks().get(vol); - } - if (assignedPool == null && recreate) { - assignedPool = _storagePoolDao.findById(vol.getPoolId()); - - } - if (assignedPool != null || recreate) { - Volume.State state = vol.getState(); - if (state == Volume.State.Allocated || state == Volume.State.Creating) { - recreateVols.add(vol); - } else { - if (vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - recreateVols.add(vol); - } else { - if (assignedPool.getId() != vol.getPoolId()) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol); - } - DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId()); - if (diskOffering.getUseLocalStorage()) - { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Local volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - recreateVols.add(vol); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - try { - List volumesToMigrate = new ArrayList(); - volumesToMigrate.add(vol); - migrateVolumes(volumesToMigrate, assignedPool); - vm.addDisk(new VolumeTO(vol, assignedPool)); - } catch (ConcurrentOperationException e) { - throw new CloudRuntimeException("Migration of volume " + vol + " to storage pool " + assignedPool + " failed", e); - } - } - } else { - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - - } - } - } else { - if (vol.getPoolId() == null) { - throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId()); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM"); - } - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - } - - for (VolumeVO vol : recreateVols) { - VolumeVO newVol; - StoragePool existingPool = null; - if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { - existingPool = _storagePoolDao.findById(vol.getPoolId()); - s_logger.debug("existing pool: " + existingPool.getId()); 
- } - - if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) { - newVol = vol; - } else { - newVol = switchVolume(vol, vm); - // update the volume->storagePool map since volumeId has changed - if (dest.getStorageForDisks() != null && dest.getStorageForDisks().containsKey(vol)) { - StoragePool poolWithOldVol = dest.getStorageForDisks().get(vol); - dest.getStorageForDisks().put(newVol, poolWithOldVol); - dest.getStorageForDisks().remove(vol); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created new volume " + newVol + " for old volume " + vol); - } - } - - try { - stateTransitTo(newVol, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to create " + e.toString()); - } - - Pair created = createVolume(newVol, _diskOfferingDao.findById(newVol.getDiskOfferingId()), vm, vols, dest, existingPool); - - if (created == null) { - Long poolId = newVol.getPoolId(); - newVol.setPoolId(null); - try { - stateTransitTo(newVol, Volume.Event.OperationFailed); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to update the failure on a volume: " + newVol, e); - } - throw new StorageUnavailableException("Unable to create " + newVol, poolId == null ? 
-1L : poolId); - } - created.first().setDeviceId(newVol.getDeviceId().intValue()); - newVol.setFolder(created.second().getPath()); - newVol.setPath(created.first().getPath()); - newVol.setSize(created.first().getSize()); - newVol.setPoolType(created.second().getPoolType()); - newVol.setPodId(created.second().getPodId()); - try { - stateTransitTo(newVol, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to update an CREATE operation succeeded on volume " + newVol, e); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + newVol + " is created on " + created.second()); - } - - vm.addDisk(created.first()); - } - } - - @DB - protected VolumeVO switchVolume(VolumeVO existingVolume, VirtualMachineProfile vm) throws StorageUnavailableException { - Transaction txn = Transaction.currentTxn(); - - Long templateIdToUse = null; - Long volTemplateId = existingVolume.getTemplateId(); - long vmTemplateId = vm.getTemplateId(); - if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + ", updating templateId in the new Volume"); - } - templateIdToUse = vmTemplateId; - } - - txn.start(); - VolumeVO newVolume = allocateDuplicateVolume(existingVolume, templateIdToUse); - // In case of Vmware if vm reference is not removed then during root disk cleanup - // the vm also gets deleted, so remove the reference - if (vm.getHypervisorType() == HypervisorType.VMware) { - _volsDao.detachVolume(existingVolume.getId()); - } - try { - stateTransitTo(existingVolume, Volume.Event.DestroyRequested); - } catch (NoTransitionException e) { - s_logger.debug("Unable to destroy existing volume: " + e.toString()); - } - txn.commit(); - return newVolume; - - } - - public Pair createVolume(VolumeVO toBeCreated, DiskOfferingVO offering, 
VirtualMachineProfile vm, List alreadyCreated, - DeployDestination dest, StoragePool sPool) throws StorageUnavailableException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating volume: " + toBeCreated); - } - DiskProfile diskProfile = new DiskProfile(toBeCreated, offering, vm.getHypervisorType()); - - VMTemplateVO template = null; - if (toBeCreated.getTemplateId() != null) { - template = _templateDao.findById(toBeCreated.getTemplateId()); - } - - StoragePool pool = null; - if (sPool != null) { - pool = sPool; - } else { - pool = dest.getStorageForDisks().get(toBeCreated); - } - - if (pool != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create in " + pool); - } - toBeCreated.setPoolId(pool.getId()); - try { - stateTransitTo(toBeCreated, Volume.Event.OperationRetry); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to retry a create operation on volume " + toBeCreated); - } - - CreateCommand cmd = null; - VMTemplateStoragePoolVO tmpltStoredOn = null; - - for (int i = 0; i < 2; i++) { - if (template != null && template.getFormat() != Storage.ImageFormat.ISO) { - if (pool.getPoolType() == StoragePoolType.CLVM) { - //prepareISOForCreate does what we need, which is to tell us where the template is - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - s_logger.debug("cannot find template " + template.getId() + " " + template.getName()); - return null; - } - HostVO secondaryStorageHost = _hostDao.findById(tmpltHostOn.getHostId()); - String tmpltHostUrl = secondaryStorageHost.getStorageUrl(); - String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); - cmd = new CreateCommand(diskProfile, fullTmpltUrl, new StorageFilerTO(pool)); - } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - s_logger.debug("Cannot use this pool " + pool + " because we can't propagate template " + 
template); - return null; - } - cmd = new CreateCommand(diskProfile, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); - } - } else { - if (template != null && Storage.ImageFormat.ISO == template.getFormat()) { - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - throw new CloudRuntimeException("Did not find ISO in secondry storage in zone " + pool.getDataCenterId()); - } - } - cmd = new CreateCommand(diskProfile, new StorageFilerTO(pool)); - } - long[] hostIdsToTryFirst = { dest.getHost().getId() }; - Answer answer = sendToPool(pool, hostIdsToTryFirst, cmd); - if (answer.getResult()) { - CreateAnswer createAnswer = (CreateAnswer) answer; - return new Pair(createAnswer.getVolume(), pool); - } else { - if (tmpltStoredOn != null && (answer instanceof CreateAnswer) && ((CreateAnswer) answer).templateReloadRequested()) { - if (!_tmpltMgr.resetTemplateDownloadStateOnPool(tmpltStoredOn.getId())) { - break; // break out of template-redeploy retry loop - } - } else { - break; - } - } - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to create volume " + toBeCreated); - } - return null; - } - - @Override - public void release(VirtualMachineProfile profile) { - // add code here - } - - public void expungeVolume(VolumeVO vol, boolean force) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunging " + vol); - } - - //Find out if the volume is present on secondary storage - VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(vol.getId()); - if(volumeHost != null){ - if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED){ - HostVO ssHost = _hostDao.findById(volumeHost.getHostId()); - DeleteVolumeCommand dtCommand = new DeleteVolumeCommand(ssHost.getStorageUrl(), volumeHost.getInstallPath()); - Answer answer = _agentMgr.sendToSecStorage(ssHost, dtCommand); - if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + 
volumeHost + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); - return; - } - }else if(volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - s_logger.debug("Volume: " + vol.getName() + " is currently being uploaded; cant' delete it."); - throw new CloudRuntimeException("Please specify a volume that is not currently being uploaded."); - } - _volumeHostDao.remove(volumeHost.getId()); - _volumeDao.remove(vol.getId()); - return; - } - - String vmName = null; - if (vol.getVolumeType() == Type.ROOT && vol.getInstanceId() != null) { - VirtualMachine vm = _vmInstanceDao.findByIdIncludingRemoved(vol.getInstanceId()); - if (vm != null) { - vmName = vm.getInstanceName(); - } - } - - String volumePath = vol.getPath(); - Long poolId = vol.getPoolId(); - if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Marking volume that was never created as destroyed: " + vol); - } - _volsDao.remove(vol.getId()); - return; - } - - StoragePoolVO pool = _storagePoolDao.findById(poolId); - if (pool == null) { - s_logger.debug("Removing volume as storage pool is gone: " + poolId); - _volsDao.remove(vol.getId()); - return; - } - - DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); - boolean removeVolume = false; - try { - Answer answer = sendToPool(pool, cmd); - if (answer != null && answer.getResult()) { - removeVolume = true; - } else { - s_logger.info("Will retry delete of " + vol + " from " + poolId); - } - } catch (StorageUnavailableException e) { - if (force) { - s_logger.info("Storage is unavailable currently, but marking volume id=" + vol.getId() + " as expunged anyway due to force=true"); - removeVolume = true; - } else { - s_logger.info("Storage is unavailable currently. 
Will retry delete of " + vol + " from " + poolId); - } - } catch (RuntimeException ex) { - if (force) { - s_logger.info("Failed to expunge volume, but marking volume id=" + vol.getId() + " as expunged anyway " + - "due to force=true. Volume failed to expunge due to ", ex); - removeVolume = true; - } else { - throw ex; - } - } finally { - if (removeVolume) { - _volsDao.remove(vol.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume successfully expunged from " + poolId); - } - } - } - - } - - @Override - @DB - public void cleanupVolumes(long vmId) throws ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cleaning storage for vm: " + vmId); - } - List volumesForVm = _volsDao.findByInstance(vmId); - List toBeExpunged = new ArrayList(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (VolumeVO vol : volumesForVm) { - if (vol.getVolumeType().equals(Type.ROOT)) { - // This check is for VM in Error state (volume is already destroyed) - if (!vol.getState().equals(Volume.State.Destroy)) { - destroyVolume(vol); - } - toBeExpunged.add(vol); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detaching " + vol); - } - _volsDao.detachVolume(vol.getId()); - } - } - txn.commit(); - - for (VolumeVO expunge : toBeExpunged) { - expungeVolume(expunge, false); - } - } - + + + protected class StorageGarbageCollector implements Runnable { public StorageGarbageCollector() { @@ -3747,25 +1524,35 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public void onManagementNodeJoined(List nodeList, long selfNodeId) { + public void onManagementNodeJoined(List nodeList, + long selfNodeId) { // TODO Auto-generated method stub } @Override - public void onManagementNodeLeft(List nodeList, long selfNodeId) { + public void onManagementNodeLeft(List nodeList, + long selfNodeId) { for (ManagementServerHostVO vo : nodeList) { if (vo.getMsid() == _serverId) { - 
s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + vo.getMsid()); - List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); + s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + + vo.getMsid()); + List poolIds = _storagePoolWorkDao + .searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { - StoragePoolVO pool = _storagePoolDao.findById(poolId); + StoragePoolVO pool = _storagePoolDao + .findById(poolId); // check if pool is in an inconsistent state if (pool != null - && (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) || pool.getStatus().equals( - StoragePoolStatus.CancelMaintenance))) { - _storagePoolWorkDao.removePendingJobsOnMsRestart(vo.getMsid(), poolId); + && (pool.getStatus().equals( + DataStoreStatus.ErrorInMaintenance) + || pool.getStatus() + .equals(DataStoreStatus.PrepareForMaintenance) || pool + .getStatus() + .equals(DataStoreStatus.CancelMaintenance))) { + _storagePoolWorkDao.removePendingJobsOnMsRestart( + vo.getMsid(), poolId); pool.setStatus(StoragePoolStatus.ErrorInMaintenance); _storagePoolDao.update(poolId, pool); } @@ -3794,22 +1581,28 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); } - CapacityVO capacity = new CapacityVO(hostId, zoneId, null, null, 0, 0, CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE); + CapacityVO capacity = new CapacityVO(hostId, zoneId, null, null, 0, 0, + CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE); for (HostVO host : hosts) { - StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(host.getId()); + StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(host + .getId()); if (stats == null) { continue; } - capacity.setUsedCapacity(stats.getByteUsed() + capacity.getUsedCapacity()); - 
capacity.setTotalCapacity(stats.getCapacityBytes() + capacity.getTotalCapacity()); + capacity.setUsedCapacity(stats.getByteUsed() + + capacity.getUsedCapacity()); + capacity.setTotalCapacity(stats.getCapacityBytes() + + capacity.getTotalCapacity()); } return capacity; } @Override - public CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, Long podId, Long zoneId) { - SearchCriteria sc = _storagePoolDao.createSearchCriteria(); + public CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, + Long podId, Long zoneId) { + SearchCriteria sc = _storagePoolDao + .createSearchCriteria(); List pools = new ArrayList(); if (zoneId != null) { @@ -3833,59 +1626,29 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C pools = _storagePoolDao.search(sc, null); } - CapacityVO capacity = new CapacityVO(poolId, zoneId, podId, clusterId, 0, 0, CapacityVO.CAPACITY_TYPE_STORAGE); - for (StoragePoolVO storagePool : pools) { - StorageStats stats = ApiDBUtils.getStoragePoolStatistics(storagePool.getId()); + CapacityVO capacity = new CapacityVO(poolId, zoneId, podId, clusterId, + 0, 0, CapacityVO.CAPACITY_TYPE_STORAGE); + for (StoragePoolVO PrimaryDataStoreVO : pools) { + StorageStats stats = ApiDBUtils + .getStoragePoolStatistics(PrimaryDataStoreVO.getId()); if (stats == null) { continue; } - capacity.setUsedCapacity(stats.getByteUsed() + capacity.getUsedCapacity()); - capacity.setTotalCapacity(stats.getCapacityBytes() + capacity.getTotalCapacity()); + capacity.setUsedCapacity(stats.getByteUsed() + + capacity.getUsedCapacity()); + capacity.setTotalCapacity(stats.getCapacityBytes() + + capacity.getTotalCapacity()); } return capacity; } @Override - public StoragePool getStoragePool(long id) { - return _storagePoolDao.findById(id); + public PrimaryDataStoreInfo getStoragePool(long id) { + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(id, + DataStoreRole.Primary); } - @Override - public VMTemplateHostVO findVmTemplateHost(long 
templateId, StoragePool pool) { - long dcId = pool.getDataCenterId(); - Long podId = pool.getPodId(); - - List secHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(dcId); - - // FIXME, for cloudzone, the local secondary storoge - if (pool.isLocal() && pool.getPoolType() == StoragePoolType.Filesystem && secHosts.isEmpty()) { - List sphs = _storagePoolHostDao.listByPoolId(pool.getId()); - if (!sphs.isEmpty()) { - StoragePoolHostVO localStoragePoolHost = sphs.get(0); - return _templateHostDao.findLocalSecondaryStorageByHostTemplate(localStoragePoolHost.getHostId(), templateId); - } else { - return null; - } - } - - if (secHosts.size() == 1) { - VMTemplateHostVO templateHostVO = _templateHostDao.findByHostTemplate(secHosts.get(0).getId(), templateId); - return templateHostVO; - } - if (podId != null) { - List templHosts = _templateHostDao.listByTemplateStatus(templateId, dcId, podId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - if (templHosts != null && !templHosts.isEmpty()) { - Collections.shuffle(templHosts); - return templHosts.get(0); - } - } - List templHosts = _templateHostDao.listByTemplateStatus(templateId, dcId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - if (templHosts != null && !templHosts.isEmpty()) { - Collections.shuffle(templHosts); - return templHosts.get(0); - } - return null; - } + @Override @DB @@ -3900,9 +1663,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @DB public StoragePoolVO findLocalStorageOnHost(long hostId) { SearchCriteria sc = LocalStorageSearch.create(); - sc.setParameters("type", new Object[] { StoragePoolType.Filesystem, StoragePoolType.LVM }); + sc.setParameters("type", new Object[] { StoragePoolType.Filesystem, + StoragePoolType.LVM }); sc.setJoinParameters("poolHost", "hostId", hostId); - List storagePools = _storagePoolDao.search(sc, null); + List storagePools = _storagePoolDao + .search(sc, null); if (!storagePools.isEmpty()) { return storagePools.get(0); } else { @@ 
-3914,25 +1679,33 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public Host updateSecondaryStorage(long secStorageId, String newUrl) { HostVO secHost = _hostDao.findById(secStorageId); if (secHost == null) { - throw new InvalidParameterValueException("Can not find out the secondary storage id: " + secStorageId); + throw new InvalidParameterValueException( + "Can not find out the secondary storage id: " + + secStorageId); } if (secHost.getType() != Host.Type.SecondaryStorage) { - throw new InvalidParameterValueException("host: " + secStorageId + " is not a secondary storage"); + throw new InvalidParameterValueException("host: " + secStorageId + + " is not a secondary storage"); } URI uri = null; try { uri = new URI(UriUtils.encodeURIComponent(newUrl)); if (uri.getScheme() == null) { - throw new InvalidParameterValueException("uri.scheme is null " + newUrl + ", add nfs:// as a prefix"); + throw new InvalidParameterValueException("uri.scheme is null " + + newUrl + ", add nfs:// as a prefix"); } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) { - throw new InvalidParameterValueException("Your host and/or path is wrong. Make sure it's of the format nfs://hostname/path"); + if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") + || uri.getPath() == null + || uri.getPath().equalsIgnoreCase("")) { + throw new InvalidParameterValueException( + "Your host and/or path is wrong. 
Make sure it's of the format nfs://hostname/path"); } } } catch (URISyntaxException e) { - throw new InvalidParameterValueException(newUrl + " is not a valid uri"); + throw new InvalidParameterValueException(newUrl + + " is not a valid uri"); } String oldUrl = secHost.getStorageUrl(); @@ -3941,7 +1714,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { oldUri = new URI(UriUtils.encodeURIComponent(oldUrl)); if (!oldUri.getScheme().equalsIgnoreCase(uri.getScheme())) { - throw new InvalidParameterValueException("can not change old scheme:" + oldUri.getScheme() + " to " + uri.getScheme()); + throw new InvalidParameterValueException( + "can not change old scheme:" + oldUri.getScheme() + + " to " + uri.getScheme()); } } catch (URISyntaxException e) { s_logger.debug("Failed to get uri from " + oldUrl); @@ -3954,29 +1729,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return secHost; } - - - @Override - public String getSupportedImageFormatForCluster(Long clusterId) { - ClusterVO cluster = ApiDBUtils.findClusterById(clusterId); - - if (cluster.getHypervisorType() == HypervisorType.XenServer) { - return "vhd"; - } else if (cluster.getHypervisorType() == HypervisorType.KVM) { - return "qcow2"; - } else if (cluster.getHypervisorType() == HypervisorType.VMware) { - return "ova"; - } else if (cluster.getHypervisorType() == HypervisorType.Ovm) { - return "raw"; - } else { - return null; - } - } + @Override public HypervisorType getHypervisorTypeFromFormat(ImageFormat format) { - if(format == null) { + if (format == null) { return HypervisorType.None; } @@ -3993,22 +1751,32 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - private boolean checkUsagedSpace(StoragePool pool){ + private boolean checkUsagedSpace(StoragePool pool) { StatsCollector sc = StatsCollector.getInstance(); if (sc != null) { long totalSize = pool.getCapacityBytes(); StorageStats stats = 
sc.getStoragePoolStats(pool.getId()); - if(stats == null){ + if (stats == null) { stats = sc.getStorageStats(pool.getId()); } if (stats != null) { - double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize); + double usedPercentage = ((double) stats.getByteUsed() / (double) totalSize); if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + ", disable threshold: " + _storageUsedThreshold); + s_logger.debug("Checking pool " + pool.getId() + + " for storage, totalSize: " + + pool.getCapacityBytes() + ", usedBytes: " + + stats.getByteUsed() + ", usedPct: " + + usedPercentage + ", disable threshold: " + + _storageUsedThreshold); } if (usedPercentage >= _storageUsedThreshold) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " +usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + _storageUsedThreshold); + s_logger.debug("Insufficient space on pool: " + + pool.getId() + + " since its usage percentage: " + + usedPercentage + + " has crossed the pool.storage.capacity.disablethreshold: " + + _storageUsedThreshold); } return false; } @@ -4019,54 +1787,113 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool) { - if(volumes == null || volumes.isEmpty()) + public boolean storagePoolHasEnoughSpace(List volumes, + StoragePool pool) { + if (volumes == null || volumes.isEmpty()) return false; - if(!checkUsagedSpace(pool)) + if (!checkUsagedSpace(pool)) return false; // allocated space includes template of specified volume StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); - long allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null); + long 
allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity( + poolVO, null); long totalAskingSize = 0; for (Volume volume : volumes) { - if(volume.getTemplateId()!=null){ - VMTemplateVO tmpl = _templateDao.findById(volume.getTemplateId()); - if (tmpl.getFormat() != ImageFormat.ISO){ - allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl); + if (volume.getTemplateId() != null) { + VMTemplateVO tmpl = _templateDao.findById(volume + .getTemplateId()); + if (tmpl.getFormat() != ImageFormat.ISO) { + allocatedSizeWithtemplate = _capacityMgr + .getAllocatedPoolCapacity(poolVO, tmpl); } } - if(volume.getState() != Volume.State.Ready) + if (volume.getState() != Volume.State.Ready) totalAskingSize = totalAskingSize + volume.getSize(); } long totalOverProvCapacity; if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) { - totalOverProvCapacity = _storageOverprovisioningFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();// All this for the inaccuracy of floats for big number multiplication. 
- }else { + totalOverProvCapacity = _storageOverprovisioningFactor.multiply( + new BigDecimal(pool.getCapacityBytes())).longValue(); + } else { totalOverProvCapacity = pool.getCapacityBytes(); } if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + _storageAllocatedThreshold); + s_logger.debug("Checking pool: " + pool.getId() + + " for volume allocation " + volumes.toString() + + ", maxSize : " + totalOverProvCapacity + + ", totalAllocatedSize : " + allocatedSizeWithtemplate + + ", askingSize : " + totalAskingSize + + ", allocated disable threshold: " + + _storageAllocatedThreshold); } - double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity); - if (usedPercentage > _storageAllocatedThreshold){ + double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) + / (double) (totalOverProvCapacity); + if (usedPercentage > _storageAllocatedThreshold) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " +usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + _storageAllocatedThreshold + ", skipping this pool"); + s_logger.debug("Insufficient un-allocated capacity on: " + + pool.getId() + + " for volume allocation: " + + volumes.toString() + + " since its allocated percentage: " + + usedPercentage + + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + + _storageAllocatedThreshold + ", skipping this pool"); } return false; } if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) { if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize); + s_logger.debug("Insufficient un-allocated capacity on: " + + pool.getId() + " for volume allocation: " + + volumes.toString() + + ", not enough storage, maxSize : " + + totalOverProvCapacity + ", totalAllocatedSize : " + + allocatedSizeWithtemplate + ", askingSize : " + + totalAskingSize); } return false; } return true; } + @Override + public void createCapacityEntry(long poolId) { + StoragePoolVO storage = _storagePoolDao.findById(poolId); + createCapacityEntry(storage, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 0); + } + + + @Override + public synchronized boolean registerHostListener(String providerUuid, + HypervisorHostListener listener) { + hostListeners.put(providerUuid, listener); + return true; + } + + @Override + public Answer sendToPool(long poolId, Command cmd) + throws StorageUnavailableException { + // TODO Auto-generated method stub + return null; + } + + @Override + public Answer[] sendToPool(long poolId, Commands cmd) + throws StorageUnavailableException { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getName() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/src/com/cloud/storage/TemplateProfile.java b/server/src/com/cloud/storage/TemplateProfile.java index 1d8b6bfc1a3..41bbaaa1057 100755 --- a/server/src/com/cloud/storage/TemplateProfile.java +++ b/server/src/com/cloud/storage/TemplateProfile.java @@ -18,9 +18,9 @@ package com.cloud.storage; import java.util.Map; + import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.VMTemplateVO; public class TemplateProfile { Long userId; @@ -46,6 +46,7 @@ public class TemplateProfile { Long 
templateId; VMTemplateVO template; String templateTag; + String imageStoreUuid; Map details; public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, @@ -83,10 +84,12 @@ public class TemplateProfile { public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, Long zoneId, - HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled) { + HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled, + String imageStoreUuid) { this(templateId, userId, name, displayText, bits, passwordEnabled, requiresHvm, url, isPublic, featured, isExtractable, format, guestOsId, zoneId, hypervisorType, accountName, domainId, accountId, chksum, bootable, details, sshKeyEnabled); this.templateTag = templateTag; + this.imageStoreUuid = imageStoreUuid; } public Long getTemplateId() { @@ -252,4 +255,8 @@ public class TemplateProfile { public Boolean getSshKeyEnabled() { return this.sshKeyEnbaled; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } } diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java new file mode 100644 index 00000000000..41434f41dcc --- /dev/null +++ b/server/src/com/cloud/storage/VolumeManager.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; + +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Volume.Type; +import com.cloud.user.Account; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +public interface VolumeManager extends VolumeApiService { + + VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, + Long destPoolClusterId, HypervisorType dataDiskHyperType) + throws ConcurrentOperationException; + + VolumeVO uploadVolume(UploadVolumeCmd cmd) + throws ResourceAllocationException; + + VolumeVO allocateDuplicateVolume(VolumeVO oldVol, 
Long templateId); + + boolean volumeOnSharedStoragePool(VolumeVO volume); + + boolean volumeInactive(VolumeVO volume); + + String getVmNameOnVolume(VolumeVO volume); + + VolumeVO allocVolume(CreateVolumeCmd cmd) + throws ResourceAllocationException; + + VolumeVO createVolume(CreateVolumeCmd cmd); + + VolumeVO resizeVolume(ResizeVolumeCmd cmd); + + boolean deleteVolume(long volumeId, Account caller) + throws ConcurrentOperationException; + + void destroyVolume(VolumeVO volume); + + DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, VMInstanceVO vm, Account owner); + Volume attachVolumeToVM(AttachVolumeCmd command); + + Volume detachVolumeFromVM(DetachVolumeCmd cmmd); + + void release(VirtualMachineProfile profile); + + void cleanupVolumes(long vmId) throws ConcurrentOperationException; + + Volume migrateVolume(Long volumeId, Long storagePoolId) + throws ConcurrentOperationException; + + boolean StorageMigration( + VirtualMachineProfile vm, + StoragePool destPool) throws ConcurrentOperationException; + + void prepareForMigration( + VirtualMachineProfile vm, + DeployDestination dest); + + void prepare(VirtualMachineProfile vm, + DeployDestination dest) throws StorageUnavailableException, + InsufficientStorageCapacityException, ConcurrentOperationException; + + boolean canVmRestartOnAnotherServer(long vmId); + + DiskProfile allocateTemplatedVolume(Type type, String name, + DiskOfferingVO offering, VMTemplateVO template, VMInstanceVO vm, + Account owner); +} diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java new file mode 100644 index 00000000000..5843dddbec5 --- /dev/null +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -0,0 +1,2620 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachVolumeAnswer; +import com.cloud.agent.api.AttachVolumeCommand; +import com.cloud.agent.api.storage.CopyVolumeAnswer; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.ResizeVolumeAnswer; +import com.cloud.agent.api.storage.ResizeVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.alert.AlertManager; +import com.cloud.api.ApiDBUtils; +import com.cloud.async.AsyncJobExecutor; +import com.cloud.async.AsyncJobManager; +import com.cloud.async.AsyncJobVO; +import com.cloud.async.BaseAsyncJobExecutor; +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.Config; +import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.Resource.ResourceType; +import 
com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.consoleproxy.ConsoleProxyManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventVO; +import com.cloud.event.dao.EventDao; +import com.cloud.event.dao.UsageEventDao; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; +import com.cloud.network.NetworkModel; +import com.cloud.org.Grouping; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.Volume.Event; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.allocator.StoragePoolAllocator; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VMTemplateDao; +import 
com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; +import com.cloud.storage.download.DownloadMonitor; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.secondary.SecondaryStorageVmManager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.snapshot.SnapshotScheduler; +import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.UserContext; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.EnumUtils; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Component +public class VolumeManagerImpl extends ManagerBase implements VolumeManager { + private static 
final Logger s_logger = Logger + .getLogger(VolumeManagerImpl.class); + @Inject + protected UserVmManager _userVmMgr; + @Inject + protected AgentManager _agentMgr; + @Inject + protected TemplateManager _tmpltMgr; + @Inject + protected AsyncJobManager _asyncMgr; + @Inject + protected SnapshotManager _snapshotMgr; + @Inject + protected SnapshotScheduler _snapshotScheduler; + @Inject + protected AccountManager _accountMgr; + @Inject + protected ConfigurationManager _configMgr; + @Inject + protected ConsoleProxyManager _consoleProxyMgr; + @Inject + protected SecondaryStorageVmManager _secStorageMgr; + @Inject + protected NetworkModel _networkMgr; + @Inject + protected ServiceOfferingDao _serviceOfferingDao; + @Inject + protected VolumeDao _volsDao; + @Inject + protected HostDao _hostDao; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + @Inject + protected SnapshotDao _snapshotDao; + @Inject + protected SnapshotManager _snapMgr; + @Inject + protected SnapshotPolicyDao _snapshotPolicyDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected VMTemplateHostDao _vmTemplateHostDao = null; + @Inject + protected VMTemplatePoolDao _vmTemplatePoolDao = null; + @Inject + protected VMTemplateSwiftDao _vmTemplateSwiftDao = null; + @Inject + protected VMTemplateS3Dao _vmTemplateS3Dao; + @Inject + protected S3Manager _s3Mgr; + @Inject + protected VMTemplateDao _vmTemplateDao = null; + @Inject + protected StoragePoolHostDao _poolHostDao = null; + @Inject + protected UserVmDao _userVmDao; + @Inject + VolumeHostDao _volumeHostDao; + @Inject + protected VMInstanceDao _vmInstanceDao; + @Inject + protected PrimaryDataStoreDao _storagePoolDao = null; + @Inject + protected CapacityDao _capacityDao; + @Inject + protected CapacityManager _capacityMgr; + @Inject + protected DiskOfferingDao _diskOfferingDao; + @Inject + protected AccountDao _accountDao; + @Inject + protected EventDao _eventDao = null; + @Inject 
+ protected DataCenterDao _dcDao = null; + @Inject + protected HostPodDao _podDao = null; + @Inject + protected VMTemplateDao _templateDao; + @Inject + protected VMTemplateHostDao _templateHostDao; + @Inject + protected ServiceOfferingDao _offeringDao; + @Inject + protected DomainDao _domainDao; + @Inject + protected UserDao _userDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected UsageEventDao _usageEventDao; + @Inject + protected VirtualMachineManager _vmMgr; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + protected HypervisorGuruManager _hvGuruMgr; + @Inject + protected VolumeDao _volumeDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + protected ResourceLimitService _resourceLimitMgr; + @Inject + protected SecondaryStorageVmManager _ssvmMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject + protected DownloadMonitor _downloadMonitor; + @Inject + protected ResourceTagDao _resourceTagDao; + @Inject + protected List _storagePoolAllocators; + @Inject + ConfigurationDao _configDao; + @Inject + ManagementServer _msServer; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataStoreProviderManager dataStoreProviderMgr; + @Inject + VolumeService volService; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + private int _copyvolumewait; + @Inject + protected HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; + private final StateMachine2 _volStateMachine; + @Inject + StorageManager storageMgr; + private int _customDiskOfferingMinSize = 1; + private int _customDiskOfferingMaxSize = 1024; + private long _maxVolumeSizeInGb; + private boolean _recreateSystemVmEnabled; + protected SearchBuilder HostTemplateStatesSearch; + + public VolumeManagerImpl() { + _volStateMachine = Volume.State.getStateMachine(); + } + 
+ @Override + public VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, + Long destPoolPodId, Long destPoolClusterId, + HypervisorType dataDiskHyperType) + throws ConcurrentOperationException { + + // Find a destination storage pool with the specified criteria + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + DiskProfile dskCh = new DiskProfile(volume.getId(), + volume.getVolumeType(), volume.getName(), diskOffering.getId(), + diskOffering.getDiskSize(), diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), null); + dskCh.setHyperType(dataDiskHyperType); + DataCenterVO destPoolDataCenter = _dcDao.findById(destPoolDcId); + HostPodVO destPoolPod = _podDao.findById(destPoolPodId); + + StoragePool destPool = storageMgr.findStoragePool(dskCh, + destPoolDataCenter, destPoolPod, destPoolClusterId, null, null, + new HashSet()); + + if (destPool == null) { + throw new CloudRuntimeException( + "Failed to find a storage pool with enough capacity to move the volume to."); + } + + List vols = new ArrayList(); + vols.add(volume); + migrateVolumes(vols, destPool); + return this.volFactory.getVolume(volume.getId()); + } + + /* + * Upload the volume to secondary storage. 
+ */ + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume", async = true) + public VolumeVO uploadVolume(UploadVolumeCmd cmd) + throws ResourceAllocationException { + Account caller = UserContext.current().getCaller(); + long ownerId = cmd.getEntityOwnerId(); + Long zoneId = cmd.getZoneId(); + String volumeName = cmd.getVolumeName(); + String url = cmd.getUrl(); + String format = cmd.getFormat(); + String imageStoreUuid = cmd.getImageStoreUuid(); + DataStore store = this._tmpltMgr.getImageStore(imageStoreUuid, zoneId); + + validateVolume(caller, ownerId, zoneId, volumeName, url, format); + + VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, + url, cmd.getFormat()); + + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + + RegisterVolumePayload payload = new RegisterVolumePayload(cmd.getUrl(), cmd.getChecksum(), + cmd.getFormat()); + vol.addPayload(payload); + + this.volService.registerVolume(vol, store); + return volume; + } + + private boolean validateVolume(Account caller, long ownerId, Long zoneId, + String volumeName, String url, String format) + throws ResourceAllocationException { + + // permission check + _accountMgr.checkAccess(caller, null, true, + _accountMgr.getActiveAccountById(ownerId)); + + // Check that the resource limit for volumes won't be exceeded + _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), + ResourceType.volume); + + // Verify that zone exists + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException( + "Unable to find zone by id " + zoneId); + } + + // Check if zone is disabled + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && !_accountMgr.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Cannot perform this operation, Zone is currently disabled: " + + zoneId); + } + + if (url.toLowerCase().contains("file://")) { + throw new 
InvalidParameterValueException( + "File:// type urls are currently unsupported"); + } + + ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase()); + if (imgfmt == null) { + throw new IllegalArgumentException("Image format is incorrect " + + format + ". Supported formats are " + + EnumUtils.listValues(ImageFormat.values())); + } + + String userSpecifiedName = volumeName; + if (userSpecifiedName == null) { + userSpecifiedName = getRandomVolumeName(); + } + if ((!url.toLowerCase().endsWith("vhd")) + && (!url.toLowerCase().endsWith("vhd.zip")) + && (!url.toLowerCase().endsWith("vhd.bz2")) + && (!url.toLowerCase().endsWith("vhd.gz")) + && (!url.toLowerCase().endsWith("qcow2")) + && (!url.toLowerCase().endsWith("qcow2.zip")) + && (!url.toLowerCase().endsWith("qcow2.bz2")) + && (!url.toLowerCase().endsWith("qcow2.gz")) + && (!url.toLowerCase().endsWith("ova")) + && (!url.toLowerCase().endsWith("ova.zip")) + && (!url.toLowerCase().endsWith("ova.bz2")) + && (!url.toLowerCase().endsWith("ova.gz")) + && (!url.toLowerCase().endsWith("img")) + && (!url.toLowerCase().endsWith("raw"))) { + throw new InvalidParameterValueException("Please specify a valid " + + format.toLowerCase()); + } + + if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith( + ".vhd") + && !url.toLowerCase().endsWith("vhd.zip") + && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase() + .endsWith("vhd.gz"))) + || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase() + .endsWith(".qcow2") + && !url.toLowerCase().endsWith("qcow2.zip") + && !url.toLowerCase().endsWith("qcow2.bz2") && !url + .toLowerCase().endsWith("qcow2.gz"))) + || (format.equalsIgnoreCase("ova") && (!url.toLowerCase() + .endsWith(".ova") + && !url.toLowerCase().endsWith("ova.zip") + && !url.toLowerCase().endsWith("ova.bz2") && !url + .toLowerCase().endsWith("ova.gz"))) + || (format.equalsIgnoreCase("raw") && (!url.toLowerCase() + .endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { + throw new 
InvalidParameterValueException( + "Please specify a valid URL. URL:" + url + + " is an invalid for the format " + + format.toLowerCase()); + } + validateUrl(url); + + return false; + } + + @Override + public VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId) { + VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), + oldVol.getName(), oldVol.getDataCenterId(), + oldVol.getDomainId(), oldVol.getAccountId(), + oldVol.getDiskOfferingId(), oldVol.getSize()); + if (templateId != null) { + newVol.setTemplateId(templateId); + } else { + newVol.setTemplateId(oldVol.getTemplateId()); + } + newVol.setDeviceId(oldVol.getDeviceId()); + newVol.setInstanceId(oldVol.getInstanceId()); + newVol.setRecreatable(oldVol.isRecreatable()); + return _volsDao.persist(newVol); + } + + @DB + protected VolumeInfo createVolumeFromSnapshot(VolumeVO volume, + SnapshotVO snapshot) { + Account account = _accountDao.findById(volume.getAccountId()); + + final HashSet poolsToAvoid = new HashSet(); + StoragePool pool = null; + + Set podsToAvoid = new HashSet(); + Pair pod = null; + + + DiskOfferingVO diskOffering = _diskOfferingDao + .findByIdIncludingRemoved(volume.getDiskOfferingId()); + DataCenterVO dc = _dcDao.findById(volume.getDataCenterId()); + DiskProfile dskCh = new DiskProfile(volume, diskOffering, + snapshot.getHypervisorType()); + + // Determine what pod to store the volume in + while ((pod = _resourceMgr.findPod(null, null, dc, account.getId(), + podsToAvoid)) != null) { + podsToAvoid.add(pod.first().getId()); + // Determine what storage pool to store the volume in + while ((pool = storageMgr.findStoragePool(dskCh, dc, pod.first(), null, null, + null, poolsToAvoid)) != null) { + break; + + } + } + + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + DataStore store = this.dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + SnapshotInfo snapInfo = this.snapshotFactory.getSnapshot(snapshot.getId()); + AsyncCallFuture future = 
this.volService.createVolumeFromSnapshot(vol, store, snapInfo); + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to create volume from snapshot:" + result.getResult()); + throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult()); + } + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.debug("Failed to create volume from snapshot", e); + throw new CloudRuntimeException("Failed to create volume from snapshot", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create volume from snapshot", e); + throw new CloudRuntimeException("Failed to create volume from snapshot", e); + } + + } + + protected DiskProfile createDiskCharacteristics(VolumeInfo volume, + VMTemplateVO template, DataCenterVO dc, DiskOfferingVO diskOffering) { + if (volume.getVolumeType() == Type.ROOT + && Storage.ImageFormat.ISO != template.getFormat()) { + SearchCriteria sc = HostTemplateStatesSearch + .create(); + sc.setParameters("id", template.getId()); + sc.setParameters( + "state", + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sc.setJoinParameters("host", "dcId", dc.getId()); + + List sss = _vmTemplateHostDao.search(sc, null); + if (sss.size() == 0) { + throw new CloudRuntimeException("Template " + + template.getName() + + " has not been completely downloaded to zone " + + dc.getId()); + } + VMTemplateHostVO ss = sss.get(0); + + return new DiskProfile(volume.getId(), volume.getVolumeType(), + volume.getName(), diskOffering.getId(), ss.getSize(), + diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), + Storage.ImageFormat.ISO != template.getFormat() ? 
template + .getId() : null); + } else { + return new DiskProfile(volume.getId(), volume.getVolumeType(), + volume.getName(), diskOffering.getId(), + diskOffering.getDiskSize(), diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), null); + } + } + + protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) { + VolumeInfo createdVolume = null; + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + createdVolume = createVolumeFromSnapshot(volume, + snapshot); + + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, + createdVolume.getAccountId(), + createdVolume.getDataCenterId(), createdVolume.getId(), + createdVolume.getName(), createdVolume.getDiskOfferingId(), + null, createdVolume.getSize()); + _usageEventDao.persist(usageEvent); + + return this._volsDao.findById(createdVolume.getId()); + } + + @DB + public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, + VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, + HostPodVO pod, Long clusterId, ServiceOfferingVO offering, + DiskOfferingVO diskOffering, List avoids, + long size, HypervisorType hyperType) throws NoTransitionException { + + final HashSet avoidPools = new HashSet( + avoids); + DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, + diskOffering); + dskCh.setHyperType(vm.getHypervisorType()); + // Find a suitable storage to create volume on + StoragePool destPool = storageMgr.findStoragePool(dskCh, dc, pod, + clusterId, null, vm, avoidPools); + DataStore destStore = this.dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary); + AsyncCallFuture future = this.volService.copyVolume(volume, destStore); + + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("copy volume failed: " + result.getResult()); + throw new CloudRuntimeException("copy volume failed: " + result.getResult()); + } + return result.getVolume(); + } catch 
(InterruptedException e) { + s_logger.debug("Failed to copy volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to copy volume", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to copy volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to copy volume", e); + } + } + + @DB + public VolumeInfo createVolume(VolumeInfo volume, VMInstanceVO vm, + VMTemplateVO template, DataCenterVO dc, HostPodVO pod, + Long clusterId, ServiceOfferingVO offering, + DiskOfferingVO diskOffering, List avoids, + long size, HypervisorType hyperType) { + StoragePool pool = null; + + if (diskOffering != null && diskOffering.isCustomized()) { + diskOffering.setDiskSize(size); + } + + DiskProfile dskCh = null; + if (volume.getVolumeType() == Type.ROOT + && Storage.ImageFormat.ISO != template.getFormat()) { + dskCh = createDiskCharacteristics(volume, template, dc, offering); + } else { + dskCh = createDiskCharacteristics(volume, template, dc, + diskOffering); + } + + dskCh.setHyperType(hyperType); + + final HashSet avoidPools = new HashSet( + avoids); + + pool = storageMgr.findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), + vm, avoidPools); + if (pool == null) { + s_logger.warn("Unable to find storage poll when create volume " + + volume.getName()); + throw new CloudRuntimeException("Unable to find storage poll when create volume" + volume.getName()); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to create " + volume + " on " + pool); + } + DataStore store = this.dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + AsyncCallFuture future = null; + boolean isNotCreatedFromTemplate = volume.getTemplateId() == null ? 
true : false; + if (isNotCreatedFromTemplate) { + future = this.volService.createVolumeAsync(volume, store); + } else { + TemplateInfo templ = this.tmplFactory.getTemplate(template.getId()); + future = this.volService.createVolumeFromTemplateAsync(volume, store.getId(), templ); + } + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("create volume failed: " + result.getResult()); + throw new CloudRuntimeException("create volume failed:" + result.getResult()); + } + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), volume.getName(), + volume.getDiskOfferingId(), null, volume.getSize()); + _usageEventDao.persist(usageEvent); + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.error("create volume failed", e); + throw new CloudRuntimeException("create volume failed", e); + } catch (ExecutionException e) { + s_logger.error("create volume failed", e); + throw new CloudRuntimeException("create volume failed", e); + } + + } + + public String getRandomVolumeName() { + return UUID.randomUUID().toString(); + } + + private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, + String volumeName, String url, String format) { + + Transaction txn = Transaction.currentTxn(); + txn.start(); + + VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, + new Long(-1), null, null, 0, Volume.Type.DATADISK); + volume.setPoolId(null); + volume.setDataCenterId(zoneId); + volume.setPodId(null); + volume.setAccountId(ownerId); + volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller + .getDomainId())); + long diskOfferingId = _diskOfferingDao.findByUniqueName( + "Cloud.com-Custom").getId(); + volume.setDiskOfferingId(diskOfferingId); + // volume.setSize(size); + volume.setInstanceId(null); + volume.setUpdated(new Date()); + volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller + .getDomainId()); + + volume = _volsDao.persist(volume); + try { + stateTransitTo(volume, Event.UploadRequested); + } catch (NoTransitionException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + UserContext.current().setEventDetails("Volume Id: " + volume.getId()); + + // Increment resource count during allocation; if actual creation fails, + // decrement it + _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), + ResourceType.volume); + + txn.commit(); + return volume; + } + + @Override + public boolean volumeOnSharedStoragePool(VolumeVO volume) { + Long poolId = volume.getPoolId(); + if (poolId == null) { + return false; + } else { + StoragePoolVO pool = _storagePoolDao.findById(poolId); + + if (pool == null) { + return false; + } else { + return (pool.getScope() == ScopeType.HOST) ? false : true; + } + } + } + + @Override + public boolean volumeInactive(VolumeVO volume) { + Long vmId = volume.getInstanceId(); + if (vmId != null) { + UserVm vm = _userVmDao.findById(vmId); + if (vm == null) { + return true; + } + State state = vm.getState(); + if (state.equals(State.Stopped) || state.equals(State.Destroyed)) { + return true; + } + } + return false; + } + + @Override + public String getVmNameOnVolume(VolumeVO volume) { + Long vmId = volume.getInstanceId(); + if (vmId != null) { + VMInstanceVO vm = _vmInstanceDao.findById(vmId); + + if (vm == null) { + return null; + } + return vm.getInstanceName(); + } + return null; + } + + /* + * Just allocate a volume in the database, don't send the createvolume cmd + * to hypervisor. The volume will be finally created only when it's attached + * to a VM. + */ + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) + public VolumeVO allocVolume(CreateVolumeCmd cmd) + throws ResourceAllocationException { + // FIXME: some of the scheduled event stuff might be missing here... 
+ Account caller = UserContext.current().getCaller(); + + long ownerId = cmd.getEntityOwnerId(); + + // permission check + _accountMgr.checkAccess(caller, null, true, + _accountMgr.getActiveAccountById(ownerId)); + + // Check that the resource limit for volumes won't be exceeded + _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), + ResourceType.volume); + + Long zoneId = cmd.getZoneId(); + Long diskOfferingId = null; + DiskOfferingVO diskOffering = null; + Long size = null; + + // validate input parameters before creating the volume + if ((cmd.getSnapshotId() == null && cmd.getDiskOfferingId() == null) + || (cmd.getSnapshotId() != null && cmd.getDiskOfferingId() != null)) { + throw new InvalidParameterValueException( + "Either disk Offering Id or snapshot Id must be passed whilst creating volume"); + } + + if (cmd.getSnapshotId() == null) {// create a new volume + + diskOfferingId = cmd.getDiskOfferingId(); + size = cmd.getSize(); + Long sizeInGB = size; + if (size != null) { + if (size > 0) { + size = size * 1024 * 1024 * 1024; // user specify size in GB + } else { + throw new InvalidParameterValueException( + "Disk size must be larger than 0"); + } + } + + // Check that the the disk offering is specified + diskOffering = _diskOfferingDao.findById(diskOfferingId); + if ((diskOffering == null) || diskOffering.getRemoved() != null + || !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) { + throw new InvalidParameterValueException( + "Please specify a valid disk offering."); + } + + if (diskOffering.isCustomized()) { + if (size == null) { + throw new InvalidParameterValueException( + "This disk offering requires a custom size specified"); + } + if ((sizeInGB < _customDiskOfferingMinSize) + || (sizeInGB > _customDiskOfferingMaxSize)) { + throw new InvalidParameterValueException("Volume size: " + + sizeInGB + "GB is out of allowed range. 
Max: " + + _customDiskOfferingMaxSize + " Min:" + + _customDiskOfferingMinSize); + } + } + + if (!diskOffering.isCustomized() && size != null) { + throw new InvalidParameterValueException( + "This disk offering does not allow custom size"); + } + + if (diskOffering.getDomainId() == null) { + // do nothing as offering is public + } else { + _configMgr.checkDiskOfferingAccess(caller, diskOffering); + } + + if (diskOffering.getDiskSize() > 0) { + size = diskOffering.getDiskSize(); + } + + if (!validateVolumeSizeRange(size)) {// convert size from mb to gb + // for validation + throw new InvalidParameterValueException( + "Invalid size for custom volume creation: " + size + + " ,max volume size is:" + _maxVolumeSizeInGb); + } + } else { // create volume from snapshot + Long snapshotId = cmd.getSnapshotId(); + SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId); + if (snapshotCheck == null) { + throw new InvalidParameterValueException( + "unable to find a snapshot with id " + snapshotId); + } + + if (snapshotCheck.getStatus() != Snapshot.Status.BackedUp) { + throw new InvalidParameterValueException("Snapshot id=" + + snapshotId + " is not in " + Snapshot.Status.BackedUp + + " state yet and can't be used for volume creation"); + } + + diskOfferingId = snapshotCheck.getDiskOfferingId(); + diskOffering = _diskOfferingDao.findById(diskOfferingId); + zoneId = snapshotCheck.getDataCenterId(); + size = snapshotCheck.getSize(); // ; disk offering is used for tags + // purposes + + // check snapshot permissions + _accountMgr.checkAccess(caller, null, true, snapshotCheck); + } + + // Verify that zone exists + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException( + "Unable to find zone by id " + zoneId); + } + + // Check if zone is disabled + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && !_accountMgr.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Cannot perform 
this operation, Zone is currently disabled: " + + zoneId); + } + + // If local storage is disabled then creation of volume with local disk + // offering not allowed + if (!zone.isLocalStorageEnabled() && diskOffering.getUseLocalStorage()) { + throw new InvalidParameterValueException( + "Zone is not configured to use local storage but volume's disk offering " + + diskOffering.getName() + " uses it"); + } + + String userSpecifiedName = cmd.getVolumeName(); + if (userSpecifiedName == null) { + userSpecifiedName = getRandomVolumeName(); + } + + Transaction txn = Transaction.currentTxn(); + txn.start(); + + VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, + new Long(-1), null, null, 0, Volume.Type.DATADISK); + volume.setPoolId(null); + volume.setDataCenterId(zoneId); + volume.setPodId(null); + volume.setAccountId(ownerId); + volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller + .getDomainId())); + volume.setDiskOfferingId(diskOfferingId); + volume.setSize(size); + volume.setInstanceId(null); + volume.setUpdated(new Date()); + volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller + .getDomainId()); + + volume = _volsDao.persist(volume); + if (cmd.getSnapshotId() == null) { + // for volume created from snapshot, create usage event after volume + // creation + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), volume.getName(), + diskOfferingId, null, size); + _usageEventDao.persist(usageEvent); + } + + UserContext.current().setEventDetails("Volume Id: " + volume.getId()); + + // Increment resource count during allocation; if actual creation fails, + // decrement it + _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), + ResourceType.volume); + + txn.commit(); + + return volume; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) + public VolumeVO createVolume(CreateVolumeCmd cmd) { + VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + boolean created = true; + + try { + if (cmd.getSnapshotId() != null) { + volume = createVolumeFromSnapshot(volume, cmd.getSnapshotId()); + if (volume.getState() != Volume.State.Ready) { + created = false; + } + } + return volume; + } catch(Exception e) { + created = false; + s_logger.debug("Failed to create volume: " + volume.getId(), e); + return null; + } finally { + if (!created) { + s_logger.trace("Decrementing volume resource count for account id=" + + volume.getAccountId() + + " as volume failed to create on the backend"); + _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), + ResourceType.volume); + } + } + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) + public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { + VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + Long newSize = null; + boolean shrinkOk = cmd.getShrinkOk(); + boolean success = false; + DiskOfferingVO diskOffering = 
_diskOfferingDao.findById(volume + .getDiskOfferingId()); + DiskOfferingVO newDiskOffering = null; + + newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); + + /* + * Volumes with no hypervisor have never been assigned, and can be + * resized by recreating. perhaps in the future we can just update the + * db entry for the volume + */ + if (_volsDao.getHypervisorType(volume.getId()) == HypervisorType.None) { + throw new InvalidParameterValueException( + "Can't resize a volume that has never been attached, not sure which hypervisor type. Recreate volume to resize."); + } + + /* Only works for KVM/Xen for now */ + if (_volsDao.getHypervisorType(volume.getId()) != HypervisorType.KVM + && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.XenServer) { + throw new InvalidParameterValueException( + "Cloudstack currently only supports volumes marked as KVM or XenServer hypervisor for resize"); + } + + if (volume == null) { + throw new InvalidParameterValueException("No such volume"); + } + + if (volume.getState() != Volume.State.Ready) { + throw new InvalidParameterValueException( + "Volume should be in ready state before attempting a resize"); + } + + if (!volume.getVolumeType().equals(Volume.Type.DATADISK)) { + throw new InvalidParameterValueException( + "Can only resize DATA volumes"); + } + + /* + * figure out whether or not a new disk offering or size parameter is + * required, get the correct size value + */ + if (newDiskOffering == null) { + if (diskOffering.isCustomized()) { + newSize = cmd.getSize(); + + if (newSize == null) { + throw new InvalidParameterValueException( + "new offering is of custom size, need to specify a size"); + } + + newSize = (newSize << 30); + } else { + throw new InvalidParameterValueException("current offering" + + volume.getDiskOfferingId() + + " cannot be resized, need to specify a disk offering"); + } + } else { + + if (newDiskOffering.getRemoved() != null + || 
!DiskOfferingVO.Type.Disk.equals(newDiskOffering + .getType())) { + throw new InvalidParameterValueException( + "Disk offering ID is missing or invalid"); + } + + if (diskOffering.getTags() != null) { + if (!newDiskOffering.getTags().equals(diskOffering.getTags())) { + throw new InvalidParameterValueException( + "Tags on new and old disk offerings must match"); + } + } else if (newDiskOffering.getTags() != null) { + throw new InvalidParameterValueException( + "There are no tags on current disk offering, new disk offering needs to have no tags"); + } + + if (newDiskOffering.getDomainId() == null) { + // do nothing as offering is public + } else { + _configMgr.checkDiskOfferingAccess(UserContext.current() + .getCaller(), newDiskOffering); + } + + if (newDiskOffering.isCustomized()) { + newSize = cmd.getSize(); + + if (newSize == null) { + throw new InvalidParameterValueException( + "new offering is of custom size, need to specify a size"); + } + + newSize = (newSize << 30); + } else { + newSize = newDiskOffering.getDiskSize(); + } + } + + if (newSize == null) { + throw new InvalidParameterValueException( + "could not detect a size parameter or fetch one from the diskofferingid parameter"); + } + + if (!validateVolumeSizeRange(newSize)) { + throw new InvalidParameterValueException( + "Requested size out of range"); + } + + /* does the caller have the authority to act on this volume? */ + _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, + volume); + + UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); + + PrimaryDataStoreInfo pool = (PrimaryDataStoreInfo)this.dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary); + long currentSize = volume.getSize(); + + /* + * lets make certain they (think they) know what they're doing if they + * want to shrink, by forcing them to provide the shrinkok parameter. 
+ * This will be checked again at the hypervisor level where we can see + * the actual disk size + */ + if (currentSize > newSize && !shrinkOk) { + throw new InvalidParameterValueException( + "Going from existing size of " + + currentSize + + " to size of " + + newSize + + " would shrink the volume, need to sign off by supplying the shrinkok parameter with value of true"); + } + + /* + * get a list of hosts to send the commands to, try the system the + * associated vm is running on first, then the last known place it ran. + * If not attached to a userVm, we pass 'none' and resizevolume.sh is ok + * with that since it only needs the vm name to live resize + */ + long[] hosts = null; + String instanceName = "none"; + if (userVm != null) { + instanceName = userVm.getInstanceName(); + if (userVm.getHostId() != null) { + hosts = new long[] { userVm.getHostId() }; + } else if (userVm.getLastHostId() != null) { + hosts = new long[] { userVm.getLastHostId() }; + } + + /* Xen only works offline, SR does not support VDI.resizeOnline */ + if (_volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer + && !userVm.getState().equals(State.Stopped)) { + throw new InvalidParameterValueException( + "VM must be stopped or disk detached in order to resize with the Xen HV"); + } + } + + try { + try { + stateTransitTo(volume, Volume.Event.ResizeRequested); + } catch (NoTransitionException etrans) { + throw new CloudRuntimeException( + "Unable to change volume state for resize: " + + etrans.toString()); + } + + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand( + volume.getPath(), new StorageFilerTO(pool), currentSize, + newSize, shrinkOk, instanceName); + ResizeVolumeAnswer answer = (ResizeVolumeAnswer) this.storageMgr.sendToPool(pool, + hosts, resizeCmd); + + /* + * need to fetch/store new volume size in database. 
This value comes + * from hypervisor rather than trusting that a success means we have + * a volume of the size we requested + */ + if (answer != null && answer.getResult()) { + long finalSize = answer.getNewSize(); + s_logger.debug("Resize: volume started at size " + currentSize + + " and ended at size " + finalSize); + volume.setSize(finalSize); + if (newDiskOffering != null) { + volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); + } + _volsDao.update(volume.getId(), volume); + + success = true; + return volume; + } else if (answer != null) { + s_logger.debug("Resize: returned '" + answer.getDetails() + "'"); + } + } catch (StorageUnavailableException e) { + s_logger.debug("volume failed to resize: " + e); + return null; + } finally { + if (success) { + try { + stateTransitTo(volume, Volume.Event.OperationSucceeded); + } catch (NoTransitionException etrans) { + throw new CloudRuntimeException( + "Failed to change volume state: " + + etrans.toString()); + } + } else { + try { + stateTransitTo(volume, Volume.Event.OperationFailed); + } catch (NoTransitionException etrans) { + throw new CloudRuntimeException( + "Failed to change volume state: " + + etrans.toString()); + } + } + } + return null; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DELETE, eventDescription = "deleting volume") + public boolean deleteVolume(long volumeId, Account caller) + throws ConcurrentOperationException { + + VolumeVO volume = _volsDao.findById(volumeId); + if (volume == null) { + throw new InvalidParameterValueException( + "Unable to aquire volume with ID: " + volumeId); + } + + if (!_snapshotMgr.canOperateOnVolume(volume)) { + throw new InvalidParameterValueException( + "There are snapshot creating on it, Unable to delete the volume"); + } + + _accountMgr.checkAccess(caller, null, true, volume); + + if (volume.getInstanceId() != null) { + throw new InvalidParameterValueException( + "Please specify a volume that is not attached to any VM."); + } + + if 
(volume.getState() == Volume.State.UploadOp) { + VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume + .getId()); + if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { + throw new InvalidParameterValueException( + "Please specify a volume that is not uploading"); + } + } + + try { + if (volume.getState() != Volume.State.Destroy && volume.getState() != Volume.State.Expunging && volume.getState() != Volume.State.Expunging) { + Long instanceId = volume.getInstanceId(); + if (!this.volService.destroyVolume(volume.getId())) { + return false; + } + + VMInstanceVO vmInstance = this._vmInstanceDao.findById(instanceId); + if (instanceId == null + || (vmInstance.getType().equals(VirtualMachine.Type.User))) { + // Decrement the resource count for volumes belonging user VM's only + _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), + ResourceType.volume); + // Log usage event for volumes belonging user VM's only + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), volume.getName()); + _usageEventDao.persist(usageEvent); + } + } + AsyncCallFuture future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(volume.getId())); + future.get(); + + } catch (Exception e) { + s_logger.warn("Failed to expunge volume:", e); + return false; + } + + return true; + } + + private boolean validateVolumeSizeRange(long size) { + if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { + throw new InvalidParameterValueException( + "Please specify a size of at least 1 Gb."); + } else if (size > (_maxVolumeSizeInGb * 1024 * 1024 * 1024)) { + throw new InvalidParameterValueException("volume size " + size + + ", but the maximum size allowed is " + _maxVolumeSizeInGb + + " Gb."); + } + + return true; + } + + protected DiskProfile toDiskProfile(VolumeVO vol, DiskOfferingVO offering) { + return new DiskProfile(vol.getId(), 
vol.getVolumeType(), vol.getName(), + offering.getId(), vol.getSize(), offering.getTagsArray(), + offering.getUseLocalStorage(), offering.isRecreatable(), + vol.getTemplateId()); + } + + @Override + public DiskProfile allocateRawVolume(Type type, + String name, DiskOfferingVO offering, Long size, VMInstanceVO vm, Account owner) { + if (size == null) { + size = offering.getDiskSize(); + } else { + size = (size * 1024 * 1024 * 1024); + } + VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), + owner.getDomainId(), owner.getId(), offering.getId(), size); + if (vm != null) { + vol.setInstanceId(vm.getId()); + } + + if (type.equals(Type.ROOT)) { + vol.setDeviceId(0l); + } else { + vol.setDeviceId(1l); + } + + vol = _volsDao.persist(vol); + + // Save usage event and update resource count for user vm volumes + if (vm instanceof UserVm) { + + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), + vol.getDataCenterId(), vol.getId(), vol.getName(), + offering.getId(), null, size); + _usageEventDao.persist(usageEvent); + + _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), + ResourceType.volume); + } + return toDiskProfile(vol, offering); + } + + @Override + public DiskProfile allocateTemplatedVolume( + Type type, String name, DiskOfferingVO offering, + VMTemplateVO template, VMInstanceVO vm, Account owner) { + assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really...."; + + Long size = this._tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); + + VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), + owner.getDomainId(), owner.getId(), offering.getId(), size); + if (vm != null) { + vol.setInstanceId(vm.getId()); + } + vol.setTemplateId(template.getId()); + + if (type.equals(Type.ROOT)) { + vol.setDeviceId(0l); + if (!vm.getType().equals(VirtualMachine.Type.User)) { + vol.setRecreatable(true); + } + } else { + vol.setDeviceId(1l); + } + + vol = 
_volsDao.persist(vol); + + // Create event and update resource count for volumes if vm is a user vm + if (vm instanceof UserVm) { + + Long offeringId = null; + + if (offering.getType() == DiskOfferingVO.Type.Disk) { + offeringId = offering.getId(); + } + + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), + vol.getDataCenterId(), vol.getId(), vol.getName(), + offeringId, template.getId(), vol.getSize()); + _usageEventDao.persist(usageEvent); + + _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), + ResourceType.volume); + } + return toDiskProfile(vol, offering); + } + + private String getSupportedImageFormatForCluster(Long clusterId) { + ClusterVO cluster = ApiDBUtils.findClusterById(clusterId); + + if (cluster.getHypervisorType() == HypervisorType.XenServer) { + return "vhd"; + } else if (cluster.getHypervisorType() == HypervisorType.KVM) { + return "qcow2"; + } else if (cluster.getHypervisorType() == HypervisorType.VMware) { + return "ova"; + } else if (cluster.getHypervisorType() == HypervisorType.Ovm) { + return "raw"; + } else { + return null; + } + } + + private VolumeInfo copyVolume(StoragePoolVO rootDiskPool + , VolumeInfo volume, VMInstanceVO vm, VMTemplateVO rootDiskTmplt, DataCenterVO dcVO, + HostPodVO pod, DiskOfferingVO diskVO, ServiceOfferingVO svo, HypervisorType rootDiskHyperType) throws NoTransitionException { + VolumeHostVO volHostVO = _volumeHostDao.findByHostVolume(volume.getDataStore().getId(), volume.getId()); + if (!volHostVO + .getFormat() + .getFileExtension() + .equals( + getSupportedImageFormatForCluster(rootDiskPool + .getClusterId()))) { + throw new InvalidParameterValueException( + "Failed to attach volume to VM since volumes format " + + volHostVO.getFormat() + .getFileExtension() + + " is not compatible with the vm hypervisor type"); + } + + VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume, + vm, rootDiskTmplt, dcVO, pod, + rootDiskPool.getClusterId(), svo, diskVO, 
+ new ArrayList(), + volume.getSize(), rootDiskHyperType); + + return volumeOnPrimary; + } + + private VolumeInfo createVolumeOnPrimaryStorage(VMInstanceVO vm, VolumeVO rootVolumeOfVm, VolumeInfo volume, HypervisorType rootDiskHyperType) throws NoTransitionException { + VMTemplateVO rootDiskTmplt = _templateDao.findById(vm + .getTemplateId()); + DataCenterVO dcVO = _dcDao.findById(vm + .getDataCenterId()); + HostPodVO pod = _podDao.findById(vm.getPodIdToDeployIn()); + StoragePoolVO rootDiskPool = _storagePoolDao + .findById(rootVolumeOfVm.getPoolId()); + ServiceOfferingVO svo = _serviceOfferingDao.findById(vm + .getServiceOfferingId()); + DiskOfferingVO diskVO = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + Long clusterId = (rootDiskPool == null ? null : rootDiskPool + .getClusterId()); + + VolumeInfo vol = null; + if (volume.getState() == Volume.State.Allocated) { + vol = createVolume(volume, vm, + rootDiskTmplt, dcVO, pod, clusterId, svo, diskVO, + new ArrayList(), volume.getSize(), + rootDiskHyperType); + } else if (volume.getState() == Volume.State.Uploaded) { + vol = copyVolume(rootDiskPool + , volume, vm, rootDiskTmplt, dcVO, + pod, diskVO, svo, rootDiskHyperType); + } + return vol; + } + + private boolean needMoveVolume(VolumeVO rootVolumeOfVm, VolumeInfo volume) { + StoragePoolVO vmRootVolumePool = _storagePoolDao + .findById(rootVolumeOfVm.getPoolId()); + DiskOfferingVO volumeDiskOffering = _diskOfferingDao + .findById(volume.getDiskOfferingId()); + String[] volumeTags = volumeDiskOffering.getTagsArray(); + + boolean isVolumeOnSharedPool = !volumeDiskOffering + .getUseLocalStorage(); + StoragePoolVO sourcePool = _storagePoolDao.findById(volume + .getPoolId()); + List matchingVMPools = _storagePoolDao + .findPoolsByTags(vmRootVolumePool.getDataCenterId(), + vmRootVolumePool.getPodId(), + vmRootVolumePool.getClusterId(), volumeTags, + isVolumeOnSharedPool); + + boolean moveVolumeNeeded = true; + if (matchingVMPools.size() == 0) { + String 
poolType; + if (vmRootVolumePool.getClusterId() != null) { + poolType = "cluster"; + } else if (vmRootVolumePool.getPodId() != null) { + poolType = "pod"; + } else { + poolType = "zone"; + } + throw new CloudRuntimeException( + "There are no storage pools in the VM's " + poolType + + " with all of the volume's tags (" + + volumeDiskOffering.getTags() + ")."); + } else { + long sourcePoolId = sourcePool.getId(); + Long sourcePoolDcId = sourcePool.getDataCenterId(); + Long sourcePoolPodId = sourcePool.getPodId(); + Long sourcePoolClusterId = sourcePool.getClusterId(); + for (StoragePoolVO vmPool : matchingVMPools) { + long vmPoolId = vmPool.getId(); + Long vmPoolDcId = vmPool.getDataCenterId(); + Long vmPoolPodId = vmPool.getPodId(); + Long vmPoolClusterId = vmPool.getClusterId(); + + // Moving a volume is not required if storage pools belongs + // to same cluster in case of shared volume or + // identical storage pool in case of local + if (sourcePoolDcId == vmPoolDcId + && sourcePoolPodId == vmPoolPodId + && sourcePoolClusterId == vmPoolClusterId + && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { + moveVolumeNeeded = false; + break; + } + } + } + + return moveVolumeNeeded; + } + + + private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) { + String errorMsg = "Failed to attach volume: " + volume.getName() + + " to VM: " + vm.getHostName(); + boolean sendCommand = (vm.getState() == State.Running); + AttachVolumeAnswer answer = null; + Long hostId = vm.getHostId(); + if (hostId == null) { + hostId = vm.getLastHostId(); + HostVO host = _hostDao.findById(hostId); + if (host != null + && host.getHypervisorType() == HypervisorType.VMware) { + sendCommand = true; + } + } + + if (sendCommand) { + StoragePoolVO volumePool = _storagePoolDao.findById(volume + .getPoolId()); + AttachVolumeCommand cmd = new AttachVolumeCommand(true, + vm.getInstanceName(), volume.getPoolType(), + volume.getFolder(), volume.getPath(), volume.getName(), + 
deviceId, volume.getChainInfo()); + cmd.setPoolUuid(volumePool.getUuid()); + + try { + answer = (AttachVolumeAnswer) _agentMgr.send(hostId, cmd); + } catch (Exception e) { + throw new CloudRuntimeException(errorMsg + " due to: " + + e.getMessage()); + } + } + + if (!sendCommand || (answer != null && answer.getResult())) { + // Mark the volume as attached + if (sendCommand) { + _volsDao.attachVolume(volume.getId(), vm.getId(), + answer.getDeviceId()); + } else { + _volsDao.attachVolume(volume.getId(), vm.getId(), deviceId); + } + return _volsDao.findById(volume.getId()); + } else { + if (answer != null) { + String details = answer.getDetails(); + if (details != null && !details.isEmpty()) { + errorMsg += "; " + details; + } + } + throw new CloudRuntimeException(errorMsg); + } + } + + private int getMaxDataVolumesSupported(UserVmVO vm) { + Long hostId = vm.getHostId(); + if (hostId == null) { + hostId = vm.getLastHostId(); + } + HostVO host = _hostDao.findById(hostId); + Integer maxDataVolumesSupported = null; + if (host != null) { + _hostDao.loadDetails(host); + maxDataVolumesSupported = _hypervisorCapabilitiesDao + .getMaxDataVolumesLimit(host.getHypervisorType(), + host.getDetail("product_version")); + } + if (maxDataVolumesSupported == null) { + maxDataVolumesSupported = 6; // 6 data disks by default if nothing + // is specified in + // 'hypervisor_capabilities' table + } + + return maxDataVolumesSupported.intValue(); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) + public Volume attachVolumeToVM(AttachVolumeCmd command) { + Long vmId = command.getVirtualMachineId(); + Long volumeId = command.getId(); + Long deviceId = command.getDeviceId(); + Account caller = UserContext.current().getCaller(); + + // Check that the volume ID is valid + VolumeInfo volume = volFactory.getVolume(volumeId); + // Check that the volume is a data volume + if (volume == null || volume.getVolumeType() != 
Volume.Type.DATADISK) { + throw new InvalidParameterValueException( + "Please specify a valid data volume."); + } + + // Check that the volume is not currently attached to any VM + if (volume.getInstanceId() != null) { + throw new InvalidParameterValueException( + "Please specify a volume that is not attached to any VM."); + } + + // Check that the volume is not destroyed + if (volume.getState() == Volume.State.Destroy) { + throw new InvalidParameterValueException( + "Please specify a volume that is not destroyed."); + } + + // Check that the virtual machine ID is valid and it's a user vm + UserVmVO vm = _userVmDao.findById(vmId); + if (vm == null || vm.getType() != VirtualMachine.Type.User) { + throw new InvalidParameterValueException( + "Please specify a valid User VM."); + } + + // Check that the VM is in the correct state + if (vm.getState() != State.Running && vm.getState() != State.Stopped) { + throw new InvalidParameterValueException( + "Please specify a VM that is either running or stopped."); + } + + // Check that the device ID is valid + if (deviceId != null) { + if (deviceId.longValue() == 0) { + throw new InvalidParameterValueException( + "deviceId can't be 0, which is used by Root device"); + } + } + + // Check that the number of data volumes attached to VM is less than + // that supported by hypervisor + List existingDataVolumes = _volsDao.findByInstanceAndType( + vmId, Volume.Type.DATADISK); + int maxDataVolumesSupported = getMaxDataVolumesSupported(vm); + if (existingDataVolumes.size() >= maxDataVolumesSupported) { + throw new InvalidParameterValueException( + "The specified VM already has the maximum number of data disks (" + + maxDataVolumesSupported + + "). 
Please specify another VM."); + } + + // Check that the VM and the volume are in the same zone + if (vm.getDataCenterId() != volume.getDataCenterId()) { + throw new InvalidParameterValueException( + "Please specify a VM that is in the same zone as the volume."); + } + + // If local storage is disabled then attaching a volume with local disk + // offering not allowed + DataCenterVO dataCenter = _dcDao.findById(volume.getDataCenterId()); + if (!dataCenter.isLocalStorageEnabled()) { + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + if (diskOffering.getUseLocalStorage()) { + throw new InvalidParameterValueException( + "Zone is not configured to use local storage but volume's disk offering " + + diskOffering.getName() + " uses it"); + } + } + + // permission check + _accountMgr.checkAccess(caller, null, true, volume, vm); + + if (!(Volume.State.Allocated.equals(volume.getState()) + || Volume.State.Ready.equals(volume.getState()) || Volume.State.Uploaded + .equals(volume.getState()))) { + throw new InvalidParameterValueException( + "Volume state must be in Allocated, Ready or in Uploaded state"); + } + + VolumeVO rootVolumeOfVm = null; + List rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, + Volume.Type.ROOT); + if (rootVolumesOfVm.size() != 1) { + throw new CloudRuntimeException( + "The VM " + + vm.getHostName() + + " has more than one ROOT volume and is in an invalid state."); + } else { + rootVolumeOfVm = rootVolumesOfVm.get(0); + } + + HypervisorType rootDiskHyperType = vm.getHypervisorType(); + + HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume + .getId()); + if (dataDiskHyperType != HypervisorType.None + && rootDiskHyperType != dataDiskHyperType) { + throw new InvalidParameterValueException( + "Can't attach a volume created by: " + dataDiskHyperType + + " to a " + rootDiskHyperType + " vm"); + } + + + deviceId = getDeviceId(vmId, deviceId); + VolumeInfo volumeOnPrimaryStorage = volume; + if 
(volume.getState().equals(Volume.State.Allocated) + || volume.getState() == Volume.State.Uploaded) { + try { + volumeOnPrimaryStorage = createVolumeOnPrimaryStorage(vm, rootVolumeOfVm, volume, rootDiskHyperType); + } catch (NoTransitionException e) { + s_logger.debug("Failed to create volume on primary storage", e); + throw new CloudRuntimeException("Failed to create volume on primary storage", e); + } + } + + boolean moveVolumeNeeded = needMoveVolume(rootVolumeOfVm, volumeOnPrimaryStorage); + + if (moveVolumeNeeded) { + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)volumeOnPrimaryStorage.getDataStore(); + if (primaryStore.isLocal()) { + throw new CloudRuntimeException( + "Failed to attach local data volume " + + volume.getName() + + " to VM " + + vm.getDisplayName() + + " as migration of local data volume is not allowed"); + } + StoragePoolVO vmRootVolumePool = _storagePoolDao + .findById(rootVolumeOfVm.getPoolId()); + + try { + volumeOnPrimaryStorage = moveVolume(volumeOnPrimaryStorage, + vmRootVolumePool.getDataCenterId(), + vmRootVolumePool.getPodId(), + vmRootVolumePool.getClusterId(), + dataDiskHyperType); + } catch (ConcurrentOperationException e) { + s_logger.debug("move volume failed", e); + throw new CloudRuntimeException("move volume failed", e); + } + } + + + AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor + .getCurrentExecutor(); + if (asyncExecutor != null) { + AsyncJobVO job = asyncExecutor.getJob(); + + if (s_logger.isInfoEnabled()) { + s_logger.info("Trying to attaching volume " + volumeId + + " to vm instance:" + vm.getId() + + ", update async job-" + job.getId() + + " progress status"); + } + + _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); + _asyncMgr.updateAsyncJobStatus(job.getId(), + BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); + } + + VolumeVO newVol = _volumeDao.findById(volumeOnPrimaryStorage.getId()); + newVol = sendAttachVolumeCommand(vm, newVol, deviceId); + return newVol; + } + + @Override + 
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "detaching volume", async = true) + public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { + Account caller = UserContext.current().getCaller(); + if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd + .getVirtualMachineId() == null) + || (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd + .getVirtualMachineId() != null)) + || (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd + .getVirtualMachineId() == null))) { + throw new InvalidParameterValueException( + "Please provide either a volume id, or a tuple(device id, instance id)"); + } + + Long volumeId = cmmd.getId(); + VolumeVO volume = null; + + if (volumeId != null) { + volume = _volsDao.findById(volumeId); + } else { + volume = _volsDao.findByInstanceAndDeviceId( + cmmd.getVirtualMachineId(), cmmd.getDeviceId()).get(0); + } + + Long vmId = null; + + if (cmmd.getVirtualMachineId() == null) { + vmId = volume.getInstanceId(); + } else { + vmId = cmmd.getVirtualMachineId(); + } + + // Check that the volume ID is valid + if (volume == null) { + throw new InvalidParameterValueException( + "Unable to find volume with ID: " + volumeId); + } + + // Permissions check + _accountMgr.checkAccess(caller, null, true, volume); + + // Check that the volume is a data volume + if (volume.getVolumeType() != Volume.Type.DATADISK) { + throw new InvalidParameterValueException( + "Please specify a data volume."); + } + + // Check that the volume is currently attached to a VM + if (vmId == null) { + throw new InvalidParameterValueException( + "The specified volume is not attached to a VM."); + } + + // Check that the VM is in the correct state + UserVmVO vm = this._userVmDao.findById(vmId); + if (vm.getState() != State.Running && vm.getState() != State.Stopped + && vm.getState() != State.Destroyed) { + throw new InvalidParameterValueException( + "Please specify a VM that is either running or stopped."); + } + + 
AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor + .getCurrentExecutor(); + if (asyncExecutor != null) { + AsyncJobVO job = asyncExecutor.getJob(); + + if (s_logger.isInfoEnabled()) { + s_logger.info("Trying to attaching volume " + volumeId + + "to vm instance:" + vm.getId() + + ", update async job-" + job.getId() + + " progress status"); + } + + _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); + _asyncMgr.updateAsyncJobStatus(job.getId(), + BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); + } + + String errorMsg = "Failed to detach volume: " + volume.getName() + + " from VM: " + vm.getHostName(); + boolean sendCommand = (vm.getState() == State.Running); + Answer answer = null; + + if (sendCommand) { + AttachVolumeCommand cmd = new AttachVolumeCommand(false, + vm.getInstanceName(), volume.getPoolType(), + volume.getFolder(), volume.getPath(), volume.getName(), + cmmd.getDeviceId() != null ? cmmd.getDeviceId() : volume + .getDeviceId(), volume.getChainInfo()); + + StoragePoolVO volumePool = _storagePoolDao.findById(volume + .getPoolId()); + cmd.setPoolUuid(volumePool.getUuid()); + + try { + answer = _agentMgr.send(vm.getHostId(), cmd); + } catch (Exception e) { + throw new CloudRuntimeException(errorMsg + " due to: " + + e.getMessage()); + } + } + + if (!sendCommand || (answer != null && answer.getResult())) { + // Mark the volume as detached + _volsDao.detachVolume(volume.getId()); + if (answer != null && answer instanceof AttachVolumeAnswer) { + volume.setChainInfo(((AttachVolumeAnswer) answer) + .getChainInfo()); + _volsDao.update(volume.getId(), volume); + } + + return _volsDao.findById(volumeId); + } else { + + if (answer != null) { + String details = answer.getDetails(); + if (details != null && !details.isEmpty()) { + errorMsg += "; " + details; + } + } + + throw new CloudRuntimeException(errorMsg); + } + } + + + + + + + @DB + protected VolumeVO switchVolume(VolumeVO existingVolume, + VirtualMachineProfile vm) + throws 
StorageUnavailableException { + Transaction txn = Transaction.currentTxn(); + + Long templateIdToUse = null; + Long volTemplateId = existingVolume.getTemplateId(); + long vmTemplateId = vm.getTemplateId(); + if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("switchVolume: Old Volume's templateId: " + + volTemplateId + + " does not match the VM's templateId: " + + vmTemplateId + + ", updating templateId in the new Volume"); + } + templateIdToUse = vmTemplateId; + } + + txn.start(); + VolumeVO newVolume = allocateDuplicateVolume(existingVolume, + templateIdToUse); + // In case of Vmware if vm reference is not removed then during root + // disk cleanup + // the vm also gets deleted, so remove the reference + if (vm.getHypervisorType() == HypervisorType.VMware) { + _volsDao.detachVolume(existingVolume.getId()); + } + try { + stateTransitTo(existingVolume, Volume.Event.DestroyRequested); + } catch (NoTransitionException e) { + s_logger.debug("Unable to destroy existing volume: " + e.toString()); + } + txn.commit(); + return newVolume; + + } + + + @Override + public void release(VirtualMachineProfile profile) { + // add code here + } + + + @Override + @DB + public void cleanupVolumes(long vmId) throws ConcurrentOperationException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cleaning storage for vm: " + vmId); + } + List volumesForVm = _volsDao.findByInstance(vmId); + List toBeExpunged = new ArrayList(); + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (VolumeVO vol : volumesForVm) { + if (vol.getVolumeType().equals(Type.ROOT)) { + // This check is for VM in Error state (volume is already + // destroyed) + if (!vol.getState().equals(Volume.State.Destroy)) { + this.volService.destroyVolume(vol.getId()); + } + toBeExpunged.add(vol); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Detaching " + vol); + } + _volsDao.detachVolume(vol.getId()); + } + } + 
txn.commit(); + AsyncCallFuture future = null; + for (VolumeVO expunge : toBeExpunged) { + future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(expunge.getId())); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("failed expunge volume" + expunge.getId(), e); + } catch (ExecutionException e) { + s_logger.debug("failed expunge volume" + expunge.getId(), e); + } + } + } + + @DB + @Override + public Volume migrateVolume(Long volumeId, Long storagePoolId) + throws ConcurrentOperationException { + VolumeVO vol = _volsDao.findById(volumeId); + if (vol == null) { + throw new InvalidParameterValueException( + "Failed to find the volume id: " + volumeId); + } + + if (vol.getState() != Volume.State.Ready) { + throw new InvalidParameterValueException( + "Volume must be in ready state"); + } + + if (vol.getInstanceId() != null) { + throw new InvalidParameterValueException( + "Volume needs to be dettached from VM"); + } + + StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + if (destPool == null) { + throw new InvalidParameterValueException( + "Failed to find the destination storage pool: " + + storagePoolId); + } + + if (!volumeOnSharedStoragePool(vol)) { + throw new InvalidParameterValueException( + "Migration of volume from local storage pool is not supported"); + } + + List vols = new ArrayList(); + vols.add(vol); + + migrateVolumes(vols, destPool); + return vol; + } + + @DB + public boolean migrateVolumes(List volumes, StoragePool destPool) + throws ConcurrentOperationException { + Transaction txn = Transaction.currentTxn(); + txn.start(); + + boolean transitResult = false; + long checkPointTaskId = -1; + try { + List volIds = new ArrayList(); + for (Volume volume : volumes) { + if (!_snapshotMgr.canOperateOnVolume((VolumeVO) volume)) { + throw new CloudRuntimeException( + "There are snapshots creating on this volume, can not move this volume"); + } + + try { + if 
(!stateTransitTo(volume, Volume.Event.MigrationRequested)) { + throw new ConcurrentOperationException( + "Failed to transit volume state"); + } + } catch (NoTransitionException e) { + s_logger.debug("Failed to set state into migrate: " + + e.toString()); + throw new CloudRuntimeException( + "Failed to set state into migrate: " + e.toString()); + } + volIds.add(volume.getId()); + } + + transitResult = true; + } finally { + if (!transitResult) { + txn.rollback(); + } else { + txn.commit(); + } + } + + // At this stage, nobody can modify volumes. Send the copyvolume command + List> destroyCmds = new ArrayList>(); + List answers = new ArrayList(); + try { + for (Volume volume : volumes) { + String secondaryStorageURL = this._tmpltMgr.getSecondaryStorageURL(volume + .getDataCenterId()); + StoragePool srcPool = (StoragePool)this.dataStoreMgr.getDataStore(volume + .getPoolId(), DataStoreRole.Primary); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), + volume.getPath(), srcPool, secondaryStorageURL, true, + _copyvolumewait); + CopyVolumeAnswer cvAnswer; + try { + cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage.", + e1); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage."); + } + + String secondaryStorageVolumePath = cvAnswer.getVolumePath(); + + // Copy the volume from secondary storage to the destination + // storage + // pool + cvCmd = new CopyVolumeCommand(volume.getId(), + secondaryStorageVolumePath, destPool, + secondaryStorageURL, false, _copyvolumewait); + try { + cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy 
the volume from secondary storage to the destination primary storage pool."); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + throw new CloudRuntimeException( + "Failed to copy the volume from secondary storage to the destination primary storage pool."); + } + + answers.add(cvAnswer); + destroyCmds.add(new Pair( + srcPool, new DestroyCommand(srcPool, volume, null))); + } + } finally { + if (answers.size() != volumes.size()) { + // this means one of copying volume failed + for (Volume volume : volumes) { + try { + stateTransitTo(volume, Volume.Event.OperationFailed); + } catch (NoTransitionException e) { + s_logger.debug("Failed to change volume state: " + + e.toString()); + } + } + } else { + // Need a transaction, make sure all the volumes get migrated to + // new storage pool + txn = Transaction.currentTxn(); + txn.start(); + + transitResult = false; + try { + for (int i = 0; i < volumes.size(); i++) { + CopyVolumeAnswer answer = answers.get(i); + VolumeVO volume = (VolumeVO) volumes.get(i); + Long oldPoolId = volume.getPoolId(); + volume.setPath(answer.getVolumePath()); + volume.setFolder(destPool.getPath()); + volume.setPodId(destPool.getPodId()); + volume.setPoolId(destPool.getId()); + volume.setLastPoolId(oldPoolId); + volume.setPodId(destPool.getPodId()); + try { + stateTransitTo(volume, + Volume.Event.OperationSucceeded); + } catch (NoTransitionException e) { + s_logger.debug("Failed to change volume state: " + + e.toString()); + throw new CloudRuntimeException( + "Failed to change volume state: " + + e.toString()); + } + } + transitResult = true; + } finally { + if (!transitResult) { + txn.rollback(); + } else { + txn.commit(); + } + } + + } + } + + // all the volumes get migrated to new storage pool, need to delete the + // copy on old storage pool + for (Pair cmd : destroyCmds) { + try { + Answer cvAnswer = this.storageMgr.sendToPool(cmd.first(), cmd.second()); + } catch (StorageUnavailableException e) { + s_logger.debug("Unable to delete the 
old copy on storage pool: " + + e.toString()); + } + } + return true; + } + + @Override + public boolean StorageMigration( + VirtualMachineProfile vm, + StoragePool destPool) throws ConcurrentOperationException { + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + List volumesNeedToMigrate = new ArrayList(); + + for (VolumeVO volume : vols) { + if (volume.getState() != Volume.State.Ready) { + s_logger.debug("volume: " + volume.getId() + " is in " + + volume.getState() + " state"); + throw new CloudRuntimeException("volume: " + volume.getId() + + " is in " + volume.getState() + " state"); + } + + if (volume.getPoolId() == destPool.getId()) { + s_logger.debug("volume: " + volume.getId() + + " is on the same storage pool: " + destPool.getId()); + continue; + } + + volumesNeedToMigrate.add(volume); + } + + if (volumesNeedToMigrate.isEmpty()) { + s_logger.debug("No volume need to be migrated"); + return true; + } + + return migrateVolumes(volumesNeedToMigrate, destPool); + } + + @Override + public void prepareForMigration( + VirtualMachineProfile vm, + DeployDestination dest) { + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Preparing " + vols.size() + " volumes for " + vm); + } + + for (VolumeVO vol : vols) { + PrimaryDataStoreInfo pool = (PrimaryDataStoreInfo)this.dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); + vm.addDisk(new VolumeTO(vol, pool)); + } + + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVM = (UserVmVO) vm.getVirtualMachine(); + if (userVM.getIsoId() != null) { + Pair isoPathPair = this._tmpltMgr.getAbsoluteIsoPath( + userVM.getIsoId(), userVM.getDataCenterId()); + if (isoPathPair != null) { + String isoPath = isoPathPair.first(); + VolumeTO iso = new VolumeTO(vm.getId(), Volume.Type.ISO, + StoragePoolType.ISO, null, null, null, isoPath, 0, + null, null); + vm.addDisk(iso); + } + } + } + } + + + + private static enum VolumeTaskType { 
+ RECREATE, + NOP, + MIGRATE + } + private static class VolumeTask { + final VolumeTaskType type; + final StoragePoolVO pool; + final VolumeVO volume; + VolumeTask(VolumeTaskType type, VolumeVO volume, StoragePoolVO pool) { + this.type = type; + this.pool = pool; + this.volume = volume; + } + } + + private List getTasks(List vols, Map destVols) throws StorageUnavailableException { + boolean recreate = _recreateSystemVmEnabled; + List tasks = new ArrayList(); + for (VolumeVO vol : vols) { + StoragePoolVO assignedPool = null; + if (destVols != null) { + StoragePool pool = destVols.get(vol); + if (pool != null) { + assignedPool = _storagePoolDao.findById(pool.getId()); + } + } + if (assignedPool == null && recreate) { + assignedPool = _storagePoolDao.findById(vol.getPoolId()); + } + if (assignedPool != null || recreate) { + Volume.State state = vol.getState(); + if (state == Volume.State.Allocated + || state == Volume.State.Creating) { + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if (vol.isRecreatable()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Volume " + vol + + " will be recreated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if (assignedPool.getId() != vol.getPoolId()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Mismatch in storage pool " + + assignedPool + + " assigned by deploymentPlanner and the one associated with volume " + + vol); + } + DiskOfferingVO diskOffering = _diskOfferingDao + .findById(vol.getDiskOfferingId()); + if (diskOffering.getUseLocalStorage()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Local volume " + + vol + + " will be recreated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if 
(s_logger.isDebugEnabled()) { + s_logger.debug("Shared volume " + + vol + + " will be migrated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, null); + tasks.add(task); + } + } else { + StoragePoolVO pool = _storagePoolDao + .findById(vol.getPoolId()); + VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); + tasks.add(task); + } + + } + } + } else { + if (vol.getPoolId() == null) { + throw new StorageUnavailableException( + "Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + + vol, Volume.class, vol.getId()); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("No need to recreate the volume: " + vol + + ", since it already has a pool assigned: " + + vol.getPoolId() + ", adding disk to VM"); + } + StoragePoolVO pool = _storagePoolDao.findById(vol + .getPoolId()); + VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); + tasks.add(task); + } + } + + return tasks; + } + + private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm, + DeployDestination dest) throws StorageUnavailableException { + VolumeVO newVol; + boolean recreate = _recreateSystemVmEnabled; + DataStore destPool = null; + if (recreate + && (dest.getStorageForDisks() == null || dest + .getStorageForDisks().get(vol) == null)) { + destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); + s_logger.debug("existing pool: " + destPool.getId()); + } else { + StoragePool pool = dest.getStorageForDisks().get(vol); + destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + if (vol.getState() == Volume.State.Allocated + || vol.getState() == Volume.State.Creating) { + newVol = vol; + } else { + newVol = switchVolume(vol, vm); + // update the volume->PrimaryDataStoreVO map since volumeId has + // changed + if (dest.getStorageForDisks() != null + && 
dest.getStorageForDisks().containsKey(vol)) { + StoragePool poolWithOldVol = dest + .getStorageForDisks().get(vol); + dest.getStorageForDisks().put(newVol, poolWithOldVol); + dest.getStorageForDisks().remove(vol); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Created new volume " + newVol + + " for old volume " + vol); + } + } + VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool); + Long templateId = newVol.getTemplateId(); + AsyncCallFuture future = null; + if (templateId == null) { + future = this.volService.createVolumeAsync(volume, destPool); + } else { + TemplateInfo templ = this.tmplFactory.getTemplate(templateId); + future = this.volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ); + } + VolumeApiResult result = null; + try { + result = future.get(); + if (result.isFailed()) { + s_logger.debug("Unable to create " + + newVol + ":" + result.getResult()); + throw new StorageUnavailableException("Unable to create " + + newVol + ":" + result.getResult(), destPool.getId()); + } + newVol = this._volsDao.findById(newVol.getId()); + } catch (InterruptedException e) { + s_logger.error("Unable to create " + newVol, e); + throw new StorageUnavailableException("Unable to create " + + newVol + ":" + e.toString(), destPool.getId()); + } catch (ExecutionException e) { + s_logger.error("Unable to create " + newVol, e); + throw new StorageUnavailableException("Unable to create " + + newVol + ":" + e.toString(), destPool.getId()); + } + + return new Pair(newVol, destPool); + } + + @Override + public void prepare(VirtualMachineProfile vm, + DeployDestination dest) throws StorageUnavailableException, + InsufficientStorageCapacityException, ConcurrentOperationException { + + if (dest == null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + + vm); + } + throw new CloudRuntimeException( + "Unable to prepare Volume for vm because DeployDestination is null, 
vm:" + + vm); + } + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Checking if we need to prepare " + vols.size() + + " volumes for " + vm); + } + + List tasks = getTasks(vols, dest.getStorageForDisks()); + Volume vol = null; + StoragePool pool = null; + for (VolumeTask task : tasks) { + if (task.type == VolumeTaskType.NOP) { + pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary); + vol = task.volume; + } else if (task.type == VolumeTaskType.MIGRATE) { + pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary); + List volumes = new ArrayList(); + volumes.add(task.volume); + migrateVolumes(volumes, pool); + vol = task.volume; + } else if (task.type == VolumeTaskType.RECREATE) { + Pair result = recreateVolume(task.volume, vm, dest); + pool = (StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary); + vol = result.first(); + } + vm.addDisk(new VolumeTO(vol, pool)); + } + } + + private Long getDeviceId(long vmId, Long deviceId) { + // allocate deviceId + List vols = _volsDao.findByInstance(vmId); + if (deviceId != null) { + if (deviceId.longValue() > 15 || deviceId.longValue() == 0 + || deviceId.longValue() == 3) { + throw new RuntimeException("deviceId should be 1,2,4-15"); + } + for (VolumeVO vol : vols) { + if (vol.getDeviceId().equals(deviceId)) { + throw new RuntimeException("deviceId " + deviceId + + " is used by vm" + vmId); + } + } + } else { + // allocate deviceId here + List devIds = new ArrayList(); + for (int i = 1; i < 15; i++) { + devIds.add(String.valueOf(i)); + } + devIds.remove("3"); + for (VolumeVO vol : vols) { + devIds.remove(vol.getDeviceId().toString().trim()); + } + deviceId = Long.parseLong(devIds.iterator().next()); + } + + return deviceId; + } + + private boolean stateTransitTo(Volume vol, Volume.Event event) + throws NoTransitionException { + return _volStateMachine.transitTo(vol, 
event, null, _volsDao); + } + + + private String validateUrl(String url) { + try { + URI uri = new URI(url); + if ((uri.getScheme() == null) + || (!uri.getScheme().equalsIgnoreCase("http") + && !uri.getScheme().equalsIgnoreCase("https") && !uri + .getScheme().equalsIgnoreCase("file"))) { + throw new IllegalArgumentException( + "Unsupported scheme for url: " + url); + } + + int port = uri.getPort(); + if (!(port == 80 || port == 443 || port == -1)) { + throw new IllegalArgumentException( + "Only ports 80 and 443 are allowed"); + } + String host = uri.getHost(); + try { + InetAddress hostAddr = InetAddress.getByName(host); + if (hostAddr.isAnyLocalAddress() + || hostAddr.isLinkLocalAddress() + || hostAddr.isLoopbackAddress() + || hostAddr.isMulticastAddress()) { + throw new IllegalArgumentException( + "Illegal host specified in url"); + } + if (hostAddr instanceof Inet6Address) { + throw new IllegalArgumentException( + "IPV6 addresses not supported (" + + hostAddr.getHostAddress() + ")"); + } + } catch (UnknownHostException uhe) { + throw new IllegalArgumentException("Unable to resolve " + host); + } + + return uri.toString(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid URL " + url); + } + + } + + @Override + public boolean canVmRestartOnAnotherServer(long vmId) { + List vols = _volsDao.findCreatedByInstance(vmId); + for (VolumeVO vol : vols) { + if (!vol.isRecreatable() && !vol.getPoolType().isShared()) { + return false; + } + } + return true; + } + + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { + String _customDiskOfferingMinSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMinSize.toString()); + _customDiskOfferingMinSize = NumbersUtil.parseInt( + _customDiskOfferingMinSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMinSize + .getDefaultValue())); + + String maxVolumeSizeInGbString = _configDao + .getValue("storage.max.volume.size"); + _maxVolumeSizeInGb = 
NumbersUtil.parseLong(maxVolumeSizeInGbString, + 2000); + + String value = _configDao.getValue(Config.RecreateSystemVmEnabled.key()); + _recreateSystemVmEnabled = Boolean.parseBoolean(value); + _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + HostTemplateStatesSearch = _vmTemplateHostDao.createSearchBuilder(); + HostTemplateStatesSearch.and("id", HostTemplateStatesSearch.entity() + .getTemplateId(), SearchCriteria.Op.EQ); + HostTemplateStatesSearch.and("state", HostTemplateStatesSearch.entity() + .getDownloadState(), SearchCriteria.Op.EQ); + + SearchBuilder HostSearch = _hostDao.createSearchBuilder(); + HostSearch.and("dcId", HostSearch.entity().getDataCenterId(), + SearchCriteria.Op.EQ); + + HostTemplateStatesSearch.join("host", HostSearch, HostSearch.entity() + .getId(), HostTemplateStatesSearch.entity().getHostId(), + JoinBuilder.JoinType.INNER); + HostSearch.done(); + HostTemplateStatesSearch.done(); + return true; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public String getName() { + return "Volume Manager"; + } + + @Override + public void destroyVolume(VolumeVO volume) { + try { + this.volService.destroyVolume(volume.getId()); + } catch (ConcurrentOperationException e) { + s_logger.debug("Failed to destroy volume" + volume.getId(), e); + throw new CloudRuntimeException("Failed to destroy volume" + volume.getId(), e); + } + } + +} diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java index 61b5e1f7752..d747d25c7b5 100755 --- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java @@ -26,6 +26,8 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; @@ -41,7 +43,6 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; @@ -76,6 +77,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement @Inject ClusterDao _clusterDao; @Inject SwiftManager _swiftMgr; @Inject CapacityManager _capacityMgr; + @Inject DataStoreManager dataStoreMgr; protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1); long _extraBytesPerVolume = 0; Random _rand; @@ -121,7 +123,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement boolean localStorageAllocationNeeded = localStorageAllocationNeeded(dskCh); if (s_logger.isDebugEnabled()) { s_logger.debug("Is localStorageAllocationNeeded? "+ localStorageAllocationNeeded); - s_logger.debug("Is storage pool shared? "+ pool.getPoolType().isShared()); + s_logger.debug("Is storage pool shared? 
"+ pool.isShared()); } return ((!localStorageAllocationNeeded && pool.getPoolType().isShared()) || (localStorageAllocationNeeded && !pool.getPoolType().isShared())); @@ -133,8 +135,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId()); } - - if (avoid.shouldAvoid(pool)) { + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (avoid.shouldAvoid(pol)) { if (s_logger.isDebugEnabled()) { s_logger.debug("StoragePool is in avoid set, skipping this pool"); } @@ -157,7 +159,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } // Check that the pool type is correct - if (!poolIsCorrectType(dskCh, pool)) { + if (!poolIsCorrectType(dskCh, pol)) { if (s_logger.isDebugEnabled()) { s_logger.debug("StoragePool is not of correct type, skipping this pool"); } @@ -181,7 +183,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement Volume volume = _volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList(); requestVolumes.add(volume); - return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); + return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pol); } diff --git a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java index 13a010729e0..f0df3a6f001 100644 --- a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import com.cloud.deploy.DeploymentPlan; @@ -34,11 +35,10 @@ 
import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.offering.ServiceOffering; import com.cloud.server.StatsCollector; import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePool; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.user.Account; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; @@ -117,7 +117,8 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator { } if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) { - suitablePools.add(pool); + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + suitablePools.add(pol); } } diff --git a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java index b6b8e8e98ff..24b4dabe281 100644 --- a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java @@ -25,6 +25,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import com.cloud.capacity.CapacityVO; @@ -36,7 +37,6 @@ import com.cloud.offering.ServiceOffering; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.StoragePoolHostDao; @@ -110,7 +110,8 @@ public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator { StoragePoolVO pool = 
_storagePoolDao.findById(hostPool.getPoolId()); if (pool != null && pool.isLocal()) { s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); - suitablePools.add(pool); + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + suitablePools.add(pol); } if (suitablePools.size() == returnUpTo) { diff --git a/server/src/com/cloud/storage/dao/LaunchPermissionDao.java b/server/src/com/cloud/storage/dao/LaunchPermissionDao.java index 86e5a9bf827..0ad60b50ee8 100644 --- a/server/src/com/cloud/storage/dao/LaunchPermissionDao.java +++ b/server/src/com/cloud/storage/dao/LaunchPermissionDao.java @@ -18,6 +18,7 @@ package com.cloud.storage.dao; import java.util.List; + import com.cloud.storage.LaunchPermissionVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.db.GenericDao; diff --git a/server/src/com/cloud/storage/dao/StoragePoolDao.java b/server/src/com/cloud/storage/dao/StoragePoolDao.java index ff8292e9705..64bbd5fb5ed 100644 --- a/server/src/com/cloud/storage/dao/StoragePoolDao.java +++ b/server/src/com/cloud/storage/dao/StoragePoolDao.java @@ -20,8 +20,9 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.utils.db.GenericDao; /** * Data Access Object for storage_pool table diff --git a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java index 4019dffd4ae..ebf2943ec9c 100644 --- a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java +++ b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java @@ -28,13 +28,13 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; import 
com.cloud.host.Status; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePoolDetailVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; diff --git a/server/src/com/cloud/storage/dao/VMTemplateDao.java b/server/src/com/cloud/storage/dao/VMTemplateDao.java index a043a2c6079..c39626f54dd 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplateDao.java @@ -20,6 +20,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; + import com.cloud.domain.DomainVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project.ListProjectResourcesCriteria; @@ -28,11 +31,12 @@ import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; /* * Data Access Object for vm_templates table */ -public interface VMTemplateDao extends GenericDao { +public interface VMTemplateDao extends GenericDao, StateDao { public List listByPublic(); diff --git a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 42f10d34c1b..c4928be8fee 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -16,8 +16,8 @@ // under the License. 
package com.cloud.storage.dao; -import static com.cloud.utils.StringUtils.*; -import static com.cloud.utils.db.DbUtil.*; +import static com.cloud.utils.StringUtils.join; +import static com.cloud.utils.db.DbUtil.closeResources; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -34,10 +34,12 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.api.BaseCmd; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.DomainVO; @@ -55,11 +57,10 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.tags.ResourceTagVO; -import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.utils.Pair; - import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; @@ -68,7 +69,9 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -122,14 +125,15 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private SearchBuilder PublicIsoSearch; private SearchBuilder UserIsoSearch; private GenericSearchBuilder CountTemplatesByAccount; + private SearchBuilder 
updateStateSearch; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; private String routerTmpltName; private String consoleProxyTmpltName; - protected VMTemplateDaoImpl() { + public VMTemplateDaoImpl() { } @Override @@ -378,6 +382,12 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem CountTemplatesByAccount.and("account", CountTemplatesByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountTemplatesByAccount.and("removed", CountTemplatesByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); CountTemplatesByAccount.done(); + + updateStateSearch = this.createSearchBuilder(); + updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); + updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); + updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); + updateStateSearch.done(); return result; } @@ -1073,4 +1083,39 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem return templateZonePairList; } + @Override + public boolean updateState(TemplateState currentState, TemplateEvent event, + TemplateState nextState, VMTemplateVO vo, Object data) { + Long oldUpdated = vo.getUpdatedCount(); + Date oldUpdatedTime = vo.getUpdated(); + + + SearchCriteria sc = updateStateSearch.create(); + sc.setParameters("id", vo.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", vo.getUpdatedCount()); + + vo.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateVO dbVol = findByIdIncludingRemoved(vo.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; 
updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount()) + .append("; updatedTime=").append(vo.getUpdated()); + str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } } diff --git a/server/src/com/cloud/storage/dao/VMTemplateHostDao.java b/server/src/com/cloud/storage/dao/VMTemplateHostDao.java index 5625e568ef0..23241cd17da 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateHostDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplateHostDao.java @@ -18,11 +18,15 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VMTemplateHostDao extends GenericDao { +public interface VMTemplateHostDao extends GenericDao, StateDao { List listByHostId(long id); List listByTemplateId(long templateId); @@ -30,6 +34,8 @@ public interface VMTemplateHostDao extends GenericDao { List listByOnlyTemplateId(long templateId); VMTemplateHostVO findByHostTemplate(long hostId, long templateId); + + VMTemplateHostVO findByTemplateId(long templateId); VMTemplateHostVO findByHostTemplate(long hostId, long templateId, boolean lock); diff --git 
a/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java b/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java index 4d1ac0208ac..4d19bfc0074 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java @@ -29,6 +29,9 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -42,7 +45,9 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.UpdateBuilder; @Component @Local(value={VMTemplateHostDao.class}) @@ -57,6 +62,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase HostDestroyedSearch; protected final SearchBuilder TemplateStatusSearch; protected final SearchBuilder TemplateStatesSearch; + protected final SearchBuilder updateStateSearch; protected SearchBuilder ZONE_TEMPLATE_SEARCH; protected SearchBuilder LOCAL_SECONDARY_STORAGE_SEARCH; @@ -120,6 +126,12 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase sc = HostTemplateSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("destroyed", false); + return findOneIncludingRemovedBy(sc); + } @Override public List listByTemplateStatus(long templateId, VMTemplateHostVO.Status downloadState) { @@ -367,5 +387,42 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase sc = updateStateSearch.create(); + sc.setParameters("id", templateHost.getId()); + 
sc.setParameters("state", currentState); + sc.setParameters("updatedCount", templateHost.getUpdatedCount()); + + templateHost.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateHostVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateHostVO dbVol = findByIdIncludingRemoved(templateHost.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(templateHost.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(templateHost.getUpdatedCount()) + .append("; updatedTime=").append(templateHost.getUpdated()); + str.append(": stale Data={id=").append(templateHost.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + templateHost.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } } diff --git a/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java b/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java index f485be7f05c..501c3ca5cc8 100644 --- a/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java @@ -18,10 +18,14 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import 
com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VMTemplatePoolDao extends GenericDao { +public interface VMTemplatePoolDao extends GenericDao, StateDao { public List listByPoolId(long id); public List listByTemplateId(long templateId); @@ -42,5 +46,4 @@ public interface VMTemplatePoolDao extends GenericDao TemplateStatusSearch; protected final SearchBuilder TemplatePoolStatusSearch; protected final SearchBuilder TemplateStatesSearch; + protected final SearchBuilder updateStateSearch; protected static final String UPDATE_TEMPLATE_HOST_REF = "UPDATE template_spool_ref SET download_state = ?, download_pct= ?, last_updated = ? " @@ -94,6 +101,12 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase sc = updateStateSearch.create(); + sc.setParameters("id", templatePool.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", templatePool.getUpdatedCount()); + + templatePool.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateStoragePoolVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateStoragePoolVO dbVol = findByIdIncludingRemoved(templatePool.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(templatePool.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(templatePool.getUpdatedCount()) + .append("; updatedTime=").append(templatePool.getUpdated()); + str.append(": stale Data={id=").append(templatePool.getId()).append("; 
state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } + } diff --git a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java index a189d00fead..ca3b82a06c1 100755 --- a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -39,7 +39,6 @@ import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -250,7 +249,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } - protected VolumeDaoImpl() { + public VolumeDaoImpl() { AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), Op.EQ); AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), Op.EQ); diff --git a/server/src/com/cloud/storage/dao/VolumeHostDao.java b/server/src/com/cloud/storage/dao/VolumeHostDao.java index 6ba82370608..39dda12345b 100755 --- a/server/src/com/cloud/storage/dao/VolumeHostDao.java +++ b/server/src/com/cloud/storage/dao/VolumeHostDao.java @@ -18,10 +18,14 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.VolumeHostVO; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VolumeHostDao extends GenericDao { +public interface VolumeHostDao extends GenericDao, 
StateDao{ VolumeHostVO findByHostVolume(long hostId, long volumeId); diff --git a/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java index 57f2153f10b..2fd39e6eeca 100755 --- a/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java @@ -16,28 +16,35 @@ // under the License. package com.cloud.storage.dao; +import java.util.Date; import java.util.List; import javax.ejb.Local; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.VolumeHostVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.UpdateBuilder; @Component @Local(value={VolumeHostDao.class}) public class VolumeHostDaoImpl extends GenericDaoBase implements VolumeHostDao { - + private static final Logger s_logger = Logger.getLogger(VolumeHostDaoImpl.class); protected final SearchBuilder HostVolumeSearch; protected final SearchBuilder ZoneVolumeSearch; protected final SearchBuilder VolumeSearch; protected final SearchBuilder HostSearch; protected final SearchBuilder HostDestroyedSearch; - - VolumeHostDaoImpl(){ + protected final SearchBuilder updateStateSearch; + public VolumeHostDaoImpl(){ HostVolumeSearch = createSearchBuilder(); HostVolumeSearch.and("host_id", HostVolumeSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostVolumeSearch.and("volume_id", HostVolumeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); @@ -64,6 +71,12 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem 
HostDestroyedSearch.and("host_id", HostDestroyedSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostDestroyedSearch.and("destroyed", HostDestroyedSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); HostDestroyedSearch.done(); + + updateStateSearch = this.createSearchBuilder(); + updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); + updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); + updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); + updateStateSearch.done(); } @@ -112,4 +125,41 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem return listIncludingRemovedBy(sc); } + @Override + public boolean updateState(State currentState, Event event, + State nextState, DataObjectInStore vo, Object data) { + VolumeHostVO volHost = (VolumeHostVO) vo; + Long oldUpdated = volHost.getUpdatedCount(); + Date oldUpdatedTime = volHost.getUpdated(); + + + SearchCriteria sc = updateStateSearch.create(); + sc.setParameters("id", volHost.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", volHost.getUpdatedCount()); + + volHost.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VolumeHostVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VolumeHostVO dbVol = findByIdIncludingRemoved(volHost.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(volHost.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(volHost.getUpdatedCount()) + .append("; 
updatedTime=").append(volHost.getUpdated()); + str.append(": stale Data={id=").append(volHost.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } + } diff --git a/server/src/com/cloud/storage/download/DownloadListener.java b/server/src/com/cloud/storage/download/DownloadListener.java index 036d40ad015..d0b186831c8 100755 --- a/server/src/com/cloud/storage/download/DownloadListener.java +++ b/server/src/com/cloud/storage/download/DownloadListener.java @@ -46,11 +46,11 @@ import com.cloud.host.HostVO; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeHostVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.Volume.Event; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VolumeDao; diff --git a/server/src/com/cloud/storage/download/DownloadMonitor.java b/server/src/com/cloud/storage/download/DownloadMonitor.java index 30ec3b1623b..897befa250b 100644 --- a/server/src/com/cloud/storage/download/DownloadMonitor.java +++ b/server/src/com/cloud/storage/download/DownloadMonitor.java @@ -18,6 +18,7 @@ package com.cloud.storage.download; import java.util.Map; + import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.storage.VMTemplateVO; diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java index 6d3cf2a101b..e12bc320282 
100755 --- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java @@ -37,10 +37,21 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; -import com.cloud.agent.api.storage.*; + +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import com.cloud.agent.api.storage.DeleteVolumeCommand; +import com.cloud.agent.api.storage.DownloadCommand; + import com.cloud.agent.api.storage.DownloadCommand.Proxy; import com.cloud.agent.api.storage.DownloadCommand.ResourceType; import com.cloud.agent.api.storage.DownloadProgressCommand.RequestType; + +import com.cloud.agent.api.storage.DownloadProgressCommand; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeAnswer; +import com.cloud.agent.api.storage.ListVolumeCommand; + import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; @@ -50,6 +61,7 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageUnavailableException; @@ -59,13 +71,31 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.*; + +import com.cloud.storage.StorageManager; +import com.cloud.storage.SwiftVO; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; import 
com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.dao.*; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.SwiftDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; + import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.template.TemplateConstants; import com.cloud.storage.template.TemplateInfo; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.ResourceLimitService; import com.cloud.utils.component.ManagerBase; @@ -80,6 +110,7 @@ import com.cloud.vm.SecondaryStorageVmVO; import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.SecondaryStorageVmDao; + import edu.emory.mathcs.backport.java.util.Collections; @@ -124,6 +155,14 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor ConfigurationDao _configDao; @Inject UserVmManager _vmMgr; + + @Inject TemplateManager templateMgr; + + + @Inject + private UsageEventDao _usageEventDao; + + @Inject private ClusterDao _clusterDao; @Inject @@ -233,7 +272,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor if(destTmpltHost != null) { start(); - String sourceChecksum = _vmMgr.getChecksum(srcTmpltHost.getHostId(), srcTmpltHost.getInstallPath()); + String sourceChecksum = this.templateMgr.getChecksum(srcTmpltHost.getHostId(), srcTmpltHost.getInstallPath()); DownloadCommand dcmd = new DownloadCommand(destServer.getStorageUrl(), url, template, 
TemplateConstants.DEFAULT_HTTP_AUTH_USER, _copyAuthPasswd, maxTemplateSizeInBytes); dcmd.setProxy(getHttpProxy()); @@ -473,6 +512,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor long size = -1; if(vmTemplateHost!=null){ size = vmTemplateHost.getPhysicalSize(); + template.setSize(size); + this._templateDao.update(template.getId(), template); } else{ s_logger.warn("Failed to get size for template" + template.getName()); @@ -510,6 +551,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor long size = -1; if(volumeHost!=null){ size = volumeHost.getPhysicalSize(); + volume.setSize(size); + this._volumeDao.update(volume.getId(), volume); } else{ s_logger.warn("Failed to get size for volume" + volume.getName()); @@ -925,7 +968,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor s_logger.debug("Found " +templateHostRefList.size()+ " templates with no checksum. Will ask for computation"); for(VMTemplateHostVO templateHostRef : templateHostRefList){ s_logger.debug("Getting checksum for template - " + templateHostRef.getTemplateId()); - String checksum = _vmMgr.getChecksum(hostId, templateHostRef.getInstallPath()); + String checksum = this.templateMgr.getChecksum(hostId, templateHostRef.getInstallPath()); VMTemplateVO template = _templateDao.findById(templateHostRef.getTemplateId()); s_logger.debug("Setting checksum " +checksum+ " for template - " + template.getName()); template.setChecksum(checksum); diff --git a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java index e848a8727a0..df2df7b5267 100755 --- a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java @@ -20,6 +20,8 @@ import java.util.List; import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import com.cloud.agent.Listener; @@ -37,17 +39,15 @@ import com.cloud.storage.OCFS2Manager; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManagerImpl; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.dao.StoragePoolDao; public class StoragePoolMonitor implements Listener { private static final Logger s_logger = Logger.getLogger(StoragePoolMonitor.class); private final StorageManagerImpl _storageManager; - private final StoragePoolDao _poolDao; + private final PrimaryDataStoreDao _poolDao; @Inject OCFS2Manager _ocfs2Mgr; - public StoragePoolMonitor(StorageManagerImpl mgr, StoragePoolDao poolDao) { + public StoragePoolMonitor(StorageManagerImpl mgr, PrimaryDataStoreDao poolDao) { this._storageManager = mgr; this._poolDao = poolDao; @@ -80,7 +80,7 @@ public class StoragePoolMonitor implements Listener { if (pool.getStatus() != StoragePoolStatus.Up) { continue; } - if (!pool.getPoolType().isShared()) { + if (!pool.isShared()) { continue; } @@ -91,8 +91,8 @@ public class StoragePoolMonitor implements Listener { Long hostId = host.getId(); s_logger.debug("Host " + hostId + " connected, sending down storage pool information ..."); try { - _storageManager.connectHostToSharedPool(hostId, pool); - _storageManager.createCapacityEntry(pool); + _storageManager.connectHostToSharedPool(hostId, pool.getId()); + _storageManager.createCapacityEntry(pool.getId()); } catch (Exception e) { s_logger.warn("Unable to connect host " + hostId + " to pool " + pool + " due to " + e.toString(), e); } diff --git a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java index 877b97c185d..8f25514180c 100644 --- a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java +++ 
b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java @@ -46,8 +46,8 @@ import com.cloud.host.Host.Type; import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.Storage; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.template.TemplateConstants; import com.cloud.storage.template.TemplateInfo; diff --git a/server/src/com/cloud/storage/s3/S3Manager.java b/server/src/com/cloud/storage/s3/S3Manager.java index 0e47d7273d6..0f74e431376 100644 --- a/server/src/com/cloud/storage/s3/S3Manager.java +++ b/server/src/com/cloud/storage/s3/S3Manager.java @@ -23,6 +23,7 @@ import java.util.List; import com.cloud.agent.api.to.S3TO; import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; + import com.cloud.dc.DataCenterVO; import com.cloud.exception.DiscoveryException; import com.cloud.storage.S3; diff --git a/server/src/com/cloud/storage/s3/S3ManagerImpl.java b/server/src/com/cloud/storage/s3/S3ManagerImpl.java index 13fe2b76ed1..61e5573394d 100644 --- a/server/src/com/cloud/storage/s3/S3ManagerImpl.java +++ b/server/src/com/cloud/storage/s3/S3ManagerImpl.java @@ -68,8 +68,8 @@ import com.cloud.storage.S3; import com.cloud.storage.S3VO; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateS3VO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.S3Dao; import com.cloud.storage.dao.VMTemplateDao; diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index 
fca89dcb1cb..46ac7af59f8 100755 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -98,8 +98,8 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index e06da75580c..6b48b8237ec 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -28,12 +28,25 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; +import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; +import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.*; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotAnswer; +import com.cloud.agent.api.BackupSnapshotCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; +import com.cloud.agent.api.DeleteSnapshotsDirCommand; +import 
com.cloud.agent.api.DownloadSnapshotFromS3Command; +import com.cloud.agent.api.ManageSnapshotAnswer; +import com.cloud.agent.api.ManageSnapshotCommand; +import com.cloud.agent.api.downloadSnapshotFromSwiftCommand; import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.SwiftTO; import com.cloud.alert.AlertManager; @@ -46,7 +59,11 @@ import com.cloud.dc.DataCenter; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.dao.DomainDao; -import com.cloud.event.*; +import com.cloud.event.ActionEvent; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.event.EventVO; +import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; @@ -60,25 +77,46 @@ import com.cloud.org.Grouping; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.resource.ResourceManager; import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.storage.*; +import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Type; +import com.cloud.storage.SnapshotPolicyVO; +import com.cloud.storage.SnapshotScheduleVO; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.dao.*; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.SnapshotScheduleDao; +import com.cloud.storage.dao.StoragePoolDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; import 
com.cloud.storage.listener.SnapshotStateListener; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.user.*; +import com.cloud.template.TemplateManager; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.DomainManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; - -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -86,7 +124,6 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.*; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; @@ -95,6 +132,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; + import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; @@ -105,9 +143,9 @@ import javax.ejb.Local; import javax.naming.ConfigurationException; import java.util.*; import com.cloud.vm.snapshot.VMSnapshot; -import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; + @Component 
@Local(value = { SnapshotManager.class, SnapshotService.class }) public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotService { @@ -170,9 +208,16 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, private ResourceTagDao _resourceTagDao; @Inject private ConfigurationDao _configDao; + @Inject private VMSnapshotDao _vmSnapshotDao; String _name; + + @Inject TemplateManager templateMgr; + @Inject VolumeManager volumeMgr; + @Inject DataStoreManager dataStoreMgr; + + private int _totalRetries; private int _pauseInterval; private int _deltaSnapshotMax; @@ -186,8 +231,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, protected Answer sendToPool(Volume vol, Command cmd) { - StoragePool pool = _storagePoolDao.findById(vol.getPoolId()); - + StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); long[] hostIdsToTryFirst = null; Long vmHostId = getHostIdForSnapshotOperation(vol); @@ -252,7 +296,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } // Send a ManageSnapshotCommand to the agent - String vmName = _storageMgr.getVmNameOnVolume(volume); + String vmName = this.volumeMgr.getVmNameOnVolume(volume); long volumeId = volume.getId(); long preId = _snapshotDao.getLastSnapshot(volumeId, snapshotId); @@ -264,8 +308,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, preSnapshotPath = preSnapshotVO.getPath(); } } - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); - + StoragePool srcPool = (StoragePool)dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); // RBD volumes do not support snapshotting in the way CloudStack does it. 
// For now we leave the snapshot feature disabled for RBD volumes if (srcPool.getPoolType() == StoragePoolType.RBD) { @@ -414,12 +457,12 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if(activeSnapshots.size() > 1) throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); } - List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), + /*List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); if (activeVMSnapshots.size() > 0) { throw new CloudRuntimeException( "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); - } + } */ } } @@ -530,7 +573,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, VolumeVO volume = _volsDao.findById(volumeId); Long dcId = volume.getDataCenterId(); Long accountId = volume.getAccountId(); - HostVO secHost = _storageMgr.getSecondaryStorageHost(dcId); + HostVO secHost = this.templateMgr.getSecondaryStorageHost(dcId); String secondaryStoragePoolUrl = secHost.getStorageUrl(); Long swiftId = ss.getSwiftId(); @@ -581,7 +624,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, final VolumeVO volume = _volsDao.findById(snapshot.getVolumeId()); final Long zoneId = volume.getDataCenterId(); - final HostVO secHost = _storageMgr.getSecondaryStorageHost(zoneId); + final HostVO secHost = this.templateMgr.getSecondaryStorageHost(zoneId); final S3TO s3 = _s3Mgr.getS3TO(snapshot.getS3Id()); final List backupUuids = determineBackupUuids(snapshot); @@ -662,9 +705,9 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, prevSnapshotUuid = prevSnapshot.getPath(); } } - boolean isVolumeInactive = _storageMgr.volumeInactive(volume); - String vmName = 
_storageMgr.getVmNameOnVolume(volume); - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); + boolean isVolumeInactive = this.volumeMgr.volumeInactive(volume); + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + StoragePool srcPool = (StoragePool)dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(secondaryStoragePoolUrl, dcId, accountId, volumeId, snapshot.getId(), volume.getPath(), srcPool, snapshotUuid, snapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); @@ -735,7 +778,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if ( id != null) { return _hostDao.findById(id); } - return _storageMgr.getSecondaryStorageHost(dcId); + return this.templateMgr.getSecondaryStorageHost(dcId); } private Long getSnapshotUserId() { @@ -884,7 +927,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, secHost = _hostDao.findById(snapshot.getSecHostId()); } else { Long dcId = snapshot.getDataCenterId(); - secHost = _storageMgr.getSecondaryStorageHost(dcId); + secHost = this.templateMgr.getSecondaryStorageHost(dcId); } return secHost; } diff --git a/server/src/com/cloud/storage/upload/UploadMonitor.java b/server/src/com/cloud/storage/upload/UploadMonitor.java index aada1f43c41..1c3590e91e2 100755 --- a/server/src/com/cloud/storage/upload/UploadMonitor.java +++ b/server/src/com/cloud/storage/upload/UploadMonitor.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.storage.upload; + import com.cloud.async.AsyncJobManager; import com.cloud.host.HostVO; import com.cloud.storage.Upload.Mode; diff --git a/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java b/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java index 97639564967..a8e1393d6da 100644 --- a/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java +++ b/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java @@ -17,6 +17,7 @@ package com.cloud.tags.dao; import java.util.List; + import javax.ejb.Local; import org.springframework.stereotype.Component; diff --git a/server/src/com/cloud/template/HyervisorTemplateAdapter.java b/server/src/com/cloud/template/HyervisorTemplateAdapter.java index fe6bc2a86f0..fa72e75612f 100755 --- a/server/src/com/cloud/template/HyervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HyervisorTemplateAdapter.java @@ -22,12 +22,21 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; import java.util.List; +import java.util.concurrent.ExecutionException; import javax.ejb.Local; import javax.inject.Inject; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -43,10 +52,10 @@ import com.cloud.exception.ResourceAllocationException; import 
com.cloud.host.HostVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.TemplateProfile; +import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.secondary.SecondaryStorageVmManager; @@ -70,11 +79,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem @Inject DownloadMonitor _downloadMonitor; @Inject SecondaryStorageVmManager _ssvmMgr; @Inject AgentManager _agentMgr; + @Inject DataStoreManager storeMgr; + @Inject ImageService imageService; + @Inject ImageDataFactory imageFactory; + @Inject TemplateManager templateMgr; + @Override public String getName() { return TemplateAdapterType.Hypervisor.getName(); } + + private String validateUrl(String url) { try { @@ -155,7 +171,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate()); } - _downloadMonitor.downloadTemplateToStorage(template, profile.getZoneId()); + DataStore imageStore = this.templateMgr.getImageStore(profile.getImageStoreUuid(), profile.getZoneId()); + + AsyncCallFuture future = this.imageService.createTemplateAsync(this.imageFactory.getTemplate(template.getId()), imageStore); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("create template Failed", e); + throw new CloudRuntimeException("create template Failed", e); + } catch (ExecutionException e) { + s_logger.debug("create template Failed", e); + throw new CloudRuntimeException("create template Failed", e); + } _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), 
ResourceType.template); return template; diff --git a/server/src/com/cloud/template/TemplateAdapter.java b/server/src/com/cloud/template/TemplateAdapter.java index 19cfef039de..1f8f491cb25 100755 --- a/server/src/com/cloud/template/TemplateAdapter.java +++ b/server/src/com/cloud/template/TemplateAdapter.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; + import com.cloud.exception.ResourceAllocationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.TemplateProfile; @@ -65,5 +66,5 @@ public interface TemplateAdapter extends Adapter { public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, - String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled) throws ResourceAllocationException; + String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid) throws ResourceAllocationException; } diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index fa677acdc5c..c5074ad0a8a 100755 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -22,14 +22,17 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import 
org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.log4j.Logger; -import org.apache.cloudstack.api.ApiConstants; import com.cloud.api.ApiDBUtils; -import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenterVO; @@ -43,10 +46,10 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; import com.cloud.storage.GuestOS; -import com.cloud.storage.TemplateProfile; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.TemplateProfile; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; @@ -76,6 +79,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat protected @Inject UsageEventDao _usageEventDao; protected @Inject HostDao _hostDao; protected @Inject ResourceLimitService _resourceLimitMgr; + protected @Inject DataStoreManager storeMgr; @Override public boolean stop() { @@ -94,16 +98,26 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, String accountName, Long domainId, String chksum, Boolean bootable, Map details) throws ResourceAllocationException { return prepare(isIso, 
userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, - chksum, bootable, null, null, details, false); + chksum, bootable, null, null, details, false, null); } public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, - String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled) throws ResourceAllocationException { + String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, + String imageStoreUuid) throws ResourceAllocationException { //Long accountId = null; // parameters verification + String storeUuid = imageStoreUuid; + if (storeUuid != null) { + DataStore store = this.storeMgr.getDataStore(storeUuid, DataStoreRole.Image); + if (store == null) { + throw new InvalidParameterValueException("invalide image store uuid" + storeUuid); + } + + } + if (isPublic == null) { isPublic = Boolean.FALSE; } @@ -200,7 +214,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Long id = _tmpltDao.getNextInSequence(Long.class, "id"); UserContext.current().setEventDetails("Id: " +id+ " name: " + name); return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, - featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled); + featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, 
sshkeyEnabled, imageStoreUuid); } @Override @@ -210,10 +224,12 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId()); _accountMgr.checkAccess(caller, null, true, owner); + + return prepare(false, UserContext.current().getCallerUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), - cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled()); + cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), cmd.getImageStoreUuid()); } public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { @@ -224,7 +240,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat return prepare(true, UserContext.current().getCallerUserId(), cmd.getIsoName(), cmd.getDisplayText(), 64, false, true, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), - cmd.getZoneId(), HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, owner, null, false); + cmd.getZoneId(), HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, owner, null, false, cmd.getImageStoreUuid()); } protected VMTemplateVO persistTemplate(TemplateProfile profile) { @@ -234,6 +250,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat profile.getBits(), profile.getAccountId(), profile.getCheckSum(), profile.getDisplayText(), profile.getPasswordEnabled(), profile.getGuestOsId(), profile.getBootable(), profile.getHypervisorType(), profile.getTemplateTag(), profile.getDetails(), profile.getSshKeyEnabled()); + if (zoneId == null || zoneId.longValue() == -1) { List dcs = 
_dcDao.listAll(); diff --git a/server/src/com/cloud/template/TemplateManager.java b/server/src/com/cloud/template/TemplateManager.java index ad145a911bf..1b054614b20 100755 --- a/server/src/com/cloud/template/TemplateManager.java +++ b/server/src/com/cloud/template/TemplateManager.java @@ -18,16 +18,19 @@ package com.cloud.template; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.dc.DataCenterVO; import com.cloud.exception.InternalErrorException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.utils.Pair; /** * TemplateManager manages the templates stored on secondary storage. It is responsible for creating private/public templates. 
@@ -91,4 +94,27 @@ public interface TemplateManager extends TemplateService{ VMTemplateHostVO prepareISOForCreate(VMTemplateVO template, StoragePool pool); + + VMTemplateHostVO findVmTemplateHost(long templateId, + StoragePool pool); + + Pair getAbsoluteIsoPath(long templateId, long dataCenterId); + + String getSecondaryStorageURL(long zoneId); + + HostVO getSecondaryStorageHost(long zoneId, long tmpltId); + + VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, + boolean readyOnly); + + HostVO getSecondaryStorageHost(long zoneId); + + List getSecondaryStorageHosts(long zoneId); + + Long getTemplateSize(long templateId, long zoneId); + + DataStore getImageStore(String storeUuid, long zoneId); + + String getChecksum(Long hostId, String templatePath); + } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index f9cf277842d..736f712b9c9 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -26,6 +26,9 @@ import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -35,22 +38,50 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd; -import org.apache.cloudstack.api.command.user.iso.*; -import org.apache.cloudstack.api.command.user.template.*; +import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; +import 
org.apache.cloudstack.api.command.user.iso.ListIsoPermissionsCmd; +import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.iso.UpdateIsoPermissionsCmd; +import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd; +import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; +import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; +import org.apache.cloudstack.api.command.user.template.ListTemplatePermissionsCmd; +import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import 
com.cloud.agent.api.AttachIsoCommand; +import com.cloud.agent.api.ComputeChecksumCommand; import com.cloud.agent.api.downloadTemplateFromSwiftToSecondaryStorageCommand; +import com.cloud.agent.api.uploadTemplateToSwiftFromSecondaryStorageCommand; import com.cloud.agent.api.storage.DestroyCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.to.SwiftTO; -import com.cloud.agent.api.uploadTemplateToSwiftFromSecondaryStorageCommand; + +import com.cloud.api.ApiDBUtils; import com.cloud.async.AsyncJobManager; import com.cloud.async.AsyncJobVO; import com.cloud.configuration.Config; @@ -64,12 +95,14 @@ import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.event.UsageEventVO; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; @@ -77,34 +110,72 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; -import com.cloud.storage.*; + +import com.cloud.resource.ResourceManager; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.LaunchPermissionVO; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; + import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import 
com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.TemplateProfile; import com.cloud.storage.Upload; import com.cloud.storage.Upload.Type; + +import com.cloud.storage.UploadVO; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateS3VO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.dao.*; +import com.cloud.storage.VMTemplateSwiftVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.LaunchPermissionDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.UploadDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.upload.UploadMonitor; import com.cloud.template.TemplateAdapter.TemplateAdapterType; -import com.cloud.user.*; + +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; +import com.cloud.user.ResourceLimitService; +import 
com.cloud.user.User; +import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserAccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ManagerBase; - import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.*; import com.cloud.utils.exception.CloudRuntimeException; @@ -123,6 +194,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject VMTemplateHostDao _tmpltHostDao; @Inject VMTemplatePoolDao _tmpltPoolDao; @Inject VMTemplateZoneDao _tmpltZoneDao; + @Inject + protected VMTemplateDetailsDao _templateDetailsDao; @Inject VMInstanceDao _vmInstanceDao; @Inject StoragePoolDao _poolDao; @Inject StoragePoolHostDao _poolHostDao; @@ -153,6 +226,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, ClusterDao _clusterDao; @Inject DomainDao _domainDao; @Inject UploadDao _uploadDao; + @Inject + protected GuestOSDao _guestOSDao; long _routerTemplateId = -1; @Inject StorageManager _storageMgr; @Inject AsyncJobManager _asyncMgr; @@ -164,6 +239,20 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject SecondaryStorageVmManager _ssvmMgr; @Inject LaunchPermissionDao _launchPermissionDao; @Inject ProjectManager _projectMgr; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + ImageService imageSvr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject VolumeManager volumeMgr; + @Inject VMTemplateHostDao templateHostDao; int _primaryStorageDownloadWait; @@ -217,7 +306,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if(!_accountService.isRootAdmin(account.getType())){ 
throw new PermissionDeniedException("Parameter templatetag can only be specified by a Root Admin, permission denied"); } - } + } + TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor())); TemplateProfile profile = adapter.prepare(cmd); VMTemplateVO template = adapter.create(profile); @@ -228,6 +318,22 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new CloudRuntimeException("Failed to create a template"); } } + + @Override + public DataStore getImageStore(String storeUuid, long zoneId) { + DataStore imageStore = null; + if (storeUuid != null) { + imageStore = this.dataStoreMgr.getDataStore(storeUuid, DataStoreRole.Image); + } else { + List stores = this.dataStoreMgr.getImageStores(new ZoneScope(zoneId)); + if (stores.size() > 1) { + throw new CloudRuntimeException("multiple image stores, don't know which one to use"); + } + imageStore = stores.get(0); + } + + return imageStore; + } @Override @ActionEvent(eventType = EventTypes.EVENT_ISO_EXTRACT, eventDescription = "extracting ISO", async = true) @@ -330,7 +436,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _accountMgr.checkAccess(caller, AccessType.ModifyEntry, true, template); - List sservers = _storageMgr.getSecondaryStorageHosts(zoneId); + List sservers = getSecondaryStorageHosts(zoneId); VMTemplateHostVO tmpltHostRef = null; if (sservers != null) { @@ -425,7 +531,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, private void reallyRun() { s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); - prepareTemplateForCreate(template, pool); + StoragePool pol = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); + prepareTemplateForCreate(template, pol); s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); } }); @@ -539,8 +646,8 @@ public class TemplateManagerImpl 
extends ManagerBase implements TemplateManager, } @Override @DB - public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO template, StoragePool pool) { - template = _tmpltDao.findById(template.getId(), true); + public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, StoragePool pool) { + VMTemplateVO template = _tmpltDao.findById(templ.getId(), true); long poolId = pool.getId(); long templateId = template.getId(); @@ -564,7 +671,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { String result = downloadTemplateFromSwiftToSecondaryStorage(dcId, templateId); @@ -578,7 +685,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; @@ -671,6 +778,61 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return null; } + + + + @Override + public VMTemplateHostVO findVmTemplateHost(long templateId, + StoragePool pool) { + long dcId = pool.getDataCenterId(); + Long podId = pool.getPodId(); + + List secHosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(dcId); + + + if (secHosts.size() == 1) { + VMTemplateHostVO templateHostVO = this._tmpltHostDao + .findByHostTemplate(secHosts.get(0).getId(), templateId); + return templateHostVO; + } + if (podId != null) { + List templHosts = 
this._tmpltHostDao + .listByTemplateStatus(templateId, dcId, podId, + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templHosts != null && !templHosts.isEmpty()) { + Collections.shuffle(templHosts); + return templHosts.get(0); + } + } + List templHosts = this._tmpltHostDao + .listByTemplateStatus(templateId, dcId, + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templHosts != null && !templHosts.isEmpty()) { + Collections.shuffle(templHosts); + return templHosts.get(0); + } + return null; + } + + @Override + public String getChecksum(Long hostId, String templatePath) { + HostVO ssHost = _hostDao.findById(hostId); + Host.Type type = ssHost.getType(); + if (type != Host.Type.SecondaryStorage + && type != Host.Type.LocalSecondaryStorage) { + return null; + } + String secUrl = ssHost.getStorageUrl(); + Answer answer; + answer = _agentMgr.sendToSecStorage(ssHost, new ComputeChecksumCommand( + secUrl, templatePath)); + if (answer != null && answer.getResult()) { + return answer.getDetails(); + } + return null; + } + @Override @DB public VMTemplateHostVO prepareISOForCreate(VMTemplateVO template, StoragePool pool) { @@ -684,7 +846,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, long templateStoragePoolRefId; String origUrl = null; - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { String result = downloadTemplateFromSwiftToSecondaryStorage(dcId, templateId); @@ -698,7 +860,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || 
templateHostRef.getDownloadState() != Status.DOWNLOADED) { s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; @@ -839,13 +1001,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("Unable to find template with id"); } - HostVO dstSecHost = _storageMgr.getSecondaryStorageHost(destZoneId, templateId); + HostVO dstSecHost = getSecondaryStorageHost(destZoneId, templateId); if ( dstSecHost != null ) { s_logger.debug("There is template " + templateId + " in secondary storage " + dstSecHost.getId() + " in zone " + destZoneId + " , don't need to copy"); return template; } - HostVO srcSecHost = _storageMgr.getSecondaryStorageHost(sourceZoneId, templateId); + HostVO srcSecHost = getSecondaryStorageHost(sourceZoneId, templateId); if ( srcSecHost == null ) { throw new InvalidParameterValueException("There is no template " + templateId + " in zone " + sourceZoneId ); } @@ -900,7 +1062,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Override public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) { - StoragePoolVO pool = _poolDao.findById(templatePoolVO.getPoolId()); + StoragePool pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId()); @@ -1176,12 +1338,57 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new CloudRuntimeException("Failed to attach iso"); } } + + private boolean attachISOToVM(long vmId, long isoId, boolean attach) { + UserVmVO vm = this._userVmDao.findById(vmId); + + if (vm == null) { + return false; + } else if (vm.getState() != State.Running) { + return true; + } + String isoPath; + VMTemplateVO tmplt = this._tmpltDao.findById(isoId); + if (tmplt == null) { + s_logger.warn("ISO: " + isoId + " does 
not exist"); + return false; + } + // Get the path of the ISO + Pair isoPathPair = null; + if (tmplt.getTemplateType() == TemplateType.PERHOST) { + isoPath = tmplt.getName(); + } else { + isoPathPair = getAbsoluteIsoPath(isoId, + vm.getDataCenterId()); + if (isoPathPair == null) { + s_logger.warn("Couldn't get absolute iso path"); + return false; + } else { + isoPath = isoPathPair.first(); + } + } + + String vmName = vm.getInstanceName(); + + HostVO host = _hostDao.findById(vm.getHostId()); + if (host == null) { + s_logger.warn("Host: " + vm.getHostId() + " does not exist"); + return false; + } + AttachIsoCommand cmd = new AttachIsoCommand(vmName, isoPath, attach); + if (isoPathPair != null) { + cmd.setStoreUrl(isoPathPair.second()); + } + Answer a = _agentMgr.easySend(vm.getHostId(), cmd); + + return (a != null && a.getResult()); + } private boolean attachISOToVM(long vmId, long userId, long isoId, boolean attach) { UserVmVO vm = _userVmDao.findById(vmId); VMTemplateVO iso = _tmpltDao.findById(isoId); - boolean success = _vmMgr.attachISOToVM(vmId, isoId, attach); + boolean success = attachISOToVM(vmId, isoId, attach); if ( success && attach) { vm.setIsoId(iso.getId()); _userVmDao.update(vmId, vm); @@ -1475,4 +1682,456 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return true; } + + private String getRandomPrivateTemplateName() { + return UUID.randomUUID().toString(); + } + + + + + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", async = true) + public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) + throws CloudRuntimeException { + Long userId = UserContext.current().getCallerUserId(); + if (userId == null) { + userId = User.UID_SYSTEM; + } + long templateId = command.getEntityId(); + Long volumeId = command.getVolumeId(); + Long snapshotId = command.getSnapshotId(); + VMTemplateVO privateTemplate = null; + Long accountId = null; + 
SnapshotVO snapshot = null; + + try { + TemplateInfo tmplInfo = this.tmplFactory.getTemplate(templateId); + snapshot = _snapshotDao.findById(snapshotId); + ZoneScope scope = new ZoneScope(snapshot.getDataCenterId()); + List store = this.dataStoreMgr.getImageStores(scope); + if (store.size() > 1) { + throw new CloudRuntimeException("muliple image data store, don't know which one to use"); + } + AsyncCallFuture future = null; + if (snapshotId != null) { + SnapshotInfo snapInfo = this.snapshotFactory.getSnapshot(snapshotId); + future = this.imageSvr.createTemplateFromSnapshotAsync(snapInfo, tmplInfo, store.get(0)); + } else if (volumeId != null) { + VolumeInfo volInfo = this.volFactory.getVolume(volumeId); + future = this.imageSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store.get(0)); + } else { + throw new CloudRuntimeException( + "Creating private Template need to specify snapshotId or volumeId"); + } + + CommandResult result = null; + try { + result = future.get(); + if (result.isFailed()) { + privateTemplate = null; + s_logger.debug("Failed to create template" + result.getResult()); + throw new CloudRuntimeException("Failed to create template" + result.getResult()); + } + + privateTemplate = this._tmpltDao.findById(templateId); + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_TEMPLATE_CREATE, + privateTemplate.getAccountId(), + snapshot.getDataCenterId(), + privateTemplate.getId(), privateTemplate.getName(), + null, privateTemplate.getSourceTemplateId(), + privateTemplate.getSize()); + _usageEventDao.persist(usageEvent); + } catch (InterruptedException e) { + s_logger.debug("Failed to create template", e); + throw new CloudRuntimeException("Failed to create template", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create template", e); + throw new CloudRuntimeException("Failed to create template", e); + } + + } finally { + /*if (snapshot != null && snapshot.getSwiftId() != null + && secondaryStorageURL != null && zoneId 
!= null + && accountId != null && volumeId != null) { + _snapshotMgr.deleteSnapshotsForVolume(secondaryStorageURL, + zoneId, accountId, volumeId); + }*/ + if (privateTemplate == null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + // Remove the template record + this._tmpltDao.expunge(templateId); + + // decrement resource count + if (accountId != null) { + _resourceLimitMgr.decrementResourceCount(accountId, + ResourceType.template); + } + txn.commit(); + } + } + + if (privateTemplate != null) { + return privateTemplate; + } else { + throw new CloudRuntimeException("Failed to create a template"); + } + } + + private static boolean isAdmin(short accountType) { + return ((accountType == Account.ACCOUNT_TYPE_ADMIN) + || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) + || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); + } + @Override + @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", create = true) + public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, + Account templateOwner) throws ResourceAllocationException { + Long userId = UserContext.current().getCallerUserId(); + + Account caller = UserContext.current().getCaller(); + boolean isAdmin = (isAdmin(caller.getType())); + + _accountMgr.checkAccess(caller, null, true, templateOwner); + + String name = cmd.getTemplateName(); + if ((name == null) || (name.length() > 32)) { + throw new InvalidParameterValueException( + "Template name cannot be null and should be less than 32 characters"); + } + + if (cmd.getTemplateTag() != null) { + if (!_accountService.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Parameter templatetag can only be specified by a Root Admin, permission denied"); + } + } + + // do some parameter defaulting + Integer bits = cmd.getBits(); + Boolean requiresHvm = cmd.getRequiresHvm(); + Boolean passwordEnabled = 
cmd.isPasswordEnabled(); + Boolean isPublic = cmd.isPublic(); + Boolean featured = cmd.isFeatured(); + int bitsValue = ((bits == null) ? 64 : bits.intValue()); + boolean requiresHvmValue = ((requiresHvm == null) ? true : requiresHvm + .booleanValue()); + boolean passwordEnabledValue = ((passwordEnabled == null) ? false + : passwordEnabled.booleanValue()); + if (isPublic == null) { + isPublic = Boolean.FALSE; + } + boolean allowPublicUserTemplates = Boolean.parseBoolean(_configDao + .getValue("allow.public.user.templates")); + if (!isAdmin && !allowPublicUserTemplates && isPublic) { + throw new PermissionDeniedException("Failed to create template " + + name + ", only private templates can be created."); + } + + Long volumeId = cmd.getVolumeId(); + Long snapshotId = cmd.getSnapshotId(); + if ((volumeId == null) && (snapshotId == null)) { + throw new InvalidParameterValueException( + "Failed to create private template record, neither volume ID nor snapshot ID were specified."); + } + if ((volumeId != null) && (snapshotId != null)) { + throw new InvalidParameterValueException( + "Failed to create private template record, please specify only one of volume ID (" + + volumeId + + ") and snapshot ID (" + + snapshotId + + ")"); + } + + HypervisorType hyperType; + VolumeVO volume = null; + VMTemplateVO privateTemplate = null; + if (volumeId != null) { // create template from volume + volume = this._volumeDao.findById(volumeId); + if (volume == null) { + throw new InvalidParameterValueException( + "Failed to create private template record, unable to find volume " + + volumeId); + } + // check permissions + _accountMgr.checkAccess(caller, null, true, volume); + + // If private template is created from Volume, check that the volume + // will not be active when the private template is + // created + if (!this.volumeMgr.volumeInactive(volume)) { + String msg = "Unable to create private template for volume: " + + volume.getName() + + "; volume is attached to a non-stopped VM, 
please stop the VM first"; + if (s_logger.isInfoEnabled()) { + s_logger.info(msg); + } + throw new CloudRuntimeException(msg); + } + hyperType = this._volumeDao.getHypervisorType(volumeId); + } else { // create template from snapshot + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + if (snapshot == null) { + throw new InvalidParameterValueException( + "Failed to create private template record, unable to find snapshot " + + snapshotId); + } + + volume = this._volumeDao.findById(snapshot.getVolumeId()); + VolumeVO snapshotVolume = this._volumeDao + .findByIdIncludingRemoved(snapshot.getVolumeId()); + + // check permissions + _accountMgr.checkAccess(caller, null, true, snapshot); + + if (snapshot.getState() != Snapshot.State.BackedUp) { + throw new InvalidParameterValueException("Snapshot id=" + + snapshotId + " is not in " + Snapshot.State.BackedUp + + " state yet and can't be used for template creation"); + } + + /* + * // bug #11428. Operation not supported if vmware and snapshots + * parent volume = ROOT if(snapshot.getHypervisorType() == + * HypervisorType.VMware && snapshotVolume.getVolumeType() == + * Type.DATADISK){ throw new UnsupportedServiceException( + * "operation not supported, snapshot with id " + snapshotId + + * " is created from Data Disk"); } + */ + + hyperType = snapshot.getHypervisorType(); + } + + _resourceLimitMgr.checkResourceLimit(templateOwner, + ResourceType.template); + + if (!isAdmin || featured == null) { + featured = Boolean.FALSE; + } + Long guestOSId = cmd.getOsTypeId(); + GuestOSVO guestOS = this._guestOSDao.findById(guestOSId); + if (guestOS == null) { + throw new InvalidParameterValueException("GuestOS with ID: " + + guestOSId + " does not exist."); + } + + String uniqueName = Long.valueOf((userId == null) ? 
1 : userId) + .toString() + + UUID.nameUUIDFromBytes(name.getBytes()).toString(); + Long nextTemplateId = this._tmpltDao.getNextInSequence(Long.class, "id"); + String description = cmd.getDisplayText(); + boolean isExtractable = false; + Long sourceTemplateId = null; + if (volume != null) { + VMTemplateVO template = ApiDBUtils.findTemplateById(volume + .getTemplateId()); + isExtractable = template != null + && template.isExtractable() + && template.getTemplateType() != Storage.TemplateType.SYSTEM; + if (template != null) { + sourceTemplateId = template.getId(); + } else if (volume.getVolumeType() == Volume.Type.ROOT) { // vm created out + // of blank + // template + UserVm userVm = ApiDBUtils.findUserVmById(volume + .getInstanceId()); + sourceTemplateId = userVm.getIsoId(); + } + } + String templateTag = cmd.getTemplateTag(); + if (templateTag != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Adding template tag: " + templateTag); + } + } + privateTemplate = new VMTemplateVO(nextTemplateId, uniqueName, name, + ImageFormat.RAW, isPublic, featured, isExtractable, + TemplateType.USER, null, null, requiresHvmValue, bitsValue, + templateOwner.getId(), null, description, passwordEnabledValue, + guestOS.getId(), true, hyperType, templateTag, cmd.getDetails()); + if (sourceTemplateId != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("This template is getting created from other template, setting source template Id to: " + + sourceTemplateId); + } + } + privateTemplate.setSourceTemplateId(sourceTemplateId); + + VMTemplateVO template = this._tmpltDao.persist(privateTemplate); + // Increment the number of templates + if (template != null) { + if (cmd.getDetails() != null) { + this._templateDetailsDao.persist(template.getId(), cmd.getDetails()); + } + + _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), + ResourceType.template); + } + + if (template != null) { + return template; + } else { + throw new CloudRuntimeException("Failed to 
create a template"); + } + + } + + @Override + public Pair getAbsoluteIsoPath(long templateId, + long dataCenterId) { + String isoPath = null; + + List storageHosts = _resourceMgr.listAllHostsInOneZoneByType( + Host.Type.SecondaryStorage, dataCenterId); + if (storageHosts != null) { + for (HostVO storageHost : storageHosts) { + List templateHostVOs = this._tmpltHostDao + .listByTemplateHostStatus( + templateId, + storageHost.getId(), + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templateHostVOs != null && !templateHostVOs.isEmpty()) { + VMTemplateHostVO tmpHostVO = templateHostVOs.get(0); + isoPath = storageHost.getStorageUrl() + "/" + + tmpHostVO.getInstallPath(); + return new Pair(isoPath, + storageHost.getStorageUrl()); + } + } + } + s_logger.warn("Unable to find secondary storage in zone id=" + + dataCenterId); + return null; + } + + @Override + public String getSecondaryStorageURL(long zoneId) { + // Determine the secondary storage URL + HostVO secondaryStorageHost = getSecondaryStorageHost(zoneId); + + if (secondaryStorageHost == null) { + return null; + } + + return secondaryStorageHost.getStorageUrl(); + } + + @Override + public HostVO getSecondaryStorageHost(long zoneId, long tmpltId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + return null; + } + for (HostVO host : hosts) { + VMTemplateHostVO tmpltHost = this._tmpltHostDao.findByHostTemplate( + host.getId(), tmpltId); + if (tmpltHost != null + && !tmpltHost.getDestroyed() + && tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + return host; + } + } + return null; + } + + @Override + public VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, + boolean readyOnly) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + return null; + } + VMTemplateHostVO inProgress = null; + VMTemplateHostVO other = null; + for 
(HostVO host : hosts) { + VMTemplateHostVO tmpltHost = this._tmpltHostDao.findByHostTemplate( + host.getId(), tmpltId); + if (tmpltHost != null && !tmpltHost.getDestroyed()) { + if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + return tmpltHost; + } else if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { + inProgress = tmpltHost; + } else { + other = tmpltHost; + } + } + } + if (inProgress != null) { + return inProgress; + } + return other; + } + + @Override + public HostVO getSecondaryStorageHost(long zoneId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); + if (hosts.isEmpty()) { + return null; + } + } + + int size = hosts.size(); + Random rn = new Random(); + int index = rn.nextInt(size); + return hosts.get(index); + } + + @Override + public List getSecondaryStorageHosts(long zoneId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); + if (hosts.isEmpty()) { + return new ArrayList(); + } + } + return hosts; + } + + @Override + public Long getTemplateSize(long templateId, long zoneId) { + SearchCriteria sc = HostTemplateStatesSearch.create(); + sc.setParameters("id", templateId); + sc.setParameters( + "state", + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sc.setJoinParameters("host", "dcId", zoneId); + List tsvs = _tmpltSwiftDao + .listByTemplateId(templateId); + Long size = null; + if (tsvs != null && tsvs.size() > 0) { + size = tsvs.get(0).getSize(); + } + + if (size == null && _s3Mgr.isS3Enabled()) { + VMTemplateS3VO vmTemplateS3VO = _vmS3TemplateDao + .findOneByTemplateId(templateId); + if (vmTemplateS3VO != null) { + size = vmTemplateS3VO.getSize(); + } + } + + if (size == null) { 
+ List sss = this.templateHostDao.search(sc, null); + if (sss == null || sss.size() == 0) { + throw new CloudRuntimeException("Template " + + templateId + + " has not been completely downloaded to zone " + + zoneId); + } + size = sss.get(0).getSize(); + } + return size; + } + } diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index a3f9505c3df..f9a61e89df7 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -104,6 +104,7 @@ import com.cloud.server.auth.UserAuthenticator; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; @@ -118,7 +119,6 @@ import com.cloud.user.dao.UserDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; - import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -226,6 +226,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M Site2SiteVpnManager _vpnMgr; @Inject private AutoScaleManager _autoscaleMgr; + @Inject VolumeManager volumeMgr; @Inject private List _userAuthenticators; @@ -576,7 +577,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M for (VolumeVO volume : volumes) { if (!volume.getState().equals(Volume.State.Destroy)) { try { - _storageMgr.deleteVolume(volume.getId(), caller); + this.volumeMgr.deleteVolume(volume.getId(), caller); } catch (Exception ex) { s_logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; diff --git a/server/src/com/cloud/vm/UserVmManager.java 
b/server/src/com/cloud/vm/UserVmManager.java index 3e4a2dbf27f..cc1fffd780b 100755 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -49,16 +49,7 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi * @return VirtualMachine */ UserVmVO getVirtualMachine(long vmId); - - /** - * Attaches an ISO to the virtual CDROM device of the specified VM. Will eject any existing virtual CDROM if isoPath is null. - * @param vmId - * @param isoId - * @param attach whether to attach or detach the given iso - * @return - */ - boolean attachISOToVM(long vmId, long isoId, boolean attach); - + /** * Stops the virtual machine * @param userId the id of the user performing the action @@ -101,8 +92,6 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi */ Pair, Integer> searchForUserVMs(Criteria c, Account caller, Long domainId, boolean isRecursive, List permittedAccounts, boolean listAll, ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags); - String getChecksum(Long hostId, String templatePath); - Pair> startVirtualMachine(long vmId, Long hostId, Map additionalParams) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index ed8cd3630a8..6022b38f6b2 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -34,10 +34,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import 
org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -52,32 +50,19 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; - import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity; import org.apache.cloudstack.engine.service.api.OrchestrationService; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; +import com.cloud.agent.AgentManager.OnError; import com.cloud.agent.api.Answer; -import com.cloud.agent.api.AttachIsoCommand; -import com.cloud.agent.api.AttachVolumeAnswer; -import com.cloud.agent.api.AttachVolumeCommand; -import com.cloud.agent.api.ComputeChecksumCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.SnapshotCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.UpgradeSnapshotCommand; import com.cloud.agent.api.VmStatsEntry; -import com.cloud.agent.AgentManager.OnError; -import com.cloud.agent.api.*; -import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; @@ -87,13 +72,9 @@ import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; import 
com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; -import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.api.query.vo.UserVmJoinVO; -import com.cloud.async.AsyncJobExecutor; import com.cloud.async.AsyncJobManager; -import com.cloud.async.AsyncJobVO; -import com.cloud.async.BaseAsyncJobExecutor; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -133,15 +114,11 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; -import com.cloud.network.Network; import com.cloud.network.*; import com.cloud.network.Network.IpAddresses; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; -import com.cloud.network.NetworkManager; -import com.cloud.network.NetworkModel; import com.cloud.network.Networks.TrafficType; -import com.cloud.network.PhysicalNetwork; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; @@ -182,7 +159,6 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; -import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; @@ -191,14 +167,10 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.Volume; 
-import com.cloud.storage.Volume.Type; -import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; @@ -213,6 +185,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeHostDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.user.Account; @@ -278,7 +251,7 @@ import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotManager; -import com.cloud.vm.snapshot.VMSnapshotVO; +//import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = { UserVmManager.class, UserVmService.class }) @@ -414,6 +387,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject VpcManager _vpcMgr; @Inject + TemplateManager templateMgr; + @Inject protected GuestOSCategoryDao _guestOSCategoryDao; @Inject UsageEventDao _usageEventDao; @@ -436,6 +411,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject protected OrchestrationService _orchSrvc; + + @Inject VolumeManager volumeMgr; @Override public UserVmVO getVirtualMachine(long vmId) { @@ -737,592 +714,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - private int getMaxDataVolumesSupported(UserVmVO vm) { - Long hostId = vm.getHostId(); - if (hostId == null) { - hostId = vm.getLastHostId(); - } - HostVO host = _hostDao.findById(hostId); - Integer maxDataVolumesSupported = null; - if (host != null) { - _hostDao.loadDetails(host); - maxDataVolumesSupported = _hypervisorCapabilitiesDao - 
.getMaxDataVolumesLimit(host.getHypervisorType(), - host.getDetail("product_version")); - } - if (maxDataVolumesSupported == null) { - maxDataVolumesSupported = 6; // 6 data disks by default if nothing - // is specified in - // 'hypervisor_capabilities' table - } - return maxDataVolumesSupported.intValue(); - } - - @Override - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) - public Volume attachVolumeToVM(AttachVolumeCmd command) { - Long vmId = command.getVirtualMachineId(); - Long volumeId = command.getId(); - Long deviceId = command.getDeviceId(); - Account caller = UserContext.current().getCaller(); - - // Check that the volume ID is valid - VolumeVO volume = _volsDao.findById(volumeId); - // Check that the volume is a data volume - if (volume == null || volume.getVolumeType() != Volume.Type.DATADISK) { - throw new InvalidParameterValueException( - "Please specify a valid data volume."); - } - - // Check that the volume is not currently attached to any VM - if (volume.getInstanceId() != null) { - throw new InvalidParameterValueException( - "Please specify a volume that is not attached to any VM."); - } - - // Check that the volume is not destroyed - if (volume.getState() == Volume.State.Destroy) { - throw new InvalidParameterValueException( - "Please specify a volume that is not destroyed."); - } - - // Check that the virtual machine ID is valid and it's a user vm - UserVmVO vm = _vmDao.findById(vmId); - if (vm == null || vm.getType() != VirtualMachine.Type.User) { - throw new InvalidParameterValueException( - "Please specify a valid User VM."); - } - - // Check that the VM is in the correct state - if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - throw new InvalidParameterValueException( - "Please specify a VM that is either running or stopped."); - } - - // Check that the device ID is valid - if (deviceId != null) { - if (deviceId.longValue() == 0) { - throw new 
InvalidParameterValueException( - "deviceId can't be 0, which is used by Root device"); - } - } - - // Check that the number of data volumes attached to VM is less than - // that supported by hypervisor - List existingDataVolumes = _volsDao.findByInstanceAndType( - vmId, Volume.Type.DATADISK); - int maxDataVolumesSupported = getMaxDataVolumesSupported(vm); - if (existingDataVolumes.size() >= maxDataVolumesSupported) { - throw new InvalidParameterValueException( - "The specified VM already has the maximum number of data disks (" - + maxDataVolumesSupported - + "). Please specify another VM."); - } - - // Check that the VM and the volume are in the same zone - if (vm.getDataCenterId() != volume.getDataCenterId()) { - throw new InvalidParameterValueException( - "Please specify a VM that is in the same zone as the volume."); - } - - // If local storage is disabled then attaching a volume with local disk - // offering not allowed - DataCenterVO dataCenter = _dcDao.findById(volume.getDataCenterId()); - if (!dataCenter.isLocalStorageEnabled()) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume - .getDiskOfferingId()); - if (diskOffering.getUseLocalStorage()) { - throw new InvalidParameterValueException( - "Zone is not configured to use local storage but volume's disk offering " - + diskOffering.getName() + " uses it"); - } - } - - // permission check - _accountMgr.checkAccess(caller, null, true, volume, vm); - - //check if vm has snapshot, if true: can't attache volume - boolean attach = true; - checkVMSnapshots(vm, volumeId, attach); - - // Check if volume is stored on secondary Storage. - //Check if volume is stored on secondary Storage. - boolean isVolumeOnSec = false; - VolumeHostVO volHostVO = _volumeHostDao.findByVolumeId(volume.getId()); - if (volHostVO != null) { - isVolumeOnSec = true; - if (!(volHostVO.getDownloadState() == Status.DOWNLOADED)) { - throw new InvalidParameterValueException( - "Volume is not uploaded yet. 
Please try this operation once the volume is uploaded"); - } - } - - if (!(Volume.State.Allocated.equals(volume.getState()) - || Volume.State.Ready.equals(volume.getState()) || Volume.State.UploadOp - .equals(volume.getState()))) { - throw new InvalidParameterValueException( - "Volume state must be in Allocated, Ready or in Uploaded state"); - } - - VolumeVO rootVolumeOfVm = null; - List rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, - Volume.Type.ROOT); - if (rootVolumesOfVm.size() != 1) { - throw new CloudRuntimeException( - "The VM " - + vm.getHostName() - + " has more than one ROOT volume and is in an invalid state."); - } else { - rootVolumeOfVm = rootVolumesOfVm.get(0); - } - - HypervisorType rootDiskHyperType = vm.getHypervisorType(); - - HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume - .getId()); - if (dataDiskHyperType != HypervisorType.None - && rootDiskHyperType != dataDiskHyperType) { - throw new InvalidParameterValueException( - "Can't attach a volume created by: " + dataDiskHyperType - + " to a " + rootDiskHyperType + " vm"); - } - - // allocate deviceId - List vols = _volsDao.findByInstance(vmId); - if (deviceId != null) { - if (deviceId.longValue() > 15 || deviceId.longValue() == 0 - || deviceId.longValue() == 3) { - throw new RuntimeException("deviceId should be 1,2,4-15"); - } - for (VolumeVO vol : vols) { - if (vol.getDeviceId().equals(deviceId)) { - throw new RuntimeException("deviceId " + deviceId - + " is used by VM " + vm.getHostName()); - } - } - } else { - // allocate deviceId here - List devIds = new ArrayList(); - for (int i = 1; i < 15; i++) { - devIds.add(String.valueOf(i)); - } - devIds.remove("3"); - for (VolumeVO vol : vols) { - devIds.remove(vol.getDeviceId().toString().trim()); - } - deviceId = Long.parseLong(devIds.iterator().next()); - } - - boolean createVolumeOnBackend = true; - if (rootVolumeOfVm.getState() == Volume.State.Allocated) { - createVolumeOnBackend = false; - if (isVolumeOnSec) { - 
throw new CloudRuntimeException( - "Cant attach uploaded volume to the vm which is not created. Please start it and then retry"); - } - } - - // create volume on the backend only when vm's root volume is allocated - if (createVolumeOnBackend) { - if (volume.getState().equals(Volume.State.Allocated) - || isVolumeOnSec) { - /* Need to create the volume */ - VMTemplateVO rootDiskTmplt = _templateDao.findById(vm - .getTemplateId()); - DataCenterVO dcVO = _dcDao.findById(vm - .getDataCenterId()); - HostPodVO pod = _podDao.findById(vm.getPodIdToDeployIn()); - StoragePoolVO rootDiskPool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - ServiceOfferingVO svo = _serviceOfferingDao.findById(vm - .getServiceOfferingId()); - DiskOfferingVO diskVO = _diskOfferingDao.findById(volume - .getDiskOfferingId()); - Long clusterId = (rootDiskPool == null ? null : rootDiskPool - .getClusterId()); - - if (!isVolumeOnSec) { - volume = _storageMgr.createVolume(volume, vm, - rootDiskTmplt, dcVO, pod, clusterId, svo, diskVO, - new ArrayList(), volume.getSize(), - rootDiskHyperType); - } else { - try { - // Format of data disk should be the same as root disk - if (!volHostVO - .getFormat() - .getFileExtension() - .equals(_storageMgr - .getSupportedImageFormatForCluster(rootDiskPool - .getClusterId()))) { - throw new InvalidParameterValueException( - "Failed to attach volume to VM since volumes format " - + volHostVO.getFormat() - .getFileExtension() - + " is not compatible with the vm hypervisor type"); - } - - // Check that there is some shared storage. 
- StoragePoolVO vmRootVolumePool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - List sharedVMPools = _storagePoolDao - .findPoolsByTags( - vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), null, - true); - if (sharedVMPools.size() == 0) { - throw new CloudRuntimeException( - "Cannot attach volume since there are no shared storage pools in the VM's cluster to copy the uploaded volume to."); - } - - volume = _storageMgr.copyVolumeFromSecToPrimary(volume, - vm, rootDiskTmplt, dcVO, pod, - rootDiskPool.getClusterId(), svo, diskVO, - new ArrayList(), - volume.getSize(), rootDiskHyperType); - } catch (NoTransitionException e) { - throw new CloudRuntimeException( - "Unable to transition the volume ", e); - } - } - - if (volume == null) { - throw new CloudRuntimeException( - "Failed to create volume when attaching it to VM: " - + vm.getHostName()); - } - } - - StoragePoolVO vmRootVolumePool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - DiskOfferingVO volumeDiskOffering = _diskOfferingDao - .findById(volume.getDiskOfferingId()); - String[] volumeTags = volumeDiskOffering.getTagsArray(); - - boolean isVolumeOnSharedPool = !volumeDiskOffering - .getUseLocalStorage(); - StoragePoolVO sourcePool = _storagePoolDao.findById(volume - .getPoolId()); - List matchingVMPools = _storagePoolDao - .findPoolsByTags(vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), volumeTags, - isVolumeOnSharedPool); - boolean moveVolumeNeeded = true; - if (matchingVMPools.size() == 0) { - String poolType; - if (vmRootVolumePool.getClusterId() != null) { - poolType = "cluster"; - } else if (vmRootVolumePool.getPodId() != null) { - poolType = "pod"; - } else { - poolType = "zone"; - } - throw new CloudRuntimeException( - "There are no storage pools in the VM's " + poolType - + " with all of the volume's tags (" - + volumeDiskOffering.getTags() + ")."); - } else { - long 
sourcePoolId = sourcePool.getId(); - Long sourcePoolDcId = sourcePool.getDataCenterId(); - Long sourcePoolPodId = sourcePool.getPodId(); - Long sourcePoolClusterId = sourcePool.getClusterId(); - for (StoragePoolVO vmPool : matchingVMPools) { - long vmPoolId = vmPool.getId(); - Long vmPoolDcId = vmPool.getDataCenterId(); - Long vmPoolPodId = vmPool.getPodId(); - Long vmPoolClusterId = vmPool.getClusterId(); - - // Moving a volume is not required if storage pools belongs - // to same cluster in case of shared volume or - // identical storage pool in case of local - if (sourcePoolDcId == vmPoolDcId - && sourcePoolPodId == vmPoolPodId - && sourcePoolClusterId == vmPoolClusterId - && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { - moveVolumeNeeded = false; - break; - } - } - } - - if (moveVolumeNeeded) { - if (isVolumeOnSharedPool) { - // Move the volume to a storage pool in the VM's zone, pod, - // or cluster - try { - volume = _storageMgr.moveVolume(volume, - vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), - dataDiskHyperType); - } catch (ConcurrentOperationException e) { - throw new CloudRuntimeException(e.toString()); - } - } else { - throw new CloudRuntimeException( - "Failed to attach local data volume " - + volume.getName() - + " to VM " - + vm.getDisplayName() - + " as migration of local data volume is not allowed"); - } - } - } - - AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor - .getCurrentExecutor(); - if (asyncExecutor != null) { - AsyncJobVO job = asyncExecutor.getJob(); - - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId - + " to vm instance:" + vm.getId() - + ", update async job-" + job.getId() - + " progress status"); - } - - _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); - _asyncMgr.updateAsyncJobStatus(job.getId(), - BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); - } - - String errorMsg = "Failed to attach volume: " + 
volume.getName() - + " to VM: " + vm.getHostName(); - boolean sendCommand = (vm.getState() == State.Running); - AttachVolumeAnswer answer = null; - Long hostId = vm.getHostId(); - if (hostId == null) { - hostId = vm.getLastHostId(); - HostVO host = _hostDao.findById(hostId); - if (host != null - && host.getHypervisorType() == HypervisorType.VMware) { - sendCommand = true; - } - } - - if (sendCommand) { - StoragePoolVO volumePool = _storagePoolDao.findById(volume - .getPoolId()); - AttachVolumeCommand cmd = new AttachVolumeCommand(true, - vm.getInstanceName(), volume.getPoolType(), - volume.getFolder(), volume.getPath(), volume.getName(), - deviceId, volume.getChainInfo()); - cmd.setPoolUuid(volumePool.getUuid()); - - try { - answer = (AttachVolumeAnswer) _agentMgr.send(hostId, cmd); - } catch (Exception e) { - throw new CloudRuntimeException(errorMsg + " due to: " - + e.getMessage()); - } - } - - if (!sendCommand || (answer != null && answer.getResult())) { - // Mark the volume as attached - if (sendCommand) { - _volsDao.attachVolume(volume.getId(), vmId, - answer.getDeviceId()); - } else { - _volsDao.attachVolume(volume.getId(), vmId, deviceId); - } - return _volsDao.findById(volumeId); - } else { - if (answer != null) { - String details = answer.getDetails(); - if (details != null && !details.isEmpty()) { - errorMsg += "; " + details; - } - } - throw new CloudRuntimeException(errorMsg); - } - } + private void checkVMSnapshots(UserVmVO vm, Long volumeId, boolean attach) { // Check that if vm has any VM snapshot - Long vmId = vm.getId(); + /*Long vmId = vm.getId(); List listSnapshot = _vmSnapshotDao.listByInstanceId(vmId, VMSnapshot.State.Ready, VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); if (listSnapshot != null && listSnapshot.size() != 0) { throw new InvalidParameterValueException( "The VM has VM snapshots, do not allowed to attach volume. 
Please delete the VM snapshots first."); - } + }*/ } - @Override - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "event_detaching_volume1", async = true) - public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { - Account caller = UserContext.current().getCaller(); - if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd - .getVirtualMachineId() == null) - || (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd - .getVirtualMachineId() != null)) - || (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd - .getVirtualMachineId() == null))) { - throw new InvalidParameterValueException( - "Please provide either a volume id, or a tuple(device id, instance id)"); - } - - Long volumeId = cmmd.getId(); - VolumeVO volume = null; - - if (volumeId != null) { - volume = _volsDao.findById(volumeId); - } else { - volume = _volsDao.findByInstanceAndDeviceId( - cmmd.getVirtualMachineId(), cmmd.getDeviceId()).get(0); - } - - Long vmId = null; - - if (cmmd.getVirtualMachineId() == null) { - vmId = volume.getInstanceId(); - } else { - vmId = cmmd.getVirtualMachineId(); - } - - // Check that the volume ID is valid - if (volume == null) { - throw new InvalidParameterValueException( - "Unable to find volume with ID: " + volumeId); - } - - // Permissions check - _accountMgr.checkAccess(caller, null, true, volume); - - // Check that the volume is a data volume - if (volume.getVolumeType() != Volume.Type.DATADISK) { - throw new InvalidParameterValueException( - "Please specify a data volume."); - } - - // Check that the volume is currently attached to a VM - if (vmId == null) { - throw new InvalidParameterValueException( - "The specified volume is not attached to a VM."); - } - - // Check that the VM is in the correct state - UserVmVO vm = _vmDao.findById(vmId); - if (vm.getState() != State.Running && vm.getState() != State.Stopped - && vm.getState() != State.Destroyed) { - throw new InvalidParameterValueException( - "Please specify a 
VM that is either running or stopped."); - } - - // Check that if the volume has snapshot - boolean attach = false; - checkVMSnapshots(vm, volumeId, attach); - AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor.getCurrentExecutor(); - if (asyncExecutor != null) { - AsyncJobVO job = asyncExecutor.getJob(); - - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId - + "to vm instance:" + vm.getId() - + ", update async job-" + job.getId() - + " progress status"); - } - - _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); - _asyncMgr.updateAsyncJobStatus(job.getId(), - BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); - } - - String errorMsg = "Failed to detach volume: " + volume.getName() - + " from VM: " + vm.getHostName(); - boolean sendCommand = (vm.getState() == State.Running); - Answer answer = null; - - if (sendCommand) { - AttachVolumeCommand cmd = new AttachVolumeCommand(false, - vm.getInstanceName(), volume.getPoolType(), - volume.getFolder(), volume.getPath(), volume.getName(), - cmmd.getDeviceId() != null ? 
cmmd.getDeviceId() : volume - .getDeviceId(), volume.getChainInfo()); - - StoragePoolVO volumePool = _storagePoolDao.findById(volume - .getPoolId()); - cmd.setPoolUuid(volumePool.getUuid()); - - try { - answer = _agentMgr.send(vm.getHostId(), cmd); - } catch (Exception e) { - throw new CloudRuntimeException(errorMsg + " due to: " - + e.getMessage()); - } - } - - if (!sendCommand || (answer != null && answer.getResult())) { - // Mark the volume as detached - _volsDao.detachVolume(volume.getId()); - if (answer != null && answer instanceof AttachVolumeAnswer) { - volume.setChainInfo(((AttachVolumeAnswer) answer) - .getChainInfo()); - _volsDao.update(volume.getId(), volume); - } - - return _volsDao.findById(volumeId); - } else { - - if (answer != null) { - String details = answer.getDetails(); - if (details != null && !details.isEmpty()) { - errorMsg += "; " + details; - } - } - - throw new CloudRuntimeException(errorMsg); - } - } - - @Override - public boolean attachISOToVM(long vmId, long isoId, boolean attach) { - UserVmVO vm = _vmDao.findById(vmId); - - if (vm == null) { - return false; - } else if (vm.getState() != State.Running) { - return true; - } - String isoPath; - VMTemplateVO tmplt = _templateDao.findById(isoId); - if (tmplt == null) { - s_logger.warn("ISO: " + isoId + " does not exist"); - return false; - } - // Get the path of the ISO - Pair isoPathPair = null; - if (tmplt.getTemplateType() == TemplateType.PERHOST) { - isoPath = tmplt.getName(); - } else { - isoPathPair = _storageMgr.getAbsoluteIsoPath(isoId, - vm.getDataCenterId()); - if (isoPathPair == null) { - s_logger.warn("Couldn't get absolute iso path"); - return false; - } else { - isoPath = isoPathPair.first(); - } - } - - String vmName = vm.getInstanceName(); - - HostVO host = _hostDao.findById(vm.getHostId()); - if (host == null) { - s_logger.warn("Host: " + vm.getHostId() + " does not exist"); - return false; - } - AttachIsoCommand cmd = new AttachIsoCommand(vmName, isoPath, attach); - if 
(isoPathPair != null) { - cmd.setStoreUrl(isoPathPair.second()); - } - Answer a = _agentMgr.easySend(vm.getHostId(), cmd); - - return (a != null && a.getResult()); - } + private UserVm rebootVirtualMachine(long userId, long vmId) throws InsufficientCapacityException, ResourceUnavailableException { @@ -1368,7 +774,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use _itMgr.checkIfCanUpgrade(vmInstance, svcOffId); // remove diskAndMemory VM snapshots - List vmSnapshots = _vmSnapshotDao.findByVm(vmId); + /* List vmSnapshots = _vmSnapshotDao.findByVm(vmId); for (VMSnapshotVO vmSnapshotVO : vmSnapshots) { if(vmSnapshotVO.getType() == VMSnapshot.Type.DiskAndMemory){ if(!_vmSnapshotMgr.deleteAllVMSnapshots(vmId, VMSnapshot.Type.DiskAndMemory)){ @@ -1378,7 +784,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - } + }*/ _itMgr.upgradeVmDb(vmId, svcOffId); @@ -1962,474 +1368,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - @Override - @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", create = true) - public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, - Account templateOwner) throws ResourceAllocationException { - Long userId = UserContext.current().getCallerUserId(); - - Account caller = UserContext.current().getCaller(); - boolean isAdmin = (isAdmin(caller.getType())); - - _accountMgr.checkAccess(caller, null, true, templateOwner); - - String name = cmd.getTemplateName(); - if ((name == null) || (name.length() > 32)) { - throw new InvalidParameterValueException( - "Template name cannot be null and should be less than 32 characters"); - } - - if (cmd.getTemplateTag() != null) { - if (!_accountService.isRootAdmin(caller.getType())) { - throw new PermissionDeniedException( - "Parameter templatetag can only be specified by a Root Admin, permission denied"); - } - } - - // do some parameter 
defaulting - Integer bits = cmd.getBits(); - Boolean requiresHvm = cmd.getRequiresHvm(); - Boolean passwordEnabled = cmd.isPasswordEnabled(); - Boolean isPublic = cmd.isPublic(); - Boolean featured = cmd.isFeatured(); - int bitsValue = ((bits == null) ? 64 : bits.intValue()); - boolean requiresHvmValue = ((requiresHvm == null) ? true : requiresHvm - .booleanValue()); - boolean passwordEnabledValue = ((passwordEnabled == null) ? false - : passwordEnabled.booleanValue()); - if (isPublic == null) { - isPublic = Boolean.FALSE; - } - boolean allowPublicUserTemplates = Boolean.parseBoolean(_configDao - .getValue("allow.public.user.templates")); - if (!isAdmin && !allowPublicUserTemplates && isPublic) { - throw new PermissionDeniedException("Failed to create template " - + name + ", only private templates can be created."); - } - - Long volumeId = cmd.getVolumeId(); - Long snapshotId = cmd.getSnapshotId(); - if ((volumeId == null) && (snapshotId == null)) { - throw new InvalidParameterValueException( - "Failed to create private template record, neither volume ID nor snapshot ID were specified."); - } - if ((volumeId != null) && (snapshotId != null)) { - throw new InvalidParameterValueException( - "Failed to create private template record, please specify only one of volume ID (" - + volumeId - + ") and snapshot ID (" - + snapshotId - + ")"); - } - - HypervisorType hyperType; - VolumeVO volume = null; - VMTemplateVO privateTemplate = null; - if (volumeId != null) { // create template from volume - volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new InvalidParameterValueException( - "Failed to create private template record, unable to find volume " - + volumeId); - } - // check permissions - _accountMgr.checkAccess(caller, null, true, volume); - - // If private template is created from Volume, check that the volume - // will not be active when the private template is - // created - if (!_storageMgr.volumeInactive(volume)) { - String msg = "Unable to 
create private template for volume: " - + volume.getName() - + "; volume is attached to a non-stopped VM, please stop the VM first"; - if (s_logger.isInfoEnabled()) { - s_logger.info(msg); - } - throw new CloudRuntimeException(msg); - } - hyperType = _volsDao.getHypervisorType(volumeId); - } else { // create template from snapshot - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new InvalidParameterValueException( - "Failed to create private template record, unable to find snapshot " - + snapshotId); - } - - volume = _volsDao.findById(snapshot.getVolumeId()); - - // check permissions - _accountMgr.checkAccess(caller, null, true, snapshot); - - if (snapshot.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for template creation"); - } - - /* - * // bug #11428. Operation not supported if vmware and snapshots - * parent volume = ROOT if(snapshot.getHypervisorType() == - * HypervisorType.VMware && snapshotVolume.getVolumeType() == - * Type.DATADISK){ throw new UnsupportedServiceException( - * "operation not supported, snapshot with id " + snapshotId + - * " is created from Data Disk"); } - */ - - hyperType = snapshot.getHypervisorType(); - } - - _resourceLimitMgr.checkResourceLimit(templateOwner, - ResourceType.template); - - if (!isAdmin || featured == null) { - featured = Boolean.FALSE; - } - Long guestOSId = cmd.getOsTypeId(); - GuestOSVO guestOS = _guestOSDao.findById(guestOSId); - if (guestOS == null) { - throw new InvalidParameterValueException("GuestOS with ID: " - + guestOSId + " does not exist."); - } - - String uniqueName = Long.valueOf((userId == null) ? 
1 : userId) - .toString() - + UUID.nameUUIDFromBytes(name.getBytes()).toString(); - Long nextTemplateId = _templateDao.getNextInSequence(Long.class, "id"); - String description = cmd.getDisplayText(); - boolean isExtractable = false; - Long sourceTemplateId = null; - if (volume != null) { - VMTemplateVO template = ApiDBUtils.findTemplateById(volume - .getTemplateId()); - isExtractable = template != null - && template.isExtractable() - && template.getTemplateType() != Storage.TemplateType.SYSTEM; - if (template != null) { - sourceTemplateId = template.getId(); - } else if (volume.getVolumeType() == Type.ROOT) { // vm created out - // of blank - // template - UserVm userVm = ApiDBUtils.findUserVmById(volume - .getInstanceId()); - sourceTemplateId = userVm.getIsoId(); - } - } - String templateTag = cmd.getTemplateTag(); - if (templateTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding template tag: " + templateTag); - } - } - privateTemplate = new VMTemplateVO(nextTemplateId, uniqueName, name, - ImageFormat.RAW, isPublic, featured, isExtractable, - TemplateType.USER, null, null, requiresHvmValue, bitsValue, - templateOwner.getId(), null, description, passwordEnabledValue, - guestOS.getId(), true, hyperType, templateTag, cmd.getDetails()); - if (sourceTemplateId != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("This template is getting created from other template, setting source template Id to: " - + sourceTemplateId); - } - } - privateTemplate.setSourceTemplateId(sourceTemplateId); - - VMTemplateVO template = _templateDao.persist(privateTemplate); - // Increment the number of templates - if (template != null) { - if (cmd.getDetails() != null) { - _templateDetailsDao.persist(template.getId(), cmd.getDetails()); - } - - _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), - ResourceType.template); - } - - if (template != null) { - return template; - } else { - throw new CloudRuntimeException("Failed to create a template"); 
- } - - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", async = true) - public VMTemplateVO createPrivateTemplate(CreateTemplateCmd command) - throws CloudRuntimeException { - Long userId = UserContext.current().getCallerUserId(); - if (userId == null) { - userId = User.UID_SYSTEM; - } - long templateId = command.getEntityId(); - Long volumeId = command.getVolumeId(); - Long snapshotId = command.getSnapshotId(); - SnapshotCommand cmd = null; - VMTemplateVO privateTemplate = null; - - String uniqueName = getRandomPrivateTemplateName(); - - StoragePoolVO pool = null; - HostVO secondaryStorageHost = null; - Long zoneId = null; - Long accountId = null; - SnapshotVO snapshot = null; - String secondaryStorageURL = null; - try { - if (snapshotId != null) { // create template from snapshot - snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException( - "Unable to find Snapshot for Id " + snapshotId); - } - zoneId = snapshot.getDataCenterId(); - secondaryStorageHost = _snapshotMgr - .getSecondaryStorageHost(snapshot); - secondaryStorageURL = _snapshotMgr - .getSecondaryStorageURL(snapshot); - String name = command.getTemplateName(); - String backupSnapshotUUID = snapshot.getBackupSnapshotId(); - if (backupSnapshotUUID == null) { - throw new CloudRuntimeException( - "Unable to create private template from snapshot " - + snapshotId - + " due to there is no backupSnapshotUUID for this snapshot"); - } - - Long dcId = snapshot.getDataCenterId(); - accountId = snapshot.getAccountId(); - volumeId = snapshot.getVolumeId(); - - String origTemplateInstallPath = null; - List pools = _storageMgr - .ListByDataCenterHypervisor(zoneId, - snapshot.getHypervisorType()); - if (pools == null || pools.size() == 0) { - throw new CloudRuntimeException( - "Unable to find storage pools in zone " + zoneId); - } - pool = pools.get(0); - if (snapshot.getVersion() != null - && 
snapshot.getVersion().equalsIgnoreCase("2.1")) { - VolumeVO volume = _volsDao - .findByIdIncludingRemoved(volumeId); - if (volume == null) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " - + snapshotId - + " due to unable to find orignal volume:" - + volumeId + ", try it later "); - } - if (volume.getTemplateId() == null) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", - "2.2"); - } else { - VMTemplateVO template = _templateDao - .findByIdIncludingRemoved(volume - .getTemplateId()); - if (template == null) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " - + snapshotId - + " due to unalbe to find orignal template :" - + volume.getTemplateId() - + ", try it later "); - } - Long origTemplateId = template.getId(); - Long origTmpltAccountId = template.getAccountId(); - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " + snapshotId - + " due to volume:" + volumeId - + " is being used, try it later "); - } - cmd = new UpgradeSnapshotCommand(null, - secondaryStorageURL, dcId, accountId, volumeId, - origTemplateId, origTmpltAccountId, null, - snapshot.getBackupSnapshotId(), - snapshot.getName(), "2.1"); - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template failed due to volume:" - + volumeId - + " is being used, try it later "); - } - Answer answer = null; - try { - answer = _storageMgr.sendToPool(pool, cmd); - cmd = null; - } catch (StorageUnavailableException e) { - } finally { - _volsDao.unlockFromLockTable(volumeId.toString()); - } - if ((answer != null) && answer.getResult()) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", - "2.2"); - } else { - throw new CloudRuntimeException( - "Unable to upgrade snapshot"); - } - } - } - if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { - _snapshotMgr.downloadSnapshotsFromSwift(snapshot); - } - cmd = new 
CreatePrivateTemplateFromSnapshotCommand(pool, secondaryStorageURL, dcId, accountId, snapshot.getVolumeId(), backupSnapshotUUID, snapshot.getName(), - origTemplateInstallPath, templateId, name, _createprivatetemplatefromsnapshotwait); - } else if (volumeId != null) { - VolumeVO volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new CloudRuntimeException( - "Unable to find volume for Id " + volumeId); - } - accountId = volume.getAccountId(); - - if (volume.getPoolId() == null) { - _templateDao.remove(templateId); - throw new CloudRuntimeException("Volume " + volumeId - + " is empty, can't create template on it"); - } - String vmName = _storageMgr.getVmNameOnVolume(volume); - zoneId = volume.getDataCenterId(); - secondaryStorageHost = _storageMgr - .getSecondaryStorageHost(zoneId); - if (secondaryStorageHost == null) { - throw new CloudRuntimeException( - "Can not find the secondary storage for zoneId " - + zoneId); - } - secondaryStorageURL = secondaryStorageHost.getStorageUrl(); - - pool = _storagePoolDao.findById(volume.getPoolId()); - cmd = new CreatePrivateTemplateFromVolumeCommand(pool, secondaryStorageURL, templateId, accountId, command.getTemplateName(), uniqueName, volume.getPath(), vmName, _createprivatetemplatefromvolumewait); - - } else { - throw new CloudRuntimeException( - "Creating private Template need to specify snapshotId or volumeId"); - } - // FIXME: before sending the command, check if there's enough - // capacity - // on the storage server to create the template - - // This can be sent to a KVM host too. 
- CreatePrivateTemplateAnswer answer = null; - if (snapshotId != null) { - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template from snapshot failed due to snapshot:" - + snapshotId - + " is being used, try it later "); - } - } else { - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template from volume failed due to volume:" - + volumeId - + " is being used, try it later "); - } - } - try { - answer = (CreatePrivateTemplateAnswer) _storageMgr.sendToPool( - pool, cmd); - } catch (StorageUnavailableException e) { - } finally { - if (snapshotId != null) { - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } else { - _volsDao.unlockFromLockTable(volumeId.toString()); - } - } - if ((answer != null) && answer.getResult()) { - privateTemplate = _templateDao.findById(templateId); - String answerUniqueName = answer.getUniqueName(); - if (answerUniqueName != null) { - privateTemplate.setUniqueName(answerUniqueName); - } else { - privateTemplate.setUniqueName(uniqueName); - } - ImageFormat format = answer.getImageFormat(); - if (format != null) { - privateTemplate.setFormat(format); - } else { - // This never occurs. - // Specify RAW format makes it unusable for snapshots. 
- privateTemplate.setFormat(ImageFormat.RAW); - } - - String checkSum = getChecksum(secondaryStorageHost.getId(), - answer.getPath()); - - Transaction txn = Transaction.currentTxn(); - - txn.start(); - - privateTemplate.setChecksum(checkSum); - _templateDao.update(templateId, privateTemplate); - - // add template zone ref for this template - _templateDao.addTemplateToZone(privateTemplate, zoneId); - VMTemplateHostVO templateHostVO = new VMTemplateHostVO( - secondaryStorageHost.getId(), templateId); - templateHostVO.setDownloadPercent(100); - templateHostVO.setDownloadState(Status.DOWNLOADED); - templateHostVO.setInstallPath(answer.getPath()); - templateHostVO.setLastUpdated(new Date()); - templateHostVO.setSize(answer.getVirtualSize()); - templateHostVO.setPhysicalSize(answer.getphysicalSize()); - _templateHostDao.persist(templateHostVO); - - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_TEMPLATE_CREATE, privateTemplate.getAccountId(), - secondaryStorageHost.getDataCenterId(), privateTemplate.getId(), - privateTemplate.getName(), null, privateTemplate.getSourceTemplateId(), - templateHostVO.getSize(), VirtualMachineTemplate.class.getName(), privateTemplate.getUuid()); - txn.commit(); - } - } finally { - if (snapshot != null && snapshot.getSwiftId() != null - && secondaryStorageURL != null && zoneId != null - && accountId != null && volumeId != null) { - _snapshotMgr.deleteSnapshotsForVolume(secondaryStorageURL, - zoneId, accountId, volumeId); - } - if (privateTemplate == null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - // Remove the template record - _templateDao.expunge(templateId); - - // decrement resource count - if (accountId != null) { - _resourceLimitMgr.decrementResourceCount(accountId, - ResourceType.template); - } - txn.commit(); - } - } - - if (privateTemplate != null) { - return privateTemplate; - } else { - throw new CloudRuntimeException("Failed to create a template"); - } - } - - @Override - public String getChecksum(Long 
hostId, String templatePath) { - HostVO ssHost = _hostDao.findById(hostId); - Host.Type type = ssHost.getType(); - if (type != Host.Type.SecondaryStorage - && type != Host.Type.LocalSecondaryStorage) { - return null; - } - String secUrl = ssHost.getStorageUrl(); - Answer answer; - answer = _agentMgr.sendToSecStorage(ssHost, new ComputeChecksumCommand( - secUrl, templatePath)); - if (answer != null && answer.getResult()) { - return answer.getDetails(); - } - return null; - } - // used for vm transitioning to error state private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { @@ -2449,14 +1387,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List volumesForThisVm = _volsDao .findUsableVolumesForInstance(vm.getId()); for (VolumeVO volume : volumesForThisVm) { - try { - if (volume.getState() != Volume.State.Destroy) { - _storageMgr.destroyVolume(volume); - } - } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to delete volume:" - + volume.getId() + " for vm:" + vmId - + " whilst transitioning to error state"); + if (volume.getState() != Volume.State.Destroy) { + this.volumeMgr.destroyVolume(volume); } } String msg = "Failed to deploy Vm with Id: " + vmId + ", on Host with Id: " + hostId; @@ -3577,7 +2509,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + vm.getIsoId()); } - Pair isoPathPair = _storageMgr.getAbsoluteIsoPath( + Pair isoPathPair = this.templateMgr.getAbsoluteIsoPath( template.getId(), vm.getDataCenterId()); if (template.getTemplateType() == TemplateType.PERHOST) { @@ -4893,22 +3825,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use /* If new template is provided allocate a new volume from new template otherwise allocate new volume from original template */ VolumeVO newVol = null; if (newTemplateId != null){ - newVol = _storageMgr.allocateDuplicateVolume(root, newTemplateId); + newVol = 
volumeMgr.allocateDuplicateVolume(root, newTemplateId); vm.setGuestOSId(template.getGuestOSId()); vm.setTemplateId(newTemplateId); _vmDao.update(vmId, vm); - } else newVol = _storageMgr.allocateDuplicateVolume(root, null); + } else { + newVol = volumeMgr.allocateDuplicateVolume(root, null); + } _volsDao.attachVolume(newVol.getId(), vmId, newVol.getDeviceId()); /* Detach and destory the old root volume */ - try { - _volsDao.detachVolume(root.getId()); - _storageMgr.destroyVolume(root); - } catch (ConcurrentOperationException e) { - s_logger.debug("Unable to delete old root volume " + root.getId() - + ", user may manually delete it", e); - } + + _volsDao.detachVolume(root.getId()); + this.volumeMgr.destroyVolume(root); if (needRestart) { try { diff --git a/server/src/com/cloud/vm/VirtualMachineManager.java b/server/src/com/cloud/vm/VirtualMachineManager.java index 6a959fcefb6..7b34f7f0616 100644 --- a/server/src/com/cloud/vm/VirtualMachineManager.java +++ b/server/src/com/cloud/vm/VirtualMachineManager.java @@ -20,6 +20,7 @@ import java.net.URI; import java.util.List; import java.util.Map; + import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.deploy.DeployDestination; diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index d0a2305ecab..5d48f146769 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -36,6 +36,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -118,10 +119,10 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import 
com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; @@ -164,6 +165,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Inject protected StorageManager _storageMgr; @Inject + DataStoreManager dataStoreMgr; + @Inject protected NetworkManager _networkMgr; @Inject protected NetworkModel _networkModel; @@ -232,6 +235,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Inject protected ConfigurationDao _configDao; + @Inject + VolumeManager volumeMgr; Map> _vmGurus = new HashMap>(); protected StateMachine2 _stateMachine; @@ -298,15 +303,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (template.getFormat() == ImageFormat.ISO) { - _storageMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vm, owner); + this.volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vm, owner); } else if (template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { - _storageMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), template, vm, owner); + this.volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), template, vm, owner); } for (Pair offering : dataDiskOfferings) { - _storageMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(), offering.first(), offering.second(), vm, owner); + this.volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(), offering.first(), offering.second(), vm, owner); } txn.commit(); @@ -394,7 +399,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac 
s_logger.debug("Cleaning up NICS"); _networkMgr.cleanupNics(profile); // Clean up volumes based on the vm's instance id - _storageMgr.cleanupVolumes(vm.getId()); + this.volumeMgr.cleanupVolumes(vm.getId()); VirtualMachineGuru guru = getVmGuru(vm); guru.finalizeExpunge(vm); @@ -661,7 +666,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac continue; } - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); + StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); if (!pool.isInMaintenance()) { if (s_logger.isDebugEnabled()) { s_logger.debug("Root volume is ready, need to place VM in volume's cluster"); @@ -738,7 +743,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } _networkMgr.prepare(vmProfile, dest, ctx); if (vm.getHypervisorType() != HypervisorType.BareMetal) { - _storageMgr.prepare(vmProfile, dest); + this.volumeMgr.prepare(vmProfile, dest); } //since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity if(!reuseVolume){ @@ -953,7 +958,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Unable to release some network resources.", e); } - _storageMgr.release(profile); + this.volumeMgr.release(profile); s_logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state"); return true; } @@ -1102,7 +1107,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (vm.getHypervisorType() != HypervisorType.BareMetal) { - _storageMgr.release(profile); + this.volumeMgr.release(profile); s_logger.debug("Successfully released storage resources for the vm " + vm); } } catch (Exception e) { @@ -1221,7 +1226,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); boolean migrationResult = false; try { - 
migrationResult = _storageMgr.StorageMigration(profile, destPool); + migrationResult = this.volumeMgr.StorageMigration(profile, destPool); if (migrationResult) { //if the vm is migrated to different pod in basic mode, need to reallocate ip @@ -1306,7 +1311,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); _networkMgr.prepareNicForMigration(profile, dest); - _storageMgr.prepareForMigration(profile, dest); + this.volumeMgr.prepareForMigration(profile, dest); VirtualMachineTO to = toVmTO(profile); PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to); diff --git a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java index e83d6a0d926..eb9e5ad29c2 100644 --- a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; + import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.offering.ServiceOffering; diff --git a/server/test/com/cloud/vm/MockUserVmManagerImpl.java b/server/test/com/cloud/vm/MockUserVmManagerImpl.java index 1ee627fb738..09825a8eeb6 100644 --- a/server/test/com/cloud/vm/MockUserVmManagerImpl.java +++ b/server/test/com/cloud/vm/MockUserVmManagerImpl.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -40,8 +39,6 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import 
org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.springframework.stereotype.Component; import com.cloud.agent.api.StopAnswer; @@ -69,7 +66,6 @@ import com.cloud.offering.ServiceOffering; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.Criteria; import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.uservm.UserVm; @@ -155,11 +151,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public boolean attachISOToVM(long vmId, long isoId, boolean attach) { - // TODO Auto-generated method stub - return false; - } @Override public boolean stopVirtualMachine(long userId, long vmId) { @@ -209,12 +200,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public String getChecksum(Long hostId, String templatePath) { - // TODO Auto-generated method stub - return null; - } - @Override public boolean configure(String name, Map params) throws ConfigurationException { return true; @@ -255,24 +240,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Volume attachVolumeToVM(AttachVolumeCmd cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { - // TODO Auto-generated method stub 
- return null; - } - @Override public UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, ExecutionException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { @@ -316,18 +283,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException { - // TODO Auto-generated method stub - return null; - } - - @Override - public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd cmd) { - // TODO Auto-generated method stub - return null; - } - @Override public UserVm startVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub @@ -469,4 +424,9 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return false; } + @Override + public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException { + // TODO Auto-generated method stub + return null; + } } diff --git a/setup/db/templates.sql b/setup/db/templates.sql index 9980b159630..7abc92da63b 100755 --- a/setup/db/templates.sql +++ b/setup/db/templates.sql @@ -15,28 +15,28 @@ -- specific language governing permissions and limitations -- under the License. 
-INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (1, UUID(), 'routing-1', 'SystemVM Template (XenServer)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (XenServer)', 'VHD', 133, 0, 1, 'XenServer'); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, removed, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) - VALUES (2, UUID(), 'centos53-x86_64', 'CentOS 5.3(64-bit) no GUI (XenServer)', 1, now(), now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/templates/builtin/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', 'b63d854a9560c013142567bbae8d98cf', 0, 'CentOS 5.3(64-bit) no GUI (XenServer)', 'VHD', 12, 1, 1, 'XenServer', 1); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (1, UUID(), 'routing-1', 'SystemVM Template (XenServer)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (XenServer)', 'VHD', 133, 0, 1, 'XenServer', 1); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, removed, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable, image_data_store_id) + VALUES (2, UUID(), 'centos53-x86_64', 'CentOS 5.3(64-bit) no GUI (XenServer)', 1, now(), now(), 'BUILTIN', 0, 64, 1, 
'http://download.cloud.com/templates/builtin/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', 'b63d854a9560c013142567bbae8d98cf', 0, 'CentOS 5.3(64-bit) no GUI (XenServer)', 'VHD', 12, 1, 1, 'XenServer', 1, 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (3, UUID(), 'routing-3', 'SystemVM Template (KVM)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2', '2755de1f9ef2ce4d6f2bee2efbb4da92', 0, 'SystemVM Template (KVM)', 'QCOW2', 15, 0, 1, 'KVM'); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (3, UUID(), 'routing-3', 'SystemVM Template (KVM)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2', '2755de1f9ef2ce4d6f2bee2efbb4da92', 0, 'SystemVM Template (KVM)', 'QCOW2', 15, 0, 1, 'KVM', 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, display_text, enable_password, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) - VALUES (4, UUID(), 'centos55-x86_64', 'CentOS 5.5(64-bit) no GUI (KVM)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2', 'ed0e788280ff2912ea40f7f91ca7a249', 'CentOS 5.5(64-bit) no GUI (KVM)', 0, 'QCOW2', 112, 1, 1, 'KVM', 1); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, display_text, enable_password, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable, image_data_store_id) + VALUES (4, 
UUID(), 'centos55-x86_64', 'CentOS 5.5(64-bit) no GUI (KVM)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2', 'ed0e788280ff2912ea40f7f91ca7a249', 'CentOS 5.5(64-bit) no GUI (KVM)', 0, 'QCOW2', 112, 1, 1, 'KVM', 1, 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) - VALUES (5, UUID(), 'centos56-x86_64-xen', 'CentOS 5.6(64-bit) no GUI (XenServer)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/templates/builtin/centos56-x86_64.vhd.bz2', '905cec879afd9c9d22ecc8036131a180', 0, 'CentOS 5.6(64-bit) no GUI (XenServer)', 'VHD', 12, 1, 1, 'XenServer', 1); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable, image_data_store_id) + VALUES (5, UUID(), 'centos56-x86_64-xen', 'CentOS 5.6(64-bit) no GUI (XenServer)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/templates/builtin/centos56-x86_64.vhd.bz2', '905cec879afd9c9d22ecc8036131a180', 0, 'CentOS 5.6(64-bit) no GUI (XenServer)', 'VHD', 12, 1, 1, 'XenServer', 1, 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) - VALUES (7, UUID(), 'centos53-x64', 'CentOS 5.3(64-bit) no GUI (vSphere)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/CentOS5.3-x86_64.ova', 'f6f881b7f2292948d8494db837fe0f47', 0, 'CentOS 5.3(64-bit) no GUI (vSphere)', 'OVA', 12, 1, 1, 'VMware', 1); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, 
account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable, image_data_store_id) + VALUES (7, UUID(), 'centos53-x64', 'CentOS 5.3(64-bit) no GUI (vSphere)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/CentOS5.3-x86_64.ova', 'f6f881b7f2292948d8494db837fe0f47', 0, 'CentOS 5.3(64-bit) no GUI (vSphere)', 'OVA', 12, 1, 1, 'VMware', 1, 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (8, UUID(), 'routing-8', 'SystemVM Template (vSphere)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova', '7137e453f950079ea2ba6feaafd939e8', 0, 'SystemVM Template (vSphere)', 'OVA', 15, 0, 1, 'VMware'); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (8, UUID(), 'routing-8', 'SystemVM Template (vSphere)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova', '7137e453f950079ea2ba6feaafd939e8', 0, 'SystemVM Template (vSphere)', 'OVA', 15, 0, 1, 'VMware', 1); -INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (9, UUID(), 'routing-9', 'SystemVM Template (HyperV)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (HyperV)', 'VHD', 15, 0, 1, 'Hyperv'); +INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, 
created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, image_data_store_id) + VALUES (9, UUID(), 'routing-9', 'SystemVM Template (HyperV)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (HyperV)', 'VHD', 15, 0, 1, 'Hyperv', 1); INSERT INTO `cloud`.`guest_os_category` (id, uuid, name) VALUES (1, UUID(), 'CentOS'); INSERT INTO `cloud`.`guest_os_category` (id, uuid, name) VALUES (2, UUID(), 'Debian'); From ec09e34f86ede94cc9f131025236d88785aff6d5 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 30 Jan 2013 21:06:12 -0800 Subject: [PATCH 111/486] fix downloading template --- .../subsystem/api/storage/ClusterScope.java | 14 +++++++------- .../engine/subsystem/api/storage/HostScope.java | 6 +++--- .../engine/subsystem/api/storage/Scope.java | 2 +- .../engine/subsystem/api/storage/ZoneScope.java | 6 +++--- .../src/com/cloud/storage/TemplateProfile.java | 11 +++++------ .../cloud/template/HyervisorTemplateAdapter.java | 8 ++++---- .../com/cloud/template/TemplateAdapterBase.java | 16 +++++++++++----- .../src/com/cloud/template/TemplateManager.java | 2 +- .../com/cloud/template/TemplateManagerImpl.java | 2 +- 9 files changed, 36 insertions(+), 31 deletions(-) diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java index 50d5444233b..fce7d82cb99 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ClusterScope.java @@ -21,11 +21,11 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public class ClusterScope implements Scope { private ScopeType type = ScopeType.CLUSTER; - private long clusterId; - private long podId; - 
private long zoneId; + private Long clusterId; + private Long podId; + private Long zoneId; - public ClusterScope(long clusterId, long podId, long zoneId) { + public ClusterScope(Long clusterId, Long podId, Long zoneId) { this.clusterId = clusterId; this.podId = podId; this.zoneId = zoneId; @@ -37,15 +37,15 @@ public class ClusterScope implements Scope { } @Override - public long getScopeId() { + public Long getScopeId() { return this.clusterId; } - public long getPodId() { + public Long getPodId() { return this.podId; } - public long getZoneId() { + public Long getZoneId() { return this.zoneId; } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java index da36e439376..71d1952c625 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/HostScope.java @@ -21,8 +21,8 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public class HostScope implements Scope { private ScopeType type = ScopeType.HOST; - private long hostId; - public HostScope(long hostId) { + private Long hostId; + public HostScope(Long hostId) { this.hostId = hostId; } @Override @@ -31,7 +31,7 @@ public class HostScope implements Scope { } @Override - public long getScopeId() { + public Long getScopeId() { return this.hostId; } } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java index a9601a138bf..c1596d4f5f7 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/Scope.java @@ -20,5 +20,5 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public interface Scope { public ScopeType getScopeType(); - public long getScopeId(); + 
public Long getScopeId(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java index 7f211f4f9e9..ac277af36de 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ZoneScope.java @@ -21,9 +21,9 @@ package org.apache.cloudstack.engine.subsystem.api.storage; public class ZoneScope implements Scope { private ScopeType type = ScopeType.ZONE; - private long zoneId; + private Long zoneId; - public ZoneScope(long zoneId) { + public ZoneScope(Long zoneId) { this.zoneId = zoneId; } @@ -33,7 +33,7 @@ public class ZoneScope implements Scope { } @Override - public long getScopeId() { + public Long getScopeId() { return this.zoneId; } diff --git a/server/src/com/cloud/storage/TemplateProfile.java b/server/src/com/cloud/storage/TemplateProfile.java index 41bbaaa1057..0b55f1fbea2 100755 --- a/server/src/com/cloud/storage/TemplateProfile.java +++ b/server/src/com/cloud/storage/TemplateProfile.java @@ -18,7 +18,6 @@ package com.cloud.storage; import java.util.Map; - import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; @@ -46,7 +45,7 @@ public class TemplateProfile { Long templateId; VMTemplateVO template; String templateTag; - String imageStoreUuid; + Long imageStoreId; Map details; public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, @@ -85,11 +84,11 @@ public class TemplateProfile { public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, Long zoneId, HypervisorType hypervisorType, String accountName, Long domainId, 
Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled, - String imageStoreUuid) { + Long imageStoreId) { this(templateId, userId, name, displayText, bits, passwordEnabled, requiresHvm, url, isPublic, featured, isExtractable, format, guestOsId, zoneId, hypervisorType, accountName, domainId, accountId, chksum, bootable, details, sshKeyEnabled); this.templateTag = templateTag; - this.imageStoreUuid = imageStoreUuid; + this.imageStoreId = imageStoreId; } public Long getTemplateId() { @@ -256,7 +255,7 @@ public class TemplateProfile { return this.sshKeyEnbaled; } - public String getImageStoreUuid() { - return this.imageStoreUuid; + public Long getImageStoreId() { + return this.imageStoreId; } } diff --git a/server/src/com/cloud/template/HyervisorTemplateAdapter.java b/server/src/com/cloud/template/HyervisorTemplateAdapter.java index fa72e75612f..c1177f4a060 100755 --- a/server/src/com/cloud/template/HyervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HyervisorTemplateAdapter.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; import org.apache.cloudstack.framework.async.AsyncCallFuture; @@ -54,8 +55,8 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; import 
com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.secondary.SecondaryStorageVmManager; @@ -79,6 +80,7 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem @Inject DownloadMonitor _downloadMonitor; @Inject SecondaryStorageVmManager _ssvmMgr; @Inject AgentManager _agentMgr; + @Inject DataStoreManager storeMgr; @Inject ImageService imageService; @Inject ImageDataFactory imageFactory; @@ -89,8 +91,6 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem public String getName() { return TemplateAdapterType.Hypervisor.getName(); } - - private String validateUrl(String url) { try { @@ -171,7 +171,7 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate()); } - DataStore imageStore = this.templateMgr.getImageStore(profile.getImageStoreUuid(), profile.getZoneId()); + DataStore imageStore = this.storeMgr.getDataStore(profile.getImageStoreId(), DataStoreRole.Image); AsyncCallFuture future = this.imageService.createTemplateAsync(this.imageFactory.getTemplate(template.getId()), imageStore); try { diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index c5074ad0a8a..247ce636cf2 100755 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; @@ -46,10 +45,10 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; import com.cloud.storage.GuestOS; -import com.cloud.storage.VMTemplateVO; import 
com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.TemplateProfile; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; @@ -80,6 +79,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat protected @Inject HostDao _hostDao; protected @Inject ResourceLimitService _resourceLimitMgr; protected @Inject DataStoreManager storeMgr; + @Inject TemplateManager templateMgr; @Override public boolean stop() { @@ -211,10 +211,16 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } } + DataStore imageStore = this.templateMgr.getImageStore(imageStoreUuid, zoneId); + if (imageStore == null) { + throw new IllegalArgumentException("Cann't find an image store"); + } + Long imageStoreId = imageStore.getId(); + Long id = _tmpltDao.getNextInSequence(Long.class, "id"); UserContext.current().setEventDetails("Id: " +id+ " name: " + name); return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, - featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled, imageStoreUuid); + featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled, imageStoreId); } @Override @@ -224,7 +230,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId()); _accountMgr.checkAccess(caller, null, true, owner); - + return prepare(false, UserContext.current().getCallerUserId(), cmd.getTemplateName(), cmd.getDisplayText(), 
cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), @@ -251,7 +257,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat profile.getPasswordEnabled(), profile.getGuestOsId(), profile.getBootable(), profile.getHypervisorType(), profile.getTemplateTag(), profile.getDetails(), profile.getSshKeyEnabled()); - + template.setImageDataStoreId(profile.getImageStoreId()); if (zoneId == null || zoneId.longValue() == -1) { List dcs = _dcDao.listAll(); diff --git a/server/src/com/cloud/template/TemplateManager.java b/server/src/com/cloud/template/TemplateManager.java index 1b054614b20..19ba3b52734 100755 --- a/server/src/com/cloud/template/TemplateManager.java +++ b/server/src/com/cloud/template/TemplateManager.java @@ -113,7 +113,7 @@ public interface TemplateManager extends TemplateService{ Long getTemplateSize(long templateId, long zoneId); - DataStore getImageStore(String storeUuid, long zoneId); + DataStore getImageStore(String storeUuid, Long zoneId); String getChecksum(Long hostId, String templatePath); diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 736f712b9c9..101c3d9c714 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -320,7 +320,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override - public DataStore getImageStore(String storeUuid, long zoneId) { + public DataStore getImageStore(String storeUuid, Long zoneId) { DataStore imageStore = null; if (storeUuid != null) { imageStore = this.dataStoreMgr.getDataStore(storeUuid, DataStoreRole.Image); From 0b86b33c56b903dd6b7827c79e441eb1095e395a Mon Sep 17 00:00:00 2001 From: Edison Su Date: Tue, 5 Feb 2013 18:20:51 -0800 Subject: [PATCH 112/486] rebase to master --- core/src/com/cloud/storage/SnapshotVO.java | 1 + 
.../api/commands/GetUsageRecordsCmd.java | 372 ------------------ .../com/cloud/storage/VolumeManagerImpl.java | 4 +- 3 files changed, 3 insertions(+), 374 deletions(-) delete mode 100644 server/src/com/cloud/api/commands/GetUsageRecordsCmd.java diff --git a/core/src/com/cloud/storage/SnapshotVO.java b/core/src/com/cloud/storage/SnapshotVO.java index 68336cb97ec..f44212ff396 100644 --- a/core/src/com/cloud/storage/SnapshotVO.java +++ b/core/src/com/cloud/storage/SnapshotVO.java @@ -248,6 +248,7 @@ public class SnapshotVO implements Snapshot { return state; } + public void setState(State state) { this.state = state; } diff --git a/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java b/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java deleted file mode 100644 index 36d66d9dc96..00000000000 --- a/server/src/com/cloud/api/commands/GetUsageRecordsCmd.java +++ /dev/null @@ -1,372 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.api.commands; - -import java.text.DecimalFormat; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.List; -import java.util.TimeZone; - -import org.apache.cloudstack.api.response.AccountResponse; -import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.cloudstack.api.response.ProjectResponse; -import org.apache.log4j.Logger; - -import org.apache.cloudstack.api.ApiConstants; -import com.cloud.api.ApiDBUtils; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; - -import org.apache.cloudstack.api.BaseListCmd; -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.response.ListResponse; -import com.cloud.projects.Project; -import com.cloud.server.ManagementServerExt; -import com.cloud.storage.VMTemplateVO; - -import org.apache.cloudstack.api.response.UsageRecordResponse; - -import com.cloud.usage.UsageTypes; -import com.cloud.usage.UsageVO; -import com.cloud.user.Account; -import com.cloud.uuididentity.dao.IdentityDao; -import com.cloud.uuididentity.dao.IdentityDaoImpl; -import com.cloud.vm.VMInstanceVO; - -@APICommand(name = "listUsageRecords", description="Lists usage records for accounts", responseObject=UsageRecordResponse.class) -public class GetUsageRecordsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(GetUsageRecordsCmd.class.getName()); - - private static final String s_name = "listusagerecordsresponse"; - - ///////////////////////////////////////////////////// - //////////////// API parameters ///////////////////// - ///////////////////////////////////////////////////// - - @Parameter(name=ApiConstants.ACCOUNT, type=CommandType.STRING, description="List usage records for the specified user.") - private String accountName; - - @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.UUID, entityType = DomainResponse.class, - description="List usage records 
for the specified domain.") - private Long domainId; - - @Parameter(name=ApiConstants.END_DATE, type=CommandType.DATE, required=true, description="End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.") - private Date endDate; - - @Parameter(name=ApiConstants.START_DATE, type=CommandType.DATE, required=true, description="Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.") - private Date startDate; - - @Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.UUID, entityType = AccountResponse.class, - description="List usage records for the specified account") - private Long accountId; - - @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, - description="List usage records for specified project") - private Long projectId; - - @Parameter(name=ApiConstants.TYPE, type=CommandType.LONG, description="List usage records for the specified usage type") - private Long usageType; - - ///////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - public String getAccountName() { - return accountName; - } - - public Long getDomainId() { - return domainId; - } - - public Date getEndDate() { - return endDate; - } - - public Date getStartDate() { - return startDate; - } - - public Long getAccountId() { - return accountId; - } - - public Long getUsageType() { - return usageType; - } - - public Long getProjectId() { - return projectId; - } - - ///////////////////////////////////////////////////// - /////////////// Misc parameters /////////////////// - ///////////////////////////////////////////////////// - - private TimeZone usageTimezone; - - public TimeZone getUsageTimezone() { - return usageTimezone; - } - - public void setUsageTimezone(TimeZone tz) { - this.usageTimezone = tz; - } - - 
///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// - - @Override - public String getCommandName() { - return s_name; - } - - public String getDateStringInternal(Date inputDate) { - if (inputDate == null) return null; - - TimeZone tz = getUsageTimezone(); - Calendar cal = Calendar.getInstance(tz); - cal.setTime(inputDate); - - StringBuffer sb = new StringBuffer(); - sb.append(cal.get(Calendar.YEAR)+"-"); - - int month = cal.get(Calendar.MONTH) + 1; - if (month < 10) { - sb.append("0" + month + "-"); - } else { - sb.append(month+"-"); - } - - int day = cal.get(Calendar.DAY_OF_MONTH); - if (day < 10) { - sb.append("0" + day); - } else { - sb.append(""+day); - } - - sb.append("'T'"); - - int hour = cal.get(Calendar.HOUR_OF_DAY); - if (hour < 10) { - sb.append("0" + hour + ":"); - } else { - sb.append(hour+":"); - } - - int minute = cal.get(Calendar.MINUTE); - if (minute < 10) { - sb.append("0" + minute + ":"); - } else { - sb.append(minute+":"); - } - - int seconds = cal.get(Calendar.SECOND); - if (seconds < 10) { - sb.append("0" + seconds); - } else { - sb.append(""+seconds); - } - - double offset = cal.get(Calendar.ZONE_OFFSET); - if (tz.inDaylightTime(inputDate)) { - offset += (1.0*tz.getDSTSavings()); // add the timezone's DST value (typically 1 hour expressed in milliseconds) - } - - offset = offset / (1000d*60d*60d); - int hourOffset = (int)offset; - double decimalVal = Math.abs(offset) - Math.abs(hourOffset); - int minuteOffset = (int)(decimalVal * 60); - - if (hourOffset < 0) { - if (hourOffset > -10) { - sb.append("-0"+Math.abs(hourOffset)); - } else { - sb.append("-"+Math.abs(hourOffset)); - } - } else { - if (hourOffset < 10) { - sb.append("+0" + hourOffset); - } else { - sb.append("+" + hourOffset); - } - } - - sb.append(":"); - - if (minuteOffset == 0) { - sb.append("00"); - } else if (minuteOffset < 10) { - sb.append("0" + minuteOffset); - } 
else { - sb.append("" + minuteOffset); - } - - return sb.toString(); - } - - @Override - public void execute(){ - ManagementServerExt _mgrExt = (ManagementServerExt)_mgr; - List usageRecords = _mgrExt.getUsageRecords(this); - IdentityDao identityDao = new IdentityDaoImpl(); - ListResponse response = new ListResponse(); - List usageResponses = new ArrayList(); - for (Object usageRecordGeneric : usageRecords) { - UsageRecordResponse usageRecResponse = new UsageRecordResponse(); - if (usageRecordGeneric instanceof UsageVO) { - UsageVO usageRecord = (UsageVO)usageRecordGeneric; - - Account account = ApiDBUtils.findAccountByIdIncludingRemoved(usageRecord.getAccountId()); - if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) { - //find the project - Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); - usageRecResponse.setProjectId(project.getUuid()); - usageRecResponse.setProjectName(project.getName()); - } else { - usageRecResponse.setAccountId(account.getUuid()); - usageRecResponse.setAccountName(account.getAccountName()); - } - - Domain domain = ApiDBUtils.findDomainById(usageRecord.getDomainId()); - if (domain != null) { - usageRecResponse.setDomainId(domain.getUuid()); - } - - if (usageRecord.getZoneId() != null) { - DataCenter zone = ApiDBUtils.findZoneById(usageRecord.getZoneId()); - if (zone != null) { - usageRecResponse.setZoneId(zone.getUuid()); - } - } - usageRecResponse.setDescription(usageRecord.getDescription()); - usageRecResponse.setUsage(usageRecord.getUsageDisplay()); - usageRecResponse.setUsageType(usageRecord.getUsageType()); - if (usageRecord.getVmInstanceId() != null) { - VMInstanceVO vm = ApiDBUtils.findVMInstanceById(usageRecord.getVmInstanceId()); - if (vm != null) { - usageRecResponse.setVirtualMachineId(vm.getUuid()); - } - } - usageRecResponse.setVmName(usageRecord.getVmName()); - if (usageRecord.getTemplateId() != null) { - VMTemplateVO template = ApiDBUtils.findTemplateById(usageRecord.getTemplateId()); - if 
(template != null) { - usageRecResponse.setTemplateId(template.getUuid()); - } - } - - if(usageRecord.getUsageType() == UsageTypes.RUNNING_VM || usageRecord.getUsageType() == UsageTypes.ALLOCATED_VM){ - //Service Offering Id - usageRecResponse.setOfferingId(identityDao.getIdentityUuid("disk_offering", usageRecord.getOfferingId().toString())); - //VM Instance ID - usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_instance", usageRecord.getUsageId().toString())); - //Hypervisor Type - usageRecResponse.setType(usageRecord.getType()); - - } else if(usageRecord.getUsageType() == UsageTypes.IP_ADDRESS){ - //isSourceNAT - usageRecResponse.setSourceNat((usageRecord.getType().equals("SourceNat"))?true:false); - //isSystem - usageRecResponse.setSystem((usageRecord.getSize() == 1)?true:false); - //IP Address ID - usageRecResponse.setUsageId(identityDao.getIdentityUuid("user_ip_address", usageRecord.getUsageId().toString())); - - } else if(usageRecord.getUsageType() == UsageTypes.NETWORK_BYTES_SENT || usageRecord.getUsageType() == UsageTypes.NETWORK_BYTES_RECEIVED){ - //Device Type - usageRecResponse.setType(usageRecord.getType()); - if(usageRecord.getType().equals("DomainRouter")){ - //Domain Router Id - usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_instance", usageRecord.getUsageId().toString())); - } else { - //External Device Host Id - usageRecResponse.setUsageId(identityDao.getIdentityUuid("host", usageRecord.getUsageId().toString())); - } - //Network ID - usageRecResponse.setNetworkId(identityDao.getIdentityUuid("networks", usageRecord.getNetworkId().toString())); - - } else if(usageRecord.getUsageType() == UsageTypes.VOLUME){ - //Volume ID - usageRecResponse.setUsageId(identityDao.getIdentityUuid("volumes", usageRecord.getUsageId().toString())); - //Volume Size - usageRecResponse.setSize(usageRecord.getSize()); - //Disk Offering Id - if(usageRecord.getOfferingId() != null){ - 
usageRecResponse.setOfferingId(identityDao.getIdentityUuid("disk_offering", usageRecord.getOfferingId().toString())); - } - - } else if(usageRecord.getUsageType() == UsageTypes.TEMPLATE || usageRecord.getUsageType() == UsageTypes.ISO){ - //Template/ISO ID - usageRecResponse.setUsageId(identityDao.getIdentityUuid("vm_template", usageRecord.getUsageId().toString())); - //Template/ISO Size - usageRecResponse.setSize(usageRecord.getSize()); - - } else if(usageRecord.getUsageType() == UsageTypes.SNAPSHOT){ - //Snapshot ID - usageRecResponse.setUsageId(identityDao.getIdentityUuid("snapshots", usageRecord.getUsageId().toString())); - //Snapshot Size - usageRecResponse.setSize(usageRecord.getSize()); - - } else if(usageRecord.getUsageType() == UsageTypes.LOAD_BALANCER_POLICY){ - //Load Balancer Policy ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); - - } else if(usageRecord.getUsageType() == UsageTypes.PORT_FORWARDING_RULE){ - //Port Forwarding Rule ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); - - } else if(usageRecord.getUsageType() == UsageTypes.NETWORK_OFFERING){ - //Network Offering Id - usageRecResponse.setOfferingId(identityDao.getIdentityUuid("network_offerings", usageRecord.getOfferingId().toString())); - //is Default - usageRecResponse.setDefault((usageRecord.getUsageId() == 1)? 
true:false); - - } else if(usageRecord.getUsageType() == UsageTypes.VPN_USERS){ - //VPN User ID - usageRecResponse.setUsageId(usageRecord.getUsageId().toString()); - - } else if(usageRecord.getUsageType() == UsageTypes.SECURITY_GROUP){ - //Security Group Id - usageRecResponse.setUsageId(identityDao.getIdentityUuid("security_group", usageRecord.getUsageId().toString())); - } - - if (usageRecord.getRawUsage() != null) { - DecimalFormat decimalFormat = new DecimalFormat("###########.######"); - usageRecResponse.setRawUsage(decimalFormat.format(usageRecord.getRawUsage())); - } - - if (usageRecord.getStartDate() != null) { - usageRecResponse.setStartDate(getDateStringInternal(usageRecord.getStartDate())); - } - if (usageRecord.getEndDate() != null) { - usageRecResponse.setEndDate(getDateStringInternal(usageRecord.getEndDate())); - } - } - - usageRecResponse.setObjectName("usagerecord"); - usageResponses.add(usageRecResponse); - } - - response.setResponses(usageResponses); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - } -} diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 5843dddbec5..8c0e1428e41 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -893,9 +893,9 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { "unable to find a snapshot with id " + snapshotId); } - if (snapshotCheck.getStatus() != Snapshot.Status.BackedUp) { + if (snapshotCheck.getState() != Snapshot.State.BackedUp) { throw new InvalidParameterValueException("Snapshot id=" - + snapshotId + " is not in " + Snapshot.Status.BackedUp + + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); } From 621a779446c44b996a8cd0f98dc6447afbebeacf Mon Sep 17 00:00:00 2001 From: Edison Su Date: Thu, 7 Feb 2013 14:57:06 -0800 Subject: [PATCH 113/486] 
use correct disk offering id --- .../storage/motion/AncientDataMotionStrategy.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index d686336a7d7..70f65c7a5fa 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -282,10 +282,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } protected String cloneVolume(DataObject template, DataObject volume) { - - DiskOfferingVO offering = diskOfferingDao.findById(volume.getId()); - VMTemplateStoragePoolVO tmpltStoredOn = templatePoolDao.findByPoolTemplate(template.getDataStore().getId(), template.getId()); VolumeInfo volInfo = (VolumeInfo)volume; + DiskOfferingVO offering = diskOfferingDao.findById(volInfo.getDiskOfferingId()); + VMTemplateStoragePoolVO tmpltStoredOn = templatePoolDao.findByPoolTemplate(template.getDataStore().getId(), template.getId()); + DiskProfile diskProfile = new DiskProfile(volInfo, offering, null); CreateCommand cmd = new CreateCommand(diskProfile, From 020be66f9d8025198e9f6dc0d15d54349eeb61a5 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Fri, 8 Feb 2013 18:22:00 -0800 Subject: [PATCH 114/486] add copy volume and create volume from snapshot --- .../com/cloud/storage/VolumeApiService.java | 3 +- .../command/user/volume/MigrateVolumeCmd.java | 2 +- .../motion/AncientDataMotionStrategy.java | 58 ++++- .../storage/volume/VolumeServiceImpl.java | 97 +++++++- .../src/com/cloud/storage/VolumeManager.java | 8 +- .../com/cloud/storage/VolumeManagerImpl.java | 212 ++++-------------- .../storage/snapshot/SnapshotManager.java | 3 +- .../storage/snapshot/SnapshotManagerImpl.java | 2 +- .../cloud/vm/VirtualMachineManagerImpl.java | 2 +- 9 files 
changed, 205 insertions(+), 182 deletions(-) diff --git a/api/src/com/cloud/storage/VolumeApiService.java b/api/src/com/cloud/storage/VolumeApiService.java index 92880f4184d..8517988dfc6 100644 --- a/api/src/com/cloud/storage/VolumeApiService.java +++ b/api/src/com/cloud/storage/VolumeApiService.java @@ -21,6 +21,7 @@ package com.cloud.storage; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; @@ -61,7 +62,7 @@ public interface VolumeApiService { */ Volume resizeVolume(ResizeVolumeCmd cmd); - Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException; + Volume migrateVolume(MigrateVolumeCmd cmd) throws ConcurrentOperationException; /** * Uploads the volume to secondary storage diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java index 8c09f8fbb72..287241a8d90 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java @@ -92,7 +92,7 @@ public class MigrateVolumeCmd extends BaseAsyncCmd { public void execute(){ Volume result; try { - result = _volumeService.migrateVolume(getVolumeId(), getStoragePoolId()); + result = _volumeService.migrateVolume(this); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java 
b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 70f65c7a5fa..ed3ca6aa8d9 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -329,6 +329,59 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { return errMsg; } + + protected String copyVolumeBetweenPools(DataObject srcData, DataObject destData) { + VolumeInfo volume = (VolumeInfo)srcData; + VolumeInfo destVolume = (VolumeInfo)destData; + String secondaryStorageURL = this.templateMgr.getSecondaryStorageURL(volume + .getDataCenterId()); + StoragePool srcPool = (StoragePool)this.dataStoreMgr.getDataStore(volume + .getPoolId(), DataStoreRole.Primary); + + StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(destVolume.getPoolId(), DataStoreRole.Primary); + + String value = this.configDao.getValue(Config.CopyVolumeWait.toString()); + int _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), + volume.getPath(), srcPool, secondaryStorageURL, true, + _copyvolumewait); + CopyVolumeAnswer cvAnswer; + try { + cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(srcPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage.", + e1); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage."); + } + + String secondaryStorageVolumePath = cvAnswer.getVolumePath(); + + cvCmd = new CopyVolumeCommand(volume.getId(), + secondaryStorageVolumePath, destPool, + secondaryStorageURL, false, _copyvolumewait); + try { + cvAnswer = (CopyVolumeAnswer) 
this.storagMgr.sendToPool(destPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy the volume from secondary storage to the destination primary storage pool."); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + throw new CloudRuntimeException( + "Failed to copy the volume from secondary storage to the destination primary storage pool."); + } + + VolumeVO destVol = this.volDao.findById(destVolume.getId()); + destVol.setPath(cvAnswer.getVolumePath()); + this.volDao.update(destVol.getId(), destVol); + return null; + } @Override public Void copyAsync(DataObject srcData, DataObject destData, @@ -336,7 +389,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { String errMsg = null; try { if (destData.getType() == DataObjectType.VOLUME - && srcData.getType() == DataObjectType.VOLUME) { + && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Image) { errMsg = copyVolumeFromImage(srcData, destData); } else if (destData.getType() == DataObjectType.TEMPLATE && srcData.getType() == DataObjectType.TEMPLATE) { @@ -353,6 +406,9 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } else if (srcData.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.VOLUME) { errMsg = cloneVolume(srcData, destData); + } else if (destData.getType() == DataObjectType.VOLUME + && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary) { + errMsg = copyVolumeBetweenPools(srcData, destData); } } catch (Exception e) { s_logger.debug("copy failed", e); diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 891ad1249df..ef99a49b809 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ 
b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -45,12 +45,14 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.exception.ConcurrentOperationException; +import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.db.DB; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; @@ -420,14 +422,105 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture createVolumeFromSnapshot( VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { - // TODO Auto-generated method stub + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult result = new VolumeApiResult(volume); return null; } + + protected VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePool pool) { + Long lastPoolId = volume.getPoolId(); + VolumeVO newVol = new VolumeVO(volume); + newVol.setPoolId(pool.getId()); + newVol.setFolder(pool.getPath()); + newVol.setPodId(pool.getPodId()); + newVol.setPoolId(pool.getId()); + newVol.setLastPoolId(lastPoolId); + newVol.setPodId(pool.getPodId()); + return this.volDao.persist(newVol); + } + + private class CopyVolumeContext extends AsyncRpcConext { + final VolumeInfo srcVolume; + final VolumeInfo destVolume; + final DataStore destStore; + final AsyncCallFuture future; + /** + * @param callback + */ + public CopyVolumeContext(AsyncCompletionCallback callback, AsyncCallFuture future, VolumeInfo srcVolume, VolumeInfo destVolume, + DataStore destStore) { + super(callback); + this.srcVolume = srcVolume; + this.destVolume = destVolume; + this.destStore = destStore; + this.future = future; + } + + } @Override public AsyncCallFuture copyVolume(VolumeInfo 
srcVolume, DataStore destStore) { - // TODO Auto-generated method stub + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult res = new VolumeApiResult(srcVolume); + try { + if (!this.snapshotMgr.canOperateOnVolume(srcVolume)) { + s_logger.debug( + "There are snapshots creating on this volume, can not move this volume"); + + res.setResult("There are snapshots creating on this volume, can not move this volume"); + future.complete(res); + return future; + } + + VolumeVO destVol = duplicateVolumeOnAnotherStorage(srcVolume, (StoragePool)destStore); + VolumeInfo destVolume = this.volFactory.getVolume(destVol.getId(), destStore); + destVolume.processEvent(Event.CreateOnlyRequested); + srcVolume.processEvent(Event.CopyingRequested); + + CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, + destVolume, + destStore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)) + .setContext(context); + this.motionSrv.copyAsync(srcVolume, destVolume, caller); + } catch (Exception e) { + s_logger.debug("Failed to copy volume", e); + res.setResult(e.toString()); + future.complete(res); + } + return future; + } + + protected Void copyVolumeCallBack(AsyncCallbackDispatcher callback, CopyVolumeContext context) { + VolumeInfo srcVolume = context.srcVolume; + VolumeInfo destVolume = context.destVolume; + CopyCommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + VolumeApiResult res = new VolumeApiResult(destVolume); + try { + if (result.isFailed()) { + res.setResult(result.getResult()); + destVolume.processEvent(Event.OperationFailed); + srcVolume.processEvent(Event.OperationFailed); + AsyncCallFuture destroyFuture = this.expungeVolumeAsync(destVolume); + destroyFuture.get(); + future.complete(res); + return null; + } + srcVolume.processEvent(Event.OperationSuccessed); + destVolume.processEvent(Event.OperationSuccessed); + 
AsyncCallFuture destroyFuture = this.expungeVolumeAsync(srcVolume); + destroyFuture.get(); + future.complete(res); + return null; + } catch (Exception e) { + s_logger.debug("Failed to process copy volume callback",e); + res.setResult(e.toString()); + future.complete(res); + } + return null; } diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java index 41434f41dcc..ebb9e54cd35 100644 --- a/server/src/com/cloud/storage/VolumeManager.java +++ b/server/src/com/cloud/storage/VolumeManager.java @@ -21,6 +21,7 @@ package com.cloud.storage; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -76,12 +77,11 @@ public interface VolumeManager extends VolumeApiService { void cleanupVolumes(long vmId) throws ConcurrentOperationException; - Volume migrateVolume(Long volumeId, Long storagePoolId) - throws ConcurrentOperationException; + Volume migrateVolume(MigrateVolumeCmd cmd); - boolean StorageMigration( + boolean storageMigration( VirtualMachineProfile vm, - StoragePool destPool) throws ConcurrentOperationException; + StoragePool destPool); void prepareForMigration( VirtualMachineProfile vm, diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 8c0e1428e41..573b8e90e12 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -39,6 +39,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; 
import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -351,11 +352,9 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { throw new CloudRuntimeException( "Failed to find a storage pool with enough capacity to move the volume to."); } - - List vols = new ArrayList(); - vols.add(volume); - migrateVolumes(vols, destPool); - return this.volFactory.getVolume(volume.getId()); + + Volume newVol = migrateVolume(volume, destPool); + return this.volFactory.getVolume(newVol.getId()); } /* @@ -1012,10 +1011,15 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { @DB @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); Long newSize = null; boolean shrinkOk = cmd.getShrinkOk(); boolean success = false; + + VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + if (volume == null) { + throw new InvalidParameterValueException("No such volume"); + } + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume .getDiskOfferingId()); DiskOfferingVO newDiskOffering = null; @@ -1039,9 +1043,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { "Cloudstack currently only supports volumes marked as KVM or XenServer hypervisor for resize"); } - if (volume == null) { - throw new InvalidParameterValueException("No such volume"); - } if (volume.getState() != Volume.State.Ready) { throw new InvalidParameterValueException( @@ -1995,8 +1996,10 @@ public class VolumeManagerImpl 
extends ManagerBase implements VolumeManager { @DB @Override - public Volume migrateVolume(Long volumeId, Long storagePoolId) - throws ConcurrentOperationException { + public Volume migrateVolume(MigrateVolumeCmd cmd) { + Long volumeId = cmd.getVolumeId(); + Long storagePoolId = cmd.getStoragePoolId(); + VolumeVO vol = _volsDao.findById(volumeId); if (vol == null) { throw new InvalidParameterValueException( @@ -2025,171 +2028,36 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { "Migration of volume from local storage pool is not supported"); } - List vols = new ArrayList(); - vols.add(vol); - - migrateVolumes(vols, destPool); - return vol; + Volume newVol = migrateVolume(vol, destPool); + return newVol; } + + @DB - public boolean migrateVolumes(List volumes, StoragePool destPool) - throws ConcurrentOperationException { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - boolean transitResult = false; - long checkPointTaskId = -1; + protected Volume migrateVolume(Volume volume, StoragePool destPool) { + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + AsyncCallFuture future = this.volService.copyVolume(vol, (DataStore)destPool); try { - List volIds = new ArrayList(); - for (Volume volume : volumes) { - if (!_snapshotMgr.canOperateOnVolume((VolumeVO) volume)) { - throw new CloudRuntimeException( - "There are snapshots creating on this volume, can not move this volume"); - } - - try { - if (!stateTransitTo(volume, Volume.Event.MigrationRequested)) { - throw new ConcurrentOperationException( - "Failed to transit volume state"); - } - } catch (NoTransitionException e) { - s_logger.debug("Failed to set state into migrate: " - + e.toString()); - throw new CloudRuntimeException( - "Failed to set state into migrate: " + e.toString()); - } - volIds.add(volume.getId()); - } - - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); + VolumeApiResult result = 
future.get(); + if (result.isFailed()) { + s_logger.debug("migrate volume failed:" + result.getResult()); + return null; } + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.debug("migrate volume failed", e); + return null; + } catch (ExecutionException e) { + s_logger.debug("migrate volume failed", e); + return null; } - - // At this stage, nobody can modify volumes. Send the copyvolume command - List> destroyCmds = new ArrayList>(); - List answers = new ArrayList(); - try { - for (Volume volume : volumes) { - String secondaryStorageURL = this._tmpltMgr.getSecondaryStorageURL(volume - .getDataCenterId()); - StoragePool srcPool = (StoragePool)this.dataStoreMgr.getDataStore(volume - .getPoolId(), DataStoreRole.Primary); - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), - volume.getPath(), srcPool, secondaryStorageURL, true, - _copyvolumewait); - CopyVolumeAnswer cvAnswer; - try { - cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException( - "Failed to copy the volume from the source primary storage pool to secondary storage.", - e1); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new CloudRuntimeException( - "Failed to copy the volume from the source primary storage pool to secondary storage."); - } - - String secondaryStorageVolumePath = cvAnswer.getVolumePath(); - - // Copy the volume from secondary storage to the destination - // storage - // pool - cvCmd = new CopyVolumeCommand(volume.getId(), - secondaryStorageVolumePath, destPool, - secondaryStorageURL, false, _copyvolumewait); - try { - cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException( - "Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new 
CloudRuntimeException( - "Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - answers.add(cvAnswer); - destroyCmds.add(new Pair( - srcPool, new DestroyCommand(srcPool, volume, null))); - } - } finally { - if (answers.size() != volumes.size()) { - // this means one of copying volume failed - for (Volume volume : volumes) { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " - + e.toString()); - } - } - } else { - // Need a transaction, make sure all the volumes get migrated to - // new storage pool - txn = Transaction.currentTxn(); - txn.start(); - - transitResult = false; - try { - for (int i = 0; i < volumes.size(); i++) { - CopyVolumeAnswer answer = answers.get(i); - VolumeVO volume = (VolumeVO) volumes.get(i); - Long oldPoolId = volume.getPoolId(); - volume.setPath(answer.getVolumePath()); - volume.setFolder(destPool.getPath()); - volume.setPodId(destPool.getPodId()); - volume.setPoolId(destPool.getId()); - volume.setLastPoolId(oldPoolId); - volume.setPodId(destPool.getPodId()); - try { - stateTransitTo(volume, - Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " - + e.toString()); - throw new CloudRuntimeException( - "Failed to change volume state: " - + e.toString()); - } - } - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); - } - } - - } - } - - // all the volumes get migrated to new storage pool, need to delete the - // copy on old storage pool - for (Pair cmd : destroyCmds) { - try { - Answer cvAnswer = this.storageMgr.sendToPool(cmd.first(), cmd.second()); - } catch (StorageUnavailableException e) { - s_logger.debug("Unable to delete the old copy on storage pool: " - + e.toString()); - } - } - return true; } @Override - public boolean StorageMigration( + public boolean 
storageMigration( VirtualMachineProfile vm, - StoragePool destPool) throws ConcurrentOperationException { + StoragePool destPool) { List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); List volumesNeedToMigrate = new ArrayList(); @@ -2215,7 +2083,13 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { return true; } - return migrateVolumes(volumesNeedToMigrate, destPool); + for (Volume vol : volumesNeedToMigrate) { + Volume result = migrateVolume(vol, destPool); + if (result == null) { + return false; + } + } + return true; } @Override @@ -2452,9 +2326,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { vol = task.volume; } else if (task.type == VolumeTaskType.MIGRATE) { pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary); - List volumes = new ArrayList(); - volumes.add(task.volume); - migrateVolumes(volumes, pool); + migrateVolume(task.volume, pool); vol = task.volume; } else if (task.type == VolumeTaskType.RECREATE) { Pair result = recreateVolume(task.volume, vm, dest); diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/com/cloud/storage/snapshot/SnapshotManager.java index a7692de7107..72e8163c05a 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManager.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManager.java @@ -22,6 +22,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.host.HostVO; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.utils.db.Filter; @@ -138,5 +139,5 @@ public interface SnapshotManager { void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId); - boolean canOperateOnVolume(VolumeVO volume); + boolean canOperateOnVolume(Volume volume); } diff --git 
a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 6b48b8237ec..58ca9a41cfa 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1578,7 +1578,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } @Override - public boolean canOperateOnVolume(VolumeVO volume) { + public boolean canOperateOnVolume(Volume volume) { List snapshots = _snapshotDao.listByStatus(volume.getId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); if (snapshots.size() > 0) { diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java index 5d48f146769..4b7a4dbe111 100755 --- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1226,7 +1226,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); boolean migrationResult = false; try { - migrationResult = this.volumeMgr.StorageMigration(profile, destPool); + migrationResult = this.volumeMgr.storageMigration(profile, destPool); if (migrationResult) { //if the vm is migrated to different pod in basic mode, need to reallocate ip From ff047e75d314392c3e889400a2ce55ba76f2071d Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 13 Feb 2013 11:11:45 -0800 Subject: [PATCH 115/486] refactor snapshot, move existing snapshot code into its own snapshotstrategy --- api/src/com/cloud/storage/Snapshot.java | 18 +- core/src/com/cloud/storage/SnapshotVO.java | 2 +- .../api/storage/CopyCommandResult.java | 10 +- .../api/storage/DataObjectInStore.java | 1 + .../api/storage/DataStoreDriver.java | 1 + .../ObjectInDataStoreStateMachine.java | 1 + .../api/storage}/PrimaryDataStoreDriver.java | 7 
+- .../subsystem/api/storage/SnapshotInfo.java | 6 +- .../api/storage/SnapshotStrategy.java | 10 + .../subsystem/api/storage/VolumeInfo.java | 3 + .../subsystem/api/storage/VolumeService.java | 2 + .../AncientImageDataStoreDriverImpl.java | 69 +- .../DefaultImageDataStoreDriverImpl.java | 7 + .../motion/DefaultImageMotionStrategy.java | 6 +- .../test/MockStorageMotionStrategy.java | 2 +- .../snapshot/SnapshotDataFactoryImpl.java | 34 +- .../storage/snapshot/SnapshotObject.java | 167 ++++- .../storage/snapshot/SnapshotServiceImpl.java | 6 + .../snapshot/SnapshotStateMachineManager.java | 9 + .../SnapshotStateMachineManagerImpl.java | 37 + .../strategy/AncientSnasphotStrategy.java | 562 ++++++++++++++ .../strategy/HypervisorBasedSnapshot.java | 44 -- .../strategy/StorageBasedSnapshot.java | 42 -- .../PrimaryDataStoreProviderManager.java | 2 +- .../motion/AncientDataMotionStrategy.java | 148 +++- .../storage/snapshot/SnapshotEntityImpl.java | 13 +- .../storage/snapshot/SnapshotStrategy.java | 25 - .../datastore/DefaultPrimaryDataStore.java | 2 +- .../AncientPrimaryDataStoreDriverImpl.java | 484 ++++++------ .../DefaultPrimaryDataStoreDriverImpl.java | 23 +- ...ltPrimaryDataStoreProviderManagerImpl.java | 2 +- .../AncientPrimaryDataStoreProviderImpl.java | 2 +- .../DefaultPrimaryDatastoreProviderImpl.java | 2 +- .../storage/volume/VolumeObject.java | 17 + .../storage/volume/VolumeServiceImpl.java | 104 ++- .../SolidfirePrimaryDataStoreDriver.java | 22 +- server/src/com/cloud/api/ApiDBUtils.java | 2 +- .../src/com/cloud/api/ApiResponseHelper.java | 2 +- .../src/com/cloud/configuration/Config.java | 3 +- .../cloud/storage/ResizeVolumePayload.java | 14 + .../src/com/cloud/storage/VolumeManager.java | 4 +- .../com/cloud/storage/VolumeManagerImpl.java | 88 +-- .../com/cloud/storage/dao/SnapshotDao.java | 2 +- .../cloud/storage/dao/SnapshotDaoImpl.java | 2 +- .../listener/SnapshotStateListener.java | 30 +- .../storage/snapshot/SnapshotManager.java | 95 +-- 
.../storage/snapshot/SnapshotManagerImpl.java | 694 +++--------------- 47 files changed, 1627 insertions(+), 1201 deletions(-) rename engine/{storage/src/org/apache/cloudstack/storage/volume => api/src/org/apache/cloudstack/engine/subsystem/api/storage}/PrimaryDataStoreDriver.java (78%) create mode 100644 engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java create mode 100644 engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java create mode 100644 engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java create mode 100644 engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java delete mode 100644 engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java delete mode 100644 engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java delete mode 100644 engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java create mode 100644 server/src/com/cloud/storage/ResizeVolumePayload.java diff --git a/api/src/com/cloud/storage/Snapshot.java b/api/src/com/cloud/storage/Snapshot.java index 3f6b8f5a8e4..9c2217e0972 100644 --- a/api/src/com/cloud/storage/Snapshot.java +++ b/api/src/com/cloud/storage/Snapshot.java @@ -61,22 +61,6 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, BackedUp, Error; - private final static StateMachine2 s_fsm = new StateMachine2(); - - public static StateMachine2 getStateMachine() { - return s_fsm; - } - - static { - s_fsm.addTransition(null, Event.CreateRequested, Creating); - s_fsm.addTransition(Creating, Event.OperationSucceeded, CreatedOnPrimary); - s_fsm.addTransition(Creating, Event.OperationNotPerformed, BackedUp); - s_fsm.addTransition(Creating, Event.OperationFailed, Error); - s_fsm.addTransition(CreatedOnPrimary, 
Event.BackupToSecondary, BackingUp); - s_fsm.addTransition(BackingUp, Event.OperationSucceeded, BackedUp); - s_fsm.addTransition(BackingUp, Event.OperationFailed, Error); - } - public String toString() { return this.name(); } @@ -107,7 +91,7 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, Date getCreated(); - Type getType(); + Type getRecurringType(); State getState(); diff --git a/core/src/com/cloud/storage/SnapshotVO.java b/core/src/com/cloud/storage/SnapshotVO.java index f44212ff396..1bb0854e006 100644 --- a/core/src/com/cloud/storage/SnapshotVO.java +++ b/core/src/com/cloud/storage/SnapshotVO.java @@ -175,7 +175,7 @@ public class SnapshotVO implements Snapshot { } @Override - public Type getType() { + public Type getRecurringType() { if (snapshotType < 0 || snapshotType >= Type.values().length) { return null; } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java index 100fd4edba3..571a77c3786 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java @@ -18,14 +18,22 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import com.cloud.agent.api.Answer; + public class CopyCommandResult extends CommandResult { private final String path; - public CopyCommandResult(String path) { + private final Answer answer; + public CopyCommandResult(String path, Answer answer) { super(); this.path = path; + this.answer = answer; } public String getPath() { return this.path; } + + public Answer getAnswer() { + return this.answer; + } } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java index 60dfb9fb71f..32ea996e638 
100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataObjectInStore.java @@ -23,4 +23,5 @@ import com.cloud.utils.fsm.StateObject; public interface DataObjectInStore extends StateObject { public String getInstallPath(); + public void setInstallPath(String path); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java index 4aba9bfdbff..cf5759b2924 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java @@ -30,4 +30,5 @@ public interface DataStoreDriver { public void deleteAsync(DataObject data, AsyncCompletionCallback callback); public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback); public boolean canCopy(DataObject srcData, DataObject destData); + public void resize(DataObject data, AsyncCompletionCallback callback); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java index af9974e1118..726ce0821c5 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java @@ -49,6 +49,7 @@ public interface ObjectInDataStoreStateMachine extends StateObject callback); + public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback); public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback); } diff --git 
a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java index 30cf182e5c9..b90404c5667 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java @@ -16,9 +16,13 @@ // under the License. package org.apache.cloudstack.engine.subsystem.api.storage; +import com.cloud.storage.Snapshot; -public interface SnapshotInfo extends DataObject { + +public interface SnapshotInfo extends DataObject, Snapshot { public SnapshotInfo getParent(); public SnapshotInfo getChild(); public VolumeInfo getBaseVolume(); + Long getDataCenterId(); + public Long getPrevSnapshotId(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java new file mode 100644 index 00000000000..f854f6bb5d4 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java @@ -0,0 +1,10 @@ +package org.apache.cloudstack.engine.subsystem.api.storage; + + +public interface SnapshotStrategy { + public boolean canHandle(SnapshotInfo snapshot); + public SnapshotInfo takeSnapshot(VolumeInfo volume, Long snapshotId); + public SnapshotInfo backupSnapshot(SnapshotInfo snapshot); + public boolean deleteSnapshot(SnapshotInfo snapshot); + public boolean revertSnapshot(SnapshotInfo snapshot); +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java index f2a3d5e8749..349325af45d 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java @@ -18,10 +18,13 @@ */ package 
org.apache.cloudstack.engine.subsystem.api.storage; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Volume; public interface VolumeInfo extends DataObject, Volume { public boolean isAttachedVM(); public void addPayload(Object data); public Object getpayload(); + public HypervisorType getHypervisorType(); + public Long getLastPoolId(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index 58258ebd659..102c47174b1 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -74,5 +74,7 @@ public interface VolumeService { boolean destroyVolume(long volumeId) throws ConcurrentOperationException; AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store); + + AsyncCallFuture resize(VolumeInfo volume); } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java index 2c19c7fc039..97ea6c48c79 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/AncientImageDataStoreDriverImpl.java @@ -38,22 +38,30 @@ import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; import com.cloud.agent.api.storage.DeleteVolumeCommand; +import com.cloud.agent.api.to.S3TO; +import com.cloud.agent.api.to.SwiftTO; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.Storage.ImageFormat; +import 
com.cloud.storage.SnapshotVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.VolumeHostVO; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeHostDao; import com.cloud.storage.download.DownloadMonitor; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.swift.SwiftManager; import com.cloud.utils.exception.CloudRuntimeException; public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { @@ -69,7 +77,13 @@ public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { @Inject VolumeDao volumeDao; @Inject VolumeHostDao volumeHostDao; @Inject HostDao hostDao; + @Inject SnapshotDao snapshotDao; @Inject AgentManager agentMgr; + @Inject SnapshotManager snapshotMgr; + @Inject + private SwiftManager _swiftMgr; + @Inject + private S3Manager _s3Mgr; @Override public String grantAccess(DataObject data, EndPoint ep) { // TODO Auto-generated method stub @@ -158,6 +172,49 @@ public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { } + private void deleteSnapshot(DataObject data, AsyncCompletionCallback callback) { + Long snapshotId = data.getId(); + SnapshotVO snapshot = this.snapshotDao.findByIdIncludingRemoved(snapshotId); + CommandResult result = new CommandResult(); + if (snapshot == null) { + s_logger.debug("Destroying snapshot " + snapshotId + " backup failed due to unable to find snapshot "); + result.setResult("Unable to find snapshot: " + snapshotId); + callback.complete(result); + return; + } + + try { + String secondaryStoragePoolUrl = this.snapshotMgr.getSecondaryStorageURL(snapshot); + Long 
dcId = snapshot.getDataCenterId(); + Long accountId = snapshot.getAccountId(); + Long volumeId = snapshot.getVolumeId(); + + String backupOfSnapshot = snapshot.getBackupSnapshotId(); + if (backupOfSnapshot == null) { + callback.complete(result); + return; + } + SwiftTO swift = _swiftMgr.getSwiftTO(snapshot.getSwiftId()); + S3TO s3 = _s3Mgr.getS3TO(); + + DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand( + swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId, + backupOfSnapshot, false); + Answer answer = agentMgr.sendToSSVM(dcId, cmd); + + if ((answer != null) && answer.getResult()) { + snapshot.setBackupSnapshotId(null); + snapshotDao.update(snapshotId, snapshot); + } else if (answer != null) { + result.setResult(answer.getDetails()); + } + } catch (Exception e) { + s_logger.debug("failed to delete snapshot: " + snapshotId + ": " + e.toString()); + result.setResult(e.toString()); + } + callback.complete(result); + } + @Override public void deleteAsync(DataObject data, AsyncCompletionCallback callback) { @@ -165,10 +222,9 @@ public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { deleteVolume(data, callback); } else if (data.getType() == DataObjectType.TEMPLATE) { deleteTemplate(data, callback); + } else if (data.getType() == DataObjectType.SNAPSHOT) { + deleteSnapshot(data, callback); } - - - } @Override @@ -184,4 +240,11 @@ public class AncientImageDataStoreDriverImpl implements ImageDataStoreDriver { return false; } + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java index 1a506fa782b..3d46c73cde2 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java 
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/driver/DefaultImageDataStoreDriverImpl.java @@ -116,4 +116,11 @@ public class DefaultImageDataStoreDriverImpl implements ImageDataStoreDriver { // TODO Auto-generated method stub } + + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } } diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java index 561c1cb288f..c49a521a3ca 100644 --- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java +++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java @@ -101,7 +101,7 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy { DataStore destStore = destData.getDataStore(); DataStore srcStore = srcData.getDataStore(); EndPoint ep = selector.select(srcData, destData); - CopyCommandResult result = new CopyCommandResult(""); + CopyCommandResult result = new CopyCommandResult("", null); if (ep == null) { result.setResult("can't find end point"); callback.complete(result); @@ -125,12 +125,12 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy { AsyncCompletionCallback parentCall = context.getParentCallback(); Answer answer = (Answer)callback.getResult(); if (!answer.getResult()) { - CopyCommandResult result = new CopyCommandResult(""); + CopyCommandResult result = new CopyCommandResult("", null); result.setResult(answer.getDetails()); parentCall.complete(result); } else { CopyCmdAnswer ans = (CopyCmdAnswer)answer; - CopyCommandResult result = new CopyCommandResult(ans.getPath()); + CopyCommandResult result = new CopyCommandResult(ans.getPath(), null); parentCall.complete(result); } return null; diff --git 
a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java index e2e8f9439c5..b619ee9240f 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java @@ -34,7 +34,7 @@ public class MockStorageMotionStrategy implements DataMotionStrategy { @Override public Void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { - CopyCommandResult result = new CopyCommandResult("something"); + CopyCommandResult result = new CopyCommandResult("something", null); callback.complete(result); return null; } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index 095320907c7..5af5260c340 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -25,39 +25,55 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import 
org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.snapshot.db.SnapshotDao2; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; import org.springframework.stereotype.Component; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.utils.exception.CloudRuntimeException; + @Component public class SnapshotDataFactoryImpl implements SnapshotDataFactory { @Inject - SnapshotDao2 snapshotDao; + SnapshotDao snapshotDao; @Inject ObjectInDataStoreManager objMap; @Inject DataStoreManager storeMgr; + @Inject + VolumeDataFactory volumeFactory; @Override public SnapshotInfo getSnapshot(long snapshotId, DataStore store) { - SnapshotVO snapshot = snapshotDao.findById(snapshotId); + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(snapshotId); DataObjectInStore obj = objMap.findObject(snapshot.getUuid(), DataObjectType.SNAPSHOT, store.getUuid(), store.getRole()); if (obj == null) { return null; } - SnapshotObject so = new SnapshotObject(snapshot, store); + SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; } @Override public SnapshotInfo getSnapshot(long snapshotId) { - // TODO Auto-generated method stub - return null; + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(snapshotId); + SnapshotObject so = null; + if (snapshot.getState() == Snapshot.State.BackedUp) { + DataStore store = objMap.findStore(snapshot.getUuid(), DataObjectType.SNAPSHOT, DataStoreRole.Image); + so = SnapshotObject.getSnapshotObject(snapshot, store); + } else { + VolumeInfo volume = this.volumeFactory.getVolume(snapshot.getVolumeId()); + so = SnapshotObject.getSnapshotObject(snapshot, volume.getDataStore()); + } + return so; } + @Override public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { - // TODO Auto-generated method stub - return null; + throw new CloudRuntimeException("not implemented yet"); } } 
diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index d9fc8aabfe8..a82be6de01d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -18,21 +18,54 @@ */ package org.apache.cloudstack.storage.snapshot; +import java.util.Date; + +import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; +import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; +import org.apache.log4j.Logger; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; public class SnapshotObject implements SnapshotInfo { + private static final Logger s_logger = Logger.getLogger(SnapshotObject.class); private SnapshotVO snapshot; private DataStore store; - - public SnapshotObject(SnapshotVO snapshot, DataStore store) { - this.snapshot = snapshot; - 
this.store = store; + @Inject + protected SnapshotDao snapshotDao; + @Inject + protected VolumeDao volumeDao; + @Inject protected VolumeDataFactory volFactory; + @Inject protected SnapshotStateMachineManager stateMachineMgr; + @Inject + ObjectInDataStoreManager ojbectInStoreMgr; + protected SnapshotObject() { + + } + + protected void configure(SnapshotVO snapshot, DataStore store) { + this.snapshot = snapshot; + this.store = store; + } + + public static SnapshotObject getSnapshotObject(SnapshotVO snapshot, DataStore store) { + SnapshotObject snapObj = ComponentContext.inject(SnapshotObject.class); + snapObj.configure(snapshot, store); + return snapObj; } public DataStore getStore() { @@ -53,56 +86,138 @@ public class SnapshotObject implements SnapshotInfo { @Override public VolumeInfo getBaseVolume() { - // TODO Auto-generated method stub - return null; + return volFactory.getVolume(this.snapshot.getVolumeId()); } @Override public long getId() { - // TODO Auto-generated method stub - return 0; + return this.snapshot.getId(); } @Override public String getUri() { - // TODO Auto-generated method stub - return null; + return this.snapshot.getUuid(); } @Override public DataStore getDataStore() { - // TODO Auto-generated method stub - return null; + return this.store; } @Override public Long getSize() { - // TODO Auto-generated method stub - return 0L; + return this.getSize(); } @Override public DataObjectType getType() { - // TODO Auto-generated method stub - return null; + return DataObjectType.SNAPSHOT; } @Override public DiskFormat getFormat() { - // TODO Auto-generated method stub return null; } @Override public String getUuid() { - // TODO Auto-generated method stub - return null; + return this.snapshot.getUuid(); } - @Override - public void processEvent(Event event) { - // TODO Auto-generated method stub - - } + @Override + public void processEvent( + ObjectInDataStoreStateMachine.Event event) { + try { + ojbectInStoreMgr.update(this, event); + } catch (Exception 
e) { + s_logger.debug("Failed to update state:" + e.toString()); + throw new CloudRuntimeException("Failed to update state: " + e.toString()); + } + } + @Override + public long getAccountId() { + return this.snapshot.getAccountId(); + } + + @Override + public long getVolumeId() { + return this.snapshot.getVolumeId(); + } + + @Override + public String getPath() { + return this.snapshot.getPath(); + } + + public void setPath(String path) { + this.snapshot.setPath(path); + } + + @Override + public String getName() { + return this.snapshot.getName(); + } + + @Override + public Date getCreated() { + return this.snapshot.getCreated(); + } + + @Override + public Type getRecurringType() { + return this.snapshot.getRecurringType(); + } + + @Override + public State getState() { + return this.snapshot.getState(); + } + + @Override + public HypervisorType getHypervisorType() { + return this.snapshot.getHypervisorType(); + } + + @Override + public boolean isRecursive() { + return this.snapshot.isRecursive(); + } + + @Override + public short getsnapshotType() { + return this.snapshot.getsnapshotType(); + } + + @Override + public long getDomainId() { + return this.snapshot.getDomainId(); + } + + public void setPrevSnapshotId(Long id) { + this.snapshot.setPrevSnapshotId(id); + } + + @Override + public Long getDataCenterId() { + return this.snapshot.getDataCenterId(); + } + + public void processEvent(Snapshot.Event event) + throws NoTransitionException { + stateMachineMgr.processEvent(this.snapshot, event); + } + + @Override + public Long getPrevSnapshotId() { + return this.snapshot.getPrevSnapshotId(); + } + + public void setBackupSnapshotId(String id) { + this.snapshot.setBackupSnapshotId(id); + } + + public String getBackupSnapshotId() { + return this.snapshot.getBackupSnapshotId(); + } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java 
b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index bd3caf4c0bc..1b64fd0cae3 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -22,6 +22,10 @@ import org.springframework.stereotype.Component; @Component public class SnapshotServiceImpl implements SnapshotService { + + public SnapshotServiceImpl() { + + } @Override public SnapshotEntity getSnapshotEntity(long snapshotId) { @@ -46,5 +50,7 @@ public class SnapshotServiceImpl implements SnapshotService { // TODO Auto-generated method stub return false; } + + } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java new file mode 100644 index 00000000000..1c3ac28d2f7 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java @@ -0,0 +1,9 @@ +package org.apache.cloudstack.storage.snapshot; + +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.SnapshotVO; +import com.cloud.utils.fsm.NoTransitionException; + +public interface SnapshotStateMachineManager { + public void processEvent(SnapshotVO snapshot, Event event) throws NoTransitionException; +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java new file mode 100644 index 00000000000..a20a2c8b876 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java @@ -0,0 +1,37 @@ +package org.apache.cloudstack.storage.snapshot; + +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + 
+import com.cloud.storage.Snapshot; +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.listener.SnapshotStateListener; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; + +@Component +public class SnapshotStateMachineManagerImpl implements +SnapshotStateMachineManager { + private StateMachine2 stateMachine = new StateMachine2(); + @Inject + protected SnapshotDao snapshotDao; + public SnapshotStateMachineManagerImpl() { + stateMachine.addTransition(null, Event.CreateRequested, Snapshot.State.Creating); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationSucceeded, Snapshot.State.CreatedOnPrimary); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationNotPerformed, Snapshot.State.BackedUp); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationFailed, Snapshot.State.Error); + stateMachine.addTransition(Snapshot.State.CreatedOnPrimary, Event.BackupToSecondary, Snapshot.State.BackingUp); + stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationSucceeded, Snapshot.State.BackedUp); + stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationFailed, Snapshot.State.Error); + + stateMachine.registerListener(new SnapshotStateListener()); + } + + public void processEvent(SnapshotVO snapshot, Event event) throws NoTransitionException { + stateMachine.transitTo(snapshot, event, null, snapshotDao); + } +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java new file mode 100644 index 00000000000..2e3b90f0490 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java @@ -0,0 +1,562 @@ +package 
org.apache.cloudstack.storage.snapshot.strategy; + +import java.util.List; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; +import org.apache.cloudstack.storage.motion.DataMotionService; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; +import 
org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManager; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotAnswer; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; +import com.cloud.agent.api.to.S3TO; +import com.cloud.agent.api.to.SwiftTO; +import com.cloud.configuration.Resource.ResourceType; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.swift.SwiftManager; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.dao.UserVmDao; + +@Component +public class AncientSnasphotStrategy implements SnapshotStrategy { + private static final Logger s_logger = Logger.getLogger(AncientSnasphotStrategy.class); + @Inject + protected VolumeDao _volsDao; + @Inject + protected UserVmDao _vmDao; + @Inject + protected StoragePoolDao _storagePoolDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected SnapshotDao 
snapshotDao; + @Inject + private ResourceManager _resourceMgr; + @Inject + protected SnapshotDao _snapshotDao; + @Inject + protected SnapshotManager snapshotMgr; + @Inject + protected VolumeManager volumeMgr; + @Inject + private ConfigurationDao _configDao; + @Inject + protected SnapshotStateMachineManager stateMachineManager; + @Inject + private VolumeDao volumeDao; + @Inject + SnapshotDataFactory snapshotfactory; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataMotionService motionSrv; + @Inject + ObjectInDataStoreManager objInStoreMgr; + + + @Override + public boolean canHandle(SnapshotInfo snapshot) { + return true; + } + + static private class CreateSnapshotContext extends AsyncRpcConext { + final VolumeInfo volume; + final SnapshotInfo snapshot; + final AsyncCallFuture future; + public CreateSnapshotContext(AsyncCompletionCallback callback, VolumeInfo volume, + SnapshotInfo snapshot, + AsyncCallFuture future) { + super(callback); + this.volume = volume; + this.snapshot = snapshot; + this.future = future; + } + } + + static private class DeleteSnapshotContext extends AsyncRpcConext { + final SnapshotInfo snapshot; + final AsyncCallFuture future; + public DeleteSnapshotContext(AsyncCompletionCallback callback, SnapshotInfo snapshot, + AsyncCallFuture future) { + super(callback); + this.snapshot = snapshot; + this.future = future; + } + + } + + static private class CopySnapshotContext extends AsyncRpcConext { + final SnapshotInfo srcSnapshot; + final SnapshotInfo destSnapshot; + final AsyncCallFuture future; + public CopySnapshotContext(AsyncCompletionCallback callback, + SnapshotInfo srcSnapshot, + SnapshotInfo destSnapshot, + AsyncCallFuture future) { + super(callback); + this.srcSnapshot = srcSnapshot; + this.destSnapshot = destSnapshot; + this.future = future; + } + + } + + protected Void createSnapshotAsyncCallback(AsyncCallbackDispatcher callback, + CreateSnapshotContext context) { + CreateCmdResult result = callback.getResult(); + SnapshotObject 
snapshot = (SnapshotObject)context.snapshot; + VolumeInfo volume = context.volume; + AsyncCallFuture future = context.future; + SnapshotResult snapResult = new SnapshotResult(snapshot); + if (result.isFailed()) { + s_logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException nte) { + s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); + } + + + snapResult.setResult(result.getResult()); + future.complete(snapResult); + return null; + } + + try { + SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); + String preSnapshotPath = preSnapshotVO.getPath(); + SnapshotVO snapshotVO = this.snapshotDao.findById(snapshot.getId()); + // The snapshot was successfully created + if (preSnapshotPath != null && preSnapshotPath.equals(result.getPath())) { + // empty snapshot + s_logger.debug("CreateSnapshot: this is empty snapshot "); + + snapshotVO.setPath(preSnapshotPath); + snapshotVO.setBackupSnapshotId(preSnapshotVO.getBackupSnapshotId()); + snapshotVO.setSwiftId(preSnapshotVO.getSwiftId()); + snapshotVO.setPrevSnapshotId(preSnapshotVO.getId()); + snapshotVO.setSecHostId(preSnapshotVO.getSecHostId()); + snapshot.processEvent(Snapshot.Event.OperationNotPerformed); + } else { + long preSnapshotId = 0; + + if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { + preSnapshotId = preSnapshotVO.getId(); + int _deltaSnapshotMax = NumbersUtil.parseInt(_configDao.getValue("snapshot.delta.max"), SnapshotManager.DELTAMAX); + int deltaSnap = _deltaSnapshotMax; + + int i; + for (i = 1; i < deltaSnap; i++) { + String prevBackupUuid = preSnapshotVO.getBackupSnapshotId(); + // previous snapshot doesn't have backup, create a full snapshot + if (prevBackupUuid == null) { + preSnapshotId = 0; + break; + } + long preSSId = preSnapshotVO.getPrevSnapshotId(); + if (preSSId == 0) { + break; + 
} + preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preSSId); + } + if (i >= deltaSnap) { + preSnapshotId = 0; + } + } + + //If the volume is moved around, backup a full snapshot to secondary storage + if (volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId())) { + preSnapshotId = 0; + //TODO: fix this hack + VolumeVO volumeVO = this.volumeDao.findById(volume.getId()); + volumeVO.setLastPoolId(volume.getPoolId()); + this.volumeDao.update(volume.getId(), volumeVO); + } + + snapshot.setPath(result.getPath()); + snapshot.setPrevSnapshotId(preSnapshotId); + + snapshot.processEvent(Snapshot.Event.OperationSucceeded); + snapResult = new SnapshotResult(this.snapshotfactory.getSnapshot(snapshot.getId())); + } + } catch (Exception e) { + s_logger.debug("Failed to create snapshot: ", e); + snapResult.setResult(e.toString()); + } + + future.complete(snapResult); + return null; + } + + class SnapshotResult extends CommandResult { + SnapshotInfo snashot; + public SnapshotResult(SnapshotInfo snapshot) { + this.snashot = snapshot; + } + } + + protected SnapshotInfo createSnapshotOnPrimary(VolumeInfo volume, Long snapshotId) { + SnapshotObject snapshot = (SnapshotObject)this.snapshotfactory.getSnapshot(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Can not find snapshot " + snapshotId); + } + + try { + snapshot.processEvent(Snapshot.Event.CreateRequested); + } catch (NoTransitionException nte) { + s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); + throw new CloudRuntimeException("Failed to update snapshot state due to " + nte.getMessage()); + } + AsyncCallFuture future = new AsyncCallFuture(); + + CreateSnapshotContext context = new CreateSnapshotContext( + null, volume, snapshot, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + caller.getTarget().createSnapshotAsyncCallback(null, null)) + .setContext(context); + 
PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)volume.getDataStore().getDriver(); + + primaryStore.takeSnapshot(snapshot, caller); + SnapshotResult result; + try { + result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to create snapshot:" + result.getResult()); + throw new CloudRuntimeException(result.getResult()); + } + return result.snashot; + } catch (InterruptedException e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); + } + + } + + private boolean hostSupportSnapsthot(HostVO host) { + if (host.getHypervisorType() != HypervisorType.KVM) { + return true; + } + // Determine host capabilities + String caps = host.getCapabilities(); + + if (caps != null) { + String[] tokens = caps.split(","); + for (String token : tokens) { + if (token.contains("snapshot")) { + return true; + } + } + } + return false; + } + + protected boolean supportedByHypervisor(VolumeInfo volume) { + if (volume.getHypervisorType().equals(HypervisorType.KVM)) { + StoragePool storagePool = (StoragePool)volume.getDataStore(); + ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); + List hosts = _resourceMgr.listAllHostsInCluster(cluster.getId()); + if (hosts != null && !hosts.isEmpty()) { + HostVO host = hosts.get(0); + if (!hostSupportSnapsthot(host)) { + throw new CloudRuntimeException("KVM Snapshot is not supported on cluster: " + host.getId()); + } + } + } + + // if volume is attached to a vm in destroyed or expunging state; disallow + if (volume.getInstanceId() != null) { + UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); + if (userVm != null) { + if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { + throw new CloudRuntimeException("Creating snapshot failed due to 
volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() + " is in " + + userVm.getState().toString() + " state"); + } + + if(userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { + List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); + if(activeSnapshots.size() > 1) + throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); + } + + List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), + VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); + if (activeVMSnapshots.size() > 0) { + throw new CloudRuntimeException( + "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); + } + } + } + + return true; + } + + @Override + public SnapshotInfo takeSnapshot(VolumeInfo volume, Long snapshotId) { + + supportedByHypervisor(volume); + + SnapshotInfo snapshot = createSnapshotOnPrimary(volume, snapshotId); + return snapshot; + } + + @Override + public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { + SnapshotObject snapObj = (SnapshotObject)snapshot; + AsyncCallFuture future = new AsyncCallFuture(); + SnapshotResult result = new SnapshotResult(snapshot); + try { + + snapObj.processEvent(Snapshot.Event.BackupToSecondary); + + ZoneScope scope = new ZoneScope(snapshot.getDataCenterId()); + List stores = this.dataStoreMgr.getImageStores(scope); + if (stores.size() != 1) { + throw new CloudRuntimeException("find out more than one image stores"); + } + + DataStore imageStore = stores.get(0); + SnapshotInfo snapshotOnImageStore = (SnapshotInfo)imageStore.create(snapshot); + + snapshotOnImageStore.processEvent(Event.CreateOnlyRequested); + CopySnapshotContext context = new CopySnapshotContext(null, 
snapshot, + snapshotOnImageStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + caller.getTarget().copySnapshotAsyncCallback(null, null)) + .setContext(context); + this.motionSrv.copyAsync(snapshot, snapshotOnImageStore, caller); + } catch (Exception e) { + s_logger.debug("Failed to copy snapshot", e); + result.setResult("Failed to copy snapshot:" +e.toString()); + future.complete(result); + } + + try { + SnapshotResult res = future.get(); + SnapshotInfo destSnapshot = res.snashot; + return destSnapshot; + } catch (InterruptedException e) { + s_logger.debug("failed copy snapshot", e); + throw new CloudRuntimeException("Failed to copy snapshot" , e); + } catch (ExecutionException e) { + s_logger.debug("Failed to copy snapshot", e); + throw new CloudRuntimeException("Failed to copy snapshot" , e); + } + + } + + protected Void copySnapshotAsyncCallback(AsyncCallbackDispatcher callback, + CopySnapshotContext context) { + CopyCommandResult result = callback.getResult(); + SnapshotInfo destSnapshot = context.destSnapshot; + SnapshotObject srcSnapshot = (SnapshotObject)context.srcSnapshot; + AsyncCallFuture future = context.future; + SnapshotResult snapResult = new SnapshotResult(destSnapshot); + if (result.isFailed()) { + snapResult.setResult(result.getResult()); + future.complete(snapResult); + return null; + } + + try { + BackupSnapshotAnswer answer = (BackupSnapshotAnswer)result.getAnswer(); + + DataObjectInStore dataInStore = objInStoreMgr.findObject(destSnapshot, destSnapshot.getDataStore()); + dataInStore.setInstallPath(answer.getBackupSnapshotName()); + objInStoreMgr.update(destSnapshot, Event.OperationSuccessed); + + srcSnapshot.processEvent(Snapshot.Event.OperationSucceeded); + snapResult = new SnapshotResult(this.snapshotfactory.getSnapshot(destSnapshot.getId())); + future.complete(snapResult); + } catch (Exception e) { + s_logger.debug("Failed to update snapshot state", e); + 
snapResult.setResult(e.toString()); + future.complete(snapResult); + } + return null; + } + + @DB + protected boolean destroySnapshotBackUp(SnapshotVO snapshot) { + DataStore store = objInStoreMgr.findStore(snapshot.getUuid(), DataObjectType.SNAPSHOT, DataStoreRole.Image); + if (store == null) { + s_logger.debug("Can't find snapshot" + snapshot.getId() + " backed up into image store"); + return false; + } + + try { + SnapshotInfo snapshotInfo = this.snapshotfactory.getSnapshot(snapshot.getId(), store); + snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested); + + AsyncCallFuture future = new AsyncCallFuture(); + DeleteSnapshotContext context = new DeleteSnapshotContext(null, + snapshotInfo, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + caller.getTarget().deleteSnapshotCallback(null, null)) + .setContext(context); + + store.getDriver().deleteAsync(snapshotInfo, caller); + + SnapshotResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to delete snapsoht: " + result.getResult()); + } + return result.isSuccess(); + } catch (Exception e) { + s_logger.debug("Failed to delete snapshot", e); + return false; + } + } + + protected Void deleteSnapshotCallback(AsyncCallbackDispatcher callback, + DeleteSnapshotContext context) { + CommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + SnapshotInfo snapshot = context.snapshot; + if (result.isFailed()) { + s_logger.debug("delete snapshot failed" + result.getResult()); + snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + SnapshotResult res = new SnapshotResult(context.snapshot); + future.complete(res); + return null; + } + snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed); + SnapshotResult res = new SnapshotResult(context.snapshot); + future.complete(res); + return null; + } + + @Override + public boolean 
deleteSnapshot(SnapshotInfo snapInfo) { + Long snapshotId = snapInfo.getId(); + SnapshotObject snapshot = (SnapshotObject)snapInfo; + + if (!Snapshot.State.BackedUp.equals(snapshot.getState())) { + throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is not in BackedUp Status"); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Calling deleteSnapshot for snapshotId: " + snapshotId); + } + SnapshotVO lastSnapshot = null; + if (snapshot.getBackupSnapshotId() != null) { + List snaps = _snapshotDao.listByBackupUuid(snapshot.getVolumeId(), snapshot.getBackupSnapshotId()); + if (snaps != null && snaps.size() > 1) { + snapshot.setBackupSnapshotId(null); + SnapshotVO snapshotVO = this._snapshotDao.findById(snapshotId); + _snapshotDao.update(snapshot.getId(), snapshotVO); + } + } + + _snapshotDao.remove(snapshotId); + + long lastId = snapshotId; + boolean destroy = false; + while (true) { + lastSnapshot = _snapshotDao.findNextSnapshot(lastId); + if (lastSnapshot == null) { + // if all snapshots after this snapshot in this chain are removed, remove those snapshots. + destroy = true; + break; + } + if (lastSnapshot.getRemoved() == null) { + // if there is one child not removed, then can not remove back up snapshot. 
+ break; + } + lastId = lastSnapshot.getId(); + } + if (destroy) { + lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); + while (lastSnapshot.getRemoved() != null) { + String BackupSnapshotId = lastSnapshot.getBackupSnapshotId(); + if (BackupSnapshotId != null) { + List snaps = _snapshotDao.listByBackupUuid(lastSnapshot.getVolumeId(), BackupSnapshotId); + if (snaps != null && snaps.size() > 1) { + lastSnapshot.setBackupSnapshotId(null); + _snapshotDao.update(lastSnapshot.getId(), lastSnapshot); + } else { + if (destroySnapshotBackUp(lastSnapshot)) { + + } else { + s_logger.debug("Destroying snapshot backup failed " + lastSnapshot); + break; + } + } + } + lastId = lastSnapshot.getPrevSnapshotId(); + if (lastId == 0) { + break; + } + lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); + } + } + return true; + + } + + @Override + public boolean revertSnapshot(SnapshotInfo snapshot) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java deleted file mode 100644 index 8ef09275bcd..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.storage.snapshot.strategy; - -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; -import org.springframework.stereotype.Component; - -@Component -public class HypervisorBasedSnapshot implements SnapshotStrategy { - - @Override - public boolean takeSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java deleted file mode 100644 index 7af395acb96..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java +++ /dev/null @@ -1,42 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.storage.snapshot.strategy; - -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; - -public class StorageBasedSnapshot implements SnapshotStrategy { - - @Override - public boolean takeSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java index 664c2d1c216..d1c26e1a272 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java @@ -19,7 +19,7 @@ package org.apache.cloudstack.storage.datastore; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; public interface PrimaryDataStoreProviderManager { diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java 
b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index ed3ca6aa8d9..c067a1b651c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -36,6 +37,8 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotAnswer; +import com.cloud.agent.api.BackupSnapshotCommand; import com.cloud.agent.api.Command; import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; @@ -47,7 +50,9 @@ import com.cloud.agent.api.storage.CopyVolumeCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.SwiftTO; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.exception.StorageUnavailableException; @@ -72,7 +77,9 @@ import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import 
com.cloud.storage.dao.VolumeHostDao; +import com.cloud.storage.s3.S3Manager; import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.swift.SwiftManager; import com.cloud.template.TemplateManager; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.DB; @@ -112,6 +119,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { @Inject VMTemplatePoolDao templatePoolDao; @Inject VolumeManager volumeMgr; + @Inject + private SwiftManager _swiftMgr; + @Inject + private S3Manager _s3Mgr; @Override public boolean canHandle(DataObject srcData, DataObject destData) { @@ -120,7 +131,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } @DB - protected String copyVolumeFromImage(DataObject srcData, DataObject destData) { + protected Answer copyVolumeFromImage(DataObject srcData, DataObject destData) { String value = configDao.getValue(Config.RecreateSystemVmEnabled.key()); int _copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); @@ -162,16 +173,17 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { this.volDao.update(vol.getId(), vol); volumeHostDao.remove(volumeHostVO.getId()); txn.commit(); - return errMsg; + return cvAnswer; } - private void copyTemplate(DataObject srcData, DataObject destData) { + private Answer copyTemplate(DataObject srcData, DataObject destData) { VMTemplateVO template = this.templateDao.findById(srcData.getId()); templateMgr.prepareTemplateForCreate(template, (StoragePool) destData.getDataStore()); + return null; } - protected String copyFromSnapshot(DataObject snapObj, DataObject volObj) { + protected Answer copyFromSnapshot(DataObject snapObj, DataObject volObj) { SnapshotVO snapshot = this.snapshotDao.findById(snapObj.getId()); StoragePool pool = (StoragePool) volObj.getDataStore(); String vdiUUID = null; @@ -227,8 +239,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if ((answer 
!= null) && answer.getResult()) { snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); } else { - return "Unable to upgrade snapshot from 2.1 to 2.2 for " - + snapshot.getId(); + throw new CloudRuntimeException("Unable to upgrade snapshot from 2.1 to 2.2 for " + + snapshot.getId()); } } } @@ -277,11 +289,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { snapshotMgr.deleteSnapshotsDirForVolume( secondaryStoragePoolUrl, dcId, accountId, volumeId); } - snapshotDao.unlockFromLockTable(snapshotId.toString()); } } - protected String cloneVolume(DataObject template, DataObject volume) { + protected Answer cloneVolume(DataObject template, DataObject volume) { VolumeInfo volInfo = (VolumeInfo)volume; DiskOfferingVO offering = diskOfferingDao.findById(volInfo.getDiskOfferingId()); VMTemplateStoragePoolVO tmpltStoredOn = templatePoolDao.findByPoolTemplate(template.getDataStore().getId(), template.getId()); @@ -298,8 +309,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { answer = storagMgr.sendToPool(pool, null, cmd); } catch (StorageUnavailableException e) { s_logger.debug("Failed to send to storage pool", e); - errMsg = e.toString(); - return errMsg; + throw new CloudRuntimeException("Failed to send to storage pool", e); } if (answer.getResult()) { @@ -327,10 +337,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { errMsg = answer.getDetails(); } - return errMsg; + return answer; } - protected String copyVolumeBetweenPools(DataObject srcData, DataObject destData) { + protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) { VolumeInfo volume = (VolumeInfo)srcData; VolumeInfo destVolume = (VolumeInfo)destData; String secondaryStorageURL = this.templateMgr.getSecondaryStorageURL(volume @@ -380,41 +390,45 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { VolumeVO destVol = this.volDao.findById(destVolume.getId()); 
destVol.setPath(cvAnswer.getVolumePath()); this.volDao.update(destVol.getId(), destVol); - return null; + return cvAnswer; } @Override public Void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { + Answer answer = null; String errMsg = null; try { if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Image) { - errMsg = copyVolumeFromImage(srcData, destData); + answer = copyVolumeFromImage(srcData, destData); } else if (destData.getType() == DataObjectType.TEMPLATE && srcData.getType() == DataObjectType.TEMPLATE) { - copyTemplate(srcData, destData); + answer = copyTemplate(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { - errMsg = copyFromSnapshot(srcData, destData); + answer = copyFromSnapshot(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { - errMsg = createTemplateFromSnashot(srcData, destData); + answer = createTemplateFromSnashot(srcData, destData); } else if (srcData.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.TEMPLATE) { - errMsg = createTemplateFromVolume(srcData, destData); + answer = createTemplateFromVolume(srcData, destData); } else if (srcData.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.VOLUME) { - errMsg = cloneVolume(srcData, destData); + answer = cloneVolume(srcData, destData); } else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary) { - errMsg = copyVolumeBetweenPools(srcData, destData); + answer = copyVolumeBetweenPools(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT && + destData.getType() == DataObjectType.SNAPSHOT) { + answer = copySnapshot(srcData, destData); } } 
catch (Exception e) { s_logger.debug("copy failed", e); errMsg = e.toString(); } - CopyCommandResult result = new CopyCommandResult(null); + CopyCommandResult result = new CopyCommandResult(null, answer); result.setResult(errMsg); callback.complete(result); @@ -422,7 +436,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } @DB - protected String createTemplateFromSnashot(DataObject srcData, + protected Answer createTemplateFromSnashot(DataObject srcData, DataObject destData) { long snapshotId = srcData.getId(); SnapshotVO snapshot = snapshotDao.findById(snapshotId); @@ -538,7 +552,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } @DB - protected String sendCommand(Command cmd, StoragePool pool, + protected Answer sendCommand(Command cmd, StoragePool pool, long templateId, long zoneId, long hostId) { CreatePrivateTemplateAnswer answer = null; @@ -551,11 +565,8 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { e); } - if (answer == null) { - return "Failed to execute CreatePrivateTemplateFromSnapshotCommand"; - } else if (!answer.getResult()) { - return "Failed to execute CreatePrivateTemplateFromSnapshotCommand" - + answer.getDetails(); + if (answer == null || !answer.getResult()) { + return answer; } VMTemplateVO privateTemplate = templateDao.findById(templateId); @@ -594,10 +605,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { templateHostVO.setPhysicalSize(answer.getphysicalSize()); templateHostDao.persist(templateHostVO); txn.close(); - return null; + return answer; } - private String createTemplateFromVolume(DataObject srcObj, + private Answer createTemplateFromVolume(DataObject srcObj, DataObject destObj) { long volumeId = srcObj.getId(); VolumeVO volume = this.volDao.findById(volumeId); @@ -633,5 +644,82 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { return sendCommand(cmd, pool, template.getId(), zoneId, 
secondaryStorageHost.getId()); } + + private HostVO getSecHost(long volumeId, long dcId) { + Long id = snapshotDao.getSecHostId(volumeId); + if ( id != null) { + return hostDao.findById(id); + } + return this.templateMgr.getSecondaryStorageHost(dcId); + } + + protected Answer copySnapshot(DataObject srcObject, DataObject destObject) { + SnapshotInfo srcSnapshot = (SnapshotInfo)srcObject; + VolumeInfo baseVolume = srcSnapshot.getBaseVolume(); + Long dcId = baseVolume.getDataCenterId(); + Long accountId = baseVolume.getAccountId(); + + HostVO secHost = getSecHost(baseVolume.getId(), baseVolume.getDataCenterId()); + + String secondaryStoragePoolUrl = secHost.getStorageUrl(); + String snapshotUuid = srcSnapshot.getPath(); + // In order to verify that the snapshot is not empty, + // we check if the parent of the snapshot is not the same as the parent of the previous snapshot. + // We pass the uuid of the previous snapshot to the plugin to verify this. + SnapshotVO prevSnapshot = null; + String prevSnapshotUuid = null; + String prevBackupUuid = null; + + + SwiftTO swift = _swiftMgr.getSwiftTO(); + S3TO s3 = _s3Mgr.getS3TO(); + + long prevSnapshotId = srcSnapshot.getPrevSnapshotId(); + if (prevSnapshotId > 0) { + prevSnapshot = snapshotDao.findByIdIncludingRemoved(prevSnapshotId); + if ( prevSnapshot.getBackupSnapshotId() != null && swift == null) { + if (prevSnapshot.getVersion() != null && prevSnapshot.getVersion().equals("2.2")) { + prevBackupUuid = prevSnapshot.getBackupSnapshotId(); + prevSnapshotUuid = prevSnapshot.getPath(); + } + } else if ((prevSnapshot.getSwiftId() != null && swift != null) + || (prevSnapshot.getS3Id() != null && s3 != null)) { + prevBackupUuid = prevSnapshot.getBackupSnapshotId(); + prevSnapshotUuid = prevSnapshot.getPath(); + } + } + boolean isVolumeInactive = this.volumeMgr.volumeInactive(baseVolume); + String vmName = this.volumeMgr.getVmNameOnVolume(baseVolume); + StoragePool srcPool = 
(StoragePool)dataStoreMgr.getPrimaryDataStore(baseVolume.getPoolId()); + String value = configDao.getValue(Config.BackupSnapshotWait.toString()); + int _backupsnapshotwait = NumbersUtil.parseInt(value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue())); + BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(secondaryStoragePoolUrl, dcId, accountId, baseVolume.getId(), srcSnapshot.getId(), baseVolume.getPath(), srcPool, snapshotUuid, + srcSnapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); + + if ( swift != null ) { + backupSnapshotCommand.setSwift(swift); + } else if (s3 != null) { + backupSnapshotCommand.setS3(s3); + } + BackupSnapshotAnswer answer = (BackupSnapshotAnswer) this.snapshotMgr.sendToPool(baseVolume, backupSnapshotCommand); + if (answer != null && answer.getResult()) { + SnapshotVO snapshotVO = this.snapshotDao.findById(srcSnapshot.getId()); + if (backupSnapshotCommand.getSwift() != null ) { + snapshotVO.setSwiftId(swift.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } else if (backupSnapshotCommand.getS3() != null) { + snapshotVO.setS3Id(s3.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } else { + snapshotVO.setSecHostId(secHost.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } + if (answer.isFull()) { + snapshotVO.setPrevSnapshotId(0L); + } + this.snapshotDao.update(srcSnapshot.getId(), snapshotVO); + } + return answer; + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java index 6a7d78a972a..0a91186aaab 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java @@ -105,13 +105,6 @@ public class SnapshotEntityImpl implements 
SnapshotEntity { return null; } - @Override - public Type getType() { - // TODO Auto-generated method stub - return null; - } - - @Override public HypervisorType getHypervisorType() { // TODO Auto-generated method stub @@ -190,4 +183,10 @@ public class SnapshotEntityImpl implements SnapshotEntity { return null; } + @Override + public Type getRecurringType() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java deleted file mode 100644 index 8c4c815eb7d..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategy.java +++ /dev/null @@ -1,25 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.storage.snapshot; - -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; - -public interface SnapshotStrategy { - public boolean takeSnapshot(SnapshotInfo snapshot); - public boolean revertSnapshot(SnapshotInfo snapshot); - public boolean deleteSnapshot(SnapshotInfo snapshot); -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index 72c1843da42..f2a999330ad 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; @@ -42,7 +43,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.log4j.Logger; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java index 9946fba5f63..657ba80e971 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java @@ -29,261 +29,325 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ManageSnapshotAnswer; +import com.cloud.agent.api.ManageSnapshotCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.ResizeVolumeAnswer; +import com.cloud.agent.api.storage.ResizeVolumeCommand; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.SnapshotVO; import 
com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.dao.VMInstanceDao; public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger - .getLogger(AncientPrimaryDataStoreDriverImpl.class); - @Inject DiskOfferingDao diskOfferingDao; - @Inject VMTemplateDao templateDao; - @Inject VolumeDao volumeDao; - @Inject TemplateManager templateMgr; - @Inject HostDao hostDao; - @Inject StorageManager storageMgr; - @Inject VMInstanceDao vmDao; - @Inject PrimaryDataStoreDao primaryStoreDao; - @Override - public String grantAccess(DataObject data, EndPoint ep) { - // TODO Auto-generated method stub - return null; - } + private static final Logger s_logger = Logger + .getLogger(AncientPrimaryDataStoreDriverImpl.class); + @Inject DiskOfferingDao diskOfferingDao; + @Inject VMTemplateDao templateDao; + @Inject VolumeDao volumeDao; + @Inject TemplateManager templateMgr; + @Inject HostDao hostDao; + @Inject StorageManager storageMgr; + @Inject VolumeManager volumeMgr; + @Inject VMInstanceDao vmDao; + @Inject SnapshotDao snapshotDao; + @Inject PrimaryDataStoreDao primaryStoreDao; + @Inject SnapshotManager snapshotMgr; + @Override + public String grantAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } - @Override - public boolean revokeAccess(DataObject data, EndPoint ep) { - // TODO 
Auto-generated method stub - return false; - } + @Override + public boolean revokeAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } - @Override - public Set listObjects(DataStore store) { - // TODO Auto-generated method stub - return null; - } + @Override + public Set listObjects(DataStore store) { + // TODO Auto-generated method stub + return null; + } - public boolean createVolume( - VolumeInfo volume) throws StorageUnavailableException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating volume: " + volume); - } + public boolean createVolume( + VolumeInfo volume) throws StorageUnavailableException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating volume: " + volume); + } + + DiskOfferingVO offering = diskOfferingDao.findById(volume.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(volume, offering, + null); + + VMTemplateVO template = null; + if (volume.getTemplateId() != null) { + template = templateDao.findById(volume.getTemplateId()); + } + + StoragePool pool = (StoragePool)volume.getDataStore(); + VolumeVO vol = volumeDao.findById(volume.getId()); + if (pool != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to create in " + pool); + } + vol.setPoolId(pool.getId()); + + CreateCommand cmd = null; + VMTemplateStoragePoolVO tmpltStoredOn = null; + + for (int i = 0; i < 2; i++) { + if (template != null + && template.getFormat() != Storage.ImageFormat.ISO) { + if (pool.getPoolType() == StoragePoolType.CLVM) { + // prepareISOForCreate does what we need, which is to + // tell us where the template is + VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + s_logger.debug("cannot find template " + + template.getId() + " " + + template.getName()); + throw new CloudRuntimeException("cannot find template" + + template.getId() + + template.getName()); + } + HostVO secondaryStorageHost = hostDao + 
.findById(tmpltHostOn.getHostId()); + String tmpltHostUrl = secondaryStorageHost + .getStorageUrl(); + String fullTmpltUrl = tmpltHostUrl + "/" + + tmpltHostOn.getInstallPath(); + cmd = new CreateCommand(diskProfile, fullTmpltUrl, + new StorageFilerTO(pool)); + } else { + tmpltStoredOn = templateMgr.prepareTemplateForCreate( + template, pool); + if (tmpltStoredOn == null) { + s_logger.debug("Cannot use this pool " + pool + + " because we can't propagate template " + + template); + throw new CloudRuntimeException("Cannot use this pool " + pool + + " because we can't propagate template " + + template); + } + cmd = new CreateCommand(diskProfile, + tmpltStoredOn.getLocalDownloadPath(), + new StorageFilerTO(pool)); + } + } else { + if (template != null + && Storage.ImageFormat.ISO == template.getFormat()) { + VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + throw new CloudRuntimeException( + "Did not find ISO in secondry storage in zone " + + pool.getDataCenterId()); + } + } + cmd = new CreateCommand(diskProfile, new StorageFilerTO( + pool)); + } + + Answer answer = storageMgr.sendToPool(pool, null, cmd); + if (answer.getResult()) { + CreateAnswer createAnswer = (CreateAnswer) answer; + vol.setFolder(pool.getPath()); + vol.setPath(createAnswer.getVolume().getPath()); + vol.setSize(createAnswer.getVolume().getSize()); + vol.setPoolType(pool.getPoolType()); + vol.setPoolId(pool.getId()); + vol.setPodId(pool.getPodId()); + this.volumeDao.update(vol.getId(), vol); + return true; + } else { + if (tmpltStoredOn != null + && (answer instanceof CreateAnswer) + && ((CreateAnswer) answer) + .templateReloadRequested()) { + if (!templateMgr + .resetTemplateDownloadStateOnPool(tmpltStoredOn + .getId())) { + break; // break out of template-redeploy retry loop + } + } else { + break; + } + } + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Unable to create volume " + volume.getId()); + } + return false; + } 
+ + @Override + public void createAsync(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + String errMsg = null; + if (data.getType() == DataObjectType.VOLUME) { + try { + createVolume((VolumeInfo)data); + } catch (StorageUnavailableException e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } catch (Exception e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } + } + CreateCmdResult result = new CreateCmdResult(null, null); + if (errMsg != null) { + result.setResult(errMsg); + } + + callback.complete(result); + + } + + @Override + public void deleteAsync(DataObject data, + AsyncCompletionCallback callback) { + + String vmName = null; + VolumeVO vol = this.volumeDao.findById(data.getId()); + + + StoragePool pool = (StoragePool)data.getDataStore(); + + DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); + + CommandResult result = new CommandResult(); + try { + Answer answer = this.storageMgr.sendToPool(pool, cmd); + if (answer != null && !answer.getResult()) { + result.setResult(answer.getDetails()); + s_logger.info("Will retry delete of " + vol + " from " + pool.getId()); + } + } catch (StorageUnavailableException e) { + s_logger.error("Storage is unavailable currently. 
Will retry delete of " + + vol + " from " + pool.getId(), e); + result.setResult(e.toString()); + } catch (Exception ex) { + s_logger.debug("Unable to destoy volume" + vol + " from " + pool.getId(), ex); + result.setResult(ex.toString()); + } + callback.complete(result); + } + + @Override + public void copyAsync(DataObject srcdata, DataObject destData, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return false; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + VolumeInfo volume = snapshot.getBaseVolume(); + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); + StoragePool srcPool = (StoragePool)volume.getDataStore(); + + ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshot.getId(), volume.getPath(), srcPool, preSnapshotVO.getPath(), snapshot.getName(), vmName); + + ManageSnapshotAnswer answer = (ManageSnapshotAnswer) this.snapshotMgr.sendToPool(volume, cmd); - DiskOfferingVO offering = diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskProfile diskProfile = new DiskProfile(volume, offering, - null); - - VMTemplateVO template = null; - if (volume.getTemplateId() != null) { - template = templateDao.findById(volume.getTemplateId()); - } - - StoragePool pool = (StoragePool)volume.getDataStore(); - VolumeVO vol = volumeDao.findById(volume.getId()); - if (pool != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create in " + pool); - } - vol.setPoolId(pool.getId()); - - CreateCommand cmd = null; - VMTemplateStoragePoolVO tmpltStoredOn = null; - - for (int i = 0; i < 2; i++) { - if (template != null - && template.getFormat() != Storage.ImageFormat.ISO) { - if (pool.getPoolType() == StoragePoolType.CLVM) { - // prepareISOForCreate does 
what we need, which is to - // tell us where the template is - VMTemplateHostVO tmpltHostOn = templateMgr - .prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - s_logger.debug("cannot find template " - + template.getId() + " " - + template.getName()); - throw new CloudRuntimeException("cannot find template" - + template.getId() - + template.getName()); - } - HostVO secondaryStorageHost = hostDao - .findById(tmpltHostOn.getHostId()); - String tmpltHostUrl = secondaryStorageHost - .getStorageUrl(); - String fullTmpltUrl = tmpltHostUrl + "/" - + tmpltHostOn.getInstallPath(); - cmd = new CreateCommand(diskProfile, fullTmpltUrl, - new StorageFilerTO(pool)); - } else { - tmpltStoredOn = templateMgr.prepareTemplateForCreate( - template, pool); - if (tmpltStoredOn == null) { - s_logger.debug("Cannot use this pool " + pool - + " because we can't propagate template " - + template); - throw new CloudRuntimeException("Cannot use this pool " + pool - + " because we can't propagate template " - + template); - } - cmd = new CreateCommand(diskProfile, - tmpltStoredOn.getLocalDownloadPath(), - new StorageFilerTO(pool)); - } - } else { - if (template != null - && Storage.ImageFormat.ISO == template.getFormat()) { - VMTemplateHostVO tmpltHostOn = templateMgr - .prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - throw new CloudRuntimeException( - "Did not find ISO in secondry storage in zone " - + pool.getDataCenterId()); - } - } - cmd = new CreateCommand(diskProfile, new StorageFilerTO( - pool)); - } - - Answer answer = storageMgr.sendToPool(pool, null, cmd); - if (answer.getResult()) { - CreateAnswer createAnswer = (CreateAnswer) answer; - vol.setFolder(pool.getPath()); - vol.setPath(createAnswer.getVolume().getPath()); - vol.setSize(createAnswer.getVolume().getSize()); - vol.setPoolType(pool.getPoolType()); - vol.setPoolId(pool.getId()); - vol.setPodId(pool.getPodId()); - this.volumeDao.update(vol.getId(), vol); - return true; - } else { - if 
(tmpltStoredOn != null - && (answer instanceof CreateAnswer) - && ((CreateAnswer) answer) - .templateReloadRequested()) { - if (!templateMgr - .resetTemplateDownloadStateOnPool(tmpltStoredOn - .getId())) { - break; // break out of template-redeploy retry loop - } - } else { - break; - } - } - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to create volume " + volume.getId()); - } - return false; - } - - @Override - public void createAsync(DataObject data, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - String errMsg = null; - if (data.getType() == DataObjectType.VOLUME) { - try { - createVolume((VolumeInfo)data); - } catch (StorageUnavailableException e) { - s_logger.debug("failed to create volume", e); - errMsg = e.toString(); - } catch (Exception e) { - s_logger.debug("failed to create volume", e); - errMsg = e.toString(); - } - } - CreateCmdResult result = new CreateCmdResult(null, null); - if (errMsg != null) { - result.setResult(errMsg); + CreateCmdResult result = null; + if ((answer != null) && answer.getResult()) { + result = new CreateCmdResult(answer.getSnapshotPath(), null); + } else { + result = new CreateCmdResult(null, null); } callback.complete(result); - - } + } - @Override - public void deleteAsync(DataObject data, - AsyncCompletionCallback callback) { + @Override + public void revertSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub - String vmName = null; - VolumeVO vol = this.volumeDao.findById(data.getId()); + } + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + VolumeObject vol = (VolumeObject)data; + StoragePool pool = (StoragePool)data.getDataStore(); + ResizeVolumePayload resizeParameter = (ResizeVolumePayload)vol.getpayload(); - StoragePool pool = (StoragePool)data.getDataStore(); + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand( + vol.getPath(), new StorageFilerTO(pool), vol.getSize(), + 
resizeParameter.newSize, resizeParameter.shrinkOk, resizeParameter.instanceName); + CreateCmdResult result = new CreateCmdResult(null, null); + try { + ResizeVolumeAnswer answer = (ResizeVolumeAnswer) this.storageMgr.sendToPool(pool, + resizeParameter.hosts, resizeCmd); + if (answer != null && answer.getResult()) { + long finalSize = answer.getNewSize(); + s_logger.debug("Resize: volume started at size " + vol.getSize() + + " and ended at size " + finalSize); - DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); + vol.setSize(finalSize); + vol.update(); + } else if (answer != null) { + result.setResult(answer.getDetails()); + } else { + s_logger.debug("return a null answer, mark it as failed for unknown reason"); + result.setResult("return a null answer, mark it as failed for unknown reason"); + } - CommandResult result = new CommandResult(); - try { - Answer answer = this.storageMgr.sendToPool(pool, cmd); - if (answer != null && !answer.getResult()) { - result.setResult(answer.getDetails()); - s_logger.info("Will retry delete of " + vol + " from " + pool.getId()); - } - } catch (StorageUnavailableException e) { - s_logger.error("Storage is unavailable currently. 
Will retry delete of " - + vol + " from " + pool.getId(), e); - result.setResult(e.toString()); - } catch (Exception ex) { - s_logger.debug("Unable to destoy volume" + vol + " from " + pool.getId(), ex); - result.setResult(ex.toString()); - } - callback.complete(result); - } + } catch (Exception e) { + s_logger.debug("sending resize command failed", e); + result.setResult(e.toString()); + } - @Override - public void copyAsync(DataObject srcdata, DataObject destData, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public boolean canCopy(DataObject srcData, DataObject destData) { - // TODO Auto-generated method stub - return false; - } - - @Override - public void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - - @Override - public void revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } + callback.complete(result); + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java index efd04d18294..6d0c2c6862b 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import 
org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -36,7 +37,6 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -210,13 +210,6 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver return null; } - @Override - public void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - @Override public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { @@ -238,5 +231,19 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver // TODO Auto-generated method stub } + + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java index fdbe4b47c1e..f395061d633 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java @@ -27,13 +27,13 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; 
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.springframework.stereotype.Component; @Component diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java index 702ab238ba8..0ef17040a52 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java @@ -23,10 +23,10 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl; import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; diff 
--git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index 85a5d0226d7..a1402c13b3d 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -22,10 +22,10 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java index 87951ceeb64..6ad6cc9486f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.log4j.Logger; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import 
com.cloud.storage.dao.VolumeDao; @@ -73,6 +74,10 @@ public class VolumeObject implements VolumeInfo { public void setPath(String uuid) { volumeVO.setPath(uuid); } + + public void setSize(Long size) { + volumeVO.setSize(size); + } public Volume.State getState() { return volumeVO.getState(); @@ -182,6 +187,8 @@ public class VolumeObject implements VolumeInfo { volEvent = Volume.Event.OperationSucceeded; } else if (event == ObjectInDataStoreStateMachine.Event.OperationFailed) { volEvent = Volume.Event.OperationFailed; + } else if (event == ObjectInDataStoreStateMachine.Event.ResizeRequested) { + volEvent = Volume.Event.ResizeRequested; } this.stateTransit(volEvent); } catch (Exception e) { @@ -310,4 +317,14 @@ public class VolumeObject implements VolumeInfo { public Object getpayload() { return this.payload; } + + @Override + public HypervisorType getHypervisorType() { + return this.volumeDao.getHypervisorType(this.volumeVO.getId()); + } + + @Override + public Long getLastPoolId() { + return this.volumeVO.getLastPoolId(); + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index ef99a49b809..c019374d9b9 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -56,9 +56,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; -//1. change volume state -//2. orchestrator of volume, control most of the information of volume, storage pool id, voluem state, scope etc. 
- @Component public class VolumeServiceImpl implements VolumeService { private static final Logger s_logger = Logger @@ -423,8 +420,49 @@ public class VolumeServiceImpl implements VolumeService { public AsyncCallFuture createVolumeFromSnapshot( VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { AsyncCallFuture future = new AsyncCallFuture(); - VolumeApiResult result = new VolumeApiResult(volume); - return null; + + try { + DataObject volumeOnStore = store.create(volume); + volume.processEvent(Event.CreateOnlyRequested); + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, + (VolumeObject)volume, store, volumeOnStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)) + .setContext(context); + this.motionSrv.copyAsync(snapshot, volumeOnStore, caller); + } catch (Exception e) { + s_logger.debug("create volume from snapshot failed", e); + VolumeApiResult result = new VolumeApiResult(volume); + result.setResult(e.toString()); + future.complete(result); + } + + return future; + } + + protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher callback, + CreateVolumeFromBaseImageContext context) { + CopyCommandResult result = callback.getResult(); + VolumeInfo volume = context.vo; + VolumeApiResult apiResult = new VolumeApiResult(volume); + Event event = null; + if (result.isFailed()) { + apiResult.setResult(result.getResult()); + event = Event.OperationFailed; + } else { + event = Event.OperationSuccessed; + } + + try { + volume.processEvent(event); + } catch (Exception e) { + s_logger.debug("create volume from snapshot failed", e); + apiResult.setResult(e.toString()); + } + + AsyncCallFuture future = context.future; + future.complete(apiResult); + return null; } protected VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePool pool) { @@ -552,5 +590,61 @@ public class VolumeServiceImpl 
implements VolumeService { context.future.complete(res); return null; } + + + @Override + public AsyncCallFuture resize(VolumeInfo volume) { + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult result = new VolumeApiResult(volume); + try { + volume.processEvent(Event.ResizeRequested); + } catch (Exception e) { + s_logger.debug("Failed to change state to resize", e); + result.setResult(e.toString()); + future.complete(result); + return future; + } + CreateVolumeContext context = new CreateVolumeContext(null, volume, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)) + .setContext(context); + volume.getDataStore().getDriver().resize(volume, caller); + return future; + } + + protected Void resizeVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + CreateCmdResult result = callback.getResult(); + AsyncCallFuture future = context.future; + VolumeInfo volume = (VolumeInfo)context.volume; + + if (result.isFailed()) { + try { + volume.processEvent(Event.OperationFailed); + } catch (Exception e) { + s_logger.debug("Failed to change state", e); + } + VolumeApiResult res = new VolumeApiResult(volume); + res.setResult(result.getResult()); + future.complete(res); + return null; + } + + try { + volume.processEvent(Event.OperationSuccessed); + } catch(Exception e) { + s_logger.debug("Failed to change state", e); + VolumeApiResult res = new VolumeApiResult(volume); + res.setResult(result.getResult()); + future.complete(res); + return null; + } + + VolumeApiResult res = new VolumeApiResult(volume); + future.complete(res); + + return null; + } + + } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index 88c53740f32..f31126c2aeb 
100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -24,9 +24,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @@ -72,17 +72,25 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { return false; } - @Override - public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - @Override public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { // TODO Auto-generated method stub } + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + } diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index 0a203528f85..c28daefa69a 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -618,7 +618,7 @@ public class ApiDBUtils { public static String getSnapshotIntervalTypes(long snapshotId) { SnapshotVO snapshot = 
_snapshotDao.findById(snapshotId); - return snapshot.getType().name(); + return snapshot.getRecurringType().name(); } public static String getStoragePoolTags(long poolId) { diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 3da31689d1d..845b242d573 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -341,7 +341,7 @@ public class ApiResponseHelper implements ResponseGenerator { populateOwner(snapshotResponse, snapshot); VolumeVO volume = findVolumeById(snapshot.getVolumeId()); - String snapshotTypeStr = snapshot.getType().name(); + String snapshotTypeStr = snapshot.getRecurringType().name(); snapshotResponse.setSnapshotType(snapshotTypeStr); if (volume != null) { snapshotResponse.setVolumeId(volume.getUuid()); diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index c0c23b6b641..8c77715c0cf 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -137,7 +137,8 @@ public enum Config { SnapshotMonthlyMax("Snapshots", SnapshotManager.class, Integer.class, "snapshot.max.monthly", "8", "Maximum monthly snapshots for a volume", null), SnapshotPollInterval("Snapshots", SnapshotManager.class, Integer.class, "snapshot.poll.interval", "300", "The time interval in seconds when the management server polls for snapshots to be scheduled.", null), SnapshotDeltaMax("Snapshots", SnapshotManager.class, Integer.class, "snapshot.delta.max", "16", "max delta snapshots between two full snapshots.", null), - + BackupSnapshotAferTakingSnapshot("Snapshots", SnapshotManager.class, Boolean.class, "snapshot.backup.rightafter", "true", "backup snapshot right after snapshot is taken", null), + // Advanced JobExpireMinutes("Advanced", ManagementServer.class, String.class, "job.expire.minutes", "1440", "Time (in minutes) for async-jobs to be kept in 
system", null), JobCancelThresholdMinutes("Advanced", ManagementServer.class, String.class, "job.cancel.threshold.minutes", "60", "Time (in minutes) for async-jobs to be forcely cancelled if it has been in process for long", null), diff --git a/server/src/com/cloud/storage/ResizeVolumePayload.java b/server/src/com/cloud/storage/ResizeVolumePayload.java new file mode 100644 index 00000000000..472d627dd4a --- /dev/null +++ b/server/src/com/cloud/storage/ResizeVolumePayload.java @@ -0,0 +1,14 @@ +package com.cloud.storage; + +public class ResizeVolumePayload { + public final Long newSize; + public final boolean shrinkOk; + public final String instanceName; + public final long[] hosts; + public ResizeVolumePayload(Long newSize, boolean shrinkOk, String instanceName, long[] hosts) { + this.newSize = newSize; + this.shrinkOk = shrinkOk; + this.instanceName = instanceName; + this.hosts = hosts; + } +} diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java index ebb9e54cd35..af3cbbfbae5 100644 --- a/server/src/com/cloud/storage/VolumeManager.java +++ b/server/src/com/cloud/storage/VolumeManager.java @@ -52,9 +52,9 @@ public interface VolumeManager extends VolumeApiService { boolean volumeOnSharedStoragePool(VolumeVO volume); - boolean volumeInactive(VolumeVO volume); + boolean volumeInactive(Volume volume); - String getVmNameOnVolume(VolumeVO volume); + String getVmNameOnVolume(Volume volume); VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java index 573b8e90e12..a69607f1f3f 100644 --- a/server/src/com/cloud/storage/VolumeManagerImpl.java +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -66,12 +66,6 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.AttachVolumeAnswer; import 
com.cloud.agent.api.AttachVolumeCommand; -import com.cloud.agent.api.storage.CopyVolumeAnswer; -import com.cloud.agent.api.storage.CopyVolumeCommand; -import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.ResizeVolumeAnswer; -import com.cloud.agent.api.storage.ResizeVolumeCommand; -import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -763,7 +757,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } @Override - public boolean volumeInactive(VolumeVO volume) { + public boolean volumeInactive(Volume volume) { Long vmId = volume.getInstanceId(); if (vmId != null) { UserVm vm = _userVmDao.findById(vmId); @@ -779,7 +773,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { } @Override - public String getVmNameOnVolume(VolumeVO volume) { + public String getVmNameOnVolume(Volume volume) { Long vmId = volume.getInstanceId(); if (vmId != null) { VMInstanceVO vm = _vmInstanceDao.findById(vmId); @@ -1013,7 +1007,6 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { Long newSize = null; boolean shrinkOk = cmd.getShrinkOk(); - boolean success = false; VolumeVO volume = _volsDao.findById(cmd.getEntityId()); if (volume == null) { @@ -1170,64 +1163,31 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager { "VM must be stopped or disk detached in order to resize with the Xen HV"); } } - + + ResizeVolumePayload payload = new ResizeVolumePayload(newSize, shrinkOk, instanceName, hosts); + try { - try { - stateTransitTo(volume, Volume.Event.ResizeRequested); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException( - "Unable to change volume state for resize: " - + etrans.toString()); - } + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + 
vol.addPayload(payload); + + AsyncCallFuture future = this.volService.resize(vol); + future.get(); + volume = _volsDao.findById(volume.getId()); - ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand( - volume.getPath(), new StorageFilerTO(pool), currentSize, - newSize, shrinkOk, instanceName); - ResizeVolumeAnswer answer = (ResizeVolumeAnswer) this.storageMgr.sendToPool(pool, - hosts, resizeCmd); + if (newDiskOffering != null) { + volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); + } + _volsDao.update(volume.getId(), volume); - /* - * need to fetch/store new volume size in database. This value comes - * from hypervisor rather than trusting that a success means we have - * a volume of the size we requested - */ - if (answer != null && answer.getResult()) { - long finalSize = answer.getNewSize(); - s_logger.debug("Resize: volume started at size " + currentSize - + " and ended at size " + finalSize); - volume.setSize(finalSize); - if (newDiskOffering != null) { - volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); - } - _volsDao.update(volume.getId(), volume); - - success = true; - return volume; - } else if (answer != null) { - s_logger.debug("Resize: returned '" + answer.getDetails() + "'"); - } - } catch (StorageUnavailableException e) { - s_logger.debug("volume failed to resize: " + e); - return null; - } finally { - if (success) { - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException( - "Failed to change volume state: " - + etrans.toString()); - } - } else { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException( - "Failed to change volume state: " - + etrans.toString()); - } - } - } + return volume; + } catch (InterruptedException e) { + s_logger.debug("failed get resize volume result", e); + } catch (ExecutionException e) { + s_logger.debug("failed get resize volume result", e); 
+ } catch (Exception e) { + s_logger.debug("failed get resize volume result", e); + } + return null; } diff --git a/server/src/com/cloud/storage/dao/SnapshotDao.java b/server/src/com/cloud/storage/dao/SnapshotDao.java index 3b961f6fa89..0e378a724b4 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDao.java +++ b/server/src/com/cloud/storage/dao/SnapshotDao.java @@ -25,7 +25,7 @@ import com.cloud.utils.fsm.StateDao; import java.util.List; -public interface SnapshotDao extends GenericDao, StateDao { +public interface SnapshotDao extends GenericDao, StateDao { List listByVolumeId(long volumeId); List listByVolumeId(Filter filter, long volumeId); SnapshotVO findNextSnapshot(long parentSnapId); diff --git a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java index a8a07dcc3a6..5b3f2732f99 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -324,7 +324,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements } @Override - public boolean updateState(State currentState, Event event, State nextState, Snapshot snapshot, Object data) { + public boolean updateState(State currentState, Event event, State nextState, SnapshotVO snapshot, Object data) { Transaction txn = Transaction.currentTxn(); txn.start(); SnapshotVO snapshotVO = (SnapshotVO)snapshot; diff --git a/server/src/com/cloud/storage/listener/SnapshotStateListener.java b/server/src/com/cloud/storage/listener/SnapshotStateListener.java index 17ccce54c82..8f94f23a27c 100644 --- a/server/src/com/cloud/storage/listener/SnapshotStateListener.java +++ b/server/src/com/cloud/storage/listener/SnapshotStateListener.java @@ -17,24 +17,24 @@ package com.cloud.storage.listener; -import com.cloud.event.EventCategory; -import com.cloud.storage.Snapshot; -import com.cloud.storage.Snapshot.Event; -import com.cloud.storage.Snapshot.State; -import com.cloud.server.ManagementServer; 
-import com.cloud.utils.fsm.StateListener; - -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; -import org.apache.log4j.Logger; - -import java.util.Enumeration; import java.util.HashMap; import java.util.Map; import javax.inject.Inject; -public class SnapshotStateListener implements StateListener { +import org.apache.cloudstack.framework.events.EventBus; +import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.log4j.Logger; + +import com.cloud.event.EventCategory; +import com.cloud.server.ManagementServer; +import com.cloud.storage.Snapshot; +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.SnapshotVO; +import com.cloud.utils.fsm.StateListener; + +public class SnapshotStateListener implements StateListener { // get the event bus provider if configured @Inject protected EventBus _eventBus; @@ -46,13 +46,13 @@ public class SnapshotStateListener implements StateListener listSnapsforPolicy(long policyId, Filter filter); - */ - /** - * List all policies which are assigned to the specified volume - */ - List listPoliciesforVolume(long volumeId); - - /** - * List all policies to which a specified snapshot belongs. For ex: A snapshot may belong to a hourly snapshot and a daily - * snapshot run at the same time - */ - /* - * List listPoliciesforSnapshot(long snapshotId); - */ - /** - * List all snapshots for a specified volume irrespective of the policy which created the snapshot - */ - List listSnapsforVolume(long volumeId); - void deletePoliciesForVolume(Long volumeId); /** @@ -109,35 +55,20 @@ public interface SnapshotManager { * The account which is to be deleted. 
*/ boolean deleteSnapshotDirsForAccount(long accountId); - - SnapshotPolicyVO getPolicyForVolume(long volumeId); - - boolean destroySnapshotBackUp(long snapshotId); - - /** - * Create a snapshot of a volume - * - * @param cmd - * the API command wrapping the parameters for creating the snapshot (mainly volumeId) - * @return the Snapshot that was created - */ - SnapshotVO createSnapshotOnPrimary(VolumeVO volume, Long polocyId, Long snapshotId) throws ResourceAllocationException; - - List listPoliciesforSnapshot(long snapshotId); - - List listSnapsforPolicy(long policyId, Filter filter); - + void downloadSnapshotsFromSwift(SnapshotVO ss); void downloadSnapshotsFromS3(SnapshotVO snapshot); - HostVO getSecondaryStorageHost(SnapshotVO snapshot); - String getSecondaryStorageURL(SnapshotVO snapshot); - void deleteSnapshotsForVolume (String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId ); - void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId); boolean canOperateOnVolume(Volume volume); + + Answer sendToPool(Volume vol, Command cmd); + + SnapshotVO getParentSnapshot(VolumeInfo volume, Snapshot snapshot); + + Snapshot backupSnapshot(Long snapshotId); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 58ca9a41cfa..7df99d67be2 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -32,20 +32,21 @@ import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; -import com.cloud.agent.api.BackupSnapshotAnswer; -import com.cloud.agent.api.BackupSnapshotCommand; import com.cloud.agent.api.Command; import com.cloud.agent.api.DeleteSnapshotBackupCommand; import com.cloud.agent.api.DeleteSnapshotsDirCommand; import com.cloud.agent.api.DownloadSnapshotFromS3Command; -import com.cloud.agent.api.ManageSnapshotAnswer; -import com.cloud.agent.api.ManageSnapshotCommand; import com.cloud.agent.api.downloadSnapshotFromSwiftCommand; import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.SwiftTO; @@ -65,7 +66,6 @@ import com.cloud.event.EventTypes; import com.cloud.event.EventVO; import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.EventDao; -import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; @@ -75,7 +75,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.resource.ResourceManager; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Type; @@ -83,7 +82,6 @@ import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotScheduleVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; -import 
com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateVO; @@ -97,7 +95,6 @@ import com.cloud.storage.dao.SnapshotScheduleDao; import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.listener.SnapshotStateListener; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; @@ -123,14 +120,8 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; @@ -189,8 +180,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject protected ClusterDao _clusterDao; @Inject - private UsageEventDao _usageEventDao; - @Inject private ResourceLimitService _resourceLimitMgr; @Inject private SwiftManager _swiftMgr; @@ -199,12 +188,8 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject private SecondaryStorageVmManager _ssvmMgr; @Inject - private ResourceManager _resourceMgr; - @Inject private DomainManager _domainMgr; @Inject - private VolumeDao _volumeDao; - @Inject private ResourceTagDao _resourceTagDao; @Inject private ConfigurationDao _configDao; @@ -216,21 +201,20 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject TemplateManager templateMgr; 
@Inject VolumeManager volumeMgr; @Inject DataStoreManager dataStoreMgr; + @Inject List snapshotStrategies; + @Inject VolumeDataFactory volFactory; + @Inject SnapshotDataFactory snapshotFactory; private int _totalRetries; private int _pauseInterval; - private int _deltaSnapshotMax; private int _backupsnapshotwait; - private StateMachine2 _snapshotFsm; - protected SearchBuilder PolicySnapshotSearch; protected SearchBuilder PoliciesForSnapSearch; - - - protected Answer sendToPool(Volume vol, Command cmd) { + @Override + public Answer sendToPool(Volume vol, Command cmd) { StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); long[] hostIdsToTryFirst = null; @@ -282,127 +266,11 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return null; } - @Override - public SnapshotVO createSnapshotOnPrimary(VolumeVO volume, Long policyId, Long snapshotId) { - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Can not find snapshot " + snapshotId); - } - - try { - stateTransitTo(snapshot, Snapshot.Event.CreateRequested); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); - } - - // Send a ManageSnapshotCommand to the agent - String vmName = this.volumeMgr.getVmNameOnVolume(volume); - long volumeId = volume.getId(); - long preId = _snapshotDao.getLastSnapshot(volumeId, snapshotId); - - String preSnapshotPath = null; - SnapshotVO preSnapshotVO = null; - if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) { - preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); - if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { - preSnapshotPath = preSnapshotVO.getPath(); - } - } - StoragePool srcPool = (StoragePool)dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - // RBD volumes do not support snapshotting in 
the way CloudStack does it. - // For now we leave the snapshot feature disabled for RBD volumes - if (srcPool.getPoolType() == StoragePoolType.RBD) { - throw new CloudRuntimeException("RBD volumes do not support snapshotting"); - } - - ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshotId, volume.getPath(), srcPool, preSnapshotPath, snapshot.getName(), vmName); - - ManageSnapshotAnswer answer = (ManageSnapshotAnswer) sendToPool(volume, cmd); - // Update the snapshot in the database - if ((answer != null) && answer.getResult()) { - // The snapshot was successfully created - if (preSnapshotPath != null && preSnapshotPath.equals(answer.getSnapshotPath())) { - // empty snapshot - s_logger.debug("CreateSnapshot: this is empty snapshot "); - try { - snapshot.setPath(preSnapshotPath); - snapshot.setBackupSnapshotId(preSnapshotVO.getBackupSnapshotId()); - snapshot.setSwiftId(preSnapshotVO.getSwiftId()); - snapshot.setPrevSnapshotId(preId); - snapshot.setSecHostId(preSnapshotVO.getSecHostId()); - stateTransitTo(snapshot, Snapshot.Event.OperationNotPerformed); - } catch (NoTransitionException nte) { - s_logger.debug("CreateSnapshot: failed to update state of snapshot due to " + nte.getMessage()); - } - } else { - long preSnapshotId = 0; - - if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { - preSnapshotId = preId; - // default delta snap number is 16 - int deltaSnap = _deltaSnapshotMax; - - int i; - for (i = 1; i < deltaSnap; i++) { - String prevBackupUuid = preSnapshotVO.getBackupSnapshotId(); - // previous snapshot doesn't have backup, create a full snapshot - if (prevBackupUuid == null) { - preSnapshotId = 0; - break; - } - long preSSId = preSnapshotVO.getPrevSnapshotId(); - if (preSSId == 0) { - break; - } - preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preSSId); - } - if (i >= deltaSnap) { - preSnapshotId = 0; - } - } - - //If the volume is moved around, backup a full snapshot to secondary storage - if (volume.getLastPoolId() 
!= null && !volume.getLastPoolId().equals(volume.getPoolId())) { - preSnapshotId = 0; - volume.setLastPoolId(volume.getPoolId()); - _volumeDao.update(volume.getId(), volume); - } - snapshot = updateDBOnCreate(snapshotId, answer.getSnapshotPath(), preSnapshotId); - } - // Get the snapshot_schedule table entry for this snapshot and - // policy id. - // Set the snapshotId to retrieve it back later. - if (policyId != Snapshot.MANUAL_POLICY_ID) { - SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, true); - assert snapshotSchedule != null; - snapshotSchedule.setSnapshotId(snapshotId); - _snapshotScheduleDao.update(snapshotSchedule.getId(), snapshotSchedule); - } - - } else { - if (answer != null) { - s_logger.error(answer.getDetails()); - } - try { - stateTransitTo(snapshot, Snapshot.Event.OperationFailed); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); - } - throw new CloudRuntimeException("Creating snapshot for volume " + volumeId + " on primary storage failed."); - } - - return snapshot; - } - - public SnapshotVO createSnapshotImpl(long volumeId, long policyId) throws ResourceAllocationException { - return null; - } - @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "creating snapshot", async = true) - public SnapshotVO createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner) { - VolumeVO volume = _volsDao.findById(volumeId); + public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner) { + VolumeInfo volume = this.volFactory.getVolume(volumeId); if (volume == null) { throw new InvalidParameterValueException("No such volume exist"); } @@ -411,120 +279,50 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new InvalidParameterValueException("Volume is not in ready state"); } - SnapshotVO snapshot = 
null; + SnapshotInfo snapshot = null; boolean backedUp = false; - UserVmVO uservm = null; // does the caller have the authority to act on this volume _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, volume); + + SnapshotInfo snap = this.snapshotFactory.getSnapshot(snapshotId); + SnapshotStrategy strategy = null; + for (SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snap)) { + strategy = st; + break; + } + } + try { - - Long poolId = volume.getPoolId(); - if (poolId == null) { - throw new CloudRuntimeException("You cannot take a snapshot of a volume until it has been attached to an instance"); - } - - if (_volsDao.getHypervisorType(volume.getId()).equals(HypervisorType.KVM)) { - uservm = _vmDao.findById(volume.getInstanceId()); - if (uservm != null && uservm.getType() != VirtualMachine.Type.User) { - throw new CloudRuntimeException("Can't take a snapshot on system vm "); - } - - StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); - ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); - List hosts = _resourceMgr.listAllHostsInCluster(cluster.getId()); - if (hosts != null && !hosts.isEmpty()) { - HostVO host = hosts.get(0); - if (!hostSupportSnapsthot(host)) { - throw new CloudRuntimeException("KVM Snapshot is not supported on cluster: " + host.getId()); - } - } - } - - // if volume is attached to a vm in destroyed or expunging state; disallow - // if volume is attached to a vm in taking vm snapshot; disallow - if (volume.getInstanceId() != null) { - UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); - if (userVm != null) { - if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { - throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volumeId + " is associated with vm:" + userVm.getInstanceName() + " is in " - + userVm.getState().toString() + " state"); - } - - if(userVm.getHypervisorType() == HypervisorType.VMware || 
userVm.getHypervisorType() == HypervisorType.KVM) { - List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); - if(activeSnapshots.size() > 1) - throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); - } - /*List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), - VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); - if (activeVMSnapshots.size() > 0) { - throw new CloudRuntimeException( - "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); - } */ - } - } - - snapshot = createSnapshotOnPrimary(volume, policyId, snapshotId); - if (snapshot != null) { - if (snapshot.getState() == Snapshot.State.CreatedOnPrimary) { - backedUp = backupSnapshotToSecondaryStorage(snapshot); - } else if (snapshot.getState() == Snapshot.State.BackedUp) { - // For empty snapshot we set status to BackedUp in createSnapshotOnPrimary - backedUp = true; - } else { - throw new CloudRuntimeException("Failed to create snapshot: " + snapshot + " on primary storage"); - } - if (!backedUp) { - throw new CloudRuntimeException("Created snapshot: " + snapshot + " on primary but failed to backup on secondary"); - } - } else { - throw new CloudRuntimeException("Failed to create snapshot: " + snapshot + " on primary storage"); - } - } finally { - // Cleanup jobs to do after the snapshot has been created; decrement resource count - if (snapshot != null) { - postCreateSnapshot(volumeId, snapshot.getId(), policyId, backedUp); - //Check if the snapshot was removed while backingUp. 
If yes, do not log snapshot create usage event - SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); - if ((freshSnapshot != null) && backedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), - snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, - volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); - } - if( !backedUp ) { - - } else { - _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); - } - } - - /* - try { - _storageMgr.stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit volume state: " + e.toString()); - }*/ + snapshot = strategy.takeSnapshot(volume, snapshotId); + if (snapshot != null) { + postCreateSnapshot(volumeId, snapshot.getId(), policyId); + //Check if the snapshot was removed while backingUp. If yes, do not log snapshot create usage event + SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); + if ((freshSnapshot != null) && backedUp) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), + snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, + volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + } + _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); + } + + Boolean backup = Boolean.parseBoolean(this._configDao.getValue(Config.BackupSnapshotAferTakingSnapshot.toString())); + if (backup) { + this.backupSnapshot(snapshotId); + } + } catch(Exception e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); } return snapshot; } - private SnapshotVO updateDBOnCreate(Long id, String snapshotPath, long preSnapshotId) { - SnapshotVO createdSnapshot = _snapshotDao.findByIdIncludingRemoved(id); - createdSnapshot.setPath(snapshotPath); - 
createdSnapshot.setPrevSnapshotId(preSnapshotId); - try { - stateTransitTo(createdSnapshot, Snapshot.Event.OperationSucceeded); - } catch (NoTransitionException nte) { - s_logger.debug("Faile to update state of snapshot due to " + nte.getMessage()); - } - return createdSnapshot; - } - - private static void checkObjectStorageConfiguration(SwiftTO swift, S3TO s3) { + private void checkObjectStorageConfiguration(SwiftTO swift, S3TO s3) { if (swift != null && s3 != null) { throw new CloudRuntimeException( @@ -533,26 +331,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } - @Override - public void deleteSnapshotsForVolume (String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId ){ - SwiftTO swift = _swiftMgr.getSwiftTO(); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand( - swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId, - null, true); - try { - Answer ans = _agentMgr.sendToSSVM(dcId, cmd); - if ( ans == null || !ans.getResult() ) { - s_logger.warn("DeleteSnapshotBackupCommand failed due to " + ans.getDetails() + " volume id: " + volumeId); - } - } catch (Exception e) { - s_logger.warn("DeleteSnapshotBackupCommand failed due to" + e.toString() + " volume id: " + volumeId); - } - } - @Override public void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId) { DeleteSnapshotsDirCommand cmd = new DeleteSnapshotsDirCommand(secondaryStoragePoolUrl, dcId, accountId, volumeId); @@ -566,6 +344,23 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } + @Override + public Snapshot backupSnapshot(Long snapshotId) { + SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Can't find snapshot:" + snapshotId); + } + + SnapshotStrategy strategy = null; + for 
(SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snapshot)) { + strategy = st; + break; + } + } + + return strategy.backupSnapshot(snapshot); + } @Override public void downloadSnapshotsFromSwift(SnapshotVO ss) { @@ -652,133 +447,17 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - + @Override - @DB - public boolean backupSnapshotToSecondaryStorage(SnapshotVO ss) { - long snapshotId = ss.getId(); - SnapshotVO snapshot = _snapshotDao.acquireInLockTable(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Can not acquire lock for snapshot: " + ss); - } - try { - try { - stateTransitTo(snapshot, Snapshot.Event.BackupToSecondary); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } + public SnapshotVO getParentSnapshot(VolumeInfo volume, Snapshot snapshot) { + long preId = _snapshotDao.getLastSnapshot(volume.getId(), snapshot.getId()); - long volumeId = snapshot.getVolumeId(); - VolumeVO volume = _volsDao.lockRow(volumeId, true); - - Long dcId = volume.getDataCenterId(); - Long accountId = volume.getAccountId(); - - HostVO secHost = getSecHost(volumeId, volume.getDataCenterId()); - - String secondaryStoragePoolUrl = secHost.getStorageUrl(); - String snapshotUuid = snapshot.getPath(); - // In order to verify that the snapshot is not empty, - // we check if the parent of the snapshot is not the same as the parent of the previous snapshot. - // We pass the uuid of the previous snapshot to the plugin to verify this. 
- SnapshotVO prevSnapshot = null; - String prevSnapshotUuid = null; - String prevBackupUuid = null; - - - SwiftTO swift = _swiftMgr.getSwiftTO(); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - long prevSnapshotId = snapshot.getPrevSnapshotId(); - if (prevSnapshotId > 0) { - prevSnapshot = _snapshotDao.findByIdIncludingRemoved(prevSnapshotId); - if ( prevSnapshot.getBackupSnapshotId() != null && swift == null) { - if (prevSnapshot.getVersion() != null && prevSnapshot.getVersion().equals("2.2")) { - prevBackupUuid = prevSnapshot.getBackupSnapshotId(); - prevSnapshotUuid = prevSnapshot.getPath(); - } - } else if ((prevSnapshot.getSwiftId() != null && swift != null) - || (prevSnapshot.getS3Id() != null && s3 != null)) { - prevBackupUuid = prevSnapshot.getBackupSnapshotId(); - prevSnapshotUuid = prevSnapshot.getPath(); - } - } - boolean isVolumeInactive = this.volumeMgr.volumeInactive(volume); - String vmName = this.volumeMgr.getVmNameOnVolume(volume); - StoragePool srcPool = (StoragePool)dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(secondaryStoragePoolUrl, dcId, accountId, volumeId, snapshot.getId(), volume.getPath(), srcPool, snapshotUuid, - snapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); - - if ( swift != null ) { - backupSnapshotCommand.setSwift(swift); - } else if (s3 != null) { - backupSnapshotCommand.setS3(s3); - } - - String backedUpSnapshotUuid = null; - // By default, assume failed. - boolean backedUp = false; - BackupSnapshotAnswer answer = (BackupSnapshotAnswer) sendToPool(volume, backupSnapshotCommand); - if (answer != null && answer.getResult()) { - backedUpSnapshotUuid = answer.getBackupSnapshotName(); - if (backedUpSnapshotUuid != null) { - backedUp = true; - } - } else if (answer != null) { - s_logger.error(answer.getDetails()); - } - // Update the status in all cases. 
- Transaction txn = Transaction.currentTxn(); - txn.start(); - - if (backedUp) { - if (backupSnapshotCommand.getSwift() != null ) { - snapshot.setSwiftId(swift.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } else if (backupSnapshotCommand.getS3() != null) { - snapshot.setS3Id(s3.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } else { - snapshot.setSecHostId(secHost.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } - if (answer.isFull()) { - snapshot.setPrevSnapshotId(0); - } - try { - stateTransitTo(snapshot, Snapshot.Event.OperationSucceeded); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } - - } else { - try { - stateTransitTo(snapshot, Snapshot.Event.OperationFailed); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } - s_logger.warn("Failed to back up snapshot on secondary storage, deleting the record from the DB"); - _snapshotDao.remove(snapshotId); - } - txn.commit(); - - return backedUp; - } finally { - if (snapshot != null) { - _snapshotDao.releaseFromLockTable(snapshotId); - } - } - - } - - private HostVO getSecHost(long volumeId, long dcId) { - Long id = _snapshotDao.getSecHostId(volumeId); - if ( id != null) { - return _hostDao.findById(id); - } - return this.templateMgr.getSecondaryStorageHost(dcId); + SnapshotVO preSnapshotVO = null; + if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) { + preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); + } + + return preSnapshotVO; } private Long getSnapshotUserId() { @@ -789,11 +468,15 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return userId; } - @Override - @DB - public void postCreateSnapshot(Long volumeId, Long snapshotId, Long policyId, boolean backedUp) { + private void 
postCreateSnapshot(Long volumeId, Long snapshotId, Long policyId) { Long userId = getSnapshotUserId(); SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + if (policyId != Snapshot.MANUAL_POLICY_ID) { + SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, true); + assert snapshotSchedule != null; + snapshotSchedule.setSnapshotId(snapshotId); + _snapshotScheduleDao.update(snapshotSchedule.getId(), snapshotSchedule); + } if (snapshot != null && snapshot.isRecursive()) { postCreateRecurringSnapshotForPolicy(userId, volumeId, snapshotId, policyId); @@ -803,7 +486,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, private void postCreateRecurringSnapshotForPolicy(long userId, long volumeId, long snapshotId, long policyId) { // Use count query SnapshotVO spstVO = _snapshotDao.findById(snapshotId); - Type type = spstVO.getType(); + Type type = spstVO.getRecurringType(); int maxSnaps = type.getMax(); List snaps = listSnapsforVolumeType(volumeId, type); @@ -815,7 +498,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); - if(deleteSnapshotInternal(oldSnapId)){ + if(deleteSnapshot(oldSnapId)){ //log Snapshot delete event ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE, "Successfully deleted oldest snapshot: " + oldSnapId, 0); } @@ -830,98 +513,38 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, Account caller = UserContext.current().getCaller(); // Verify parameters - Snapshot snapshotCheck = _snapshotDao.findById(snapshotId); + SnapshotInfo snapshotCheck = this.snapshotFactory.getSnapshot(snapshotId); if (snapshotCheck == null) { throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); } _accountMgr.checkAccess(caller, null, true, snapshotCheck); - if( !Snapshot.State.BackedUp.equals(snapshotCheck.getState() ) ) { - throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is not in BackedUp Status"); + SnapshotStrategy strategy = null; + for (SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snapshotCheck)) { + strategy = st; + break; + } + } + try { + boolean result = strategy.deleteSnapshot(snapshotCheck); + if (result) { + if (snapshotCheck.getState() == Snapshot.State.BackedUp) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), + snapshotCheck.getDataCenterId(), snapshotId, snapshotCheck.getName(), null, null, 0L, + snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); + } + _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.snapshot); + } + return result; + } catch (Exception e) { + s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } - - return deleteSnapshotInternal(snapshotId); } - @DB - private boolean 
deleteSnapshotInternal(Long snapshotId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Calling deleteSnapshot for snapshotId: " + snapshotId); - } - SnapshotVO lastSnapshot = null; - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot.getBackupSnapshotId() != null) { - List snaps = _snapshotDao.listByBackupUuid(snapshot.getVolumeId(), snapshot.getBackupSnapshotId()); - if (snaps != null && snaps.size() > 1) { - snapshot.setBackupSnapshotId(null); - _snapshotDao.update(snapshot.getId(), snapshot); - } - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - _snapshotDao.remove(snapshotId); - if (snapshot.getState() == Snapshot.State.BackedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshot.getAccountId(), - snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, 0L, - snapshot.getClass().getName(), snapshot.getUuid()); - } - _resourceLimitMgr.decrementResourceCount(snapshot.getAccountId(), ResourceType.snapshot); - txn.commit(); - - long lastId = snapshotId; - boolean destroy = false; - while (true) { - lastSnapshot = _snapshotDao.findNextSnapshot(lastId); - if (lastSnapshot == null) { - // if all snapshots after this snapshot in this chain are removed, remove those snapshots. - destroy = true; - break; - } - if (lastSnapshot.getRemoved() == null) { - // if there is one child not removed, then can not remove back up snapshot. 
- break; - } - lastId = lastSnapshot.getId(); - } - if (destroy) { - lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); - while (lastSnapshot.getRemoved() != null) { - String BackupSnapshotId = lastSnapshot.getBackupSnapshotId(); - if (BackupSnapshotId != null) { - List snaps = _snapshotDao.listByBackupUuid(lastSnapshot.getVolumeId(), BackupSnapshotId); - if (snaps != null && snaps.size() > 1) { - lastSnapshot.setBackupSnapshotId(null); - _snapshotDao.update(lastSnapshot.getId(), lastSnapshot); - } else { - if (destroySnapshotBackUp(lastId)) { - - } else { - s_logger.debug("Destroying snapshot backup failed " + lastSnapshot); - break; - } - } - } - lastId = lastSnapshot.getPrevSnapshotId(); - if (lastId == 0) { - break; - } - lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); - } - } - return true; - } - - @Override - @DB - public boolean destroySnapshot(long userId, long snapshotId, long policyId) { - return true; - } - - - @Override - public HostVO getSecondaryStorageHost(SnapshotVO snapshot) { + private HostVO getSecondaryStorageHost(SnapshotVO snapshot) { HostVO secHost = null; if( snapshot.getSwiftId() == null || snapshot.getSwiftId() == 0) { secHost = _hostDao.findById(snapshot.getSecHostId()); @@ -941,51 +564,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new CloudRuntimeException("Can not find secondary storage"); } - @Override - @DB - public boolean destroySnapshotBackUp(long snapshotId) { - boolean success = false; - String details; - SnapshotVO snapshot = _snapshotDao.findByIdIncludingRemoved(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Destroying snapshot " + snapshotId + " backup failed due to unable to find snapshot "); - } - String secondaryStoragePoolUrl = getSecondaryStorageURL(snapshot); - Long dcId = snapshot.getDataCenterId(); - Long accountId = snapshot.getAccountId(); - Long volumeId = snapshot.getVolumeId(); - - String backupOfSnapshot = 
snapshot.getBackupSnapshotId(); - if (backupOfSnapshot == null) { - return true; - } - SwiftTO swift = _swiftMgr.getSwiftTO(snapshot.getSwiftId()); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand( - swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId, - backupOfSnapshot, false); - Answer answer = _agentMgr.sendToSSVM(dcId, cmd); - - if ((answer != null) && answer.getResult()) { - snapshot.setBackupSnapshotId(null); - _snapshotDao.update(snapshotId, snapshot); - success = true; - details = "Successfully deleted snapshot " + snapshotId + " for volumeId: " + volumeId; - s_logger.debug(details); - } else if (answer != null) { - details = "Failed to destroy snapshot id:" + snapshotId + " for volume: " + volumeId + " due to "; - if (answer.getDetails() != null) { - details += answer.getDetails(); - } - s_logger.error(details); - } - return success; - - } - @Override public Pair, Integer> listSnapshots(ListSnapshotsCmd cmd) { Long volumeId = cmd.getVolumeId(); @@ -1161,7 +739,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, List snapshots = listSnapsforVolume(volumeId); for (SnapshotVO snapshot : snapshots) { if (_snapshotDao.expunge(snapshot.getId())) { - if (snapshot.getType() == Type.MANUAL) { + if (snapshot.getRecurringType() == Type.MANUAL) { _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.snapshot); } @@ -1271,8 +849,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return policy; } - @Override - public boolean deletePolicy(long userId, Long policyId) { + protected boolean deletePolicy(long userId, Long policyId) { SnapshotPolicyVO snapshotPolicy = _snapshotPolicyDao.findById(policyId); _snapSchedMgr.removeSchedule(snapshotPolicy.getVolumeId(), snapshotPolicy.getId()); return _snapshotPolicyDao.remove(policyId); @@ -1290,31 +867,16 @@ public class SnapshotManagerImpl 
extends ManagerBase implements SnapshotManager, return new Pair, Integer>(result.first(), result.second()); } - @Override - public List listPoliciesforVolume(long volumeId) { + + private List listPoliciesforVolume(long volumeId) { return _snapshotPolicyDao.listByVolumeId(volumeId); } - - @Override - public List listPoliciesforSnapshot(long snapshotId) { - SearchCriteria sc = PoliciesForSnapSearch.create(); - sc.setJoinParameters("policyRef", "snapshotId", snapshotId); - return _snapshotPolicyDao.search(sc, null); - } - - @Override - public List listSnapsforPolicy(long policyId, Filter filter) { - SearchCriteria sc = PolicySnapshotSearch.create(); - sc.setJoinParameters("policy", "policyId", policyId); - return _snapshotDao.search(sc, filter); - } - - @Override - public List listSnapsforVolume(long volumeId) { + + private List listSnapsforVolume(long volumeId) { return _snapshotDao.listByVolumeId(volumeId); } - public List listSnapsforVolumeType(long volumeId, Type type) { + private List listSnapsforVolumeType(long volumeId, Type type) { return _snapshotDao.listByVolumeIdType(volumeId, type); } @@ -1333,9 +895,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - /** - * {@inheritDoc} - */ @Override public List findRecurringSnapshotSchedule(ListRecurringSnapshotScheduleCmd cmd) { Long volumeId = cmd.getVolumeId(); @@ -1374,12 +933,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return snapshotSchedules; } - @Override - public SnapshotPolicyVO getPolicyForVolume(long volumeId) { - return _snapshotPolicyDao.findOneByVolume(volumeId); - } - - public Type getSnapshotType(Long policyId) { + private Type getSnapshotType(Long policyId) { if (policyId.equals(Snapshot.MANUAL_POLICY_ID)) { return Type.MANUAL; } else { @@ -1389,7 +943,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - public Type getSnapshotType(IntervalType intvType) { + private Type 
getSnapshotType(IntervalType intvType) { if (intvType.equals(IntervalType.HOURLY)) { return Type.HOURLY; } else if (intvType.equals(IntervalType.DAILY)) { @@ -1489,15 +1043,11 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, Type.DAILY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.daily"), DAILYMAX)); Type.WEEKLY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.weekly"), WEEKLYMAX)); Type.MONTHLY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.monthly"), MONTHLYMAX)); - _deltaSnapshotMax = NumbersUtil.parseInt(_configDao.getValue("snapshot.delta.max"), DELTAMAX); _totalRetries = NumbersUtil.parseInt(_configDao.getValue("total.retries"), 4); _pauseInterval = 2 * NumbersUtil.parseInt(_configDao.getValue("ping.interval"), 60); s_logger.info("Snapshot Manager is configured."); - _snapshotFsm = Snapshot.State.getStateMachine(); - _snapshotFsm.registerListener(new SnapshotStateListener()); - return true; } @@ -1558,24 +1108,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return success; } - - private boolean hostSupportSnapsthot(HostVO host) { - if (host.getHypervisorType() != HypervisorType.KVM) { - return true; - } - // Determine host capabilities - String caps = host.getCapabilities(); - - if (caps != null) { - String[] tokens = caps.split(","); - for (String token : tokens) { - if (token.contains("snapshot")) { - return true; - } - } - } - return false; - } @Override public boolean canOperateOnVolume(Volume volume) { @@ -1586,8 +1118,4 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } return true; } - - protected boolean stateTransitTo(Snapshot snapshot, Snapshot.Event e) throws NoTransitionException { - return _snapshotFsm.transitTo(snapshot, e, null, _snapshotDao); - } } From 0f532c4b3a84f8b0d098a55e22c9a3669c99eeb9 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 13 Feb 2013 17:08:39 -0800 Subject: [PATCH 
116/486] rebase to master --- api/src/com/cloud/storage/Snapshot.java | 2 +- core/src/com/cloud/storage/SnapshotVO.java | 2 +- .../storage/snapshot/SnapshotStateMachineManagerImpl.java | 2 +- .../storage/snapshot/strategy/AncientSnasphotStrategy.java | 5 +++++ .../src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java | 7 +++++-- 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/src/com/cloud/storage/Snapshot.java b/api/src/com/cloud/storage/Snapshot.java index 9c2217e0972..f71265cd230 100644 --- a/api/src/com/cloud/storage/Snapshot.java +++ b/api/src/com/cloud/storage/Snapshot.java @@ -19,7 +19,6 @@ package com.cloud.storage; import java.util.Date; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.fsm.StateObject; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; @@ -55,6 +54,7 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, } public enum State { + Allocated, Creating, CreatedOnPrimary, BackingUp, diff --git a/core/src/com/cloud/storage/SnapshotVO.java b/core/src/com/cloud/storage/SnapshotVO.java index 1bb0854e006..78b96ec9779 100644 --- a/core/src/com/cloud/storage/SnapshotVO.java +++ b/core/src/com/cloud/storage/SnapshotVO.java @@ -117,7 +117,7 @@ public class SnapshotVO implements Snapshot { this.snapshotType = snapshotType; this.typeDescription = typeDescription; this.size = size; - this.state = State.Creating; + this.state = State.Allocated; this.prevSnapshotId = 0; this.hypervisorType = hypervisorType; this.version = "2.2"; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java index a20a2c8b876..ad65deb43fd 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java +++ 
b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java @@ -20,7 +20,7 @@ SnapshotStateMachineManager { @Inject protected SnapshotDao snapshotDao; public SnapshotStateMachineManagerImpl() { - stateMachine.addTransition(null, Event.CreateRequested, Snapshot.State.Creating); + stateMachine.addTransition(Snapshot.State.Allocated, Event.CreateRequested, Snapshot.State.Creating); stateMachine.addTransition(Snapshot.State.Creating, Event.OperationSucceeded, Snapshot.State.CreatedOnPrimary); stateMachine.addTransition(Snapshot.State.Creating, Event.OperationNotPerformed, Snapshot.State.BackedUp); stateMachine.addTransition(Snapshot.State.Creating, Event.OperationFailed, Snapshot.State.Error); diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java index 2e3b90f0490..8d72be2098f 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java @@ -67,6 +67,9 @@ import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Component public class AncientSnasphotStrategy implements SnapshotStrategy { @@ -103,6 +106,8 @@ public class AncientSnasphotStrategy implements SnapshotStrategy { DataMotionService motionSrv; @Inject ObjectInDataStoreManager objInStoreMgr; + @Inject + VMSnapshotDao _vmSnapshotDao; @Override diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index a0335634113..01f3dd3afe2 
100644 --- a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -28,6 +28,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -61,7 +63,7 @@ import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; -import com.cloud.storage.StoragePoolVO; +import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; @@ -115,6 +117,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana @Inject StoragePoolDao _storagePoolDao; @Inject SnapshotDao _snapshotDao; @Inject VirtualMachineManager _itMgr; + @Inject DataStoreManager dataStoreMgr; @Inject ConfigurationDao _configDao; int _vmSnapshotMax; StateMachine2 _vmSnapshottateMachine ; @@ -393,7 +396,7 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana List volumeVos = _volumeDao.findByInstance(vmId); for (VolumeVO volume : volumeVos) { - StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId()); + StoragePool pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); VolumeTO volumeTO = new VolumeTO(volume, pool); volumeTOs.add(volumeTO); } From fb4036ece408842fd86bbb76b4474fb7336b5368 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 13 Feb 2013 17:32:08 -0800 Subject: [PATCH 117/486] add test folder for snpahost project --- engine/storage/snapshot/pom.xml | 8 ++- .../resource/SnapshotManagerTestContext.xml | 42 ++++++++++++++++ 
.../test/src/SnapshotDataFactoryTest.java | 50 +++++++++++++++++++ 3 files changed, 98 insertions(+), 2 deletions(-) create mode 100644 engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml create mode 100644 engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index 723c21081ad..211cdac574e 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -44,7 +44,11 @@ install - src - test + ${project.basedir}/test + + + ${project.basedir}/test/resource + + diff --git a/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml b/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml new file mode 100644 index 00000000000..d99c2e2dbac --- /dev/null +++ b/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java b/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java new file mode 100644 index 00000000000..e722ab55c70 --- /dev/null +++ b/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package src; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.cloud.utils.component.ComponentContext; + +import junit.framework.TestCase; + +//@RunWith(SpringJUnit4ClassRunner.class) +//@ContextConfiguration(locations = "classpath:/SnapshotManagerTestContext.xml") +public class SnapshotDataFactoryTest extends TestCase { + //@Inject SnapshotDataFactory snapshotFactory; + + @Before + public void setup() throws Exception { + //ComponentContext.initComponentsLifeCycle(); + + } + + @Test + public void testGestSnapshot() { + //snapshotFactory.getSnapshot(snapshotId); + } + +} From ae902590d397fcbbad2901426b56f3151f4dcd46 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Thu, 14 Feb 2013 13:52:37 -0800 Subject: [PATCH 118/486] fix snapshot --- .../snapshot/SnapshotDataFactoryImpl.java | 7 ++- .../SnapshotStateMachineManagerImpl.java | 2 +- .../strategy/AncientSnasphotStrategy.java | 48 ++++++++++++++----- .../AncientPrimaryDataStoreDriverImpl.java | 39 +++++++++------ .../storage/snapshot/SnapshotManagerImpl.java | 4 ++ 5 files changed, 71 insertions(+), 29 deletions(-) diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index 5af5260c340..fa7772a979d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -74,6 +74,11 @@ public class SnapshotDataFactoryImpl 
implements SnapshotDataFactory { @Override public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { - throw new CloudRuntimeException("not implemented yet"); + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(obj.getId()); + if (snapshot == null) { + throw new CloudRuntimeException("Can't find snapshot: " + obj.getId()); + } + SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); + return so; } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java index ad65deb43fd..c2213b67cf7 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java @@ -26,7 +26,7 @@ SnapshotStateMachineManager { stateMachine.addTransition(Snapshot.State.Creating, Event.OperationFailed, Snapshot.State.Error); stateMachine.addTransition(Snapshot.State.CreatedOnPrimary, Event.BackupToSecondary, Snapshot.State.BackingUp); stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationSucceeded, Snapshot.State.BackedUp); - stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationFailed, Snapshot.State.Error); + stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationFailed, Snapshot.State.CreatedOnPrimary); stateMachine.registerListener(new SnapshotStateListener()); } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java index 8d72be2098f..b2a58a4b9f1 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java +++ 
b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java @@ -180,7 +180,10 @@ public class AncientSnasphotStrategy implements SnapshotStrategy { try { SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); - String preSnapshotPath = preSnapshotVO.getPath(); + String preSnapshotPath = null; + if (preSnapshotVO != null) { + preSnapshotPath = preSnapshotVO.getPath(); + } SnapshotVO snapshotVO = this.snapshotDao.findById(snapshot.getId()); // The snapshot was successfully created if (preSnapshotPath != null && preSnapshotPath.equals(result.getPath())) { @@ -238,6 +241,11 @@ public class AncientSnasphotStrategy implements SnapshotStrategy { } catch (Exception e) { s_logger.debug("Failed to create snapshot: ", e); snapResult.setResult(e.toString()); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change snapshot state: " + e1.toString()); + } } future.complete(snapResult); @@ -263,19 +271,30 @@ public class AncientSnasphotStrategy implements SnapshotStrategy { s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); throw new CloudRuntimeException("Failed to update snapshot state due to " + nte.getMessage()); } + AsyncCallFuture future = new AsyncCallFuture(); - - CreateSnapshotContext context = new CreateSnapshotContext( - null, volume, snapshot, future); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher - .create(this); - caller.setCallback( - caller.getTarget().createSnapshotAsyncCallback(null, null)) - .setContext(context); - PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)volume.getDataStore().getDriver(); - - primaryStore.takeSnapshot(snapshot, caller); + try { + CreateSnapshotContext context = new CreateSnapshotContext( + null, volume, snapshot, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + 
caller.getTarget().createSnapshotAsyncCallback(null, null)) + .setContext(context); + PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)volume.getDataStore().getDriver(); + primaryStore.takeSnapshot(snapshot, caller); + } catch (Exception e) { + s_logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change state for event: OperationFailed" , e); + } + throw new CloudRuntimeException("Failed to take snapshot" + snapshot.getId()); + } + SnapshotResult result; + try { result = future.get(); if (result.isFailed()) { @@ -390,6 +409,11 @@ public class AncientSnasphotStrategy implements SnapshotStrategy { } catch (Exception e) { s_logger.debug("Failed to copy snapshot", e); result.setResult("Failed to copy snapshot:" +e.toString()); + try { + snapObj.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change state: " + e1.toString()); + } future.complete(result); } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java index 657ba80e971..440cb8c5ea0 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java @@ -288,22 +288,31 @@ public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver @Override public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { - VolumeInfo volume = snapshot.getBaseVolume(); - String vmName = this.volumeMgr.getVmNameOnVolume(volume); - SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); - 
StoragePool srcPool = (StoragePool)volume.getDataStore(); + CreateCmdResult result = null; + try { + VolumeInfo volume = snapshot.getBaseVolume(); + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); + String parentSnapshotPath = null; + if (preSnapshotVO != null) { + parentSnapshotPath = preSnapshotVO.getPath(); + } + StoragePool srcPool = (StoragePool)volume.getDataStore(); + + ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshot.getId(), volume.getPath(), srcPool, parentSnapshotPath, snapshot.getName(), vmName); - ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshot.getId(), volume.getPath(), srcPool, preSnapshotVO.getPath(), snapshot.getName(), vmName); - - ManageSnapshotAnswer answer = (ManageSnapshotAnswer) this.snapshotMgr.sendToPool(volume, cmd); - - CreateCmdResult result = null; - if ((answer != null) && answer.getResult()) { - result = new CreateCmdResult(answer.getSnapshotPath(), null); - } else { - result = new CreateCmdResult(null, null); - } - + ManageSnapshotAnswer answer = (ManageSnapshotAnswer) this.snapshotMgr.sendToPool(volume, cmd); + + if ((answer != null) && answer.getResult()) { + result = new CreateCmdResult(answer.getSnapshotPath(), null); + } else { + result = new CreateCmdResult(null, null); + } + } catch (Exception e) { + s_logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + result = new CreateCmdResult(null, null); + result.setResult(e.toString()); + } callback.complete(result); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 7df99d67be2..ed48bd1b45b 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -351,6 +351,10 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new 
CloudRuntimeException("Can't find snapshot:" + snapshotId); } + if (snapshot.getState() == Snapshot.State.BackedUp) { + return snapshot; + } + SnapshotStrategy strategy = null; for (SnapshotStrategy st : snapshotStrategies) { if (st.canHandle(snapshot)) { From 9b5774363d41c82bf7430bd328970d4c85b2cf55 Mon Sep 17 00:00:00 2001 From: Hugo Trippaers Date: Fri, 15 Feb 2013 15:50:09 +0100 Subject: [PATCH 119/486] Revert "Make sure initial log messages go somewhere for agent and usage" The dreaded wrong-tree-active-syndrome This reverts commit 9589f9fe5f401df90f7b356c9186d012c8f887ab. --- packaging/centos63/cloud-agent.rc | 5 ++--- packaging/centos63/cloud-usage.rc | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/packaging/centos63/cloud-agent.rc b/packaging/centos63/cloud-agent.rc index 19782b78ce1..acf81316479 100755 --- a/packaging/centos63/cloud-agent.rc +++ b/packaging/centos63/cloud-agent.rc @@ -31,8 +31,7 @@ whatami=cloudstack-agent SHORTNAME="$whatami" PIDFILE=/var/run/"$whatami".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGDIR=/var/log/cloudstack/agent -LOGFILE=${LOGFIR}/agent.log +LOGFILE=/var/log/cloudstack/agent/agent.log PROGNAME="Cloud Agent" CLASS="com.cloud.agent.AgentShell" JSVC=`which jsvc 2>/dev/null`; @@ -68,7 +67,7 @@ export CLASSPATH="$SCP:$DCP:$ACP:$JCP:/etc/cloudstack/agent:/usr/share/cloudstac start() { echo -n $"Starting $PROGNAME: " if hostname --fqdn >/dev/null 2>&1 ; then - $JSVC -cp "$CLASSPATH" -outfile "${LOGDIR}/cloudstack-agent.out" -errfile "${LOGDIR}/cloudstack-agent.err" -pidfile "$PIDFILE" $CLASS + $JSVC -cp "$CLASSPATH" -pidfile "$PIDFILE" $CLASS RETVAL=$? 
echo else diff --git a/packaging/centos63/cloud-usage.rc b/packaging/centos63/cloud-usage.rc index dc97cd36f89..8bee5aeb6a0 100755 --- a/packaging/centos63/cloud-usage.rc +++ b/packaging/centos63/cloud-usage.rc @@ -35,8 +35,7 @@ SHORTNAME="cloudstack-usage" PIDFILE=/var/run/"$SHORTNAME".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGDIR=/var/log/cloudstack/usage -LOGFILE=${LOGDIR}/usage.log +LOGFILE=/var/log/cloudstack/usage/usage.log PROGNAME="CloudStack Usage Monitor" CLASS="com.cloud.usage.UsageServer" PROG="jsvc" @@ -80,7 +79,7 @@ start() { echo -n "Starting $PROGNAME" "$SHORTNAME" - if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -outfile "${LOGDIR}/cloudstack-usage.out" -errfile "${LOGDIR}/cloudstack-usage.err" -pidfile "$PIDFILE" -user "$USER" -Dpid=$$ $CLASS + if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" -errfile SYSLOG -Dpid=$$ $CLASS RETVAL=$? then rc=0 From 8eca74134a4be332ca15fadd905f16a69bfdbeb8 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Wed, 20 Feb 2013 13:42:04 -0800 Subject: [PATCH 120/486] rebase to master --- framework/api/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/framework/api/pom.xml b/framework/api/pom.xml index 3212d7c2644..5260ebc4bf6 100644 --- a/framework/api/pom.xml +++ b/framework/api/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-utils - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT From 18e496b057e59074476a0c65f4e19fb54c620720 Mon Sep 17 00:00:00 2001 From: Edison Su Date: Thu, 21 Feb 2013 11:14:35 -0800 Subject: [PATCH 121/486] rebase to master --- .../test/com/cloud/vm/UserVmManagerTest.java | 11 +++-- setup/db/create-schema.sql | 1 + setup/db/db/schema-40to410.sql | 40 --------------- setup/db/db/schema-410to420.sql | 49 +++++++++++++++++++ 4 files changed, 56 insertions(+), 45 deletions(-) diff --git 
a/server/test/com/cloud/vm/UserVmManagerTest.java b/server/test/com/cloud/vm/UserVmManagerTest.java index 07cad479771..0795a359fdd 100755 --- a/server/test/com/cloud/vm/UserVmManagerTest.java +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -35,6 +35,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; @@ -53,7 +54,7 @@ public class UserVmManagerTest { @Spy UserVmManagerImpl _userVmMgr = new UserVmManagerImpl(); @Mock VirtualMachineManager _itMgr; - @Mock StorageManager _storageMgr; + @Mock VolumeManager _storageMgr; @Mock Account _account; @Mock AccountManager _accountMgr; @Mock AccountDao _accountDao; @@ -76,7 +77,7 @@ public class UserVmManagerTest { _userVmMgr._templateDao = _templateDao; _userVmMgr._volsDao = _volsDao; _userVmMgr._itMgr = _itMgr; - _userVmMgr._storageMgr = _storageMgr; + _userVmMgr.volumeMgr = _storageMgr; _userVmMgr._accountDao = _accountDao; _userVmMgr._userDao = _userDao; _userVmMgr._accountMgr = _accountMgr; @@ -116,7 +117,7 @@ public class UserVmManagerTest { doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); when(_volumeMock.getId()).thenReturn(3L); doNothing().when(_volsDao).detachVolume(anyLong()); - when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); _userVmMgr.restoreVMInternal(_account, _vmMock, null); @@ -141,7 +142,7 @@ public class UserVmManagerTest { doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); when(_volumeMock.getId()).thenReturn(3L); doNothing().when(_volsDao).detachVolume(anyLong()); - when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + 
when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); _userVmMgr.restoreVMInternal(_account, _vmMock, null); @@ -171,7 +172,7 @@ public class UserVmManagerTest { doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); when(_volumeMock.getId()).thenReturn(3L); doNothing().when(_volsDao).detachVolume(anyLong()); - when(_storageMgr.destroyVolume(_volumeMock)).thenReturn(true); + when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d"); _userVmMgr.restoreVMInternal(_account, _vmMock, 14L); diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 7361681da47..1556913812d 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -1049,6 +1049,7 @@ CREATE TABLE `cloud`.`vm_template` ( `source_template_id` bigint unsigned COMMENT 'Id of the original template, if this template is created from snapshot', `template_tag` varchar(255) COMMENT 'template tag', `sort_key` int(32) NOT NULL default 0 COMMENT 'sort key used for customising sort method', + `image_data_store_id` bigint unsigned, PRIMARY KEY (`id`), INDEX `i_vm_template__removed`(`removed`), CONSTRAINT `uc_vm_template__uuid` UNIQUE (`uuid`) diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 47b7cbe14d7..774a76700d1 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -21,7 +21,6 @@ use cloud; -alter table vm_template add image_data_store_id bigint unsigned; alter table vm_template add size bigint unsigned; alter table vm_template add state varchar(255); alter table vm_template add update_count bigint unsigned; @@ -58,45 +57,6 @@ alter table cluster add column owner varchar(255); alter table cluster add column created datetime COMMENT 'date created'; alter table cluster add column lastUpdated datetime COMMENT 'last updated'; alter table cluster add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -CREATE 
TABLE `cloud`.`object_datastore_ref` ( - `id` bigint unsigned NOT NULL auto_increment, - `datastore_id` bigint unsigned NOT NULL, - `datastore_role` varchar(255) NOT NULL, - `object_id` bigint unsigned NOT NULL, - `object_type` varchar(255) NOT NULL, - `created` DATETIME NOT NULL, - `last_updated` DATETIME, - `job_id` varchar(255), - `download_pct` int(10) unsigned, - `download_state` varchar(255), - `error_str` varchar(255), - `local_path` varchar(255), - `install_path` varchar(255), - `size` bigint unsigned COMMENT 'the size of the template on the pool', - `state` varchar(255) NOT NULL, - `update_count` bigint unsigned NOT NULL, - `updated` DATETIME, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`data_store_provider` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', - `uuid` varchar(255) NOT NULL COMMENT 'uuid of primary data store provider', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`image_data_store` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of data store', - `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', - `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', - `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', - `scope` varchar(255) COMMENT 'scope of data store', - `uuid` varchar(255) COMMENT 'uuid of data store', - PRIMARY KEY(`id`), - CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`vm_compute_tags` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 0335f2a0781..eed739025c0 100644 --- 
a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -24,6 +24,55 @@ UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `h INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); +alter table template_host_ref add state varchar(255); +alter table template_host_ref add update_count bigint unsigned; +alter table template_host_ref add updated datetime; +alter table volume_host_ref add state varchar(255); +alter table volume_host_ref add update_count bigint unsigned; +alter table volume_host_ref add updated datetime; +alter table template_spool_ref add updated datetime; +CREATE TABLE `cloud`.`object_datastore_ref` ( + `id` bigint unsigned NOT NULL auto_increment, + `datastore_uuid` varchar(255) NOT NULL, + `datastore_role` varchar(255) NOT NULL, + `object_uuid` varchar(255) NOT NULL, + `object_type` varchar(255) NOT NULL, + `created` DATETIME NOT NULL, + `last_updated` DATETIME, + `job_id` varchar(255), + `download_pct` int(10) unsigned, + `download_state` varchar(255), + `url` varchar(255), + `format` varchar(255), + `checksum` varchar(255), + `error_str` varchar(255), + `local_path` varchar(255), + `install_path` varchar(255), + `size` bigint unsigned COMMENT 'the size of the template on the pool', + `state` varchar(255) NOT NULL, + `update_count` bigint unsigned NOT NULL, + `updated` DATETIME, + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`data_store_provider` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', 
+ `uuid` varchar(255) NOT NULL COMMENT 'uuid of primary data store provider', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`image_data_store` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of data store', + `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', + `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', + `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', + `scope` varchar(255) COMMENT 'scope of data store', + `uuid` varchar(255) COMMENT 'uuid of data store', + PRIMARY KEY(`id`), + CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud`.`service_offering` ADD COLUMN `is_volatile` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk'; From d0d6ba9781aa9328ccb621d0f15d07cd894d7a03 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Thu, 21 Feb 2013 12:23:27 -0800 Subject: [PATCH 122/486] Add new dropdowns to primary storage form Adds the following new fields, to support targeted storage pool: -Scope drop-down (zone/cluster/host) -Host list drop-down --- ui/scripts/system.js | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index e51bf90ba81..3c52442308c 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -8695,6 +8695,20 @@ createForm: { title: 'label.add.primary.storage', fields: { + scope: { + label: 'label.scope', + select: function(args) { + var scope = [ + { id: 'zone-wide', description: _l('label.zone.wide') }, + { id: 'cluster', description: _l('label.cluster') }, + { id: 'host', description: _l('label.host') } + ]; + + args.response.success({ + data: 
scope + }); + } + }, zoneid: { label: 'Zone', docID: 'helpPrimaryStorageZone', @@ -8768,6 +8782,29 @@ } }, + hostId: { + label: 'label.host', + validation: { required: true }, + dependsOn: 'clusterId', + select: function(args) { + $.ajax({ + url: createURL('listHosts'), + data: { + clusterid: args.clusterId + }, + success: function(json) { + var hosts = json.listhostsresponse.host ? + json.listhostsresponse.host : [] + args.response.success({ + data: $.map(hosts, function(host) { + return { id: host.id, description: host.name } + }) + }); + } + }); + } + }, + name: { label: 'label.name', docID: 'helpPrimaryStorageName', From 6ef7928a9c342a99cfec2d471da3005e99a7afac Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 22 Feb 2013 02:53:26 +0530 Subject: [PATCH 123/486] zone wide primary storage --- ui/scripts/system.js | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 3c52442308c..375d7778bdc 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -8707,6 +8707,37 @@ args.response.success({ data: scope }); + + args.$select.change(function() { + var $form = $(this).closest('form'); + var scope = $(this).val(); + + if(scope == 'zone-wide'){ + $form.find('.form-item[rel=podId]').hide(); + $form.find('.form-item[rel=clusterId]').hide(); + $form.find('.form-item[rel=hostId]').hide(); + + + } + + else if(scope == 'cluster'){ + + $form.find('.form-item[rel=hostId]').hide(); + $form.find('.form-item[rel=podId]').css('display', 'inline-block'); + $form.find('.form-item[rel=clusterId]').css('display', 'inline-block'); + + + } + + else if(scope == 'host'){ + $form.find('.form-item[rel=podId]').css('display', 'inline-block'); + $form.find('.form-item[rel=clusterId]').css('display', 'inline-block'); + $form.find('.form-item[rel=hostId]').css('display', 'inline-block'); + + } + + }) + } }, zoneid: { @@ -8867,6 +8898,7 @@ var protocol = $(this).val(); if(protocol == null) return; + 
if(protocol == "nfs") { //$("#add_pool_server_container", $dialogAddPool).show(); From 6cc1123a3e8d1a3b4ac970f1498a3d2d22ce0c22 Mon Sep 17 00:00:00 2001 From: Chip Childers Date: Thu, 21 Feb 2013 16:29:05 -0500 Subject: [PATCH 124/486] Correcting license headers that broke the build Signed-off-by: Chip Childers --- .../api/storage/SnapshotStrategy.java | 17 ++++++++++++++++ .../snapshot/SnapshotStateMachineManager.java | 17 ++++++++++++++++ .../SnapshotStateMachineManagerImpl.java | 17 ++++++++++++++++ .../strategy/AncientSnasphotStrategy.java | 17 ++++++++++++++++ .../storage/datastore/manager/data model.ucls | 20 ++++++++++++++++++- .../cloud/storage/ResizeVolumePayload.java | 17 ++++++++++++++++ test/selenium/lib/Global_Locators.py | 17 ++++++++++++++++ test/selenium/lib/initialize.py | 16 +++++++++++++++ test/selenium/smoke/Login_and_Accounts.py | 19 +++++++++++++++++- test/selenium/smoke/Service_Offering.py | 19 +++++++++++++++++- test/selenium/smoke/TemplatesAndISO.py | 19 +++++++++++++++++- test/selenium/smoke/VM_lifeCycle.py | 19 +++++++++++++++++- test/selenium/smoke/main.py | 17 ++++++++++++++++ 13 files changed, 226 insertions(+), 5 deletions(-) diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java index f854f6bb5d4..e9492c4afc6 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + package org.apache.cloudstack.engine.subsystem.api.storage; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java index 1c3ac28d2f7..c6057704cd8 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.snapshot; import com.cloud.storage.Snapshot.Event; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java index c2213b67cf7..aa1cf684d7a 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.snapshot; import javax.inject.Inject; diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java index b2a58a4b9f1..5cbe2243faa 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.snapshot.strategy; import java.util.List; diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls index f1590397b8f..8d7a696957d 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls @@ -1,3 +1,21 @@ + - \ No newline at end of file + diff --git a/server/src/com/cloud/storage/ResizeVolumePayload.java b/server/src/com/cloud/storage/ResizeVolumePayload.java index 472d627dd4a..205fafa4bb0 100644 --- a/server/src/com/cloud/storage/ResizeVolumePayload.java +++ b/server/src/com/cloud/storage/ResizeVolumePayload.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package com.cloud.storage; public class ResizeVolumePayload { diff --git a/test/selenium/lib/Global_Locators.py b/test/selenium/lib/Global_Locators.py index ec3de57d32b..b2d93cd997e 100644 --- a/test/selenium/lib/Global_Locators.py +++ b/test/selenium/lib/Global_Locators.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ''' Variable Names are as follows Logical Page Descriptor_____What Element Represents and/or where it is_____LocatorType diff --git a/test/selenium/lib/initialize.py b/test/selenium/lib/initialize.py index 6da7166e9ac..e8cc49adff4 100644 --- a/test/selenium/lib/initialize.py +++ b/test/selenium/lib/initialize.py @@ -1,3 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. ''' This will help pass webdriver (Browser instance) across our test cases. ''' diff --git a/test/selenium/smoke/Login_and_Accounts.py b/test/selenium/smoke/Login_and_Accounts.py index 8ff17f466b9..c5132d9754c 100644 --- a/test/selenium/smoke/Login_and_Accounts.py +++ b/test/selenium/smoke/Login_and_Accounts.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ import sys, os sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) @@ -233,4 +250,4 @@ class tearAcc(unittest.TestCase): -################################################################################################################################################ \ No newline at end of file +################################################################################################################################################ diff --git a/test/selenium/smoke/Service_Offering.py b/test/selenium/smoke/Service_Offering.py index fa9b449e703..66478e60414 100644 --- a/test/selenium/smoke/Service_Offering.py +++ b/test/selenium/smoke/Service_Offering.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ import sys, os sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) @@ -406,4 +423,4 @@ class Compute_offering_Delete(unittest.TestCase): def tearDown(self): - self.assertEqual([], self.verificationErrors) \ No newline at end of file + self.assertEqual([], self.verificationErrors) diff --git a/test/selenium/smoke/TemplatesAndISO.py b/test/selenium/smoke/TemplatesAndISO.py index 3bb6f5bf699..120c8d10d9f 100644 --- a/test/selenium/smoke/TemplatesAndISO.py +++ b/test/selenium/smoke/TemplatesAndISO.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + ''' ISO PART YET TO BE ADDED:: remove this after adding it. ''' @@ -224,4 +241,4 @@ class Template_Delete(unittest.TestCase): def tearDown(self): - self.assertEqual([], self.verificationErrors) \ No newline at end of file + self.assertEqual([], self.verificationErrors) diff --git a/test/selenium/smoke/VM_lifeCycle.py b/test/selenium/smoke/VM_lifeCycle.py index cd17f86332e..845a5cb316f 100644 --- a/test/selenium/smoke/VM_lifeCycle.py +++ b/test/selenium/smoke/VM_lifeCycle.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + import sys, os sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) @@ -593,4 +610,4 @@ class stopVM(unittest.TestCase): self.assertEqual([], self.verificationErrors) -######################################################################################################################################################### \ No newline at end of file +######################################################################################################################################################### diff --git a/test/selenium/smoke/main.py b/test/selenium/smoke/main.py index d7835317496..86bb9308c2f 100644 --- a/test/selenium/smoke/main.py +++ b/test/selenium/smoke/main.py @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + import unittest import HTMLTestRunner import xmlrunner From 31c26beb7f29c57012b22920014f5b18ef62f83f Mon Sep 17 00:00:00 2001 From: Pranav Saxena Date: Fri, 22 Feb 2013 03:28:01 +0530 Subject: [PATCH 125/486] zone wide primary storage server side changes --- ui/scripts/system.js | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 375d7778bdc..997bd53f9af 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -8654,7 +8654,8 @@ name: { label: 'label.name' }, ipaddress: { label: 'label.server' }, path: { label: 'label.path' }, - clustername: { label: 'label.cluster'} + clustername: { label: 'label.cluster'}, + scope:{label:'Scope'} }, dataProvider: function(args) { @@ -9218,9 +9219,24 @@ action: function(args) { var array1 = []; + array1.push("&scope=" + todb(args.data.scope)); + array1.push("&zoneid=" + args.data.zoneid); - array1.push("&podId=" + args.data.podId); + + if(args.data.scope == 'cluster'){ + + array1.push("&podid=" + args.data.podId); array1.push("&clusterid=" + args.data.clusterId); + + } + + if(args.data.scope == 'host'){ + array1.push("&podid=" + args.data.podId); + array1.push("&clusterid=" + args.data.clusterId); + array1.push("&hostid=" + args.data.hostId); + + } + array1.push("&name=" + todb(args.data.name)); var server = args.data.server; From d84a8601e532d3626349341694cfe68c8f053cb1 Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Thu, 21 Feb 2013 14:01:52 -0800 Subject: [PATCH 126/486] CLOUDSTACK-1362: EC2 dns-name filter support for EC2 
describeInstances API is broken Mapped CS UserVm 'name' propertyto the dns-name value to filter out the results. --- .../cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java index e0aae7364d9..b5b7c7840df 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java @@ -50,6 +50,7 @@ public class EC2InstanceFilterSet { filterTypes.put( "group-id", "string" ); filterTypes.put( "tag-key", "string" ); filterTypes.put( "tag-value", "string" ); + filterTypes.put( "dns-name", "string" ); } @@ -184,6 +185,8 @@ public class EC2InstanceFilterSet { } } return false; + }else if (filterName.equalsIgnoreCase( "dns-name" )){ + return containsString( vm.getName(), valueSet ); } else return false; } From 667a1d18a4a1d88e9d2c7d6585f5c25ebf032575 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 21 Feb 2013 14:42:42 -0800 Subject: [PATCH 127/486] CLOUDSTACK-1288: Fix regression on remove LB rules It's a regression caused by: commit 69d24545c4f0e316dba534b6d5d99308795db6d4 Author: Murali Reddy Date: Fri Feb 8 18:50:18 2013 +0530 CLOUDSTACK-1208: Failed to shutdown guest network Firewall manager was being used instead of LoadBalancingRules manager while applying the load balancer rules in shut down network. Changing it to LoadBalancingRules manager. The parameter transferre to applyRules() should be lbRules, rather than the lbs object. 
--- server/src/com/cloud/network/NetworkManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index f5868658751..2e19fa11d09 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -3070,7 +3070,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } try { - if (!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbs)) { + if (!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbRules)) { s_logger.warn("Failed to cleanup lb rules as a part of shutdownNetworkRules"); success = false; } From fe6fc0a20e106735b522e204ebae25527611434a Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Mon, 14 Jan 2013 10:09:40 -0800 Subject: [PATCH 128/486] Remove old screenshot files from UI folder --- ui/images/sample-wizard/step1.png | Bin 52950 -> 0 bytes ui/images/sample-wizard/step2.png | Bin 42277 -> 0 bytes ui/images/sample-wizard/step3.png | Bin 44303 -> 0 bytes ui/images/sample-wizard/step4.png | Bin 37188 -> 0 bytes ui/images/sample-wizard/step5.png | Bin 41363 -> 0 bytes ui/images/sample-wizard/step6.png | Bin 39897 -> 0 bytes ui/images/screens/Dashboard.jpg | Bin 69188 -> 0 bytes ui/images/screens/Dashboard2.jpg | Bin 150035 -> 0 bytes ui/images/screens/Events-Details.jpg | Bin 35278 -> 0 bytes ui/images/screens/Events.jpg | Bin 37224 -> 0 bytes ui/images/screens/MultiEdit.jpg | Bin 14080 -> 0 bytes ui/images/screens/Network-Details.jpg | Bin 40540 -> 0 bytes ui/images/screens/Network.jpg | Bin 39980 -> 0 bytes ui/images/screens/ProjectDashboard.png | Bin 97769 -> 0 bytes ui/images/screens/Projects-Details.jpg | Bin 63045 -> 0 bytes ui/images/screens/Projects.jpg | Bin 37662 -> 0 bytes ui/images/screens/Storage-Details.jpg | Bin 37152 -> 0 bytes ui/images/screens/Storage.jpg | Bin 39991 -> 0 bytes ui/images/screens/Templates-Details.jpg | 
Bin 37459 -> 0 bytes ui/images/screens/Templates.jpg | Bin 41406 -> 0 bytes ui/images/screens/ZoneWizard-AddCluster.jpg | Bin 13883 -> 0 bytes ui/images/screens/ZoneWizard-AddHost.jpg | Bin 21496 -> 0 bytes .../screens/ZoneWizard-AddPrimaryStorage.jpg | Bin 19489 -> 0 bytes .../screens/ZoneWizard-AddSecondaryStorage.jpg | Bin 11123 -> 0 bytes .../screens/ZoneWizard-SetupGuestTraffic.jpg | Bin 15217 -> 0 bytes .../screens/ZoneWizard-StorageTraffic.jpg | Bin 41681 -> 0 bytes 26 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 ui/images/sample-wizard/step1.png delete mode 100644 ui/images/sample-wizard/step2.png delete mode 100644 ui/images/sample-wizard/step3.png delete mode 100644 ui/images/sample-wizard/step4.png delete mode 100644 ui/images/sample-wizard/step5.png delete mode 100644 ui/images/sample-wizard/step6.png delete mode 100644 ui/images/screens/Dashboard.jpg delete mode 100644 ui/images/screens/Dashboard2.jpg delete mode 100644 ui/images/screens/Events-Details.jpg delete mode 100644 ui/images/screens/Events.jpg delete mode 100644 ui/images/screens/MultiEdit.jpg delete mode 100644 ui/images/screens/Network-Details.jpg delete mode 100644 ui/images/screens/Network.jpg delete mode 100644 ui/images/screens/ProjectDashboard.png delete mode 100644 ui/images/screens/Projects-Details.jpg delete mode 100644 ui/images/screens/Projects.jpg delete mode 100644 ui/images/screens/Storage-Details.jpg delete mode 100644 ui/images/screens/Storage.jpg delete mode 100644 ui/images/screens/Templates-Details.jpg delete mode 100644 ui/images/screens/Templates.jpg delete mode 100644 ui/images/screens/ZoneWizard-AddCluster.jpg delete mode 100644 ui/images/screens/ZoneWizard-AddHost.jpg delete mode 100644 ui/images/screens/ZoneWizard-AddPrimaryStorage.jpg delete mode 100644 ui/images/screens/ZoneWizard-AddSecondaryStorage.jpg delete mode 100644 ui/images/screens/ZoneWizard-SetupGuestTraffic.jpg delete mode 100644 ui/images/screens/ZoneWizard-StorageTraffic.jpg diff 
--git a/ui/images/sample-wizard/step1.png b/ui/images/sample-wizard/step1.png deleted file mode 100644 index 1e0d40d2687542d8f1e8201a858ef4e022081d28..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52950 zcmb6BWmH>T&^`{QEk%k23dKEG@nXeY0tJd&ad&NTf)&@`Uc7j5clYAODemq8@=x#Q zdA|JC`{g}rWu3Lo?99oYJzM6QYZCTZQ3mr3@f!dDfC-kBQ~>~9P5=Nej!|DCo>*ld zi6U-jjif1ekCve*7DGPA#m&i z@zgIb{DNL|y2gE!NA<%4hyVw zriF#=t*t+Dz2dq?!)oSQp9)APplCtH z%R4bLj}7SXN3v<^Ua)bK)_c57Wn8Cd$+XDJ4+Yd=;!wsu+9|l{#u_v1mGBxj;Fq=V z3$HkqpL!;*Zsh5KTCSZNw*bIJi$m881L`Y3%fP=AZtzo)Cz&6#06%ju)CmC4mjtn@ z4cCYapaKArKm6bS5+k|lB4qAFChdB?+=c#V$P*+E>g^T>ioY@SBXu@pEDI242p;=I z_TG?fMwE=JQ`s^&-VuwnTfGrW$PxR&2sNvVq17J^No)}Htr68?=nLa;Et+Ulg4FMK zR9&)2xDg=Ap-?D;m4#+7 z>Li+e@)8G0=c4~Mb>&EldG{mzw{tDAP$W;z?(b4d?8O*ii2?S`Jwg=DUR!q7{vPqF zkIZQCCL>kUWf-OX#J@DkUbTc>n@m)>A>sNxoIoz1HY(TG2iC$h1ZkWUe`li^I;HrGJ zDcmXPDSH)~edgkHE?Hul_@PBpn`#uk1ZGO6!S7p{TRmHxTO?al7rJjUjYXaE?^HJ! zREDkPh_->-T-(TIDIdht^U_pSe&(p*aL4F~m*>>0R4Rx3e9QGUjc+XQ9az4gJU4Ng z*pk5V)~a&PwHaq6kz6tPx8~HyzRX_o-m5D&49sBsz=2Q;2BLIA6haFkH^R!4PlZI& z5kv!O?7SR@DZ8oIsSfH|@9W=tZOC)*ErdC{1Sh8Azu70ULq83xEuj*4F zt-;(k~))FC9Qk=sl!70b7S)+29U@n5Pnc{Sp;i`zX z%TSbj=^MU^cgncINK(l4sfokvi%m&Va*|f5Y^ido%wZc(#c@$)VVhKoHq0MOY(S3w z3%!g*%=qifUDXBEm7+IA?-YdiuPazfYzjznS|r^%?2ArPvfB0ivOYN!t$MWWK9Ksb zJ*hp}JrrHbU}z!NV1SU9FvbZ5`?#GP1j*P2md=$W^m;@k3XBqslK%$9HBAcU%}Pf} zV@F!`f7t|W+V$J@=g}e4QPIgO94MTmsVFoRG8PJa68JPQHaeC&HkZnsF2phaVK;p@ zeLj7y!BEFthq?Y|eWMw;dP}EJ`=q|r{<{vJc7c|mn!P*>V1qXj0r3n`YL)(;@jeVnth_4gitU;7O+ambjW;N zYMt@;HTYN4#xqVteEP1E_Sr17CV&%vI5+FxjxhFGWvZr6%UBRtG`R#`hDLL z3$vz^^sfxIZOOS_JL&!PoVw0TuSxGd{<&0n!gFGE60)2<%9YKSrOoN`yJt{hU3q&_ z1U);E-Da)KS4l$2-=c1phRuX6PP2UeS3`kY$*s{Tf$)v6bM~h2rXcU?hgEHZQQppm z!G;l~9)~Q0?`ske8n`Lk6Fvzj@}qrW@hZ$;%wP8x zzF15^dBAp8IhEz$QEWNY?QY6=+_)9i_s_n2bH7oHc8nr=d@<<1hkVxz7KwHY)gqqc zNal8%T$XTIlYEd&l3Y!<;85bLWRc^3FI>WEEx5?r%+)N?&uXqcI;)kb1-6wZ*08M2YNfxhWQkis^o=jf3$-hK7G=v^@@E;E0U=%-M{j|pdD!1q7jMbcip 
zb)$`bWbT_X?iSqqtUX4^SUx0rPfK5PflM>nF@lE9v}M z@6x_e+IZ{rAr#>F~`mryCh*IOJ|?3jNWDKq*<<8lY%@&OCKv!Q{bIkILxZpmCJ+*xJ8E zTe~I4tii2^%=OHkWG z?ow^YYH>ecI^N=9A~G{E)5n+fe%Wv-x87^lWPHsU+u^hs==yg`3Ldd5&?+$g<)YE` zqIzljt!ATUS|z`M-GS?Y;B)-6(m+c_OIk(K7n`ON&$c=*^V@xrEZ*^zt2U4GcksE{ zEIy%?wo6Zk+rHcDwRbDN&h$^?BP|PV^e5%5O05Q0qQ_zj5-TCIBD&AZe@>Sn)}3xY zE+>~|ZjrwSPCUn5$6!mmj!BIf3C0XQ%=#$I$G0Kk{IvgABr#z+L6=4FocladLGX^v z_ipxZeta@zGAH>WIljZryWr`d^N^@=slU6QynV=>lSBVX19sfyw(LfBnzx3nwB=0+ zVSroSS6!?AE`r4q#mB>uUZq?KdnQP~+1!H)pWDRQfG;>~CwZa)RU{^wC@7 z8HD$Zolp>9?(2Rw%GryZ<@x3X;->)sqeLLY`~O{^N&dSncmC#N4B|kR|4;ry6xe(8 zGs-aTMZl~7#8TD_-G+44fm!<2NN~16tbY&h5+X39Fa{wb)tl7;|0F?=|Jz)+PQ^qH z(*FqwMW}5C71906Lc1Ru>&Ge9b(|&E$}@J zB#iXgQG(<#?;$viy#M6us?6MHy}Wo}sBAd2uUoy}j^U%R$@KIznUj;D@xK-Tyv3zO zYinzM0RamYcm6sYvbQcvNh};39LRw$M*qbV06%eY@c>RntAS`Tp_P?n7Qt(N#*1C( z)z#JBo@FfYzb1enLw3U4ygY>FNsL@()~5F){U3RF-sS(-aX>OIX;oDfC)!?yJ?0-G zMVtRjysj!MI~-N2_CMwa%wzq;{7=>Y^b7#7{vX35I{)9}AOB+tK;8e0-~WmKcl7)g z|7!$DVcBac^@PU_yNSFnD?U9rrg$x3z&MiJuk**=!ZR!8Z5Q$U;9tGdRnOg5hC?>H zE0E{wxPj~|q)zC(bw3KlL!{__C!XPu60kw$x6PHCt$s!V&?B!TD!SrtN5{9T&zS<> zCZUmEl`lU@H&$qd=N*i7#=T z_wwSh>z~@#uD_ENt6gm}N@yr44&F|O^kqEe)xVZZpPwi4`o5(69T!l-$bIJ7+WA|p z&(ZNc4hPY%6wYpze#Zz7J}S@Cs@#qzms0|fn)7%y63wpYwgMFEFRKb}?|8arTg}|rQJMMB%_SWr0%$AIXWoCC&0V&GDWT=cs?J2^^Q^kVBh4pw@`N05EUbcp zP^@k~p?&ike`>C#NqNESA!PW$(&XrG3sy#l&vZ8l*iLQs+x1w4815Ibe^zdM!JdBg zHR{R5Gju_dh z#j#;kq-Xd78Fd#FA+w1^;`F1f%sV%aPEGeF(Yz&kjdCn4y2ufGjF76@iS7#9RAZI_ zl3IN|2bH20JE?JB2T28?N~(nBi#d+=jWCD-hvAA7r4DV7JIS003e<)a@>W>=b_9AM z^S4tP1D{)piSc|QOa+!UpQn*z=GMPuMUSVW$;%N=X`dQU< z?NHgevN{{VpkRq4OsLAA(!dvx%?a#UD4<2?X5m#n zKNhEY^_I^^!%+)QiazCzhJgeH7Fp0$y%Kv>A@13ro z{Wom-d)zl072`)P6qeF5--px=Wv)OV6y!8J6)>^v7NTF|1J4se&1rF1BUO2(R}yiV zPtwNssJRuBt6wdcK^6mE_}lBe;PpzvlTvQNCkl=`6;;OLi#8#Y-bb<5U|K!0eq{ne zlik{|W&R#|PR0>{Rx4(_8Sd~>+uD&-X1aFB^o@r&2z+tx?Jw$QwjhrdJv;KQ^=>Y@ z$oB@KD3j00VZYz)L0s2d)pY#HY1Sb7-XQ4r1O)a)(LBA3guQ?1+cCZDPM~JCo&DkC zShj&qv*UA)NfD#QoQQX|l$H{%()za`u 
zWX@ORxnBP3x!}a1E5F&tt9$QwY$L3i=f@gsbI<@y(0dya$nDX{IAVT%eFIsl;@oYp z*WvV`A(F2196Nx(E_zU@DktC%UIJ_0kb%wYw$ta92Ff6JVJ>*pB`U6|RO{!GP7WF3cJ;@V#&X2%IDUnfQ>NgZ4$l|)Mypjol!1OJZpauN4t_z@aZ_`hd<1Wy#AbK$9 zxLaR8ll4&dF|mQUJs<8<_}tpmMX`WNJ#HvYse@3Z^_KoX2`QN7*On(kz&o0dXA~)p zS~*?yr9U&PHD9N#!FbgwZ+FZfE4!De7&<|*(&Z|PmkY2{2Iley!}*+~3g4(zKMoC> zucgWF8dpv%y(G2OSL)oTo##cTxx{-%#~8p+N>rWlq{9877E3PTU?Xqg>_@QKPE?b_ z;e*BVO|aC2dHzS2>Zy!#EHJeGm|s-o2v+&nRM2+F3-Pr=I14ph@QOcTYlKTn%cScp zDP;+DxK4Tp{eIruwO?p7Uz_CQ^Y8ro)&k*z2#+c+vGH|)Wnq>kcib(ukjzy)XTu*> zb2fLsQ3#&b4Q!gQDt&cz@VjL zkCTG=(^k74^$5PcNIa>Da3XwN1y%fhUZbqeg}VI505M?Dq-Dkz5-?Oc&&k#{k`lE~ z38OuVMAFVM@|T~azErqki*#u&+oIVwgv*En&h;B0E!0h{xvR zgv-TVF6nj$1V#`got~PMu@hNZVFOiS2cDTk^07Kb3571>E>Km@?&GnIn~gEt3S1;F z$|q*5dL|ny`}Pm_-Q#m(Mvd7)Job-UN4G*w7N({a5bilY(f)yT<{*tQZA?+AA8Pnv z?~OC(+L}jW1LQ|to6T5wfi-@qiVZ`!)&5?~*QanNj@udDR(%IM`53u*_x9YR+|&r z-S|B2rg4_f{mP{oxBR=U*?KI~f{*O|yCbE%6$UL|H-}e36f=blszHP5!BxFZFSe0y zU{8$WclFUHew4!@5HDEHF8mbofD&>`DoDpWaESf5!v@#tum;PiY^Zw=Q3rf3=C-br z^Q}$1^D9lV8E$~aOw(Ks9Ykt3cw|?dAfpL9j1BZq6X0;M^lA7!< z%diy39=rodaoN_Lc!HOMOEHf}?i86YUhHm~gp*=9!+&&?p}V zm~T|LLZsUjHkKwYmxERn1NG?Ru$NadzNsbm!CJ>zf710xq-W6=h>o+ji_O4eS-wBj z{Mz&V>TUbY{nb`bUeSci5{)MeCEIuB9{&8d!wCEa2WY4F3J&XW_=VB7*~z#-_8yGe z@0{#ksbj~GX9E9G+SUU3&7B0TJL|o-)@@{DWKAn9-XCLtzi;^-;FTLjQ0(L&GE%KM zbIny(OUCDPQZ=!fjz-9y-RXg>QP8|Ac}Me8RHk`X%Z*IO*-FQQ3ixeTnVgN(&L zZ4M9g1Vv<^pdduFW0kbs6w#ZpYPj0!8O9I>60_HDbFUA6w*c9^c}QDqMhO{6DqnTY z{kceN|E*tYzb+%gR+>(e^;{zyp>c3)QM6v3&6<1fe{h{H{Et_+^tdngZT7=MrZy`g8 zYiz(xA!@}4oV%SGhf?As z#ST)1Noa}JB2N-aPPw=82IfwNR#x}ij_D`Eo$KHu9K1db@^wyqq&{C--A7*A4!s{qe310r zBrn$-FPFyb@1*pk*iZd)-S+R?A@kN51@er8yn;4PVY=bX>!*$)cx@x)ggvH` zlCxoul=9X~eg9qde)wg}?u?F}c?l;A737OYl7atFaHj*01#koi)=&lO*?QPIEJRbpifZDZ_$1%T`|kaf*2eYj}<4 zNVNvQeHm&THyre3B@Z$M%yv8MIrG+p^-JqDcNgf1vW*N!ua8_YMy(JB9kM594~|%!#{VzOe~KQqjAO zwhiM>%MbYI_S!9KH>sC`Y`4ZX$>VFSo@in~(t-OY-=N=q2*v>8auq!mx6Hd@9;TRgE}Knsld26;z%6NVA@VJnQ=AKk0uv2(4u{A)PPto 
zqJqt&ff+`LWAi`Pr~zl?^uir3@N)bH8_W|U{bLjoT6HMt5HsOo#+>5rmOXTU? z8zb3mHX7pkpvYTteonz12~;ERE+{HhWEk`yslMX*dSq9xPnNDjfjWqbc#Jmab$h}Z z8kL&1Dr8;jFE;%89I@@)H8y%Rl-;^NRr>y7nSKpy`J@Rr+K?q9eF_J8co?SPD=83^1{!XAqR+Xv(+;FWL~%A}_lR>d+g(vC3+Vvk z=5~SgrMqDG`(}O!xA@-KzKXVWXzu#$y`-`sA>Xn;bZx$rSOCLQLELuBY~{L3B2N%q z1Oz<~e;~6-*=Ne`*gcF9VaPV51R^3`>adWLKWBqJ4MUT!NC)inTRwS!69rlvAp0-G zfTp@Wha=c1FJE-5!Bbm!g&x*5%-Fnwwh@5^rNT568nnyo#5uZ8a(#d&TN|(Vq!G0_n%iOL%h$A;g55h*sEU;C$cr2V(^KxgC4O2BkW~LFF5e}V@Eb6I8kSe{iqRjr?00-zM;Y( zphA#ba4R~o=yEU(eDgT6nm1w{Jt1_W4e>n*bLeJ5by%^)=v|g(T{ysEk>j86 z`&}1v+%}HwCDSvMfOIyMOv=!M2lEDA(Mc}x&>Y6hh4sruSV*vx^6vwT@b!-d$`lkP z*+$8r@La(g6qL9q$V?;dA*1TOL`B5>A?D4-r*|F~GiXE}bmZa;rfg!sglzLZXivpt z@Eof~L6RauU! z-A4_Oe{TOVqA58paIv*}n!uf8tD8OHhVX@3J3SEu`b%cD)M!oBpR(DASXnT5Ogx>t zvwffSwjBj0U{Ai0Rn*rf%dHRo0z5+y*weFv;EgC^b|4vzO?ez@GNwygs?F!$tl(n$H279wx>hagva)NaN3I7a4$F*!JlagtXqoDW0|dnqszK*(1c4`e@IcH=1! zVD9jFx{ER3ld(n%n4}DlL~1H+JS~QOWRcg)5$fl3M3ULk>vQ*LYkA3GZ+%Qh&#!@& zml&~YQN(9Mk_Lb7hoR!|w`GAC11PDu4lC%L-kOVkMjQlqL=&ab$^-vt*w zKMd<03kA}-5cX6fiN)g0{+UXI?lr2cNuQzfB5>VTth!nSDjtI5(VJRHnZEg4X*BlQ z>Sxci+8=jk)^5?j5 zZ-qR=5KvFK75MyirD|dGoTs6F&G!&F=yz&2w7n}QBeJ#|8V)=8Lu!0)*Mh~Y9h*&f zYH4o=xqKp1@;L+t{g!FL-m4|PfByyE9@P|9p2!N%W*GhXNBub@>?O+e@@~}NN=wJj z-F>?!bYCs_lK?eNfdr~hTlwjJ406`$T*o;YW|}#!E*$H`st*PMl(*ze%%v4d@`~P%2#Q2jJl=PC;EK;HO*hn-0nQH} zs5|!Mc-eejvGyU*a*er$6G8VW+YjECBiZx6YAMf#f6no)r+FYCGe^>*Ny`MtYQk-f zxZ4__*---bNg~g|PNeg)J6RYm>RqOn^^O(JwNBhJHEcE{;6oH8O*25nh}Klp&MMg8 zs_$u|36c7m$^5)^41sL+oI(zV8lImcCrQ5@UEQ}HMShf7zUX^x#R#?tXQBm{{H!1V zs;X$ygH^25Glf1Jap(*N(GA z6zpZMmT$&7)ATk&u}c%iRfDS^UMHDtGea(H`bXa)M8qh;sJyus8e$RnihDMiUt;NgeQB_V*9J-RC}|&>ui^ptQk) zp-b9J#fZDaYaEyjQBaDF{gN8|UWkCtI{hr=pIB>G-v$p|@-I=kk>uyEpDzFD=65d~ec4 z5PNmD_hCo}>Z&jS(nrPhd&7Yb1F!WV4i@rW7CP0JSuBTW+igfDm2QRR2~SgRtm_s{ zKQQWHw}a2`pSqIpkp6BpZ6FdYX$t-HNTaW@ZTq%Rfxl?TLvf>j`;v@p=nVBqDe<_?j zaeEJQfVk~aZI6hn+tHNrcsJhuama?0L)^>Cy*hu+S7TUV^GYpr*Lj-Q$Y`qS&iB_b zA@B%}{yGJ{xNo{Ta?W2864CE8SyR`hW@9)sin)swQ3%ea@xYu%HcMD5^;YFV)V2?F 
zKBGx$pY6wEqjbtOQi;5@fz(=c(nXw~%N{;?^w8WojL9kV67tsGCeQQ0Z<^+Ztuk@fRHevc8X%Uo`ijU>lf;?r6;<4SF38$?N|WBk2*9}=iyp%B06pfU1>f@guKmITJUq$ zTHBhT{cu60noj;>sftmgf$w2nOT%il=e{=rwV$hRJ3ak)wg9WwnC0>Q1^d(Cz0a9i ztQa$Ydm{sTz8Mx+dmP@~J+LoiP{so(CvbA0zC3kmLB#jCq#Swn6AM#1W=(I!@KTsk z8R>NqDDA?g95i3Ql7fUJ-6Mc4W*&A@-$t8Q_Y5F-Yqip7zL0z0gi3|b-eO^3dwTRV zPcOVC!hW^bA}|Hr?0BjhC@MZUcM!b36?rsPgdwu@3{ap)oOI-J4XicGF~aD6K1-Jb$wFpk{JE%#VR zl~GKN#rw`-u0_ZH-g; zHRc|r-ath7kYuzS^66e*TKNhQCcE$E`HX9MNj7eK{@U8|6+PkG=8alcqJN(#mx>Kp zkn0)iuN&EUi-dz?B&7N!4rn5Ux{tXN#Y8ZqSgcU`P7a!o0o2vfAzJ_b{j&O{KOhMd zUPz>YZ|hMPPKA~bpQ@vyJ|WLQ!+7#ZnMh3e(q)P+BtXw@reA@ifdg!ZG+-C{R@Jmp z!5*)!8e1^t_vMAifXE1bT{Q`W_xUHaBvsHVYi<}DP0$Q#&M$`{k)b#zW^qYnbkB7U ztVx}q9roK~St%MSKiaGxYN$rxosfqH1?+MK0lgKkTX@zPdiY47#V--=Il*`oMoDWV zNMeRm5m8UccA}nDBI}ej5iv0}>c;q3DPrSi?{C4*e%{zUb&=HGKE__FyGKCWI$A2q zj^ByU&7+lzb{hTCa<{~6jC|+F(lw%*gb;fY*Ut4wTR8}fh{=eJni4G2TUToZ`y_nb z)Lvq3EeA}yZM=baZLb9N28ldh$ncjJR@a{^v9(rATYs2_F7Y?_*iClapHf6eMG3(! z1eAPWhkX0v>+bzSt?&aE2?+_874DR%=A7sk!A9F>36?F_m$$3^zpqfdV(PzQGrSFK z6795&N(r-%=MKWv>bz{l#lWeqJi5`G!^)_x&xA#1Ff$W?0oH$=sGOM#7QK($n ze)s!0ZtiTlFOfc?6#st2=|!HRPl58%#TRz2S?X^WtqQai8ss@_=_MhXjpZas@SE>$ z+KI>rNG{>*d3D;igN={s=J*)pljkuaYrB;yK)6{p)5|N&KUl)WkC~o@XP`0N%pf8( ziF1Fkq&NFjAv$I7t%dv)bi*t=0xB*IZ0t=5HA8n^U9P{ii4k@b()y5}FK8rJUzDMt zHEG8@^6>PWDtJL?!n&SZIX>mFUxY5d<;%t^R8>{rGrQ3`y~`9nV$*(=Do`_>d3q{z za)X}T;+tX#rzF`+dj4U}-iA zy)R-CP#qRqfNhK;xM$@_uS>Sy~70}Z9TPWvWwK)db6q+FUHID z?vL9MX&4eX;rZcUNunoDo?r`+W<;<>JuH@3A)+6B3-_2}1RXbQhjm;*_>M3jD zrL@Hs)+A!z+dr0a8;_|UmlD1nC4_yaB63LVFiWn>m@^x`9TY-|;jaA#-&=#vzBAe< zA@sfJBG(ot zs2Ap=AEhA!l+>5DR)RZzHPLl43;ZA&9sy{cNbsAZur8p8{nRlFEVOm>3pAIlC*Y z8xe7mAAYG>l?BC8_SgzEk+mQssc@G8iVJ6-{%zrTy6@wiR?Qe8F!ia0_oB35>Ckjj zKpX|hCc~xfuUW|WqvX)2Q>QMpi^Bx4@GEp|ailmznCeO`I)6-mk^iuk-reBz)j&mD z!wj}Hi~MxM*QC&!QHqJ?6rA|!c8yjtw(R-yc4y9TR|E#=4eqY__qa-1#+0Gs0&hy} zB;xp+1a9;iL5Aigw&J{vu{TsKayBiX)tuvs zjBdX2%v+KtaA5HVj$9fJ)Py)< zc0E0BrM2m=`Nz|CJPo;llnF^mO|1^rvK}8BAXn-5@^7%nMJ#HAL~e&+he_LsYP>b? 
zG3CjJz)GIa6b`ez4MqH9ZmZ40RIE2=VVsBw1&hg8Z2iVZ9r~DYHCG?)Po-%kwbJ?f zY)+xnVdd8ws#+WPujAZEET3w!Uehr){3)}mORVYBH|ymM526bZRoq<2V8pFf2CclBT8PgknPx1Gas@1kT*cRW@PuMrFimeC1Pa`Jw{B2e$+)mnBw!|>^? zSDSw1d57S)j)~oP+7(2S)FCi15RvCJAaBXuy^n1Y&km)l zqDVnTmIx;@pMr0Qz`vTB_WmepfBx=;;KR%(CUyjt86-hQ4!%2|P@4@5B^AkTXo`3{ zBQockjhG|5lU(I9aK;n#$B=yg_w#(ksjgZrojnHa3 z{@l58t*c{-r=5zWxgx$z#4d1AGo8>)i(hxB!JHvbCkC{mhexUmMmi^wsWw>drD^1zAvB!7Db zf8J~9p)u)rz8q;NGgw%hRM($>7%+H7HF<;)th)A2Y1)k<4AZ&psI3q`X*p+E$2A57 zh4q`5zaqa)0Xe#@)hp_w?uRq?ut(%TWNyjm*KZ|&L)dg7vL7U}c0)o! zwuMbE!cZ*c*8_6(#YaOqG|G4n(fEWt*80_j4SB;HHaDf!%_1u`}a8~3)zZwRFau? z5Go%52)vFs8`JgnDVj((RVwp4)#)7H!nO^imGLlgl>3nT(O*2<3lT*OW529sVC%6+8v+sXg9%VXSC~C#t zj$!WN{pD<-Sal_+AfHRfoXOD-Wl+I0!{kI0B(T9IK_{+X^Qj zh_zj6u^T&+;i^$aJ25TSGKhV^$v2yqYOeiTAAe$aTlhoZ+8}}`H8>dRoi`lMllWca zw?cxmS2QytYQV*y4I+CmQlRz0l*6uX+M|Z!yzOwCv@gY52wEYH4R9>3Ut%y6h{ly zP6djk#T7P-^`~2!!jKq23mo~4AauEIXu?J$%n?qMJ)ew6@w8geL)T_L3%y5u%`(fw zvE<|}1+;3-mQ@HBP}ed?@FZNX=eXLn)%&I8|xOx2yxc#lW@$V z_2BDAB=G=B%i4$Q5rRR3cRXcU?81MiPt&zJHO5)|eS|Bk^UIHTQ170qc`iJK=2=%Z z$T+f_1~jynelN`GYU`2UTPcM`faM%}rDgQ%us-KTn63pc*d{rsC7C5^nRDS}drdQ^ zd>}|la1g)WPonVXSYxYnC$I%aZ`!BLx>gFT^$!&BfHt~^hT)HDC727RtR)vG+WMO( z74YLG&LUk`91w-j!7UZ<)JIxW-O zt~A@7Ru#eTpi7$yU^(wC|8fq*#(k&lYV@QV^f9`{X8HpAmURF4P)SK~x!LG-zsg`* z0oBOXFgGHPEb>wCp=RxAC6$oiEt$gTYD?MBPlIN53aYE#`39fEA(<`Lof>yCiT;ay z8*@wR&5}L{Y!`;TQa40QJ}2_=sy_sF(rQ*GSfHJD=4ArzMm36bQF%+OE1Oz98cJ^9R}XTt4jpeDhdu2u&F^XIcs$f#2)F?mu8;W@lz)XSN^r+@5rp zNEqi{E{^Tp!V-xrR9AmmQp5MhxTMFw({9I_n%OZ%NxwA)n=y8!a|WIjnJK=Z1{MbF z@Pkz-aEiKL7KdgkB z1BVSYJiuUyEOMBLgOMCK^65yZ0g*ayAy`!{Rd<s?1l24%sK)!!#HcCu|I?KnHIEqrx71+%u%t9i@5 zASiwe;lkNUXlbyHY;XDciDrCs9C7{1dE*(O`=%D{B*gI}+V-=rII7QeoEL&Fr=yZl zEme_IT#&hWxAu6qh9J@&3kRFS$~e;Mb`|S8x0XT9ewRfX1-41djJs?`gmk8?RY!xZ z3todt5e~0X#!&Xd=@R{Ei^TN%Xg&t7duo^?XYLHF-8;*BCF>W}eNQh^!uclTdeSxO zs6K1h#bKu9>)=qSBB~tKrGu$QI>83VBduqij|CFczy_PD50C@$wUVA5^$t0b%vYjo z7z(#lKPQ~q78u>B-Se1kX;TgNE-Lkm{OLVp`H2Z=oP4CtM$KjL6gnaFY6O8@K-|JY 
zh(SL!faF&+Wr{M;PU7ThR_F>#QNLpL$a`V^s-N%ImcrLG7ycG4j_snqSwy6yk>Ttk zkkK)8KEXu<{l7mFSK39;#_DD^c<5&iC|lNY{b6j%1qmBBGVaHcFwxHEW+NnCkk$o% z^Q?zhW#*Ey*UfDI)c2w5-9r>7)-NLel`1?tMR^cxZLb(wZ|1ZR91B06{Nh)qo^eF0 ze4RzW-j8YuV{VHJq_r4=p1tSOW!o~8{~#H4ConlX(J`<%>h*n?L9@thj6?fW zPJ0YJiz8Jfah*;SM*xR&eZTB4kn72OI$0M6qP8QW;yJEx+#lfqI}f5RxU>)D^t`E& z1HMm3oO!NE+2L_J4DYTBcf14tc72ui3|=-2w~@i?^G((bTHjtU9NdmW75Vd*nCSYh z8AZAa>t79ufggnv3_0ZPnrUZZ;_LYJ-%tv{>zTevzl$kMM0&-*{c5tTtaM&LpTce3 zQr%3!f|9cHYnuuKaj6>hpMP~SDd>^Ndfz;|FQoS8^QlmCb8|Pgll7ql_}S*E6MR?v zmfT0j>6Z>`8Q28)|Li3^7HjzL9|Hh@|G%nm{0_rMwwC-NfBpB>FEs1_%D)iT|5Lj2 z|LFYR(jCD6Q`v)Xr~j`1r?Th&3V;4r{C~Rsukh#p_p2P+#FXHi{dYRnx9&bYG&H1a zhLK=|M>KBH@G@imF zo|Fon6fTk&uzC*A_Fvr+MfOa}XHr5FG!p~@tb!{WKp84FAF7czXT9^gheOla#LzM& z$11ImFs;xkEtesU8qN5N10|B@f3;A;_HF*>jjKRJA&QiTWIy38Hs-*Rm0{(bPrCf5y6hHQaba$!Of&^#76emO*hg?Yd}$ zB=7>s5D4z>?ivUX1`X~IoWTk1L4xbxo}j^l46ea~ySqCC2yVOcu5X<>XH}i5U3*vY zgJOnx`kCqJzWcKKapJ#bEnyUdFI6~7ND_oNhf*TqJwgXxE$H_ggx62>Q4mGf9@*>?RUNgq2eS%<_AHGtTv2 zP$%}ti)qxR$qK!Cn>T&XE`sK4Apid6^wm7d1+akvQ{P(b)))0fVP!oMjP$P@Nw{j@ zDucg&N)J=jm;-9Q#}u>H$KMxsyM=P{1`l&t3-107IC8szY3R0Y?%oTnvfKmWDPo z2Y8+)@Jpmi+&s2c0xmVo8jYx(PM)a2s$wvxO)P-TYo-9jWvL>xBTK2cw)kXxX0_Q@ zG+iw=Hrc%|kSnp!?&7|EL)D46YzP~p*>R-NtxmJ8h5ig^u_G+i`}+oV4-Xg^OGfbq z&`Wd|i98Y+e??Wybss$exA5L>odJ}Oh#^wDaoI{IT`Yt;f%7Y(?+j_ z{nM&Or%62Us@2$NRNl14gzmbBACG>yYRjJ)o2%JsE#It;-1#rFeMXHcG4j_|8kk23 zgj&AHemS&ixst)=w$?oZvL>3F6%*39iQ`qATwL$x7!9y14TEwnlU(0pb>Wg#Z7q*% z>AQ)L#+1Jd>N^n8)R86h*0tZKnxhatf`xCt_V%p=geQg#~A&U zd4KYXCbY1EYLYt;I`N4p(bf5^U2ZHa zw!P$Y+xPS2dBmbIVh*)$?8)M(kN=^r$O1U!w^J(lIoU)yjgU(F? 
z#`nA9EeliwP6<|TMb7Qg%ZV7$U|(O~QTS`};M9&9MJ2s=g^pTHA(V6hkc6Co?H?Pb zN^JPtTex|IjiHJh5pm?$dL_!B9st6gJ!3HbDP7jLAmIhyW zu>4ApX}(|atr6yrPE5yuRdRFJ5Hl1v@11Lt)EfzPO>b@8-%pmhI1{^Vnk{*)@H||R zw0NCOqCd05-m)XaVJA%&^l0{&z5v!d?H12w&xjfl-n+I^{|F{)D2hE<{;B@b4L&qd z!S?+6iI11VMeFZh6c@$sk-dHxWd_l-oOcc}GyM9Ad-HUj!^daq?A7_^mhrX|f4r7a z7*tAL{;KTATh(TsEU^#JBuw%FvOX5OzO% z;@f$(lR8<#>($JjQ&_Q^Cuf<+GN_;~E$n(T3!b=w`33n~Rt0`GpVi)V?nxP^gkY`H zS)#DVtFCDPrV1GemvtXqD0pAU$vF$VZsj3lPJxuYNX~JZi{Fd1RY902a_ttg@wn}< zxow3-_JFd#O-zDW%*IcRu?-u2aEGTuX(>sAd4xMrQcJ3j>YJq%R1WvZQto zOH%Q*8u&y#ucKJpC3UkXW01a$;JJ3Z48IiHh3()kv)+X*wJe{8LvfWU3pi5v6!(b; zMUkCeq2<$bmZLHGq_^RwqSfaX&R96$E_W zf)vq$mqp28ZjCU$6Z$`b3Z!DWldM|)gCA{Rd;Nn_zTJNp|61rckV{bJt+zbOXkV4s zllvG><%=WXn?rjgm!2_0hn=Xp?Q|WNgXAVv9h7RL+hn&;;r4LaJr$Y8)Z(#69j{gL znyyJPzaQ@#omTFIWI(c5;pp}s{B-#~+$#4qS7;zY{5!d_f^sBYBMWIr>I zt>)(8{v_&p^QB~N9*mlft(jD$peoNTMR#$jxu!PnOeV9*!`?ga6LM(QvwQOFkSyAe zEJM2E)y*DuDPh!j>FfytSV~MyG*+&XWj}Rg+t(noe{aWaJW#&qI9%72MMPgwIXboU zcK70i1->>Ep?Oau3c)`Me}en)`{$l}#O6iIrGiOtJY@)V$h3Kpr7;p&&KT?|{6W9j z?qc1G?~vR!K(9znF1X1vS@Z7^#rB@`h?L^UCoNZ)d?e60I^aG}yk`N#NMWEj^j(6lYXz3%*x$qWsj%lsHe z4g@s`CGmU(^}He470ORyP}_Zm)gG^Mzb1--Q_>6j7X}XUEG_ZzZ-VOe&-vZP22Xcm z<4Fp0h@q0sAKAi%Mgt;KM}QlkEp9t|q|EWJ5se^!tLm68J#2=D;xh+l-tP{z+}`QG zE9`}aiRh~E@8c_mqBrP7d98^o&QueSVc{6LM7Z}B?Z%zZ3}Q94+(^l2Z~bAi+TBji zOswP;S@^09w5O44fa78K=Qi$(LK>aBW%oDj$+E?IZ&>h}v7Xwep3WF-GZ}gmc)t`h zt@$zP3486^Wn%;Ak7tW_=0j7>7h@xAJc2S{wzUqd&HaKpWYAm2ti8nxwEY~xp%KDNA#-WU9n!~NeQeb)D;WAa;l{Z=Z$2eVnZc3!~K^&ui z9oVOXAiYDb@2Zn(+^-_CIEtSY2KNXn_0L&2Q1{?%*!M$ z>QbX#F4mfA(N4Rb6J_^*Ml1ZR8kn-O@>Y!P4^AXb*9`Z^ZfJh?cTrvKkR0+&j?P!O zdcPFg?!P*zG(ptZ>R1o&F>hczezeTLGF>3%7pMH3v_y>v6IL@vZd!Cw(To3HHm=hu zi|&4Hj}b#3FWj)XsDXt8uUB-3tda|2X>S+o}8g=He2rk0i#n;uj&mzQDiENkwY zEr7kPH10#TM6Kcu2=r^WzVNs-w_H#TJ;POdLxOWLOteS-vme(85$d&nuU&m0Qum;O z=dz8Ktu!>$=lyXHzYY#oc|t+elm4Ye*X_Zx)X7q`((^;@$y%@545O{4Lq$Kztt}Iu zy&`Szi;Ih*-4wC;*H8VK;}^w-Q_|lL*1gwV6*muCT5CQkWhz&!W)ZI1@E7V{n)49D 
zLPAJ-Y%%RkGWE6W=R?Ae!QM6ji(Sf{bpXy_!du48R3JUWeV?+PQrncBDuN zaamby1-CZDFq^_G_awkINUbF0BtN10r61`&x1~zX-7Y-~eL+gy?DheP)!ue8^s4YW zq4U%M{TeUVme~HrR7sXkvvx_0)cg0iG8x4`T^f;Emp`+k@^W)^b(_+gyyz2D%KzRx>v|iKBN79s0!Cc*F#O&kL*cwERPJM;x5?d@z@IQy zCDK~sa%LDn&X{KxrU*fjkcA+%vGzw>T77gyB8%H}49@yax&EPO!jJUVL)>Y3mCJ>P zmv@QeL2$*1U|E&8aI{X8T6Sne)yb{vM{2n-U9We{~jX#`?RD8e4ejkUoh#j1xn2ad(G z)k%PYUjJj@<=uU>%=1SiPj8}dthEpyb`<7`mv*v75q&u10q!@?wPyKUMS~-&dYXd_ zrlfS=aj+my{ytXsiChD!Y)Q%PDttL-N%}r^IVS5y(m!Xfa;a#pZ+(O9P`hf&d_Lm@ z-20N%;SQa~BjkcfUlS^JxRQv~3F(2a$~-)!UeZjAu=~^reLydB2%k6s_Xt_OXL@u| zb#DC7Q9O@#jW5#H#`=haaMXgcpeMvVy*jUS#3n4-i~b;efj;4Y2p6B&zFT7M^{Szm zAfB#}7n&SRN9U|H#+jTou=kw&^F}KIpKSfy6|6GrW40EsF&LuaC_k^1O^6FN6vJ^^ zshpakjUhIo=v;T#UK+DG`Lb+iXjsMK6L}x&8VvyftTJ$@S2df~A`sTVh~0;_*MTd8 z+lSZWD!T3_+-m>fj3JHW|#%FpO z*M^uB^y;m&P@8C;AE~%xl45{nf>&%5^Az2KP@QS+{Z3^?FOQ~ifJ7<1>$)=1SZ+14 z-`kh~ho9oC7T*1rp3OhU@)=}gL|452fkG9AdIusxL+a`mHgKRDnfxVM*(3Fml5Aa_ z@d!S=0wDOxs?Wgbs9NvHs@OF)Yn(Cv{vGyFTj2GY(NKI`s$AZF|DVCb=AZBrkby&0VspP$|}evKTG(sG5cnUDPCy zf@U|c(6QY+8o(+z7Ga@B+q**QI?bNPx63%h@mlSyjAGw&vQiAD_xl~EeodE;N$jaJ zu_eozs5$|`PlKeSC4tstu?8pSs=-(s6tVH8E9u zXbH?bWk2&%{U5cis?{*5HobB8Qr~Ew1h)6f-qTT&!oweWN~B0-GmxWYW^`jz6w*2bMS|flwVtX$fgkqiB6Uo~Jt>-x4B1iHID+ zUdJTHQ$v-+lj;!J1Y|j*+w96|oN`dO_*i&a()*GLppSSh!A9Qi`CFV!6taSi>5FVx zYvR}+!8U5a<=*tIvVn)XcX4ti7IL(`LlPbmDJ^aHF`VLO|BI;1Q`aW`gUc6@m($+^ zt21%}rbpOnSV$T5bx1=EYdhCb?G~Dz4qj^1OsS7ev-5IRSdDlF=22K1q>)ndX8E_+ zyIj0v))!Qe_mfppSz5N!GqsSJ`ph9LnY=#49zI}kK!OX#U|_IP8p9Sa3{oW;DX_`J zUmJ_s@b?X47;JuvDUVUCnK9SkW$Cs-Z^yM9FRagVPFJJIEM_7{%iCF?1j!`DhC4wv zy4Tl(u@riG7=#*dcvkzNM#J(h2xrpSQb|YLGtmdYf<0SI z7A-(L;T@M>nJ<1K8H~d=4%Ok5P4a7JS5%5BdbY?<48pUst=>@>u_>N6i>iz2ZMH@i zRl58@RK8z#lg!Pco!xpxYw->UWVZl7dG)k0*oBa{&Eav1sd=CiKe>dWtaPxDa~|U& zM)nkJok|4B_Z3P$DstBC3=Ub2p?OOt@S!Z%EgRw*fsgSQ*Dk8vq+J;0w-`m2qgFc` z)6iRDfo6OhF*s!mWlu%_c2YxmZi$cGv=w(>NU*pfDde%z+NL7GNxH#s!O>FHcBX*B zHD5UL6C=Vm-&!o&$-`HqpYqTEAk{8}df61INTqPbJs!}nditc~{qqw%D$%LLF!b0O 
zuoneH&wSTzu)5z)$}cS_DL`;^;&neGszH>1C>DA>-;cMVnJ#>hW_Zsj!m0RpjxU|x z#~pldm}`v=%c0HI6JTAJmUqnIBa(g^XYVBz8-h%hU;-q9xz9AE*)-pB);Qy%T}K|? zPk%>FFc?;e)J`zqjf~ACikj+Utx~dPb+*)inl#a_Y)dhiOuP*M@4zeVmf|c4LcG;?)f4oDfBbAg|{^Vd!X)RqX zJ=D!yY)K*8`-eiV`{5#-&e>3m^d3C?Dh~`tRB>}oF0E1kB=cHBSMv26$EaVnD$VRYnwjC~0fYrY}B^a!{*pSYrtu{c`%k@TbloMTuw`lz4> z!jAOI7a-Yy6&L=RFeDO)(uqd6MIn)ybT936a+t^kge1zw)=ly=nF?0jghx$5Xa=0J zfRv}Huo`x=Na5b(ttcw&fkL9~n&dXjrKLCqfmLpr=19Sf*>}Sjc6lSm_8fbk$gW@; z`6%ZL{q%z<70v8(nn}pDZ*PBZ`5J*21YvKpcgH*V**0w^JG&HdwnGxY_hxzDI!Ha9_C; z$CoX#D)8(j8rU~Y*{=71;4m?1Tl6bf(MERziEg4|k>s7J+0hQpt_CFE*-NAkZS@Vw z2WcOY`^$+u<>ul#+uc3b-OFsi`=Kl(K`A%ZD>z!TJY24fMCRfCL%t{?A|$;2r0#8E zR&6mmGdK0^DqaaYVK@5Ax-4?*vj`^SZ*e&R3?+?-rgtw5Qx;AD{t2oRXG?L@>y1Qh z0lW2%>xn)VZCSn>+##_-RRPJcA7VzD-BprKl8BG^BNZ&R)zOEZMsX8{ZxWd(H+YtP=x z8dq>UCgbE?FD$TJc5SmkPt?ETi!3U%1}nv`W6JG5T7t=5s+McU(E%XLWzOC2IOSeE z@y%%Vj@@FsX4eb%cfK($0a}!Wq=*bG(=7x($cAJ_%+mrV#Ce4kF&4FtLLkexJCik2` zgJB%b@PvBS^&%D`!*ubn_xeCxWR&uPo}@PI#4Zg2{U9!OmV3n$mDv=csb5 zztCGF8wHQ|QyKEc%(*OX#H;8Y z>8w9{j44a2kD>r*HpQ|r^D0n8e=|@wfabPiqyl~4;)J^H+-C^CwvJ6y)t0H*pO^Lc zsi5G_AB=}k#;2x#Q-aB-Gl(L`t52Mg%PYvS?VhCzc>#7}?R1K>%KffUXW!9)PBa>= z$WY@AGzJtAQ@@g=a|?@M(W#)U*Ano6h!P6uVws|V{GnweXsO_@9?7C{{LpV0Vqd7< z5bKlj2vGFDc3z%_MXn7{E8Dwqb=wOy-Nhp!R#vc64E{>Kn3a+z5as>BLEQ0$WdH1FaUn)?5Qb>|Hy+aUKUi27WjV5Wxz1Vq#(>OeP4s z9SB`+`>$AKH00F+g+{%rYk^z{uJYTXr?a!yp8y0N4X~nR8)~S^nAwuHIuQDR0nOkO z>%Xm*R9N~5GfAhQrlw%Zt;>GjOdea@)+YDz5sCjS2rEVsc(gp1BxxN$eQ1oQ1<3^( zuiLcZ%W0{9i4_nwWtu}3it<0n`mDa60IDIqArqYS6i7S~X=d?6;4J7SVg z!@(H=zRZD?C;`X_wHjCNNFA%A6=ft*jdsT1x7ERRQ^5pe#ZxdRcP|gK8Vc&dx{|SE z$kfCb2_|NG3RV$weced-kZ6#g?`v)4Pb z$_BT5jA*A3(j&=MqLG|2%(6lz%M<~$ZA~|UAqdFSGxK|T=&;PcRnu~3ZDy9g+Op2e zOijHxw{(AwJ{}`~DvAV1PT9xZs7RuAekyun8ajGJX&2NCOa} z-;HnQZ)C^>z1lH42GTdf8G!vx7gR6$iiUNv$6QFFVK{&5#O2P}`iaYfaCSULW7JNf z0f}tES`k_{NwlHQ(+BrZqmc^K%GU^}Vo0SjMIaUtm*%y(zI&OrJ~wkL&6kD}5?^kI 
z_V@R@i9Pnagwg?Y;9=%sGf5*NV3a~-k*|~NY(Cvt5w4e?+KdZ0RV{m(==nOdZk_%1|OipqoJ3S$f55}lo^*+mNPCcw3d~VM^htsowE<}arV&A8>AWblrY!Up)0_tt;JO|X0e;1b#omA zdd(knv~-4o`C1F;?L zyFT075;F^6)WE;ZTAJfo#>Z>=jg%bjtlhEx#UNE{meEw%!7`{(qo|R)cy6s}{G{Y@ zBniGV-Wh+u2Wv0ohh@mSF|r^@(Y*m6i>?>S_!CFTIZ;s+i}P6m*~!rvcs9rbfOE4P zbFY_vF?gBEqQDMSZY(xR0v<<-49dZ#yQkFIWK1(V_(buLnK_Z~6W`ywKUp_CNe*OW z+BRf7$2yh)iwh8{9dbhe{uzxnSoW6Z%wKEOHG-|Zd+x-3grs?3O*AhRGF^Ok` zNdhA7nv`p6x86i_6ZC%JN)v`_RM|5;Iianimo2N9Fh%vr|zIc#U#A>eymzMb8Xrx^eV87J*(cZ+`%1Tzt;*c&0aZ~d% zan+R+mDM5q*RICCh z34x>p_`a+xZE5kr9?74p+gw$20|SF{wh{U_9`!%=mAbB~>|>2DQl8Nab@pMt7-CF} zP52s8u=ptpGe|k<}@;)>x?xXD0*H3r7 zTPPcL=7n@epQH)-D9LNlKMBO|?DQ(e-Ey}nC`zrF_{$E;Q{cp^8-e2jY-K$b9Tx7% zFU=9hTM_;4MW)lQkUnHT)|tx_j5pdns&Z~hQz+33s2?t;D?E-HS=l#Za2U_7o(3tT zgODuyU)d25`VtG)RJ3EZ^U>bf-@kuMOEHN3B3YJCqtmPO6jgMSbIg%joE{x%KP8la z1DrxY2zqD-2r%{N)Vv%Ve8Nv$2q%DoSy>$5;+;~_bR|VR_xWtJjfy#@H7jtJ79GDE z=S3Ez@Wxg{@q?PG8A4Kr_j+da%a!Jr+zA3Tri7c!&{^t|P`1?YH%U z5Bb2>@A|!gG#V)cgKglX2f)azchr7;XM~XE%0P?DTQ8?? 
zMQMA#g+$kS3>H8{QpAX}rK+w2%*5JQTtiD*%ue`hn92ghu-C{SKr=9fmo9@81>lmt zue?u_3vs_dR;e7<9ocYxV^a387P3}8j-iy3!-};t8+vQ`{$jlAD(Osfh7xUi?z9+y3O_4`Uw^iEd{hGvbj|m5`tiUvrVm*V6J_ zLkgG}F@D1WL+P_!F#(l z&zJQntXDYLUS4v4*7TV@R%O9|@d$@_wm{6{v%(AYmM$kh3TopvrKqX=vB(!lj%P2W zgXbOA9b1oL#oO>Psz3ESQH+zvPI$&Um<&nW3{S04P%dJw^emFqb9PsRot-zmpTL(Z z(~;`4k>7Kclc3c(R# zXV1GWR4g(zOjGD}NADTlaCLM}jQsc)0_LH8lV)`#2 zN8C+M^SRnf8W3?_luGZrC9RsM)vT(=ov?MaoQ;YTROZ&W_=rY)78LQg4j!Ssdx?WX zZ$EEkke?u<~$b;olkws0ct7>=8XFTc-B9xgCs%<+N@lWM9DPPVt*>LXF#%lTX z$8v5kKtU#MJ74?@9{P319>p^`5D1#<{mH}gXN~}n@cnpQt6*bM;o$IaoiDes$j6(S$}OXenPsTIr|zEuG8Yio%7|X-dBPm~zI)lQ?RYs6UpzkPFgp81 zd&T~7B^65f2&bIO1$t+A3DJyveOsSlEeC zbwVf7`E97J&t3EyNhim-oG;F``@8!8LJ>4Pu>J!)V&}8-SI>Ew!5N`{4MlScyHYP>c%wU+oWSP$B$V3>%V2{*490Xf7uJ^oA&pyv4 zJ&Br{8(G+swTn}oC{WqW=?DPlW`N&5yPA>4i(HdW6Ut1^$eOaSB+z~y6j(W`Tx3b$ zejUUGYY!dRe_q#!<90`LR0_DxeWv^l{c)MB51GsUEpZnH9) z`|-u{<|Cv2VHG7Y#*~|oJ(8_*(Dy_6Ere38ZxAY0q6V0g{XcQhuA7miiyZ-%DrSOW zzYMvFdG&&+%Q9wpzE16lsgoQWB9i3X{V^!8{>>6eh6ia=yCO6sbDPJ@k^O#2J9UZA zLd5iZDl*rAAaMQJN0t-j*R*WR+!J^~Y=b?rB?Y$UvRV*CWrKRz)XYBj6$FGCpE|I= zo2>iN@Z{0wC^9FcoimU zZlUVF>pv-B>I!ELKe39UsKtYl<=A2-Pj&-WD|SKPshZX&JB!oh2}2aW#_@92F8YK7 z9%;A~64N7(H=yr@5m z6C>zvqRyW_t)5mqUtWP>^aP`@09*XvfJ8-U_`6Esmn(a+z4?Dt+*XfgnwVFAg;WAG z&*BXxhN-FQ?sBGK_Cd+P!n<|~xn=(E&N1`J*Z&+w; zMTAF$iS(_o^jNvs5%TwrLE4r{=jb!D!Nf$BN#HVk=7u9DIbktAgGUDdDkCE!&1Bg6 zFs>La2UU4Rwf#+VHhnxjMQ1XEfC@T#n>HSr5;)q6kC2YfSPid|fH*VI1Ph!KQ9#-0 z$e5Y<46fG^%EXq^|8*{y`|+t<@YLiVYHV?I8~6hD2oGxx7SvAs*w5~=gU^Q)Vp0Q6 zDsAb1$JI}s*mD>UhktT9bG~>r|8}Nn{Y)&rgYl#*n#ygSxJ;x~VwQZpWhg9N zLo8d_22HPylTwn7mn-M?BCb2-Q?9bfl(Z*z(r#Z!jCf6WW5H4KCPw*UV6x$fEj@Phn*JH~ug2;5_6 zNR-a?llfOx|CM_a0-ON9X zvP*$;lDo4?mWRs*{je(S4MUGt#2!X-_c=K^FB1Es!m8cwd{Y7^n}QT>haQA3NFL4u z^Z@U?<}%bQr8M3xm^O6#+LL_X@0>^2Q+(q|nRwm@Cp43{AWUh~V$Gs#D&HGV>dqhe zFoq?du9pZ;IluJRR$=sKqedoY;kc@KBCMujKb0Woo_@|QX{?dn<{6RS#ad|R!6~8O zB_RbLP3`Aw?|t~|!GQhi!O%?mZx05x!T-yHK^7p*P`I?{=DK)$rGGP@4mKLf5Ypgz 
zM>9{W^*-C9%e&LE`|cJYHk_$n<|yxxj_sGaIhv6&rB1=f-&)IdJJmjC9D1gqRDoN? zK?gKmIKK_Ue^93xyi@KC7gxlpDb`Jj#oju*!a|gLJgl7T@<(->RR*$X^Rhg= zW`o$&m|e6{yiq+v{su~AkWeEzYu)yYc^uQAZtEPNcGaUNg(vmc;2xVBOY^GlGv;jT z!ZE~)N6jhP<$!!ln-bb94Xw32rT-@)^Jj=dZbI;{_eTrj$^lBUSe;+$U_=W~LCBK1 z6--7tX_jR8uODN%9O#|WNQecx{Y!db}jIwy_=>lx;eSobz*;uipFI z%|aSbeIUFIy#EH|i51%RnkGideLzhK0(b^P3;EO494;mYdIkgF6}?_TQ67!W%hxVP zBH=*K>9``I=Tl?*aT*(=8n)0IJtP@B7X|r8c>4|4tH0g7o)tg#C~pvh+zPW~;Q2nn z`F%68(I1n%TWoDLSAFw&?m&^7y0g1jzq!>WBKqMf5l7#{qnnIHVK$3MF5SPy<31D2 zx^yuMS3!}7g-VY|?41FOYV+;_c>RO>brj%YrG1Yc))SBR1<9wB7l+UE6&}jV>kv&1 zWwrI1MLcmlj#eUeyA_glUEQxKuLjTWA_huu7%4*c5(cypRd4Ujn9G&imN9`ZhWu`va_*j|V6F47iSfVEV9(%g#0X!{c++9n^6CaCqfo zfrc$+GVQfKUu5e=io;w`ETXJ)7OVe|`^~5on`myD>rd>`?M6rP&z=Jorn2k7C~sA* zIg7AJgoFS+euMYLtq2eW*N<-gT}eq|ZP%``^e;n0WkyZ7CMKQ2^j$q1kB(P)M5cvU z1h^J4nhcNqq#ESzrF1T!(^Pr>%KnQhijUpcgS9~4u5?V*Oz9X!2?LY0r9KH`Re)}K0^xk zcb0rJnq^i3*DU(AG*o0=z;3X#3_3jIUIE6W?_ukL#X{2%q&1m&?uX`vVUb9>Ii*Wy z%Y`BoU?p)K8h)hs+6{RsGPYK8HFhhjwt~)?TAdIi`-<-;M`cY9ZG&De3)L88#u1+& z^lP@wr#V`=C$3Fo4K24fnN7b3g#(6hS{|rGA?JjG+z|`o77;?+Y~0-U?_XJWg8V|q z`Ljk08FLAYx+84Mb1qrypc%n|g7=k2Cyk=oLKO_Ck==Gcqop<87LnhrlZcRz zK?<68OC_ib_=hcCz&=3*S>}z*mRgm0wKQmOJzi%B00u8@EsNf)M$S57f3>^Zpz3+I zJ$_Nht#{@W$L6UU3j-Gf2!<9s_IiEX)=LFy77fb<2Guz4?gohTZ#Q2pf&EKILWvU; zy{-BdE&;4Xqq6q}6?wiRxICp=Z)1YjYTuInpk-lVVydfXmSg@pxxHyZrA*mlPuRyw z)&K}H-y?Y72F!yWB@px{qSUkG? 
zbIrZ^&gq)AjLF1!49Fu!@enje1_hj&Pewm>m6&?V68dO zTY~yGc7n22U8Y+ZfEePWK@iD`R%{x zsy2TIr)KS>ahlm7%jmhUdNg!R1d{%CM`vh|&2vn%^F_5NsLLK=fsai`pd!gWPuGe1 z8^|~GRhfaA%6H9=01xNQ;BeKa?KDmr364@l1*xZ-Cix9Qi|~`ez&z}-p{eQFH4M7^ zcJDX;?wa6~^YYVX5wLhKuHt zWI|;)tddmU7&Ovq#OImEOBRjnA}kP;K$i6cBV0N@*)Zl*`vVr!@1K-B?QfKI)C`9A zV$)IJl*fVdXevD^bwixTl}kpGyj^t5oKhxoZAh!afj8^(Oxux4B1bEoyEvr&s_>yCto7Dz{88x*py#WR$KgO zqe4P`d{dLY@WYLj`L3!_OJ)y|{ry1+XyaeumXpWW8N=$@{GoN9WS(B%-jJBCeCKEX$Q>WX^&-V%$ zx?KK|ps6wtve%NWaZ7#58^{B?(gM{!oH?Ua(9y0|2&YpJnCXBS`c*w2>=-262I*4F zBegS6fuQ>+5Sd5RDC0-b^?fXjnQ_C9mcGPwpWs1JM=b;*t?(&Mg&T=vyS01_fJ-8cvG4?1n|Ai1Bcf;=+w(1}ArJK5jjWn(N*$&NDO1F)_=Vv94b2odxjDOrapzqCu^A z;A%p3oLJ}N-*q;rPg-W2Wjgh&we+CN6;qGbLA%$(P5&ta&d}Ii z)j$c+3wbOm%8soqV`+Gmx0@)`X{_wwc61cDQMejG12b2H2{Yna?ffzF04cv>U>Cn_ zZcOXn8L}MPr9zZ=fmDsj>G_oS44~HVxOwr4kcE7r%Sq;Bot+M){FRmF-L8j+_n$ry zCjDptfPn^wz-zsllCZuW$1>jl>y6?yFkNfhA5RY&s?{ki{Rx0d6mpjJ{Mj{g!R8zG+e15ZEikp!x0%0ACsLgh%6$BFNfeE)y$DR@jj zzxv9~>+UJA8N5R4>2%EiHYtcGV5fU@%a@K^JQ^dDXzQrBoFYJ@y~wRS{FEl)OfEr2 zw#Bvz9P(l9*#=JccV5btp&S`&mz$I4_ibb)lCddi-=|=fyZ8XDp2;6woGCG!Y4#<< zz+|#cN3mxk%Lp&c5Dv`4Ts1tX4!+Lgj!-)~yK>S(oS05biI8S`E6b$H7sY#tfU5$5 zd9n0@2=Dy(d?+L|vZC*R#5n?JTwT4q1M`$(F*O76L}tPk+FQ{JxXn}e9P?K1h;FW0 zO}17?&yh5n{^7m@3bzg9K){ysG}{fK z+8*A1Jc4GRvKG?|Gar5vnEVoy{I2;KV{#_QzVv8#`LMRo1gHJZIeUA1`=X{eCg>cr zjaYj%f4;J~22w@IP8y}NoA#>FEfY75!YctTX-J`vR(55#b)WyRx0ILZ3j+ycu9wy6 zL4(+d>TlFIrLuk%jvpVRxY%fAq^FS9Xu@;AsRAEy-QLsCjtmt41wa0Cs-DRB}V1$+D{jQ}7YUSLwOM%=}ab&#IB*_7XYT$$iW zE7}1!CXj2K?XF*35WT7W3`PhVN@!29pkJ*UfR>#@{IIF^3GkU z5rLs8&#Ms>Y{KU{(7GAp-a*-;{X+&uI{j*MZ@_OE`cW`Z^?aQIb^!~7MZkx5GWi4e z!g|G31#?*j1qZ1~d;8U$E^)DI%?($Wc4mFUcvXZg zMqY10kaF`v7wF3fyK&Sf2Wg;z23?gL3WEFeZ?wmT$%`U3&zWP}KGsM8aN7MvAb>5+ z1FbFgq!GKPlw-7q-{-nS_$d_m*Z zXr`jtL5&hlNpg*s`F8%QL4~o%P&3c~Hw8|HVt9S^{T+@^QfB|d%|TxmD?|6Q%g_2R z+tY|cO(*b~x~8+b6gT6R2zZRYS9I152%~-Xnua`&0ILMhgTLs)!{wcCg?jtWP>`SdrxDN7~y` zsA8q-kWY(zj_e@C3(7vmzJL6_8jl01VIIuqb7G~+-+v#F6orUA5!`4;ua|rgBnePF zx+Y|x(By4yDpJT+S<5DelO;3!x)A#1uXG#Da>otSs3+ 
z<^gZTLCS79M^h)9iMm%$cdFw@aj3~WvA(nWYakBp@$Vjbv!1t8rIjlK zdPBM1Rk&pv9sV5MMK8hzGIfZ!gmtXi0RNg1U>!ZZ0W1A$^@@+b#NE}nxTMGj7)O7f z0?vBnr1Ny0P>@LI&i`GzTXMk*`2-M+$pT#)*+vpgV<-rXDz`+W)Nj57Hl10MQ-YK^ z^ArI867j3ne^PweH;g2Tl`sMFw2~qMW7t5HeIVQU)zkO#IJSCCwTMb};hC8ON%Iu7 zg7L9L6x7Oo4HrWgxmXO2c*fRKLY!sZqE64;|5e*tK*jaF>%qi@K(OEz+}$+^7Cgw{ z5}ZMU4Gsw;xC9RnAh^4`dxi<_?hLNMyZPPT+yB@0yKUEMvslCAFmuj6XWJ{!^So&3 zba|<%i$Lm!0@y68Z0|YdcD`0J5ZC|bkfoNCj=pUkOZP{J7L=6`c=l7CY5^m4m4e_Kk_U*N+y4**An9d z&y1;zq$*>moH;Yj;sYgzWEm}YFmcU1Lda#@lGMDy03JLzFxuZElB(;8Q!O6N({zRY zqXr$h;$g7wK-;E+K*f@;>|tSLXrUpU;**z;bsxFLKM*S#m}?yyAdOfjhzUwFPZ9l< zg9a2plL1~}{pCKzoZZ4h#3S>BRD!Y102~Q+`q!ru@ntSj0InB73@$fkQ-eV(V(NReb!s*l=9KNU^$GxWX9qxbilkbG+%irt7J(?U)=Pk7qo;#a#Ex z{xZpQrA*}rD*C7I$T9GVh@u292qNQ?ErLU6=o82TQZdhJ%jz=z_N>&HPj*VDh4(T6 z$(P1fmr27{qfQ8++bQSf8#>3Ca-VMG(Zzj0Rw`;r^_e+-Z~3D^_jE5Iv<~RahljZW z4oQNRpvq4>oj--6Q)33zny-pmB=%msjZ9j0i8Wwr(})~i39)q+pG4=b2BoRq2XSmw zf*t^{R}NZzbl<05469}-Ch=_LQ0UuOWjS07U{C{KPMaZ7@P@pkR;4Q5rw~g_~vv9WCSS;LOf>hq@mEC}4GRqx%f5U&Q21qQ zd-R0#EqmbYH}O+faaWWua+dKinrx`} z$U5HrNkG6iJ|Q{|_S`8ma$MSU;c*QN@M{3~v-b`f&?e_sm&Nl+4Djx|V#B_qE^Z&M zq-O`kGXF0BA#V!yDGpN(LYKGEV%z|?-k-O{GgYYK_FHOpF7~wOp_HSi3IGKekGyBBn3QI_BA)sdx0U!0*N;6A9ldfl&DPG`pl8W=0I(_gM;U+> z@$moy1c9q=nhD4tvomv*b|;aiMNM~UVOPzkslA*N>Hjcq0K`-XQSEd}BVd@t6)ahp zWJ@e_lY!m;mw7W%?HCeFf4}wWJYR`u+@atnobV>y)*DKwOY!tm zuPvz>Q=iz=x4(QtCarKErdC(SnP`SeiYWoOASlGM10xER^lfG1;o*sL^GS*LJimTc zcde(mUo9Hm?Y^+ln^!0KH2`o~oEYO62na=HO2^|dvWKD7d;9<@$vGUkTjSL3ay}(l zDB0+Aq(xocCh_h128gf=+Gv7ILy|;cL)n^@o$Kua+q?Jt=B^N3c@_VGk1Y8%1%;M1 z*Rf7dQ|Fq#_;xD*pfBZDPL-&!Vl7y%0D!nlE7!kFdj8k*;amI{ei;v?(kNwcB%~_j zF}^OF1%ZVc8VrD)gGesd$xSoaJeTC~3eECiAhy8JBVU|nJ=Js;pjWJ~`W>P7j@-{b z{)uinD9;{Wxhs*?3MfI)%cf*0$^49`dtfdn$6E>K`1FDh4#3; z;iu!P{>R}WgAtgQVab|}j$U4BG7xK^G13@h0wn2~afbna^J?4itDQ#(aP6C$az_dF zFsMBiCgM*R@ff)?_?PpD63$5ZsrNPlN);#L0qe2Q095^pr23{Aa#M$^-9hd1`Y8?> z5kzaY#OT&8vZZ%G+%L6Jq;_3OngLg59%_+UCE zlp1Kw7M;?alHTd%H7MRy$9J5xeqLJ29@V>Y;#OR7=(lz{4kOlYWHUNh@{q8&T0^j+ 
z)jpt?4%7;*ORLjc@tnU}YCmK;plv=Y-wqj>>Kbd<{=KN=EM>Od_lHP9m-5t^5Gs6{06_dB3VYMpz)#F3$T`kMdL;SZG;_us|`=ldR_`=Aa)}ocE z#ZpNZX|f>LLWEp;_2qn=${W(J8t4X(TX$X_)_=f}=8$%iqtht*vdvOLV1o+?)$Yf#U{uF4vCX zi0#?ThilA&=yUL36R9#7&Pg~37*^S6Ma4xOO$|gTdYy)?{LKNYC};&8 zB_$FWKG!~k^A-cJ`w>?=p#bUYz0xYUKocJc_4!5nazCoN=fQaedFV9hE zyA2h+J9_<5BL?yOdV{AinCPPoUzeUAhDEgxWd{~WMZMdjO|jZ13(DSCzITc8j0WX1 z5o+}`b90^iW_t$I?NBZ!AE?%x>}rqu5mb_fcOy@Ngn)`G&rtxsx=t8|GPFW=-OMlE z5y*Y+zyysbPb81(R*zhL`rJDG$G1#}1U%3Q44L!CG&j}8TanoU|X|8BH*Q9$A z=LFAuJgzu_2wC16mbC*H<)w603+DbjzOW;g89l_0Zeald0WDy-8eE{BS>w7Yun?Gz z%y}ZGhU&(?3w#O3E|j!R3E+8zvTrTdnrf8YMO*c;VAz6$Jt6`&Z3WTOQ6tz3c$R3v zz}ao*+g?OhFRwQ%G8z9VGp4%AxkJzpgvZ2GK!K*}S=@;DSo_JgC*jFJjP2Yc7({x` zrn&RbqI#*Cy&hdMuOfcgCqD@92B4`*;>^4vB{Mb>QFXWd{lU-kqNTs=zG~$Y9X-Z) zmqurG*=Ecii)0kAkUm!31>OIeUwlagybauV7&rNdUD#z z?BeEzvO19W`qVQzDC&csMEr;)(5FlRK#o6a<%2>iv+3;*d|I#rKID!T3}kTr5Zihv zVp^Fx4t?G99Hc{~yBfX#?c9FWVAA3<9cdH2SPVQOVa6ChyLU{;ZKmd}s(39fK54C9 z?Sb-cLr!V(_oyUD;K^!_%k{;Lb1~_Dx8>R}Q!ca|5sWhI>utM+F$0;v&PhgZI>?A$ zgKi{M@_Mn7H5*s@ka?wmuUdNS5p@qLq1g{+wGk3Q^PJ4x106qC&~x#1-MTA$|Q+R4qVa;yWSLZFUr55zbRzlues zsah0HTJaFR_!Q-#+{61C2zpl9LB&9dw%_G*2=@$@DNmA+I#YgV@?29_EKf2XQ1LLC zZ~s%0Usv>H{ZS&fj@QXLUr}Baund3`e^|z;5o>L3vFOrNZA(tcJc&FS#vyt75Qx3- zC+{K2qJ!;(L{|tYpQZsC5yAVendHBLlbSKyPV@s0!}& zppsn}{-g{@E|M098*GG({5${d5FGSkF^6`1>!Kwb1CoT;5k5PQZ(F@SrED6zer z$)37ZovMS77v3m?@pXPLZh3R1hpE$>92v4OQ>h-RkpKFrp@Bl5&(9C*vGxJq8}1LU z^AX@T&^`9}*z*mu%ujb^KR>5P(5`7M)qU?bLdFgh4f@-01c9q)f1%o)Y@qmU^+P%I zZ3;{n(2}8h+XL{66*Dura`ByL#$j}{dX3mYq~kE&15@-4Yr4kNue89k|1}Qfnlc#g z`BQ=fQgoC&JhY7IAfsAWb?>#~sfTOEY;si0U0E+H8;Pe$3{0=zGte^nLM#>VV#&Yu zggND+7F#RI`h3SV+W+|Lp6-Meq9+pI3rqO0! 
z#=)m2;8a8f1WP5#EkcO}^i;JL4<3W#OtB>ox)mIVZ8_ox;?y08$A`!DA=CJzrUn|Z zkX$i)DYbDPAhME~oyBJeWGz%rPhbAwY7Z$UXJa4ApUA80vcr?Gl(KY>yLtmVLnr@g zo=|AH`(9B#tU?Mm7UDpRu$wvHTxT{g;MXLGfL%^51^tR@WDBLJm^eTDj`__L-%qKu zAg?lg9C1M04G7CxRes$>-wOwac{Q{kctqGy$q7S4bCmiK(qjuY3GOjzbm86DPapDy zE&DF;Da>sgxa3&sWwl~rLr3y}rRw5K$CT@`Bq&NxUtG5o=9k8Roc5t)kjW=%hx@Vh zOEh{b-j)nvl|oJ4{!kJW5ZHp%J8z429jSE8!24xZ?}NZ@W-Hv?_Oi+Q5}_Y5V0Fs6 z&klHTSW6`1VzOD9X=-in6EdRmEr=SMT1EqCZdW@oeNHS3t&a{LLKT;$;Ig6RHAAvc}MX`Ig=o>QmyUjq&b|X_!M~0AklC|BY$L$^N=1KU9qUbjOpc3#oq_AA;G!UyK;72;bFDHB3?&RAatVH&fN6oS9vxs{cD@wJD_Y0F+Q+@GVjr@5!08(oy}6XI*U=Qc36}FP0C4 zsQnT#DYnPTkZpE$y z5;&~isrDPDm(fZkrPPasY>#N(b1CCo&xp?HeoqWyXJIJJv)SbSrn~<&;-Myx)j7Kaz*D@{tZf0utintuFFl%f zIXM-1HFY4s+#i#H-eRmMw(pbK%6if0`JgBQReJF^P5LY@yQ{$jNJIrsj?7HGW*uWF zMgvqP)=Ce{&&&RPx{{6PuT{0@!Bgz`B9kv2%U$w`W7l?`(XFuX3S_Wpn_M$mjl z^C}9#WenA`U!NwV(ZoI`UvUwMT71XesDM1Upi=5Er-DEAEK&pbCzY}T*szL=KR zOTQ!bq3YSo)?g~RJQhCF^oN-5l7Z%$7W@aLr8J82!GMK>O7v$-%`$^HCn0Qd0 zj*S}JFVd`E!tiwCv7K{B^*Z-7L+_7adLt`i4+}T1&CYdne7{_zU%P3!+HbZOal`R4 zW_-!j^oMHK_uCivL}CV3c0w<<4zt&i=%pj4cjhK0N`Ntt2(SAvGNqoY$`IB2(6@Da zGbW`o;`H%bjN8L&#oJ@46sa2hG0c;Kdi}u3O`6!n`xMv zM4q`im}&R2Dp`UAt;Pgix7{))Dq5+zyQPEe?f#z)aXwXwULK`LSCQ=d<4N$xQe9IC zL{&2VJR{hHl|{+z04Skjw)a6>wvp~>5Jd{VcV$ym-p0V&DHJ4=BuBhV+~aGN@?Zps zIWy{`!tTP!F!H>@3UZ6j~hEqXpx_Q%&S%>w^qrRq1IO4?=8dGKUW~0jk7kb?#+Uths)Q_ z7WKxTYj9EYN1nXmvo(69*_V^Y&G}I%qGY#-3w6LFPD-X2Hpd}Xp2k#QF*UOk)Y`Ld z$Jr&vneFqrH(=SXo5pl4?zEDt;c7{V!fh`dRn4u3k(`Z7KyQL+yl-o2f1{%pFB$t$ zGP@+PiTyxeM-)BOk}P5RAlQ!Bxw`RSq(QXMwNXu7T_rafLfgsq!qPPLBb#~kG}*^^ zI2!luseWbV@aDEAt^6>0)d;cZ2nQdJe8{6~z2fNzh3rD%bF!PW`9ABbuHN
  • }-m z>0t*V^MpS3>y(>W{U4ed3Yi>e+=Gq+Nuh+3u?bm?UP}uHV$A)PL@fanb6+~%WD zbx=Nx3ahT@HeLVnV!z{OswGP{Ws*Yk!B8sePd+a0@;Bmxhu>S2U{!QI?RGPRVwVLz znXtqmQK5)Ss~j$KIM(!}U`2Bwy%y>2_NM5cIjqoVtO&%ih<9#!Zj+>1^x^v)CY^WU z@KC3OtbqYmNOat2Cj)^I^}YG-^&+asUaPm<$?Jv17$H5IpPt{>S0^b9n6XRx`}^u$ zL0wsqfQ-0T%6b$+Ai&(l?mTRc)mglsGB?C)X#%sI&Bx1@Asw9aqJcDS8jc_H&8Ij? zfF-bvMBj9KV9ZY|U%Ze`rh)wT0PCyMPCK1~vIZ7hlaNYo-WOpZcSFsg-z4?d@L~@O zTk&MeJ{=}}fYnU4pVZt^Q$vsbWt`u;Of|gpWtCU2oM-vp6TJcRBB}8KS|w!cxb8vw zGJ4{+BsLbnJI!;>K6$&@RBKi{>_5 zuSX69L0nZ~l&g6>YQXryPZDVq6KK5E7DIHYO=K)hNSY#Wy%mf-d_*$a*d0bpPEq*_ zAhR+mvTbt;a}`D_4tgi2ww9a=__lf8;;4@=KhYxMmM6lG4~qR(m9BiVUJvC6pMG`h z9+j(l`-iVz4dX-QC2zl9pvw}t1Z#@RoQ+CBcACkeB8v{%h0x*$>Dxba2s@GR;OFl` z5X&qxL}L{(ifAC%H!2NSV|7KSpgf=(s7(GK++w3nPvc`}9R(F3y2b-&@5)YM_#t6_ zUV^iooXvybKckKe5$_)gDGVU{5;cUYO=zN&s6+_S*}9ev_%K&t3Gb=W44S#iOytu+ z33M(jDhe?brkIkKu z)?FlLg!`(ox;35Ob~7-#`FFAE?5I(_*22w0%6sf*D@*(Ug&1@IQWOXhD`tDMhwNP%b8`V- zWm1}o8+^!tZlKhkL4?Q?P9x(V+Upp5(>0$4`rHdbxTUB>T6ZmtJ?w1^sEa>Biv|yJ zT_`*)yz?tXsil8+b-Pf|+bYt6-bXmhH1=0(y=XVLESQ{E=H;v&@w6o0I6L=D zo}Q1Zdg2Lzz|qInPK$c_W}mmj`uoC9MmdjUAG8}SV-KS_mCo=g&)vEeY3O%o!06(v zy`4BvMOk*OOiL%NSjj}`0`Fr&>ueI%`nKvyb+uxrC&6YnaglNrvK>&|WLLJao&-%e zDedc;vY2YZ0M)HYn2U3R)V|Fasg6+Pw+vYPbx&VZyjx7-W<4hcjQ)kYYm4B=|2<8Dt_8A)v*I?HseD|=mE*pBOxtd?qlh%^NzND@gE8ltFa zruZCJX7R(q(usSVEj2~mBro05wl7s#L%+s19S_@fFl!D!ZPxblzM}>>(P3UL)6j1# zC#26F39*<7MD;SFB)+Gi&05!Y*?mK|k*NDA`22c}thuj^+Kkm{<;1BvU1u)GVbFB( z*BKb@VoWmPyqA7LHsd=u5;)Huu{s^kIj;*z+eYdNEg4xY-^|ZF4~KjU#~FY|)wk?q z|1j8ag$H?Y_G{lXzKnIPe7vjUPoek*UIQft*c=gZ#WIUTe|q| zTDOB_=Ix3r%Eu$Ba&2lY#Kfe^KoS4{F$Dhl0W)FPZm{WJ%ADYZYOv*pBCnV zy7_7F1Fr~XqFTwLHma%(IbJYU*;Q#8wg1(4uC-oB=DD8>x3MiKJaiYHyv+k!@R+K1 zx)~dgtK{O{w|s^qQn%R)<&MdMl3L~O?U422en#_`0V-!NPFv%Hg%IPaQb^0Z-Gk;i zt?9O=AO^Gx+v68JnM2&8X9=jKsac0(e>N^tbV?*Gj3BE>WHkb7%DPVhHo&Hn zKcCF1wjW89b1#uLUXHo2NF5>$n5)N}BcC6+d`Gy2F*0c+2p?)ns5z+A1L=5If8#Rhf&6xBDH`C$@y0A;CQT%*>XB*(dK|IxBOP)}q}l 
zEtI+Uh>qPzKX!h2a2mMmD4@ea>vw!_VbCb#6)qEe=s62%wxN@zL*i=uC)b4|#(gwm!4EhY%Bku?Pb{lf!iA4cjg4;r zU3wlPBT^(>3Nkv7SY04nS*To83opymw={wrxHMGOMm*2U9@5PVSCCT(4^hNYvUV~V zg$0gpn}3BcS5@X3SR18t;nPjU(e?E@u31Pq0~?qjX$|*tV-lIG-f$?;(yu+205fk& zm9^h~k)d-rFlMy!ywd~%d0A22fgRW&Pz~^}_|q;wuzFXHw}mjB&d_^;to5~n3nuE^ zpV&`pjLJ|hesVBspI9m2-m}#^8L9my!Jy{JmHggume9(^%G}0kqmCP2amH>MQl(We zy|cAR)}*c9K>i|3L4vU_frv;VMKj{) z;5=XTgRm3Kt9R9LO`R+$>E+)BmTLciKFUnnhQl;@C41Z2fkIry-m3Z&U`J}JSi5$B z(i15IIiXJM`6yPq-^sg`mH2gfO{BJL@j|F8#Y_6r{xDO)9QVXYl_Ly3v)?`{O~WI zt=uvu`ZAr03v(0AoXS*FQ?F9nPmabOX|<^=v}7xCYt6cDO(m?R36swsSzQYJX_F}o z(SXHz8HVsF#0KGr>uTr>G^s_ztfQ;0j@E*kb_@culJJmkei*IIUpZhVqHF==AWr%cLt z83`F*(rRsty`QwrA1S8OJT;ZlK5F>&7jO3MT6JkGGQi=iYdrMNT%P!KkMr_4={9q- zd8|tE&H-)z&!wTWheHSJz{_p2E1yT65DU9>!*kHEvb+cwa}A6L*h~s)D}S^*n+MHU;GcMBkh!4m5ck{%~5CO-*~G)Sepo zEi=X{5zK*0?Sy>=a6SJG5hvxmygX8?T1KKo_Z)@i-F89pPTLqU<8LMj2E4x%o}+9#eq4wck$&bf^RmQ{l9Dnf>Yd>H$=JHrU{C?B zW~@lXJE?Gswhs2Q9u>!BjN6yEoz%8m&m^M zm-=Dnnk<#~;4_J*&h~lB*ZC!M`m(F8epWg7Z%)Z4Q-rejn`Q-tMEjZ+N*mL09tqw< zNydCnB-n{xP9Mbmw$o-@-96e9pyI)R*Zi^^q5Ef_?h?>ICm=Ns?x@e-dB6(_^dBPW zKd=A8fn=s1whtB#-X!6mOl+pQJ#kV_{)B&5?(DNJy4;x;;jkt+oe(PNLO}CT+()as zGr{nN?pU$p|4=CZd5iL&9B3(j=%kE-lKjj}w7&0v7!#Ayzg-RUnZFqIOUp$d(jD{4 z1WdhsddR5S_*h%&P(E^&yovsSzik^8T}P zb3=6fLp>8;vyiC4jPfVkxsH-gFNO)64D6(bPdYPS<;Q11Es35j)qKl*6&4UMwCN}m zQBXivK9*tov4%%2(_*USk2yyS1KMMYJVq1EIv}V2v$TH3mvR}cax^b5G)$3&rI>H~ zwCjQ}ZW!>biP2*;j2NPZ%Y&F$nF{ekh!THz(Mv%!ZM6Zfs~L?7H9Ns3I?P$V7K z*mzH5MP+Glf9eqVoPF)5*~cg=L7`+0XVOyWO-} zaBPeh^s1oSMfzaS>DXHQ`RYxo5=GsZ6C?4n>S`X=BAbvfMa;n%qS*%Ws%joap3709 zS@97;w!FlVkq_&zdQ-gENDv)NA|*^zFgY$XP2vUqAC5%itG5_qyvxW$yTr6(v@osw z==W6Frg)(GsY@j%#y&W;<>UIHLC>|Mi*SWw_<8;iumcn0lkJSi-d9H z&KAiBh_!)Tt+zQ7#=dkU#k7@=OSkjpP3j@Y=9a38&W9 z{P%^cqrIuU>0BT7ku0M)mCRvJhn10-E)~}1Hi;^?Jft#syzar5H|~}yd{|9$vvIQ; zdKmj&mmWsIvP0d+x!m!-O>w{QyZP&UTf9vvdUYU0mV7S9DUBiC{)akw&gJ}D@%4;l zZLthhr1v4-o^H)lCLJtn-AVq0&wAeDD0Ersr9qs?By>3p;V{fat;8kA!&&|5S0S~Q 
zS|7e3%=i{Km|)a3(Dg=c!vw-e=FC)9Lg6(A_D`k~_`lbBVKqa)yp1>q@cD^has9%=g8E9h$v={VA?gFn zEb9MVkV!h*2>DRTslv+Ku!seBb|NI3<08;V>P7q0i^kj#0l{WfZIV!070-5~enup# zh_?n-=X<(eTapE3Gi)K-T_D7i!UyZg>JjfiZEHK+`ZCs48lO3}^?aDCP?%CyIj6$W z4%)d`^5lMN<%v9nmY!~LQ3w_$;y~(@w?9$?Yrn9-w#c@_lk^OP&)PEYb^VyldJ!Tg zKmxNg5~IF1%yo7lxU$&v&`cTSltD9Am^3_%P6&alOYSx1;pOF)pFYR(Nqw)mbZWK~ zF7kJ?8?QcD^t3H7{gRl6I)311^U(D{XOSb#5pbLBc`UB#=daUGqR$IcZY7PZW7=sH z2bh!UN6vTkZ>Zu2JQi2%uJleZ~4f5B39Wkce&+g5VIUp%h_w&@4!J5$TM3G{N&oBf` zO|>yz#=bVxeoHCz3YNkGc?4^?B7z`+hCYZqAlH#5k>^GGA8t5Xh^jz3!FFT#fTe{b z4z*IGB_pMu!ANHsaMvK}YBfJ$mJ^cWl`}bS#R+D=QxCd*i0E+C;(j4EJ~Cq(QVM$B z#F{cjN!HhEIO8Pr@72)O&DdiK4tX)GN0y9o!z-?Ov6-Rpzxxv%ONV)gb!>@1c2j%- zsTsWx!f;oxQ6#@1?sgR-O@vp~*49>4nI>eBgJMA-%5Th3TUuJ$zB;kZ5;q=qy&kG< zgoYOCy`HEKFsJNYb~^4x+s-dwrTJ%!`f~gvFp3Mr?MZXC+&OR4b!NM@lb-=wS6-iN zIGNQ=qv-j;OYcL?9I8KaLjn4KlE1&tip-UhZs9F2u{ZX1$l&@YoGh0a2tvwD7fXww z@Q#+Aj-J1cV%9MU*v8$oSzyE2&Upj+HR@Uu!mSyJ&u+?5=h^4O(Em=9BuPU{bNNy0 zvOQB_!RWXu�QII=Q>fJgFah!28(D&xp&mh$<^i#y4OQtDh~(o$NH%3}lj?Gvq znyKv1yO^(Fr(Z`aPr`PaE)ryhxgO}9frqm-c*O`fYf+(Xh{6Ez zmn_bLMtp#}JAjB3Shvnj!>=}cIa%V6=af$hmA<}f{cP)My$r%9RH}8& znXO#%#oeQJ4sRfkxM67MbG9xYFT_%hV@-#mI8WltoT_`dt+|m=`won3YP_Jwem6(j zHDje+k+t!vZPC{nt?{~@_ww;8O`E*pgUd!>z>rzl(sbo9wmBRH16!hUG{&!2M8JKs zGOD{w);zDUj+^VEH6Brbr>6Eb_~wa0h=k08BiZ~c=;N#8;}(sa3>TKyVR`8$N!5OH zm%aSSMkkfrkyJD)(3I|3I|uR@84@eoF74eglG9V?=EUma$}pFnG(GqIccNN@8nW(7t!}EQ3yFSE|tff71Eai5>+7 zGWv^?4a;Rf5i*25dHvDb1y_+vEeR!#Xf@Okn&kEJ;3d~Nx3T9K zYT09l5J)9yMHnV=_iCXAY*7f2p9ge(D$&J_sXtows<}%?7Wl>i9VN)vPjg5(v z>+=-Lgf&=w^L2mo%7Kfxl5xnY*TAk0-Nv<9^ZAi_-D8_$5F(Ke`+W?2E!Hvd_W6q` zs3q3@PE%T$nH?il6;2_9Y}(iP!KmE*o&<=?XI766f$#IyU&cP+QJQXO&HagkCVbOf zXKpU37X$$>)QYAT>mo6iY6u=i4^XApQ5WHk#GQRT$!!p_%0o9ZF+)~>q4JlZM!3RH z7o}u<$d4{)%xvlQ%*;Es&-lf#j;{VKnfD1La+(TpBuFqpcfoJDHFfYHwE4tR|CaZ5 zi|ndV|N5J{o|lPm`|SYPRkb*Uw78ISvEqv;O z2pRM969Y3$@$(GK>)3?_&CoCtv(8bBh>lQYf%9?T@(wa8PLXQ4b(6I<|91?p`D6{r zqQ5yAJRX=MqV`t2`mtmXi@_(yga8n_-xqX?9hQVhJiNRI{HRl*u;qfww?EPS+ZTtp 
zREFi{dHehO(c1q2T2sZ7ADJKskMO>_zCAlUVL_&Zup^VBKX(^R5cBx^23Rn*i|eS* ze_HIvi&N{$#yjI4-KZhIB{0=>`f`l|oDX-v_y`oIi0U;@I0Y+q%m==VL{oYB?GttV z0b`D_p0E4ov?cMZxegj;o_j@Ulyk-|d#xnA=ZJn$eH&lQdpk_Cv19EQ%f{IeO){h1K^TqF- z;`_IkmXv(d_1Zgfo4Xi844;lV#LLmd%W-sB@l3Dl7R)1Wh8S5r92YNaZ?i=`Zr+qb z2DjYC3$=|%ly)gQW`H zO%4R_2T!ktK-5K^S=xtVRhpLCwanct{^Q=b?wNkBxVJw7csM&STPrJ2*d!5dI%{t_ zTc0eG$q_HdhF`JUcc~*c>~Y=kHoN^a>S$cFaqMid7Ab_7PTFPtDFnM%S=_EXBQ6C! zym?01syLwEILJF7r&2YUp9ZmQx_PG}f9^Yr`XJmY<8}4NTnO(dM{`L{TXyf!xcK$LLGIg`vd;+LN!>WNf&yV>+6G}V<>DMLnQ1TXN7 zhza!FcmA?o&E$R=q&H;RIJ;zx1 z3yV(F6yd-1H#bD=wm-p2dyOq%HtcGe>;fW{HFbk?Q#uosc@}X+&b+*1{5z+ugLgsC zHNU!VofM7pHLkLeE5LEAJb?oXp%M09aT5M7E3gJn2K1T{iAgbG0d7wuEe&Jj(VReL z&e(9u;D3Re0P5{(uKyJ`ap5nC7dM_jxD~jerH(KFB!r@J=bXY!J)DOU5s077S|CT~ zxDG-cEu;&M0)+zfn~qxJPH!<9^osjeFLcrgdj(ks$tfV<--$c%z2c~VpjaJ)5P&Ff z6d*x+Qt&pyMJQ~(2Rm%J1|UkV+nsfMFc{E)J1#!*Kh5Y@J#X&|QYL*C^G!A&r?ymK zcx3WC(>>aD)pag!Yark!dN%pK)97H(d%9|5{P{NxClg(#=zuE`1Z|2F#^B7-AxWeD zZa}Qm^XDxr|6RuZ-R{ZYIkm+j1DIEC32WH0Va%#iDVG(-)4h)q&nbN|IRqt!hOa;i~&W*pS5}mrv=tGh>-Oq30?hC~~e7k?{QD3@3 zGN<-gmh1HUl}~2m&&vXC6E%QrW?~$DWzfzxL?A!OlE5HqC|Ny+^^-z+%-65zh!_!2 zFLkcA1gP18bZVM|f?QsU7S$Bb2FpHp%0m>!|2=2StuOn3oHNdX64u3P`xR<%ehJ`; z8qnGPzzGdory4}otFnm2ENhE+$+sm?Ryi4pGs<*+)TZoA6=|G4C_)^Vu?}@(NlCvn*!J! 
z1og^y-c_0|+MNxU;wHjR4-b4J=`bR)kXg{8f+C{nOq1HAou6h8cRxP@O?<_wQD<#o zYUAN{({c2qd>H}&9>AHTh){)C8+-H#xGR@@cpmq7SG_4FV8e9&qqD6jC;Z>DdtxBn z?r3N_S0`BsVi<&|y+~oK=Po(}ZTCw+d@QcW3FI3-`~gO6=hZvA^T1Rl9pwo;Bh_O@ z&$pYc6!5mUd+?@I7a`1W+M?MF0@9@4o;+C3ca7JDD1m*AvVR=>FR8ajo&W398&fM= zqJ7F?VeEq)__%=W^@gvaHOw72)APt)ug>b}=&Gy>;4*>CN@ndBeOU}8hy+#jS=XJb z^0I0an9jziEnp%McS9dvF8r2AUboCkKjDU)@jWvyv^LO}h-IQNc{5?lJ%+|Q zC$|BZw>PwS0XbO$g!v5m`t!d%wB22d3fYihAXTHG%T|`x_&@rN1i_)dtpD8wyV}O%0QG4O5^NJVim-S zbHpp1ywCZ%TmyGm(mGo}_1C7$X0on2v%;L3`4*7&LpQ1cH4tRKY`U-sApJ zDTza(NlIP!TP|%L9>5DIV@j#_pu}ayldyvsWC{@0P8*^I|Is3{ug;G_{cqW}+D2ng zzyzmy1GZZDMlWV6h1x*K^^qVueXO?{$F~@`7|Lew27B z3vT8t>((gU+VKG(Z{GzDG0Q{D{d z`;>xjqD<)i%V*sQ*03JA2!W-*RR`}8ir_`f8?e7UNu_%ZyN@D7`H2SPIQ};x+5ZyS zUNCSiV;FE3BE6CUUnOnSE#$j3hU>5d-S?UY2NVf@{v$AWcM~Y!TL1C-zj**ym;e5i z&y731d3<+rMB$x9qzCL^CZM3yqyEoh?f=rke~Zcg{~Y+2!~gbnSE1o-xL#}LPS7tI z@fGPxr|<1J_TK$dxj!)mDK#u=&pB5>i>tv{U06p&+dC+ zlWOh~+J%3L2?`Ob?A;6dE9m{~ZuFsibbf6Oe*J5j;h*e7%Ww=&LBFc)?cE4b{8?Bp z1MHuXS)%DTJ>b9uXQ@$m7Mfv50DD&eyQI~bFE%*3z5f&*z#c2CyvJiV&hwSsw2avp zFxwZvpIm-RQ@Xok?X*|aP6rxQ5BWhk94y`CqT*?m`Jy;`R`|pC(3`Em5buuUmh1Y_ zD_7<8s|tNAt5$v351No=fG>b>_}PmoB`&l~V0I0j9ZI-}wT2NeeLcaR^xsXrBPHuf^yYQ3$)_G_>82uq#8vs@QG zD1EwKV>R(+?XH!M(!K@#VgyE3ma1aoPP2c|+A$s)C}yFhOgEO4Sx}>P6<{;Asf{i0 z8G~W^St^Np<5Zq_EXU7gqt3>>juidENq*WLDi%Rq++Rzos1XE0G z4qd0cI7m~aC{T0osmlXcVx%06v(qCOqcy%?M6dBSPp6H_z9qO#7ef;cf_64=zB{uC zt^K=+CdR`Vf3SY+iyMRx_!*ECIM=ftH02+bGwD_u2QWxm*{_AztvcEJWa|N9$iu{~kT6AogeFm^$p?3@1z6(|4*50is-T+#c zQMR6isII}0TXI&RY_YACocwKyDBsyo%;5ahC|(je!CkW#lrC9C|88dy(%nwaH}NSR zVdIP^=M_3gCzrH4{I#u~xjMeSJ@iF*A$j$^enWlP{mzKNimW0cBDfE|r8x`L_bA@m zoN00KTOuIT%Zx*Bv|D5KYQCOnVWcG_{m3u)@>$It`EdbV)T4RYc`NitMJWE*@s?k- z^HQ<#p#98NWbJP0{cHx`=s28Ne9D=8!)zk1>!#^qh51hdBV?#XB}OIJI7>@W2@)A| zDek76kOjd>{N#`n$5-FG+y2nf!t&38IRT-8-Mh>BwG_E+7!mOX2n2>4L6}9r5<_tjI`(g9$g_tXQa9H130<;woQVA#?^0;Wa4+VN!OjdU!ji{u4b-DJ z#B#ze#NMGi#7d%64rn{}^-l6QsyH1;>>FEle}31xH>IVa{=t}-5J>#ItAeMQ%IkcN 
zKxyOeXDX*jDJuFGX(H?`%}2J;fviWio=SxYCVP5OG?7=+w4BJ-TgkYm)r6puTO8*e ziwt%bsUY_kPI}P3&Y`aM>-K#OHiXy~6oc<7O8+?R;AbN1E7HLCyR-_mWiRE?%7odW$;1-dxQ+TAfAO ztEapX*FS$?HeT`1_FHnc`SS85B}Ib9QnT}0SETxgXM;gz=+<{>@%eNA_|VD|vW7&e zv^?JsN4IrRdhwN$vg%vj=`k!AwDVZ1RM76u8u+(oXU%--p3-YeqB?)GPJnE63ZzWi zyG}lw4Y%TwZw zCwylEim!^}G>wRKquM&KZpW%ec_nr(Z@IYn8|)4x8uR8oE>p|~=CGR20uRBh^I8*T z2K*Ekw++&Y8SaP&V;dzS)JHU8I@t3p(gXIMisBHP;F!^SWg`L4Yv1M&UTnFi7l%L( zn7^Hk9`E+aG zx!w4EGsDfYa>Kbd+2_4k>h)L2|2Q2T8osM=x8Hv@G$Q)v;eM|8a@S?x{XaI0MCy{z zKNDfsPQX}CG4bb)=7dIaE^N9;-y`VmgP;5p@)i~jX9R+UQHhX-!)TSMx(F^8b?~Q^ zi>Xb|#Swqw1mmIMo9>Vd(W|LV*gsd3+CrrzYIYqWk@5IDk{$YCsoggDxWPWnpxSxU zb0CAQYrpCNiVKe&@tK^p?e6QnaqsgV1|Fx`WViE-hqCnSc@DeTP+5s8=y>-1ty=vH zpCj;zZ%;(V_O8BxtO7{irwqToO>aIsM3thw*}8VP7CU0J?9GNE?~6#OSpLn=o(?B1 za1%d&YzxNx=d>{JALH)j>}lQNX_qlTO94yGy=^MC{QyGnwj}?{1CUrl`NcOwhw>8j z&RYT7NHR~s0me_cyN7i-C2h5^cS`t=FyOyX zclY*h{Lo|1IV6w0J~V*_MgflCggy^pXJioSnUI9hb8>R#(k*&Dc(V=QY RfK#AIODKpJz5D3%UjPtu=T`s# diff --git a/ui/images/sample-wizard/step2.png b/ui/images/sample-wizard/step2.png deleted file mode 100644 index 955ee7eb27610365c0195acc272d7b1457a1e829..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 42277 zcmbTdV|Zmv`!CoX+qP{x9otE#!`(4=c5J6(+fF*Ropfy5?%3AkdH(NvX69Vynh&!- z>{Ys1chy>jU#PO86e1iR+?Ow35M`vrRlj@zoBZlqB#XNg4y#JA4}uA{^cM>UYC zqlTV5hS~4>mnnuFcu`!WN*gI%FD~k%)-XZ#>V*Bg3-av#?io)(Z+%NzZ}F( z9E|KOY#l8?HYERWG%y4?ISP_}YWhDW0Bsc%{ug5#hyOV0(`C%A2DZ$sOf1Ym;6HZ# z+uFfV)#U$HEYQv{wZ}HhF2G-&r zBPXDVjiZdXAlYXPld**{9~YYlrvwWdH!CZTgapgyzX+$82#Xk(q&N=;2P+H9e|h|$ zXeBv#d05#cSVTE_I9XXgpKM|*5+al=5xH*e-4aNJZuAF8Jvkade^NWxNIpg5E44(`&%FGQXu^Y(6+4(*-)y!MXIr+VJBoV`(1je64 zx~fME{htJkza_W`3WP|E4vrtU->EadHrzj16eOZ5cyKH*W(g=bq|Ypb2v125EI7zG zILD}D`xV;+`X5pR+t{AaueA!sQbqn^Q#Et8%Oora0a#N1@cRm>FiSMZ-i}J(A5{qT zbr13>obO*rP|XKlLRhviB#Zp>*J}`w(#HpAk@C2d?fy|`$afYuW5qTE9ve8lUxR2i zZa}Ub@Rl9&CytF{zLW4V;y*Uv#$1{^^9AC&Qvt#dmxhoFQcl^C(4d529O= zc&gqY#PE=iZzX>J@Cbn?LH2zP} 
zPc8`F4x#g*Yu6T;w!dV(lzZ}~fib$FTTE+Vs`lr#?Cu!-vC(KyQ7LizxbbRZZbZQ2 z_Z}L%cHJ3+Q@d!0oc}VZABi;A?-i1AeCt#GNVV%z;z8zXEj4c z%YTr=T&-p<^Vwwunkp}}Ojz^x$CRE`ToF`q4I&|e_JCpM`T`I_C?UyaDjMTaJ`!&l zD&w*y3f!Y<-5r)1zVCdP+aZ66o!S7`>Gu)CpC7EwVbnR|Y_cS|=1om?OJfQ1;;?-f zfH^lSWV=6a->h#HTu0$23UVl4ev*fpev`Y;J@}n`l=%R3tnxF?P|Uw-FajpEUs-~) z{V6WgwxkKq(twS@M#C(G$^B&?_PYw+VoLSdT^4KT{RAgHkgcJbH9WDLuLo(0v7BqF z@ZW<%nr_R0mO%2GhI5}7y=66ohJ}-a7QFc(8tR#Dx zd>wwmNYAZl24GXIPazzu##vhWr7BBOI7W4lEptmuPph)}17;5o7p7*FhW?bbHaVdC zyF;swgRU>j3)^0K$U1CEZ7NM&R%ARvb~m6c@eOtcLUn1wcLedlcnz>Z+Gw6O;J+;A zY(t-GzJ0DUNp3#Fbdt;!KZ7(y^(FzVMvUf^9)WSS{;iGsdtzuyZr(#6{twRqvvqmU zE(A2k21f{QRz4=E2yQuL-v}dBChV~**@{NPl)>k%1GUBKwjU3?uqTG=-l zZeQxKX!rEIomR6oJG>A5dC7wjTSC1^vRx=iMa$;lejh}PU5ry-J$Mn+|@>;abA=Vl^QwNV+1l%wPUlHoH6Y19^~(fDc^T zu}y;VV7SjSk5T@+PTjO&{^O}Ed<-Gc2{PlY=1BNSMOu)g?9U-y>Wq$w^65|-z1?e$ zjZ%I!(xKxsO-*9!0%^)79NKvAd4c^K2lVh0d3tVjV?gl@HhWo85K}fr&h+Yco66A+ zLmi>OOyF<{#~#AbG>o^@R!-T*VP^CKvz~y@KmkHW&trd31;D#)e;PW; z`A+ElYUhFx3%=m^;QqyviL*RvUcXvF{%j-H^J%^bE=<-U4VsK)Y7?|f*rY&^e$M&_ zJUBWy5NdEx(#3X~-R)4b>zxTeVoaa94z+X9SxEA3jL!uO7Ib0CI6-@7E(3HKbt*|@ zT~HN-+^wkL|CF@P^`_tQG?OX*uB-A(TS=DWlp}py6uQ9nUX%Xbr9lJ>d1Xw6l+65D zTgmvcW|F8Bk&IK`08T$eTZUt;0nfy&6faUko1EXed)EVJyIRdEdUbJ=k@oqdIs?qh zriAN7JaT|c2H7l02`SuX8-b@fvrBfQWi3U=#=aQw=7#<4l@jBv@9MTE?v5Up+*|+maz-wxKIUc%9@>t&%1CcRhFS-M^7^93p5(ex#OCYxpGGM1#bb8gs+e-;ZUp>rr6_LqIAL404rCp-}Y= z?g5%POm9PCwz`=+H<~Y$E9L35T3ydikK%P4pBFcuEFoTHu#+Z*;|91&8h(qJg1 z3!N=SZ;qPMgmi(LJR7!yrOp9qPKTb2;ySvUKIN*=aDili`&=&$aq;J!pB+eHWELVX zCp%C8t+)g+6rgH}It5w_fxqPmgNUv5?t&^lLYwK7Y2oR?-!HMUJGWD(Uwnok&K^0K zqtRd_1V|OGRuOi*EPPwd8pL1V{nZU04Eu_Xq(Wh0Egxg7i+LT9_Y-T+oUA48yGn*-Q=3br z9hq-S4DaBgFrnkF8ulmL5r8M_(&BoIQy!BA?WNiSK?ULMS2QFp7{0 zOZGkVzJTI#g@G0YMPa@V^SUeUdc^1Lq8HX^#u}l*g4L_>Tv%azyuWZCIyx(b#hHVj z;dZuW)l_M-e3iXO;d$!@#AYV{^78R!^Pn`?_~yDb zES2Y}OGmih$C@>k@5$|qi1h^<*%?aC-p(C^!!x(re9Edpdnt;LpwoC;7ez-7dt!L` zW;QAF?HPMUqnU)Ur3lmKA=S=TRYB?g+wt?cAp3zLvabY5E6_h12zh{CTba=w`fynjm0{N 
zg-w?j3b~lMgWu3ojcle(5RQ_o3s-QeEs-@f%i^1;phT>dJmRni8v;RExK#on8;s%0 zwZ8XwQmA#>$O6eMO?pn!EhKU;P{Ydg%P&ILED#=ZO$$oFw#CTg2IM!+UG>Rz)_+kA!S$ ztNPMxsbGRxOiqpiQFv@7YsC)OOd|A;6T!>m!BlH?#@@q;ctO;jcV|kEQyF0L`WtK}s!tQPdy(butAM~Zd@3Ko`B`g$DAq9z1Ph@2aT77;Os5PDmIX?E4 zTp7(|Fd1;CE9mkAIq=YT^G4|^)5^K*pH9cy7T*sq8t&50gwLsmr|}g&+z)rP)CQ>s z2Az$l!kxj!XktS`yLT3#9d#=094NW%#g7k;5wdk=4!-3oJEDL)3g_fGfyS|-fVXKY zf9t8(oLNoolBZ++WRZ{i&O>s8xsg@H0CKpTNDVzk^Fa(Dam9L&!Q+3J{U@1>M7mR^W%oUtSnKGqoNPN!o~TNecQ$T7Z4vW%Rl zy>X$XZ(13$g``wI&^4SfO0ass=!cYB6voZIYGo~o(H5fQY5J?Go8oxrV2Ar*V_z^X zuQir1MQUdr@*VZ#RV&g@tAXQBjkCti4_UGToUbp~jku)qk$143!E-Sxo?;Ev&v1a% z%Xqs}2V%V;=}R2}CS9fjms7W4l~D1WozN9N)r$N#ve;s|%A)21>kB_4;jWL$$G<~; zOFdKOf(_RfE}L#`;&k|26trX%zOIuo-@3}?@1{`VEL@wG2E?1U%M#knjUD>J>d%?jFe{x^W zwPQSB!o;;3vi@uGu^5x@9s;8&o~U23n@S=!2{MQD47UQENz1n(|*~Ack9= zjhi+`TK~>T(#{9h27UcgEbdsh?6nUC0#+^oOu^Stu!oo{n<@mYM<>j5HXEd@2R}eG zLw)nYX%1nKpYgLUe%vtVR(8OK2r96+9UEaZjg;tg@A4Wabhc5d`L42tVAu~ZVy_r? z+T#bbUPC)fA8(`lJR?=p4GBSmuCdaD^bF?1__nHe1*yjbEAZWN%CN5BvAMW4V+`Gb z=5ApYQAcU9YF+F7+Dj%r7{Wd})zwkZnB*7yPp>Mh=AIWf7g;a;;LcF*Z3}Yvyc}02#P@#@ z2UJRA3NpdFV$g^?9xg*Kr@D_;K1zb?JSjP^ z;pHYU5?BzdWSqV!D+FM@?*r)YV+q(fSc7h9+ZN^h4fGC_`M%>%R6WzscA*jCR?gzT zZyq-d6h;9;?U?;l-6&ekpBBFUNwEA2te zHPI{`67?-2>O66LFRa6c%l{uZ((WOZGm<^~;|Ifj$EG7p7zekJR)K0S*a zNd^B3!ET9o;6Iv(9abeC2pvh7%7Aj&M$NP3E%InjohDCJnt7=A;W!)GeSNul`;e)Wv!f)#q}PY6E@r}2C9JEwT6vvz{aE_dSB-2pgl{#!+U3FANh-bC+VoKTki zE~mm$mcuBhlO{^{yiz~IbY%bNfq_YXKldf9UA~ALRpFI56?Sfuqx3VK++tO@ju*Zz zHysQlFe0yrewmpcUl`mJdYH=?DmoZ++2ZW(LSgRZwVCbt5j! 
z%~!oRuB%0_wYg5z%TXpuvhxR^?^O+@346_-oT#E1?g8^=N1?9wTK?ShYli$X!D+j( z{PB%qt@Q%Sl)db0cxZc@?| z>1LF7vEJSbb0~bDU3BT7B8QJ&EXivoO$R80++bQL^WD1&Af&Rze6J%!&%pGf+1+7$ zL_s^G@utbqMGkmb9yucyyd;r(daVU-VvxK)3kJ)G5)Vna#cmu3t)n{UEW4f?c%;o( z!CiZR9(&qw(+taSDLHZ0G=Dr4;tgIFj&sy62vqGI@x?v+p2nQFY)31kLIeUb!5o6%n$6ySYU;2}oUUo@*%9TX4h;$83LdLF2t4*V2zOmnrm9iLWFwb9ju%Ozx7a zpHthcG@e9vxuFemd2%#vyF%4!O(s~-pw~Mic&(p?BAcq_^dD5LZ#21b8 zmJJJ56`8-M?UODCTbfqU=?kr2JUx-p3(X$v@n?;d=g;I*@?twCWL_oD98DXJEbK6& z3;1|Q4_Go{v}8^`nq?PHRMgdmlX+FG(yHvs5bfgp$|H4wZYOye+yuzOhu@PMVHW5CgP0iV=D%B$Oq*oBJ@wF+q7|v`cA06VF zf@t3rW$}q2B$a1)W=7iqDwzmemHo zspkw}O{VTx4TuUWs*yx~%)L z?P84Sb!CVmpb?dehwHF=xaNp_pN;kv0R{=gObH#8+2Xa3VDoUnAM#|XHM5^f#DbTq zLn;}-CixwNvVLHaL002>Twl`VX^i{yba#+J*oG_=(@;H&E`>TedgXU-CKi5!%fw@l5x=Bv7g$jaTI%fr}Sb%Z1ba~lgrA}ze_iPlNF8c zIkgvKbOATaYH80>o1n}%N6Osk)yjj-@XSWRj+0~fc4a*>Rji=J!`*-^&=h+u98Y!$ zIhMy;2iJWZbXb6J|M7q={Pq-YB!H^`Ul_IMdrgu;JJeTMd$yBOwu?w`P`zbUCRN69 zZ`Dh*o{)$&=rQL?8A<|wZ<*6yo*DPqHl+j}UX6{S#dS7&ecbaeUF zl{O=6Et$J=-&)BoUV8n9-b2mz)dJO$r>I8aKi`Jx4Tkm8{5|GkiV+RvlXFp{E%OB# z_d7SYe2odH*N2FZVz474BhPDigI$CI+rrwgPE$(L+ykZ=lB5a0@6%#j7i!%Eg1xsY zEhKBszm$OgB5MRp?B0aS(xDqcyEs|*w)7&duEpsX8hlw{tge=(IBqUfFA1XnG@w7`{8AF_J8i|SB zNZ_M39OdF9@Yj@-@x8shC#u$b<%9ox^LL}=$=QWjJRJF9>?&<114$uMH2ZP!HVGdl z8^4>M%#;Q%+=BssXd;?IKBzQ5$BjT7=lMFv%d=Y|C*sT+Sf zj^r%kp6{7hE~`{V$ZqG(tkqsZtjqG+Y?L^3J;P~B)0l7IAvGbT0d#BI^S1jbM@bhe!Lm8NM!6<)GntpgZZwHz;L$#BNM;W_(&_W3ENxNSiw zHLUG`_RRE-DqC&s)90==(%kX6hVPL=TI!$+tgX)Uv@~g@#SNd5601FJw##a_z(O5w zf#Q3$_v$IW1{jio?y{VuoUenYR* z9+T++QAR9wm*Ps5)%l^&y)zz!l(cfO9sQv*UI4=ZtOadhV#DDCfMO^5)aPULBHQOa z^zNdoQWg4C*wb*Ynp7nqpDI#LNijm$o(&l+gCAgC_^ar8sZQ|_fPe_US$}dlt0eHc zty30gxV#ZC_|wO$j#R*YY7o?wmPRmn2ro-t+4=C~GJ5_wse6lNImt+WE$kgZqlcNa3e0M<)FPJhgo-em3vQfG^N1I@G)>Nht>X`jy4w@%0|SzyuyZENBPS7@gS}laa6$@9z%I zr`VCtWv%ydIAgu!{un*Mh;4?XCfG%?lxBr;i z9};ImC`Mb3jZUo%jG^ycuAg)fN%qF7gGD6+9jI0P_JcdH-?3)s^+vBEjK}h#{Sxy zmaQ>yJaq~8;_b&#*fCAqvE=+k0km^wmReOY&1}vvq4x*lrRB?wAx`ArXLiI>Xw!=O 
z4Hj}B7duy7JeyHD*6g)r8bg_0RTz2ssC73ben^ww1w0MK}cP z@Dt!@|164@;r+!Lc5)fKR|}x{(Ty(`;`S(~es(u%cq3>%kK6)o-2)t&P$5L^WS*!2 zAdJhW=#4fC4xY=jKt`FHXZ z+mvWB37Z#79~TYTi_unDy4g_w!`CW&=s@=R?boj6T7&1?)D z6uhjYbOKA(Y>K$Ttp&AM_u-T?19c>6_Yc6 zN}dldDH;I8C}KzUWjm$q_f89gYq1Kb`v1Jk?KhagGktiBADVsc$1qf3!3L^PN3HtM zXco;ba+%KtA%EqhN~CAPO61e&%+qFf5_Lac4oB$?%!6a$k@iFZiu91i8juRn7IXi! z&5!m>jJ&>UMZ6aYHL+rxoJ2HlX2$R~A+9TbOvrrRaica<0#2&Zn`msj)Z=FL}3r9|4WE?4y-(W4YF4+LId9 zO;tp%i#oC1FsyXF8@B4w@p*PW-(=J2q{}QA=xA?>Zm&0U^7|QbOF+ZFmzd2tiICWZuR;5j+|G}ag3{TfIjx$s$YtXYuC}xN-@|x zeBVEo3PiKGy~u5l0zC}lBanIZvwwEH&tp5++SC(!dZVvsmW|WdV-Pjd=1+ey0eK&A z@Hn2{KAC35MvT=(ri{~>e+1eSk)BZUqQ|PrHh9?#xH0)UF7UJbUY`z;lz@mP;Bvg! zRuhnt{Y7RV?)rF2do-nXI4w{U;`Kgj%+Vc4GCCuL(4wpF)RO1^Tu>uSDCo6kym2{a zTqAAHnjRYR>lKq%|6w8uuW3A4WVq!aYOIR;ECL?`599A9Hp9_5bO;-jAmU*Odd-h9 zLF7$~0zb?7h|qk}1`&}SDaEYtP-8$$v2v-I60RIn;ek9MICmTh``^bSf%twtDn?8O zDF0Zmi*V^vYq!uJL-{4!V)2TH*xILaF^|RMEoSCIKDU>~4~0M1^^cbTi@@d*#6u_i zMyAf(os89OB@Qy0UGsEXv@a5gt^08Q2Ev zid3|r6{6Eq!hkDs8u|ow`0>UbglyA|=erCKQ;LAO<{M(@i0?K-yNl%`IkUz^NK-10FCI@pQ1)zs;JnB896oJx~O` zJp3LeCn~rYV&oyYz&c$Db!dMX=4P#71-;)$w8*DvD&H=60J;aJ_I}ldmHCmPez-qD zPBDReZtq4m1vmvLC)@<{`SFGHW~Zj6Rx3p+;<(l4U<1i&1t`fl9(&qsu;Cy!&z7LQ zbkBQ!)(saRAze#0p(6G$GF1OySTlr4b{MV}=6Oa+;bvDbm< z(UJ;nO+c^ENTq;JjjOHkl3#vN+;0AG3j6~cZrMBmJ}w8V(W555i@?)mZRby!jQ(Di28H61 zdvbxZv2=btxwp7$OjcO4{-s`4w0rmNq9blP{4=&C4GTVJR{*&8Ge7cU;Q*f5?9f3K zya3$DhBcfVS8_+fT|gQ!J(IS=jKe)EnjLS&I;iPd?rlPtdJikzLD9-V(Zc@bE{A1i z*JL>uk0#Z}%4VhHG+?C}#Q@|57o1xjC+>GoGmb8@frxY^Cl~g}ayWyZ4qqjMXzPjP zm9oYrz%^tfK^ah3U?$e-eg1mKS-TabGrm=*HY%7&9W!*UB>+7aI-NsIK=k?-+y%H4 z$4)`(J@{_)UH0**N#`};8mhwL@ZCsY#Ik95(JO5|$093+)o|FKH%IRC(0AUv_T7=f zR(nkXAVOU(EF zId;`&&8Ot{IK1maLpsJsA@kr2xpS@2YI7}BZA*Ehke;}`A?i;VTiTi4DHV6c>s&qqj~Dhx@i<9qT>I&a9;H^6K@Gb)k0HCT&dqCkHTiIIbbG3-O(A{x`yv}})h~~8cJC-R#d8HC zrF@0DEAh!s_u(~>=i>#rLPYhm&|W0XFWQ=OzTo3cJ&ZDgaS3hfTVVLX>pOy z+dJ;@$~s$=3S!5)^2ca-?3rMvES-Se?3Ri4X4v+l%mG-2(#_1BxzoTcs#tTSK=bFN 
zn4xD$!WZYN2o-F(($AM#Vn|>h=AGCsnV3Hq;AvB;%fnQ{TD>nA;EfDtG;u3~S3xVp z_1xQZzF^sq?bC#=I9Fdyf4J$fOH87CJ&A1`f#XZfOSuzV5U znNZ+!iSfE%Q@60V7!L6Y5Wd<1>Ax=ST^$L(JIe19Hy^|!vQtM1IK8udUf5~x=vCWU z^ltCGw0Bco+v>kD^#faV*n**H$><0^5LJ_!%?~O{NYa?&`sO-)J#bk%FqWZj_8QEf*N9UgQOs|?L zHe0=q4Gc&&_pu{y!oF5-vxv?(^|% z6@q);tGKj#T|T8yLt!R=XpnnfDzLxqa3OOwO&(mx!Ob0?{na~6!{=E;O)Xxv$LHA0 z#m|UzGm-L!q>d`86}W=KbL^Sdw*MsXoQR#}3$!Ikji1R$v%Nyz`Zt7^GFDi00Lu7| zc6X6-5Mq$Hb2pTTn`&I>jH=3>s0#5r6UR^kYlhqnIN(@fEFd8*DTZeOvPv{hHP5>tM4WgtX>nWmS+P=LCmU}-X@d0Mnib?6GH04 zouTtKh2<;dp|EA#T5V5XF{-xk!RrdH<#6Lj7E6pIoH$0iMh=E%g;GwJ zmdt;39Q4x>%IukQuX$3YOCG*1nsjz<8v`Q8%oZO4p`P`as*qrB1AwU6NJzKz70q;U zG+g_yeG|f~I9v~eC#Rq98{AvIuh~*rqzTgd@dHs1gM?^X$x?M^vRWo*?PIhkRBRky z2(YpTFScn;_U!V@xG&19%2epy`p`mm3Gr=7&G##)@?*V_zBBo~mpy164J`7l^(Je6 zkH+EZo4Hrublwjhhw`ASTu{V`cfK0gSe_k#D%~rgg%wE4P9yl3A0Yg^^zn2;%zjTa zw(h|4U=~15tDVESvf<;P5%Bx!2GsO}fh&xfp{4X`FnrP?t-k90B7fCXfwrPpz@z4N z+@}*p^Xb9^fJj8~w~_Vptp4Pf(~JqDOx=?9#lV0x1gz9)yj*56uR&+rWkV?bjSh%i zq>?kegMrT>x2JGvJOcq&anp3(T+)4SSXNfH+nAaC&dF*@XHO%^*l}DDZH}JuV|wyj2dZJtJe~-EfQF=TV3wt zb??jOk&P6G#*tmB)s}7vl9Q7{17@05_u9**e@Vpt&XSG0hyqC|6Q(^;J$#(;jX0r3 zQ?S2Bq{iSai}X-+2084Yg+jEN8?%x>5*Df+tpRSY(G{lHH`*OOe%%_ZWzD)lR>vcJ z7yGc3P%rhaamDP;&UqttF@?@tRT15M+nEm;eraoF7ZrYhZ?qr+E*KJS~I6s;_f zxjx>ufBWeoPt22T;}RhZ3H_+Wcp#GNXNy^=3umE5XPiub(q9{=_7n(~TU?x%q-!Ou zN-)IMf?*>IrHz7jW`RD_^Ddj3vR$(|N<-JeW^~QtjFkBMcN8(ApNHYt6)t(ND3}W| zPozu1`eCoffMcnuwR1{2|Mg}% z=0sT$3A-eW@cs!@d-BY;$i}D2N)K32dbCiBjZsc1;{AdfX;2LH<8z&7@6%7KtSGpd z*=??UlyqyJ7qIUjgjK$3yY<5Za&W;e(W2^^HS1#;2MQA>@Hs&upXHOrLu@Dxfm2to zzcD8L9taY0_SY$xC*@^Cp|a7@v-K>?!HlE&E}jS*8>^U7hN_!JteCI}Po55`EBYB+ z9M0;ZYdf?u_Mz3Yc>Av_pkletJw@YhI0Zot`G4O|#CP+^NaCtGSA~4;FfR|O8kx9sC^s9 z)NwocNQA=ZXn!?Tw7hDXJztASg)H=Dq>v(`aWeq6C9RF`h6V zodACFOKfR(xd1O7+0c3w#E5MuQC5)}bD!#}2k1#c&hG$BICR?R~VBLG;3Q*!! z6iNj!M6dh#&E!O2!~(av!^Jwg75bXE~yZ;D+Bd zFlyvIDI6r;;~fZV*EQH`=NS83Y~4(8Xf>ZJ{6bk3lRE^i)%e_{6F2_74ZW&CX4FB! 
z#ap^>gtgJm9Oqba8_bZ|1a0c*sinuEw2?Tl(bH6Zn9?rZ>+#1L{EoucsIu@rYIxW+ z%qJr!GvB;3c~cLAHInQZ2rJ?edg&9}b2<>ZZD9hs4YI;RzJ4C7bi6DFd7P)96W}Q8 zY+mj^@MCXsDg1qWEb0KRr6(Fj6$erGVuqLHPDLxQ85zki>7 zpOd@~q8yGhKG%s*Et%_&TPuQiImG`3J;F|VTHiXmC`1TQ$$dVg&uz0Au9V|WoIBx6 z8AEhk`wdd3{B0%V-`?8o#@H&C9mk;iAfy<+oY3&|0NiDqyeipOh$b~bAtD@)mmzS> zNBF6(?WeFVpAg;Yhu{m07Rka~pSNNJKSa;_O|qGll~s$NE&{IyGUWhLtIseH<-_X2 zi#|5QqnGi%J$X~Vp77O?mxEOhlkbGM{3Exul!}AFcp(86eJ03$lgSR130)HTX?rjr zz3~DT4#CxR*)usX9!M5#XQ3$y_Ew}I54bs!9ID#ubKQI}$KB-6-mCMRcmuhtuK_lz zE_CG3w4jo1d0@neh>ix62w05pG4u*^OF0yGz+1S+X7_VZ6Z=kr&40vlla#MVa-nIv zU)xC^CgSetMP^pE+@ySoh9AmY^pABi?mKDmj1J(3n46p9x!5+u5#l(ii0*tl`fE|K z!9l^wB|tHy33@*YwYe#M2W%!eFgsW?w<{KZ(eHI%a* z4MxT_I88x0%q}(w_D9)1FRn6fg`!OGu<9YQ1yx#slz|Awr{?m2sk(>Mbvhzw_n+>1a>nK)^&A}(oKQ$PY(5`zV%W^d z0b}wJgr|9$&y~H^`CXr|2o+`EEwExZht(^e3rdf}gR6 z;S`o^W$#B?NYbT3bjBZ~7!X6Ko(#ZgXgb1tSV+XFqp`9_fR6Lv6Apr#Z%90fcXgnd ztuCFU?fRY)yDkY*Tgd>p2i=xOiZ}R8bX5I|NNGP(U*N(7l%yxbuB>7oCu-zP3b76< zHGIrB0YF#Q@?huo-;-#XAP#0H|^FONX_no|#zqjl%_LF2}Bk!X+kdNYi%bBS5KbTOnvu4>dWEz zNNgqM+iNP9$n5{3sG!&cIKKV^+9LViAW#2AG5v3lr~e?Lq_gwlAAno<2B`nQq>e~= z8vkEFssAFO{sWZSJD8#en5;H|h?^?Dmq|t7e8pE#P$(=cG=8B1v+(Vjo}T7D$@>`` ziO-#snE3Sc#Hiam#Y>4|J^Nb$g@lgIaym}}rCYF+-E4w`iz|wNcV>55hcJ6D%0uRa z++@0xbmXg??mPgUwpuZ(n|Yk6^@0btcSGL274m zbT>I{I)c?pM2EwPHy!-ZB*uX3>YLl6*{+xM3!7&|pX2VA%iV#yfG5kF(<&u=icdqb$#H%CR;P=O+) z)Q^-jmAV>%F*y<>fHmeb{nu7*j@3v4aj2LTTe}hy6O#!@$XjT;^U-AH;6^MT;JEAG#6{!^ zRRemqbZ>K=Rib3bU=WUiXSbtznJ$l9>AN@UVu4#>7*9A)-yvqt#LnPO}H8&GfQwfdLeG zw*;v{-*OKoX?vZIp8v0uGC@wj7Jyd=NvJr`uRtzrjWR!80fEiN+6hJHbT~4riM2z#E3@c8=6-4UR zo5$9fG;4pM4GtHL{*zD0C!H>r=%}Re;|C4~MF>PW1lQpjEd~hYsIZ6(9cz!~mC_=J zKhQiA2==6ykrNp28QQ&H$o6GiXw*Sn}9(Et|7)KrF z@}RG-SJ76cI{5aW1@(>`DF^eHYmTz0x^g%P4hA53`nobG7)`zsW_$RS;!A9|($Z6r z@Ehy|CDOWenB7>;dpsXkdvBJM-v>}26b95e^aWS_(vBexxMiv`W{_X+P>RuMn447RK zwb6wwR)*XN&NMF3MMs&&YRiiAd|birZyC05i<`Dts&=7>Xksghwed^1=ixul?ok|g 
z&A8XVTGT3;x2KUdG-V#tMb`a}B?;u>!IM#A6$LSW{mP#b{u&8(<>0*9<oQq&EY0HMu~MV7ncZn#a^$Aia7*M2=jb3taUcd;_=NvI>H5V-6H*Olcn z*B;6gEu<7gQ-(UbbLan3InmG=qdq~+SH{QGur<0xXUl6?c0b`%)bUj*nV%B(IE%S& zw;PHF>5|HJlqoDs4*!azuXIi2!NUS%?OQMSITP5r7Y*zsZQ3UXVE9V~yu}bDdp@vL z4LfDZDGr*<3r0(Wi=B!cL`(O6dybtjJ*pS*bvVp$O_sHQPWshOvKy;=So^QHLsHpM z&D1|d&3^eVHBm2kIa z=NpuWfbEb8K{CPhUnT)QOJ+fEHst{>1fVa%Mb18yv==$tiraYx$5?@)f1fiz046vC z(p$5)Ey3wRjz#N_Oj>Xy(_(NdG$!l>7&TC#oAv+ z)zL(2z+eat!7aGEyIXMA;O_43?i$?P0tENqoZt?@-QC@$^WN{S`DEtDtTp^Nu=+IB z)w}kt?N7m4LIt~s{B;30GT&D2H4QL~qy?kgHe!H*dPfXO+@o9T!29a3RBvjsW&4nu z8}M7B#SZ^Stc@<}R7%fV^Pa1FqweG)jNr^_gtwOLb+>fvM^cjR^GTIc`#l0Hj(!4YYg`}dY^ydDj5{t_hNLY21s2Cp#?lZaYEYRkw82^5)7}Y zB$~VN(EA}H&9Rf<>8cre?=Z9!_#g+MyGn+;2iY z`xbT;nw}f?hgzWOX2j8%1zsc6V?`SoisU5N>#5V$99H z*6Iz84$1H(-EI5k%$j$m@zeEQ0!Ktaetu60`3+0W+COe;G9H59OT*uEld?H~IIrVN zU~aHWoM=}461TlZGi6)sD4`O~@RT;^b{#TyZuWlS=f zvU)M9iR4VA@Eq+e`J;` zxg6jbyI)xrtvbQ68X6D&ag0r~BLzr%=-P}W1}qn5gfIgl@EDpL_8wUQT4I=mtv;#h zckne_r-Gf{pQTtgYCdcyvi@KPxH&?}6laegOsJ@+`@h*>vFf9D0LLYm46RIB(yWiDm*E#Ot# zXQ7f7ml)1Y9F+g6+5EQ`LyD)2D}ZkQH}Ms(2|-AV2X704?lDjB9x=HC&(+W>M(3Ed zIefy8ooNaa`Sw{9ii19>iO@xZvX{;lb%LHOk$_*R07<227FZ|wz+KTs=<$O1@souf zZXLG!O?A);$??C`r%&&6itqvAIWhm2pCe5>EIsGI6SkPIr)e{lp9dk$Com+?v=x@I zm7$HX{%3z@JR;>c?TL99cV3qS4tr^Mci0GQ8%RrwnvI#>Ll_TA$xla6348dWP`zt` zzLH}O!J^o$NMUX0g7rSoBq6qoJ9_YZ?eOt&%bhC1rIA;WDFTT8BfG;B-Pq7>9r*8Y z@(M_Zi09(PY;xXsO{7{FRuo)Kfa{es3?T*`l_*LwJTlVmaYa($;lOYjn6Ns*x$Z&A z&7BeBipGvWA2_;g67hHRve-5l4!kLpkFLLZVy7=Dr1yTY&e+<@is;M1hSc?tBG?=i z3+p!?Y@M$U`$?y;2neB~->P4pPio=`_~Gz_4syNYoP`Vx4VRXdc-_w-MtuO2hrm{H zd&>r%h?YnyLjin z2|37=!-|shnrB10C{clFo@aVatG3@#`t|9!w>z^)bn<(m!JGN1+9AjrNXgXz4jxo<$0z<3jx{m@c29nylMn%t~ggQnoe z6htLPuN@@tjv*V{YbRI>m5?}7&KXukMD$)oCu*>=wpPgI>U9?4y~MjlqE+YlXy&ce zs^-W#s~aRFw0CWu=3bXReIKka8J(st^*L=yRG+V1Y{b#~a{ z=S^b4K!`furevV+ir4!*T?0;#8{a$t!ht|aP9EC+{uU0yf-KZDJ*j!}=`4U~eX|Rq z_h6HXl!d9+zK7%=Ud2p@3rlk^)0btKe~=XkxdF!izra)!SKf}SbZTj7;hf-p5igoI z5=JBCM5)ZFTcm2zv0=%Y#(}|hHUf;)pm2{!IAIYS^ImzHQQ~vs1{Z 
zm{Hh~|NL8Dedly~wU2xux!`>$oryGKT=66+qMjxk$&D(t#T3zJQQ?8lk+=8&7KJ1o z3r;NOrdAi;O6`gbo1F1GF)+YTHL<@X&hj~~13g}p(6hHI$ww`109p)d9NRbtBGCP%XLB|z7>+|`S4rq|ARx~g@v~w z1SOw_D*PwojE`!XgA&1v`WzIdq)EX20N{m#e`_srmUxc21_5`mLwl0; zxc(1s+%f#)BUs~!zYmUV%f6de>L<5L{rxXYu{Jo zR4C%$k>HO!jhfF_JAD_rKB~M>Za^ZoeSs$&9Cqt|bPqA571+(;rDSxiwrZ+#`B_=T zjDPI|x*r%iRBAX+hn9Q?3s+t`%?Z;XujfC0%cg#pk1vzk;UpeGf+pi=o&dXe#Vvv1&Ls-?SQhg*P>=5-HtkizAT!7o|7B7M!_ zG~W-;Gd=kdwmoV}+#3j_kKp5{&HNwwffkKWqY56PAx0t?){@R%1iY2Jx<`qk~;M@Gn08}{oIX~S@gx?#%vhN~74-L;4Hq9mG3_Lwf;3(_MXAMRyJpl=L1CsDdfIttLyvM)u!)XZPE=TV$V{<5wNe?l~HBN>3#Qgc38}}<$VC! zrqI!Kg7m0$O3qf9rwVVQ{m57i;EVkY%4G{^D$2>$Yt-@s8fj~L+_q@Zej?82+RW|fItI^k( zFPzQc$@*B%RIqLIxGt7TR)kuaZ*rYSpg#`S)QX)jVDs%Z)_(^Xp&oRRQf?92*o<&H zJl>Wh6IZ~Mw6_+I@h>ZME!XGmHTJLGB6XPq+od!X%NW zqX@iiudUsOP8A?C@6&fNv_ElnQ*2t)TLaAf<8k%2%^Hn7Z*z{M$$nr@kYf3!-LGb} z=&DqOI$+`eK16C@sb`iu{_|ynP13YMi{?eAb?@Z4c00Mbzp8AXpq_^^4N4gW4{fi4tp|F__c7b6v4k=bAwd;fz`+Xu!rdJeAyf5mhS8q#SRRjYaw zWK?Efqi1k;lfL^8fyWW@F_V2fSjo#9XB?5l#r%p>N91hud`Xre>v%f` zDkIb)OOs+7hq7&eI>r8q!)>K;{bPj!OYhgC*-9WJ>tHDbsC@pI(Csw12sL9>kSaM{ zmk7D1S&{0;NV0H2Vz-AuLAMq*E7x_=X|)_ZPexFVKOQrs4ZAjY@l&RCkvOXWXO%}`w+H6#+O0v#`O=hv`&Mtnj`lCwJYNo!4>?3?1R(ZS$Zpe23n1wer3)~S`Y!s$;79$!f8{#0Vb-9|y&M+oGMcg! 
zRvS(6i`9C2j8O_azG3XVw{w)jKD4eAg;NHZHQ?OwRL=LA1uG65G%;}P-GtiWeooPm zwh~i`o+j7I>9sNok*W)p3@EP+VD;n}Loep{@CnSRv(|P|t{B)$Shv88B<~Hh%IW>I zj?EUnWV3w*7N|uXoP%!MvSh<2fUu%y6#9*M8W|{a!R#a`=D@>%^>mmsVS@wNyej63 z8A)X^lo{f_U*lK)+;fUtw*aC}P_PB|v_BT{mC`p&I)Am5K=#E*$}Riv&OHSgC8ZGj zzzeq(5&Yz&L$~|%QZb=Vvv3@%*#7M^x>_{0D8z188aJX52{|zwy2JO_($*@cjjbf0 zCo1$UpKKFIh#%v9mF5izS58AGL}NdkNmRk|TlFU+ljQe2y}Yb77(>BZX$>uI*CkIQ z10lMtQ=>-a0MC>|fA+;06pqIx{qp z}VCkIezk0?98wa2jd|Km9<*(^9q> zDA1AfO=9LF6QaS?TGq)<Bfars_Yo3ny|I$JSd(FTN$J+2f#N*IC>b3vUra$K|=Ep7C@By1X->rvO>x@VK1c zs#;Pvhib4hm^1v#uhD4|#RGxA1f9?S!Jq^WPR{U+$rbAsLWM$?zugn^>*^^<-TS=H zKYYvJv7L*U+vOq{Uukr0(X(HCnN6g~F_iq~qJ3qpJAWuF!j&>;?90(DWY4sF z%G+m++kWCAmD$_#x1#4$_ot?)Eh&=zAQLB@ggHiouTyDTz3)a9vevagF}a9)RMz7q z%OLpY&(=7N0A>>OxD``I)F{m@l!}IAgb>xx*Tf&{8g7pkCsxE%2iz}lkdH6qlz)UO z{Laqe;ozcyre_(c7z}9#Y-9jk#VRGduA_ZI%2*tzhy2*?z`yW)Zc;6kl#9$0pcW}n z?n>)?i#+%MTND1sR}{!YnlZF+;?pO3y?8kxWdCZWfZuT7SNz*1X50u#u&p2j95Dx>c>Z?}C#U)cM~vM(LKZsL z&BgP+k{KcY?+*Fh-f6#Htzy>=lBLLl`$h6`dUCzwoI6Wld4j30rXj^lty1vhZ7ghl zSzFvbsDwd4&T(=qaI!%`l_Q9w-F_-0>DSqIk56@?n-5zwAf-~&)h?5Y!i_Oq&WKvx zXAM<1D*>g>&lR#m_;e`(Rvuk-MK$zr)>m8%Y=XJMg?{2TRV$`U_SkXMNG>kL>iHu& zOqm9?8s(w<1q-(HTXB^D!44LwO4EMYlzJ$+23e9+NswA}QyYk#ELpZm*|Dt(Zp25_ zVsrl3t4-&#A0!-8WO(*oS@KEj=@`E#6=vjS6i=rf1)9vjc9J=OTt(w!aOEP{{XUCU zy^x8MNe|v2K_>Lc|*~oEsEfibzDZ3;XbQsNCe7Sr3?So3xa13(TIk6z*c_^+uVCZ#8~r$lhya^AupghNUXpg;f2}MI>*~ev^8XBVb;?pNmW|o zhLeF5@Hs!1h<69le$mNwG8HOFSeRpXP1s@Jtv%o|D0h?=z{FB z2S1uTj5VX}*EqYU!1t?1YML#b^#KC;C2Auv@=XafTgjTp;EB^s@7P}vr z*aW}SbN`p{e*0n9a<`rr@qa{1{{I8|`G0@c|0^jHv~gdzU*&cN@phJZ^UK3#xNuxS}&uemB~pwZgx}P z`{MR`*`NJ%Gi?V%COY9JidXS~5gQ$bQIl~52JKo{*NM^LDCM)zfDHtbU}jFpqs7ZaB^T7;T?BBzkIZn1cbUOxCS}~_?I|-xduL2*SaKUcU zv&itWqW^$Wy>e1hQ^Uf-=H^t{K=i@}0UN%W8Qts3^azA&XOd)}X

    m@2oe3{sUWG zg%PrXfPl!yw<8u8t6)sg&nH{BF#S4WR(OjuG9@|9cK5N=fp%H`v)^{JVbX+U*aQip zp2#q%c9;eN87xt(yfA<$pQK!)4y0C4{i=ORNnhRkM-5%4uAC@e5loZaCMbXo_9Zox zJ}^P{@6v&{IpQi}QYi9|guS;!^ITqc?uS)g*n=GB$+0{5)T3ZiX*thOJSFG>9r`Mj zL04$;NWa%S(-pUK2KytKn)kd(0Gwd5>*jrM-TANWVo=_4g{|#*bVRw=(wSRI)CJ8+ z5=Ed^28g6#^!>ruc{u9Y^ytO(rTfR3heGp??^17#=>gJcRJ5JO_(@rL@dUoQ?FS|% zCI&u_*KyBQlCSpzopWfz0mI7}H3PH$P`j6i7U#U4Nk^HC#QQxOiqe^*$G6}CExHZ| zMSrukT>&MeJ3{jYUNxKLx24IA*5#OFF^VD8WiLnBrW_6qzi#8(azlx@JZ@i(PSa?O ziiho!k6+y`Pb29P1aM4xxZ)kQ2MV~|8>NaB?XKs=vnTfDjan*39E?v^^IhQ7?jUhN z?)T|v&80L3(}%Mic_u7I(+k05p3}e=!S~5}2K*I9z2_$33r@#X(rT?{2gdOcy;o4w z=_oh;x!uiSxgk{W$9r%^CYjE?TL!{jNj|gK~P^IR;j!WP&8MT`*l<> zoAI5f3GV42MA{40bhQo znx*HlGH*IRrTT!z8JAnxawdNG@3f&H-!`Jge5QMJ>v~{;x z?6`Dl5Ab*6&$~6;?6TdcoV>s7(-p~7t z2?w&8ePHjI>koV0bb?@EVZqO3Y*4Wa{#rb#JDU@0Z0^b6b3%_s54%O+BStzQKDY>{i9$uBCq)gx%;Y$ z0)La|&3=>h0wOMl&1O#^AF(RftL}$6DAUtfFETo=Jv%A-;j4#r;0Ll@gGP?Le(y}e zWhus~*Hs#=N_^hi;BW3ndx3sBqTN{~x-g#N? z*ji$Ba-zP?o8#nr@55bNW)>k1 zDw)<^LcUPPxNqDIT$D&KIA|>YlYe;smM-ExF8HlcMZaAdZSwg8w6FI$uS=tpNvrt5 z0!e8`1md((qEXY7K~Y8i*9Jcx2b+_InoOCfH-tc|*!z)rzP4`#-$y`JYaL{Ck1n2n z_h_jl<6p1|5 z;?iGZa9jt3kaD5hsb6&^MA>@%vE<9p;v4ibk6qe7Zn!<4PxJOEb=!V;`)}yY=4Dtg zJsMAJY&;R|yow3t>@UpboqOBI`_sP`57?|-zBPY%n?iATek3s>Bc9vMI-Osb{hT|F zxmlWgJx0pF&gk}fuP6wjH2ynBv~Wjp6V(9dDYcl!r3dq@{THaDbVijYW?P+L!7P!%$*$`@l?X(qwo@y;-h~Z2q2@YxLp^KGZRSWWXgFFZU;Ko!YKt^0VK z9lLc->w3NjGu^1d<@6FY-1@NF@S=W^j!xfqnv{P^`=+X}hJm$K?finJ_vY_7-p{+f zR;n;*mxyAR{*l&#)x~@;-+EZ?F7Sxw`8oD2^{m7HYN~cC_~qA1z42RWjwG%o_fxfY zlg=9m`cYwLDedF&Wn-e}`R3!!@whI@WAI0c@$-WI0g?Y$?n|k{a=F2R$I5Jp@#n%n zABBKm6!_c?>kbkX_`83s4&9Y3hLjjeF~la!;$}d%W2?X#?@#t+)}svm4!33J%c0)6B8`KhNp+f(^G|c#?k#(joH`@zIvT2t zuPbtJp@pg-pHUKh(+e>jt?i!FM$hE^2L@VH`48uo<+BXunO?Et^yo`Aq~;|i$K zFAbKEH*7k{_2J`VJFNV$<8#+aJ7o(*p$^W>)@!q>duz0eCpodbGL7q zaBBPfv9ndt(bDvKA$hrWckb)drsMK!_0erK@B=lGyZLnSBR({5P+Y=Q+TviCmf0ik zk9sY!!(&VjOnS?-;7!!ihQHVQl>bL={XSuhzz5g9IA`?p^;(;M-r_f3r${>Ij*G8x 
z+CYNJ^_Trl2R*NOH{1Nn;`9A+y_v{$v`U`G^Huo9hYjcn9&1p5MY)IY;#?o(1bhopbg_mr)*Q4c}b2!PHJ*NkAGXfYCHST~Wj+I2fPIJ6 z*?t`olI{BMCDQ)$!47n`8=mSHoTsWbe~I6RQ@>n09$SHyfI8IbXB?S{DfW(RTQ`{z+~ei#CeL(mtDI<3^aBmBe=xhp8h>L2UKq~Oppts9gcw4K)wVa4qb;I zeLpW+%o`|p2Jf}W{dY%f`o!eq#T3s3Z?&Aqt}r?kBxlA!Q1oIN+iVBhR!z6Jo|e8A zPrky#3!1kF7RGE9?0Rd2Hc~_etgUu|fU?rCb@w%Jy3EhfcDM7(nTz!Z7^Jh=25Tjy zt6cDKZBUEFM(JKhbhmi2#X>tMC_W7e8ghWc-iw0uWW)a{XI$>i3hE`>)-TG_k2S)O`tIsHxXcVyJyl=(&Ph?)nCJjZ%XfFAC9`rMl zh8w)UQ0WMXw~He_Fng&q%M3O|H%W7yT!*yLwwCjy@*@G z2ZHp)^E65@(}pfsn)QVKhwuAqsM&ZS0p8q4;?~2?M+Wry#mfU^w0iXh&V1u=v_eNW z*kOsLzmLq?5fJIlMM&H}+aY9C|S_w=qnh7!HfIHToqN69yw z>hl&xSw#4^4SqKX{sfJ*lPcv3;M|dqs`FEso+KdwX?Op2UIL!xexD0&B&5!ZL__d_ z>?qkJum%OIdDDp*%uTmKH?c|WQ!YTaM~Qcu1!D}#nwq+~GuhWXE~8l& zDykVLU%!N9XXABM-3?7QgtCYQj43_&A>ScYymrMdzX@)aIv>XoJgmP=zg)Cft+jt& zX?{KVDGnF9y*VkUZZ_UHTQ+HG!pD-TPp^QWsiCph<@*8$50Ap@!Eqe=pq`slVQkW} za8>Pu84@~It)9(4>@HsBeQueS;P5;UnKT%GW20Il{hmm1e?hoW8mm>kk6i>M%aGP$ zy$k;)pPK2@eoTwbgulisu#Kz-)X+PxUl)l z0Q0vS%oQ1IZ;t+6Ip-GMPt9fbc;Lhh4i3Ho8hm%zv~UQh3R`|2u^(~-C?a|nGjFy~ zK99k6ukjNt5$XDen5>64o7_rKcq1sX-u z5%^c3q$Gvyk(Nagt^L$FRAbiZ$rz=@F>G_M$rtPzZdVC6R?|xl3QX zA64&~7WDd+(P_WO159OE~h zpQVt?hkbJu0b#(Bfz0@ORATsuz^Li+@^ZRJg0#bc5qL=|-6UaxtE(&JE|gjch~(W} z6Pq}mM;jSY(b$XyA;i*H9%_2}>fd4(7t?R<8T0ffo`@x=h$zgQjUP%YWY|4Bg{5@b&D1)2q1d3HjXitKB=A$Nik{CgwbO zf9$R&WZFV>*6v25%je0urqgY9CV^nx@5Rark0`&Y-OF+r$>Q|akT|Pd2=0pIfn4gdYbok&)~W`) zMoW>(d7Lj$vRsuHaqR#T5c)lC0bDu2j&cQ-^pBi@pOnCD`7+%apJCwk5K@rWkTlRL z3#iD#E-xLJkwtB~Ja5dV>8}op;*Baa2)By#yZzR4&?t@di0FlmyeON@2&-qH4;jsf`WpywD5ublaq{0 zOvp-rsOz`8Q~;vwW$#HTAQt42n4KLnxHp;>ZEyt$fnir`Es)P+MfqH8O*nHK&DfQ` zA6Q1K{>$gAX<|B;LTQrNs1t2P}7L=G7mN}=*Tzq(4R?!=(qm7S6j>so83TVCsPU!}P| znL}VPjgL6Us#4Z$F(?&6{u@is42+4-j1^s!jINrj% zqOVj#XdEh&&v`bOCSe{D-cpKx;gFV=MyCG!`Ln}UE;5Md?QJ7}fB&nw$w?&5VjRZO ze>*h$Pr@CJ>+9=*2RG7+lCrWIji)A%5iA@W4zHVRKyLtF=o3W}H5XTU4Ha;=u8QC@ z2e5M>Q{xfHbZhCUDsMaR)aZ0RkN=!MUndP0DNr=P@5%;rfve)%;R=AfL}Im@Aqy!6 
zatb{#7Gzj@icT9qMq~aP!bk}Whp!3@2c&(LY?_yB-o2O>ZRE4o-q0{GLd%J}J{5Th zrhWs4ilw!Ee0svbz|f?_ERG77K#Y%wspZX~*BU^;nIdGZR-;Y5W(9;t)K%rJ^#>|8 zw&(4kQj^x13r}!NAR2+U=sq@cR2k+&cue#a8yj2lzel6YientzJp*f;E9kFxK>Xm~ znVg)gQT;cSUOVbyi--i+FH6^BTqB~QfabVRkkRn?I6YAO4=4*dx`D=Vv*`Hvn#jqn z{k=Wic9$a#cwyM1qa(A1HkU2ai5e5oi1w~8bSs=%WuKC2^-oi^^#cx9QPH{0`7-aG zu+UIQC@3>?^Miwfl{2>-c`R&fY=9bPoj4XcY4q9@TGpo}{vYrB-uG3z%`#~WXY-Zn zHa2vb=IA7A-2w~@c4O(xRO9A;z+(H@5qAJvcQ7SeeUE0x`>^+y#X~F5TL$>D4wU{g0Ay*W?xu6qhRx_3rt3ynsDz!8V@7Su?Gh zPcIQ19;VYcYmvK9qaS}iS!T(Igkx5WAe(P0resl&NI5mdaG%a$i}M2_*XT2(5zbN} zpc274F^yr2Q%jnhnw-3gkElqF9)U46RuuTqHW0uLVi3*mZKlG* z{)QfrAJ7>gpT<&Pf(ID|%ACPF{oE#eT5h}2EJ-Y&tx1<^Ia_MjO_F91?;*-9X<)Sc z(#oX5)9(@WY|*6k-i3m~Y|Ex>8W+LmcKVfo(`J!KTt1?#@J7UiCV9R1mP>A0j%6Gi zH%brULNbs7+fuwLKqqK%OC@O9VgPQ43g`m8SxQSzP~;~gXndCZ(&2W-a~_zz+gbm$ ze~VuTBAm!ebcBzR>i6wd#BK;ODS{Wq-X_Ydz#G+!CW!lW4je6o+fr0?pmBO?bIm6= zd`_H#oIEmw*b6J)d@@&~I_TTLi$?{08V@~_|J(E35Heu5h{mjQIcx^FC(*f)-RSsV zy(`9i%^+q0Yo>qm#GWo7X~tNiDJQ#+UNc;Ix;ZEUs_K6QY+ANU9DV!E>uNW`Yy6~x zbWII+Cme?oXgpIa1sFhy&t{4dY-X6Wn`7!u5dfdfW8t+Zi7O)WLj%fP)H~9F2+C?R zhbPmVIn`*ZFSG>uyT{;X8>%~5gU@x~vxSsG``EC=VQTq8Y`@k?$>k_!e}1nV7__ZT zY2^x%CE^iNjG)2>15utE_%<$)e2L5_6}Q~1U|Z~a*`eGnj2}B$j@8&WnB`OshoGQA z1mj|>B7!Q30)Lnw+_s_9`|Sb9ZF+o8R3cvCzD7ttkfSN#BqJdyjfH|4e@bUIR;$sA zl_c&E=uv6B+AbouaGz$1J|QaM9DD{gnjAh)mgaBD!dW!+kQ`WqkLd9;wPCpg>%N~n zXi}p(hxpP62B_d{Xaz`vo1X%q2`-mv4$FHWwjtF-h%F|{T44z>* z%{tL&Xzi1nmH_;=zESR2c~@odw$|Y;Aug^)VWn!;y;t2+E-X5ycqJ{}AK0k0PAP4N z<^Dq^|V^bqgPE_(Odzc>>O1!WC|CO3%X?tIq$;NDjG1^?^UR_E|r z7u;M5G8c_DE4h~t#3ucgx*dvqFQezCjZUi(&Xiw&Rs|_XmS!u7lyal9xL{3>=^OTq-!z!q4TfBckj~ zvA$c>`i>+dK$yoC2@yANu)l)ZB68ByjKsFlJ}*FaGsDy3uy!l`(edEkCka+CdR@Qv zveVuFW#rUHWjzUv5K9UC$#}V87CctmCXQPY75w7FnZ7k$Huzjbt*6;j->+^UiQ{j6 z;|1SdQTgFvdYYaGB5ed2@d({niN1iK*{X+V)OFnF6bZ{ADGg{|5CaSA%!>PAdZyQ5 zqEbIIbc!$}GVfy)pr*&SQj+TnVkLfG>aa0$#K*}?9!~L2``VJTvx8 zT@#Q1$~Vv9lQ8J{xYHN4a{9dJ!-oN{_EdclC63q21Ha*~UWEp%r}3bhC|Ffk)s8%D 
zDPKtN#_uR(l|N$0HgW7Ijw4lqeIabV_7|e3sxApW*GpK_%*+v3i))m5z6@T{xtVNW zZ4hWaYXH;w;x@ru84cc_Srm&kAD*=}2WbRcDzzKRU&dr3i;!>Xn3?t6)uErEt+Ik@ z;|p>b(3qXqmf1a`*v0G~$7vht5K9xC5?qn>3yqERy&RwOUr;~p7d*E~qFWfdA76V5 z`bVQ-oIm*N_kOZJF5NhfEL1K?-hiHmFbma`Q&?C%nM41{qm`g7kzO&ux6jB6a-x;L zfki4APy{3UfxiH)9pQ*eQXgT>ock+I#1|E9`rdfFdpyq_c)Ji>GNfs05-(oAo8!}+ zGS&oKzty(4%eyTi)G>4n+9Wo+qA{+ZjPihV%op ziY5KtB*b=UOqiFD`Ge#K^f#eDcUNdN-B(yuuX|YM1sH^kddlt5eQWs2YUe1fRBP(Q z^KvdLHqV9~sZ@z;A3^Q8mo@M8{^9Q%TVC!DZ{AK8o&}u zyv5OxbdUq=f>tq5Yv!GWHH}3T;l^46 z-|GgymGA&+7&;o0Iz?KRmhb0Gi_*e5Sfv{E+gqcDD%CDmhFVrp>wu9vQy1Xid{?4h z1#e#0kxCpjdH1T;qGqIG(yUf129D+ejyJYo(y*B*M1}X)1(xZ!)GLYO^Q$YS^VNLN z=UDNbgM;vL!KX!vEtC46Ek6;$e6@_?UK#^4gdoSILD+WM0y_no)LgU<~j6B0{P$8uYKmx zSze-C4dhDeHR#kT=l3BMK(ch-tzRFV)P8`BCdP)SeD#-jHS)41{#mG%ez&#|I3f%t zx!)`2w`Dp-H^RQQY&md6MMpEA5h82oc4$AYcWe{O8H|K$w0xd8OC8U;zLC?awmaR~QMx%j zt#Y+q(3zO%tGksyJ#p^f%7)8hW!#6riRIdw&CQw+wESlY(aZpVnwzz&*)}=ht0G^} z>8YvfG^!KjvS`sqj+gOqSQ|Z+og>1K<8?b+CA6ctud20*JfkCG%A5KJ&j$v&e7zOJ z?>RYorl*+;-#9X4EgYu{Ns+!QYPqR#6#j{3e8U-Ztz_?2l1Zt+jKJZ_dqBb?n}2;7 z&jRR9A^60WYt;U|QVnB49+^*(R>wmV0k1aXB zpfM2mJI0wIiWFi|Oxc!oW(%yLsIO=^0;b_Rul>gkKzs4%+)O9QpSb%)UWJGcE@yGb zwe}ae7cDkFyyWQ2q7PSuOl#?n(FjnM$`nXY!MB6d%0?$f`u3@84{K4B8AIL4t^?iGjz9>ozV~RQs>DK#y}Me^&bgMJAjImj=wJew93O`5D#RNbjGBYCfY#5^_)us^sgNsrm%t~A zuSl2;fgTN{Zz6^yr<+LW-nN}lOtqS<06W{v2$Brd#+!>BGPI~~NM})vN6^%bwBS8L z;GA!vse{szJStj^3bU-__B*jjX2qoZVAw{L&erT(KVV13hR9QQXQtIC4t zYU#$4rvJ5aovB57m@oXbR!3TasDXomLd*iU-)J>2oqT#eU6)>WeMh=RajoK0sfMy} z69}>qj_bWTU+j9jz8h8E%dv$(M-OTwgVM{t1T^`o5eI392M=p=G&=~zuzL~0{1n$c z9tAoHRms>ykygGePlnqs2{!~17rb^~PG*PS;NmOWShxxzF>-_qB?*;>*2V}FezjV( z#4c>!6$BO7o=>+anlz>*xcvzV=~R%s*R`cPw3OAzzQ5#?Z) zDkHJI!f>H@AHS5Mp!e=zLk~_W(2peI=R)Cb)=5mGBO`^uBiV?w)X2aan>;Q6pjH_2 z?WB@z8d#v5)%76qgf;PtC}BVu7FjdK-^no;QC@qtZTweGT6F{8={iGZ?cXvJZtj%> zs#`+Hipna8ktghnY3@P2>tfl*%hfAV?&Pz+#|ic<^gi}r&GStVu!67BXI z1dbJACYduG6B85SbFKcHL=7Nid7@dv8~l})B0N>0Y-rU_FK4rq%)GR?u75~OsIN$y zms=LrSVQP^}v#Sn^^4r#;KT(ko 
zNeK}Uq!px7N$CzpWN_#hx>H06X%JyZVSu5#Q;?P(awq|5=}zwk@45HfbM8I&xtBk9 zzGuU?=bQcQSnGY?wbrKMfu7!#yGRvPtU<|R!w{W^LwF>lj!`Rb(+`&?+eDatp0LM< z zQZ{`j!Q#pwwZ1_^P5oNWOh%o6FDxVrqNu^534vh#-K=r<9;+bGlg;un;(Z@&No|oy zm{+Nj{EuBq^1Q!v?*5RF|Gne)Po7|A_254B;OcNdH-Bqb z+6Qs=05?AA^1&G{&}Y7cR;eQs5^}Y2wwSsRAXUOwpKF|)eMs|``@$}ufFN^p7{zEm z0M=Q$8_Q?zBW1!k5?MN5eXPApO6qsKW5jTAM*CS0Wow$5nMHRo1W@?nQ%5d5WMFu+ z13*4@Xgw9DDZnGAR<#RvBV_J#A{2y=!1y|&{Z_TOBh^30#PAt)emXonL_9V5DDVQi z{l*lUj^oQvoB05hFv(5}(z$SR?7AF2Aize8kj`odZZoyt=Vvk@6d!I0?{#p-Lfw`p zs_Y4QYQNLc(N!mw*PAPfo$y*Amh7G(Lw6tKt5^bdyV`{|1B=MI;cP!a!2XGd+wRin zs7@M|@J9h179%n<$acRB4N$h+^O1=2bxcFWZdOIUYkwD+o(?OOv9sI0;OA!iY20EY zz@wjN2w$(jSrd%2L72Sw>*|dG30Dg`ZikgY*@b%=K8!S!8R_Y?_AX&JnXSp9Wrgo1 zTQFqCF1>XH^hKdiy&-UxO^U3MxVE;oqM)k6-L!p5#Mi~A^rDi3nf!tp+v*QRKWLl| znsU=&2Ud3{3Xu_Y-1jgF3|I=yH1VVaN~IK)AF57sju|VHL8i6+!t#w*(TPINr09Vc z${VX>blfkHd@Kc#@h*=j6%LzkH&bL$v9+K9-Qsn23;?(lE!!u+w1Hu4Bzg+R*Gz?g z`GQ{7V2Ot04eLMAzp9+$41HXGI19dVC@IF2J72Gkf{4a*H$vxvF9SRuQcC8Ps6i_( z;dMzez^7@+OI-b0u46x#sp|4kJlr@kGLl6-xg+Y4d~(w3T9+*V+8ZV{rnz{m=2fWt zASN(lmP_I`i`4N3645DTU^tuC{7aVTihPcy z)P6lB5Ia@D#po2lu1EgWbfX;5gwXICjA`Mt8U0gXR04N%0>$sdOVSR+i5A8=DBc*S zlL**CjW(NbOSj0^T}+v5&p#W7v)dObXMU<)g+vMx63wGxU_4uZZDRs#8(+GXmX@*y z)i`=PI-Zw5Ygb{3p1Pob2SCLJZgSiU^F5QZo!?1;h$>a3on(FM`hB^rFSVj6o4CR% zX*42q2L}aTwnXCG_*tr5hq6<^h@;VBI;A%iOXSk;l4P}cQi5oM0AegGEPmW%<>T3# zXQ!v(QrTxPoC`KR=s4nn^ttoJFexG>J%&#Cjc5aB)TJ}B@`v8R_pj`uceD&Z#TKYA z72-pcfHNbQ>E*%L0xRMdHcXmHoh2&dQr7m5ecT$iIS(L4fQZmN#4Q;M4)<}H5xFU~ z3m+BskPZXh5+S;keRF4rm4!vT{SLH*HXRR~OcmoryPrix!Sn|(7hgbdu&~>1CM0iP zi4rz}Js@ps%b5g18n!Qb&h|D#hEB-n6^!t~F}tTnJAwpN{^U#$H8ofnH-*+!Db}^C76F!sQhp`YRh>fgPgJ_eO*; zrqt}I(=Q#f4AD3bT}B8dqu(@+&suMz zhbzu}nY5V1;prDgnVrXzXux)0wzc(KjSki)W2NQ8P*d>_I5aXEo~L0&h=(I2DZ^N3 z+Z5t?^8h^*D}7%a|0_qW`fVIC=NPH$DmV8-U3^e<2M;*EyziXwxxN|{sfxc(SA27y zY26I4lM0kS<8Mi7`z+mW2V4%4HO{_EMk4gcbWn<(ed@q^v=B>PO8#-#!N9oZhGgoH zc~Q!4dL|Q99v4m{{cxN5M$eVf34k4lM~7X1bM-|0)+loZJw3gr+e^u8en2!ZvXS1- 
zh#-)heLk;>wShTyzy}jM#Vz&wU;cgqsoSD~CQVY(kOtd8kUu07 zXrxm+f*C1X9BOCav1XQRf?)xEOX+t^WOa1{_rCtsqwE@SHHcs9(*=CA1=6KVS+7eW z0~VgLvv;3N*6QSS4VTvPomEiW?4wdymNSxEs(xTbt z1nVD2myb~DOEs`hyJ5*Hc#Y+WVf&rZ37}RP&3eH*+PsL)n{PCST_s`Np|8ojg*gEp zx2$p<+HdquO;l?*-EaUhzpGM7gPg&@Gr5TRg%ZCJT3qF){7T2#oP)J5;vX;Et%Mm8tb?PLe@gVuelkF5s zk{g`q!~d?yVz&>twb{W2ZQPBt|Ha_{y!>$&Df9FEoP@jn!W@K8<(dxxT@zr1Y1klX zz%XQqK7gxxZa!ruA%uR?TR6o@(#q=s=GL+SoBag~+m3ql0SPgA=#Bl*(B_R(>ZR7>5&XeOcVVeZ@eDfFVrF*}|0q0GZaX|~f zFc+79eqT122biCn@B-o4e*$)X#|Khf|LUH9>f}#DHV5AkM?a);Q3MiU_Q?bZ% z;z8lXcej!OL0hI?W|>w^9QwjEfeHh2$Sq|UvSzMCiSG~MM#2kdz%A7N`xJRjYro_* z*BIiujcX?JM^N2CX=oCCn7y*Svvc$j7)TMp$!~$%^Y_b- zW%}|9aU{`$_G;??$5Q*d1^o9NU#{rX?JV>BrPJElgxuWORJ!;fOt=yPg6uyz@t2my z#=gEq28Ma5Ck@a4n=L0j@`0Gx6aLXVRJj^i6a&omqib3vM;>l&URGArwu&`Y+P-JNIEzD+q` z`{4sQqym>lAWh z76vLdf!!@W^uXDCb!hvoI+@ohP-~Hl`0jb%*C|fb8M*ZB#NK)l`+c^Bjnie5sk)67yLSg`+{to5ZQ!ve6x!{e@=^6m^|`(H zbfct(*jjsny9(&`%{RU$t7i_e%XQJ*iS|6sYZ-a2M;|y)CDV12K-AwhjSJ4Y?hbX8 z#Pw4$1E!xiD730dcydfkOjHzoOvkl%;!l2Wb_D0ShgxEyA*Pz9XQv=mt91~)DCE2h zXEMFyxaaN!)Z^i&L<>K4yH7TQhDcG_+68)`=BVce zx}Z5uY+cOhr=ef(ZYl%R1i^N&zhZ<7ulAVK`itz`7#y$r^&u-~V~?Ud-^v$((sm3j^G9#{ojcswT8@l!)JEq%%W`_6@3GJ;6E?BxPq)$Yi z98bA>_u}WhYGv0eIv-sQKKFFX)oq}kU+Va5+rL;qyt_Okfz(<)-Ss-hwyZa5)041i z{?sP3?6}gt+1J}Jc8Km}1|fv+CAp2OcxA2{$RDDEIWCbyndGO7JqsQH3-B z2+-1HaW-L&ryAq$TH>sO2DuV*4FU|N8oElHm+%ExJn6Rz3`=OryX;L@=azGcl4Vsb zN}sptOW72Pr<+f+65ZuJX+Z}_@Q%5iC-&c@hD@!Ca zCChD}Kv4uT(pRUue)khGmG-_k<5e=Tfr%?ct)`anhUCAwaZRtZnbgshVFOxIaq1SU#Pt@cO58;z^cm zfxPkOD)0SH!+Tr^RYv(;;f!`eMntZ!iCc|=-&;cF6A!Dc0NQd_`qGhwr|=2P1I6BI z-}NhlNJ<;3uZk*X$EOlOR;rQ1 zaB+7>Kn!};RUa&@7{!HmO>|<3{0o zrtwPPvU>O%gb$2XsCp_QAK5qaqnt~6So=Dg$MsO|GOc;}%My8k1fx%tm=?}7<2e;L zXRs`~q(&k=SAtap1V%+xhnS+t3>8S6)-c6j% z+fT6Dq$mzSib{>OdPteoqVh?>!u)C}W@=JML~!M~cU?W^z>ZhM9&9%!lj8=@ru7`z z;cR0_glPBnI(dlY=3Z$8>#I#W+3E9r27exU>``f9efdMjXVSH>#@y7{-o$&0BqiP! 
zNo9IuL@V6bRY5PPjG-mZqiwU2>UJIJs066+f4;@hdKT%j>!TO(rgbRZ83pZH9?!9; z8D{4cV|qq&kHXZRe=fDjgV(HI7&iFzzQBiaP-tkGMnBiUTq_chwn`0 z#yChLEhOE?%hB1+6&8nDO+lR`UfJzi#TAc|0! zVia93g5rKV{$8(#u790_Y*VHYa%JQmCk$l`fgM3s!UR%~|X+|^+8@+b$##q&M z3xg*{CFaP>&2PHfeuKVAB3dq*lV1Uep41A-ai8#88gH*medZ7=GL^6$C!Q8Edh@^p zjuNwO4AbGvD3LFh;@zNL+m*cC)b?x9p}5Ap7*;mh1C?doWfGyG{fct`W)d166{S9< z#c*GAHAaBwc^irfSQ*m(p$Rvi_&HdOiyzatG-YDR%|8%6Gn;NSnzpKHn65Jp+Vh{h zK}X+ovbSDu)>bdVjKY+g6Qb$7rW?OOb9GL-nq|VMgpT(8q2*?i)!!%A8pdly9yi#m zq;b-Cl%Jl?*Loc|{T7&Cn&6v0SxrE2Rq1A_&Apyq+lLQ6!#pe6IL}1%jR~Ick#eHb zoJMvgKVV+QI~H>zPD}z6NFT)a9b<$xUn=XgJ|6xK(S1DgNr7}h&*#E~U209bN_O|H z+_K}|lqGLapx$InsikX-7V`XPGfN#26C|wttSMfS-l=wT5E+}~$vLuOXI*vB$JymQ z)6o`;bv(MDic_`PIW$o!?<#R)-_umTpyt)|-g%@ZNIe;bOOWG7F`|QcLejbDS@8Nv zx#TXKs#C*QS5?7fqFUso!0E_7A-(0?Z=^CbKen!^crBBKL0-VK(x{nhyn?|T(cm6L z$TCjr%^(v9OxmOEk&`Oj{M1yKnbQ6|D`Aq3&9P-`+xOuIhVT8POdZ{#At4BgMCc2Z zw=W*LY!slY9wR@E>QdyUsJ~+S%&|77`lBeyD%Zrr_!+v7XZs!vcXMc&~FR7vjAX&X&JM;IGZLBuFKDMt^oa z51H=OEperUHPh(1VJwThN?|3HS7+@6;lmblkckp(aj zIZ^1N(Y|_m>_;q)ey^EL^6VxH8_yU;1&Me3@s~*M5^s)mhNLvM>LX_~AXQ|ysvRw# z@^2-T%2z@2$1pY81&Cg;!0tz(6n)%uhb7UJN@=gj`%p3`yj#WQ%TQFcj-8&>7lbVY3wK`)1E`c~jh;*KlH>7#q zE+wT1lDB>z_Mr&`HarE+vs}73r3L_ppOE+A1`-syD9bbGhmKj2##@v@oGYqP3R`Q~ zOnob=Wp4OxyHJ_7pG z4_A3`YQ~sHc>H`dD|5Z7DQG+(^kzVv${QBM+Wx&JGBGhGX5_1%ZRVhPUE>{UGP1rt zA#30mqewYZ9@%psbZtXs^zrg{^47Dr#*;>V9KgSD8oMii5WSvuT55c+%Ko%OHJNor zt*8V=HcAOFa2sd%o7A6y{{ZQCB)xB5#pQokS__%so#v{aW6K}9sLVTGX8H+SHs)1`8C z%-b!CGHd>>$FqESNin#J1N@`?k@y%6P%Y)nH8W`vbr2G zL*_gPg#euBmkp^k#nb{GI(b@cZEY6Z6iF{ZzmX5%34)F|sxwY#Y${=!D`LVZ@Dv(G wRCoYL`Fo}B-{iQU_0Q%1(6IOIH<|vGU-M^u1@~ diff --git a/ui/images/sample-wizard/step3.png b/ui/images/sample-wizard/step3.png deleted file mode 100644 index b04a0eac1e0f66fca018e10faca752b6b0d419cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 44303 zcmbTdV{m0%*EZU5x}#3VcE`4D+ctJ=+h)fd+u5;=j&0j^a?vuoAf zYppqO4a`+@%(X|DoQx>kH_UHezI=fb7ZXzW@&$C_%NLL{Xwc6)<{998pFbFm!fK9+ 
zHYScP`u4_O_>F7~jPb<*`liMT#`;EXcEiS8U%r4DnJcL|s!2<67}@}6^#9V(xB_fH zxxakj;&rvvH?%Z%#5XWDHMiy_ylCwt#5XtMCRAmXrjxc6G&VCAbGJ8EbeB;wbhk8Q zHzMTa!RK=2_!IyzcGSmr1z1@-aJX_4{v(&;^Zf5^T0;DPNE|J>3I791O#A={OHV^b3jqAJ z>mO|o<$Z}=Gx6QX~aKYjOqSM-QQ^sR(! z44nYR){f#r+=QPaG)Cq|94rj{%))dGtn~D3!oqZ)-~7yi{B(jWB0_9TO!Rbg|H}B^ zctwPng&71y1O%AbnCa<-=>!-A>4f>2+35v@nHfdcMgHX#w{~#Uw>C8XSFib}-v9FQ z|6h4I1nrIW9c}EDY;3Il%>X$w8%G-lGaFlcK}BYKRdZ`28yAPa!TFD^LdN#y&c;R} z_BH_gf4s|K{=aBv75;=x$0*3oC?NRPY5_*ZPv8u6!a__;Y|J7Ig#YC=`v2oG+D|aF ze^cZClPdq-`pmz-r~i}qpBMi#b&Rb)=ZpR4#323!F7V|`3BR}yzmn_nY35f?WaWmB zveE~cqJ;4dKG3fO_?%xOPQ|_~2`QmFX4s{=`TqHXbTEdM%lOfmXOorChJp5OgB+H( z4NLI@MM>)`;Wns%k**-9IKI(EK4<&FHLo%&dH8siqZssoBu~!se1&GCjPmpAd9!2* z-o&5$@8JBOhdV5!v@briKR$FR+&?$0^=5H~`|PkazWp8kj*%pRf)30<#vo(+has*Y zm%e>EDLs@ybVZXoEbEiY#0 zvk>zKyT3+X;rmn`n=WlS$s`PlCA}5nhUI>aY`D)LuQY&w$N5M-#|PauDb5_<5_b`zXLnk(p^EM`ilMY@1)-=89X{UJ24 zXP?RKV6xHSg_BycJl_;`#AqV*i;jSYhlh;~eJuR1141L$tXb2?kfkp^-d~&<8(*`K zr&AfM7OR_@*b*`SdeHw7l0p#By{EDyevEOix#LW+I zs_U7WQU8fc{wN{H23ySEaj^-1i+_y&u7vfEu;0sn-Sv3@|MCC7k(C4drH1zZcX;kU z;)_}SLkt?!au7bg;&I&`1D{FWq#$oBDCpuB*61ox+A~syZ5D1FkL)%s9Mh^0mV8Qs zI);GoY7o`in?v5x>tWMzosjfUQ;N?7p;3!wZd?5`;3)bjAs(uKwb)`uA@vT}*s<>R_==D#bd20wR zrO3hbbk%I_ks!V6CO^)_qMp)5{WgnbHq1u{@tN&!LADVX-odcpF9y2ryz8&qXb)p& z(!ePQp7-s;b4I6eH%SDP?X^=D|J#7yBrnkfK{PA^v6k7QAY-A)r!a?3%NR(aKEEEg zN9`<TIbn%H7(eZHJkIwN@{-oOy zl|(6;j#3QvD{`%O^K~hj4R?vb)f0b4kuos&*EovG-61(>70W8FZjfZanV3~v9?44y z34H@%F(i8$BK4B$WU;aitw%r!oUifKI>b2ty+lNYDS5W!E23+-yrKqC!Mvhl-JCib zZNEDiFj~QU#Yx^E)s|ZhbL=*n{!Vg=({n-s$o{2^LRO)C{r3vYM z@!swcH*z{fWK&&{7GMSRaum`B-#{39wjw1GUD|J1{quyt$W|u5nGJX3Em>m%32c!E z!#l|}UK!3RP;EPvuzG;)(76V`S4Jk>$wQbey$?%E1gRjE<)dgFOoh+gScOl7R1X}& z3jB(|eU9#+KNu6eb0eo&`UN&CC%3I z=afOY#a{!wZ#0@!!jt)#^R&`G3rHq^z6k?DqeG(Tpe;n&O3Lw-S|PNQ+E=*Au{YbylMai8XxB%wzmt}y>5ZUUNO%{{N~AJEmNl$lrF0If8#&2GTZ|Z@MNFsl znL*Je4PPa;@;esjgs2tr1TqAN<>t@_uIf!iC^-f79HVX=_mng|L@m|m4cC*ss`%{(@A;QhdGmHug8S1d8b;q_ 
z;(=YZ=oI{Z2H2TSIM=yoR=?%%9=A@&ocOcF@w35W{mPv54D)BG>`mgAhiwz(hLId7 zSJAl1b*m>aJ2Ze@B%q_*q`%k@{0j_}PWYfRolbt4nm;7le4lXlGxH=q`A z%7fo`E1MaWj|ctUYm@nOzx4<}AT_xt9bvbq`{k&9bE_V4TgHn@rkfy_N0I|M0xJ|y zCNs-Qyq-5L4;i6`;exX|(~MOzP)U{ol>g(`z!(_eM>y$!iFHo38RqkmV3rhv+-+e>1YIW_9z*}EY9RNQa?(hA#AO+rx} z>#JY^DwrhK#8?yaS%=3t9Rxt z)SQ0UhYeC!uN4<3%_WDXbC#$C?&5hxfXY2w$AJv++k53Wag`Uvx1k!`y!U-2F0NdM zm)qc+_xO6e8Q&&OS@f3Y4>~+ol zDkzPftiS;kIpm6xKkH956eRbtmX|R7L?domsDhH&%uri8$O!oa-P3%`HkA~X8!$ly zE9gBWGEw7s0r6sGae*_(wNmkc{OX~*o7}nwCN(TB;konGI)gUTHzt607$?zOtVdH$ z@jyC|%YBZCaNok-yIRw1r9pI?F*sy5vvT6~H{k}PM1kn6NSA0>Ph<1M0nE2=@oPDw zdluf|lhs2X??L1x=*&ok?`;)d&B3MH#j=lmy`PA_9mU(;9TxLEvtKZ2a z;}5+o85@_dIv*CjWT0dU3p0j}n$f;u;`GZQ zrmi^z$F?e>w7*TID2>o(NXNzGykO5TVBS>7C?7K&7mMS7)2s^XT2c!`uwKD22P@eq zjjBXtMhM+;G7_|EdnrfWo&M)6m5)^DdilY)vOBxPaq{tDHbcTwiNivk;I(ACo9U?= zY59|BX5wSt=Yb|x9 zwY}8LZ1-E{ipEhju-foE8y=Ep34+ihbUbd>BvmQyIK~1fQ`R`_+G>Xp9s&~~-*-$$ z(LVMOmf+wI`~*ceBh1Gm$>prhL$YU(+wl#br-K}T95_anX0}$#ulLhI7yu}lWrqCQ zgP#!lb0GA2R%I;VKhh3WbrV2<#9yoVQ%&lc-OMj-1vt2lc3m$^`5Y=1jbeM$vI8zX3NS1gN0{HK1pC zE+SWsXVuk}<=GQ}U9rf!Vpv8a+}rgnz&AT~W|6c^arRTW`(`fGpJfwuaa5T?z)q0loqtMRlm)?n)F=)m z{ZD{fdy2x>u@FA3kudK*u8LyWyg z;%p%b;Kw;>^WnaYKb6vvPger~_Ro+mIv_v`cO&nur+oC@$_`t9YO54m0$x!kj$X8$IhYNR(e78z+of9-T+6Sh!|2(kQo3ZgIOy zu5IfbZ|hYWKY#;bi5PjGA_Xn;TY8ts#n~i}?bq|~uPSoQ zp`$58)FSt3wFmrY%C~ij6D$|JH%&s;!kIAkk(a$9PVsr&L&}D1y8B zc$;H1Ob|HFKt@9=Bn>!f!DtPxdW>!rVaEXvk*ElciK)^AeoWTnkxporR7S+WHwJ=R zGM6qE+b%k7tVP_!WUXHvWo_3|7|Pk+FeH(0mj3385*fY0&X*O#dPwQ~uP|>zbXK+{_7I@^`h8z$P2@Y`1>$B`#y zjh%%qCkZ>A6W^j-D!sIgmlv|93xGun(tfkQ75Rt)TnY#T&RfZmb8pXWL4bq-Tr*#j-FTC(S$W*38L6Zy9&>>?{TGWYi{V8H22-;P)u_#h7VVD7EjT@WmH!AUjx& zw6j(V7g6e(u4?7KLiR{$jMR%s3v&GnSU%JQ7j~7I2GjA1%o6iezUK?HieSi2SOrqwVCl1jj}RC%d5qh54%(Q zi0|u88L#H*@NT5k6=8Q^t5Qd!z2wvY!l>Olnm9fY_v>XVcXU`M)x27ljiTNaurWaJ&%$z1q`Jrv9t#6`2k`wuH z(<+&>x?JLt8^Um(%J zi%vd_I@!npQjSKC)w_Pxy4W)ZcC|dTyPo?`((Zs$P89-CYv?k&WMUMvE38oA?t 
zy@@N2Z&)SkY0n#Y&1*l;HB;@CCEz_kDnb)zAy>&@;Jf2Pm$ncp#iGHS_~2uuxx{ZtE1bhIQw|t&e1tiO4J-GK zEoND+onY4ff~!*9Y}Pz7$HGH@nl6;uPZU;ewMlP|8A1U^g$q0>nP4d@>7 zNHmum7>eGRxQe7;B7rMGm<`Hs$k^!%zO(8RJ0tHFv@M%AcxJSijd&aY?=xH-pY}Z; z(t9URIqk|VK*oC~8t*s^oAzD`xcXeg9^Gt(5~(2pQ5yn+#XyXIu*DXx+BX2M@qDyN zJzuf7t;tJSet&K@g(>P;wt>c(WDjqz4imWCDOr1JST%f+^ zP5;hsF5C)eNso`AsD&`S*sDVgG+x7jJGSlTWv6GG9pfczl7mBYyoLexiqLdki6?QT zrWe#XYpptzQ<$Eq1sAr=rv<_VbWes?f7duc@cod@B42F3n8d^NYTBJn>I)2PMH{nY zThnwtZ6PuD&G*I(Ay3pQdAb->J+4oS>0G_I@IyAl!G=GGmVJ;y_lQs-f|TrPRZ>}K ztKCb|<@UUoc4M5-rE>7-$IXC_Fn`KbSI5%9U@?-5omwP^a(fi&KFS>>?!pgEZd*o^ zvoC1LUFI+yQB#b~fiVGR{R(IO#8*H|Ty)N=|B+RfpAE+kPAS^b6+i)FKv@2AS&Qbvvu?{#-WGTBU);T8f3k_#F%hDBEbFWz#bd zUkG{aH`vFbm6?c!ou66tUppF}>$7zBq@a$`;1x~0b~RgsFXzIjGn4)#S4e2DW4IyW zL|%EPSkqR(g4Hx-`)Fq)Q^Mvs$@AQ<_SC2-n&V*j4c@^R39-Qp3$s@;$wDoqx@2CY z$&{VEs*@GRSxt7H)*V1axfbdjP#gi?+gz2F4@v2Z;g+moZca~AK&YeTb3|1HZ{kE= zkvg@X4|oSyByX$dr74Q+U|Sb5%93aLh_@QR2m|6Wn?vV@%e`a#@zj%zFRol(jFf+Sr;gr;V9BeU=27)-`~U;$PA>-ey%*L4Qif1#w$k%&Ui8B`vPsi4Jfl+}S^C zoaf}(<7EyuIy=784c&>iot6+E`vd=vPFTxagnRRlcwj9U7z2LT4y>KCaBGyKaZ4 zuIBlqv_Rx5)20q>Qbf`Ra}6kNzEXOTFkmJ~3zC{dMBv5F;} z*d|CmEw|?MK0dv{+Q3&V=SS%u6smmd_o9}`vGCp$%|7$qZnXT|#rwS&%mg*Pk1SE1 z7?OY)I2fLyw`6)5REmxbtv~(|m%{dW`oh-)yc>S-yEgb$pCMgPGFkEw^~i zX8A@&oRP7tq+ZBt9RsQ#1{Ewbxj3)svs`ZX)>m?Gz`Q~*Z#x<^$`cU> zW9`1~Ie`w|ZOnP|y}Mfil@G{K)Gvh48OhWx6;ddUExB+-(3Z*A%uG%iRnIeVv%h1^ zme9JyVsd24IDg$FiV8R5kFeNbMW~4NnC!-L<-#QcXPJMsBF{&9;0p&`vz^#xcW&jT z`Dt+;`Be_NUL|EbUB=}1_h2}T+ijl@86F(uJ?B<^!}nA2d|rd2*Hp^$U!o2AXc;&x zo5*$6IIN}`Ei*@_zXOn=%CgAJrWFLtY-3dg7gnt9oM;@xidD*ptZTcE*d5-tC-Vbk zO5k!S5*^&SkRb#7>Swn~bHi8LNVCbE>?PxW(L49;4bN@7#e_So*tKrJ?t~B zs~t9ZyX&?28~M0&m`9ofA_1n6k?Z&^6#>Q;PAwRWrmru(ltY6qF8f|J2dytDwB`?8 zHF?&li9F+5wt7}Xd^K?4G^IZ>x93K8E0wX^3-;x_DCXz95J^+&*r6zg&jDG)@{E2TO-S%cGZNX~t3^=s9omg&wM1?OzgD^b zCKAT}F=Sy$8%|3Y$5RK|e#E;Ehh^q=erEH2TNMx)SI7)m8%ttzBjnmA-;f#EEI~7a zh2-XRY1o|9iFtpRTW;kJfXYK`9QH*Ix5nJbjmss+4R;Fn$a&vvnWY|w{r2n6+${1) 
zodP%2-HGu|imdFi$74@8sf+6-&9T(9PTEzA()}nl<8MJhayJ6%seGmBIp@l@O5M(} zkeu0hP>txgNhx=W)w&Osn(}3yi^dNM;2h(&uO-k3Ypw{2wn~SsK0x>UT1haMMQn4K zmT5Y786O6hsmQHG(*N=6_iy&O3`rB2yqjHrib`@Nz62jszt0~Gzs*n z{^#l)VtFd<*HwUmJ!XO&eawnQ8dIgxo(N;|04Qgch8!!~-UhDSWHt0Joe-$=7C?Z9 zgo9b%8RK+=PsYQH2WqcqK#iUAK+yJd3TvmLVMR8F-Dx-{+Abcq^2M!J+rbNEk}}ue zI*TB(sXA@jYl3kEBq@BL^HT{UL;w_==SNRLYZ)$G zTAwc}a_dbUEnbHY`{|jJWIR9CwWu5x0Rh1X3F~HIZ!cf1lbo&ZkC2Zz=%-6WsTw1G$X#~ZLos9Nv*N9ikmw9wZSUld%bB%=nL+9iFrZPXTQs3l19|vPCQeP=*tvUK75R!oeQIz#4DbtBk;qMQu%qVCXSJXQHj)y)?J=cvl zbkQI_q0szD!AXZR1klmKlCBS{)2L5cp|SQo!gsLT*$6e?SSncC~3+#?4Gd^eTd(H6?o;k zjV3FWSbuW#U+zEHOm;5={k|%a78Dc`T>!jtZOy_mIV?u1^ZJLIJI@nYF zDQq!M8}@xrf6`+3B$Sf50FFTpcko(VKWc^+!a`X@Ng&HkZIpc*E`9P%-Jaz7 zi&4yKo$awzjtbsuH`+_qs9V{H1#PnefCU4H`ICc-n1y21i~&uTf|7zfozp{OI0V`7MOloo> z-VE-W~PFk4rEU0QZx{R2YZ?6pbj8 zfkg3_jtLjG3}xWGU0!gnZZg4;V`a6P`I98dJVs;T9v&|gNd|cvIAm3MCD;@yA_fW` z5=t!89S1n;^N{9JF<;EecT$P+p{t5)JQa^@CWy`7Hg~9IRjIX|dUfaDMq6lGio-Q) z!t&4!<*6G1(#1(mram-Qnn;cB<4qauxJSxrB9GH!wAZKZ_JJf~Nl#g;q)_?^)5C@kLA;X9v zFI=*qAys2hi!u7*O8!}wPr9iXGnhAC%bUem~FVJ{(C)nk^ZQiAC>Z(W2Nt}P9898(Au4^PS z{CUdZdRsrX^)t?GD$K52gPt*$DE2D8Hf+e~Tj`M#<4DJsX*HYPUv0ZWNZW*ik!{8o z??jPGNKq`H{ZHArjW9CcjZ}c!OPGQ^G#ceUX&hJSc+t3Qp7yh$ zN|`DbPf(VNW}=Z_<#oBI98*V* zwgfoGQiy?U4k+%`OVphnK zxbdPzigs)*Y`u$Ssk$qt1plQ3el)}Uy^S!g= zhE{>nzHPgaeJ;S#$F7tuJm}z$XatX%+*D6Q%$O!SCrbW6ZKvmniE$%DlsUpgpPbd; zABlXCz|PrXYv@(WRjh+!Osb#K?{1`fw;F59f2gNn@sr0|Xie_^zVlP#qgKcHbIht0 z@?Pca*243EzQTBKqy|(clF5`q^x$Um=?exj@>x*1J(uNDxa6HcEF3ySusi^-ibyA5 zO}MbBD-8I;kd)b2I?TJ{18v{J6dK+jI{5Hg&Q`KQX-ic|Ozl0fQ(sBih2LA&YsO&laZ2|nUT_Pk#TZwy=x$U2|BolhcKX0H9f`dJO{V$9?Rgmcky6*cWpC|%U&3N>xA5@>h0ST8CJ__aiWFEpRK0T$ ze&;$34eE@kxi-b>oa>6!LOO+K(g4|T-ZDhF=#Ge9zJl!3uU5m!}`%W)+r zL2hO{vSgzDIZK69hEQ4>xBe~SZF8ZUX#ac_$ADoxD^14ayi3Qd+~)02a_DPRuKsY? 
z;5f1@HmB8P*DqH4QJHK?Omw`PGX&AkojvhC!e$(tqwY|HqEg~`cjFEJn8M6k{^98wQ(qMQu1rO>{O2pUff5x>K5EVlIy`4#I+)K5maQ z`iygvNWztPEp5*A!}xcA-I?=S*tW@O5vgTL`Pwisse}J?D05iI4&Fs`yNO=DiB`Lj zW$%_OAi;9ztSX319%vm_Y~EN($YtWbqrkrOdPWAmL1|bml|hOGMPgw#vR}M@oKrPX zFmFao90XSJ$S_v#afDu^mE;|l-EQFABq!20k!F8qsJxG&X48`2= zY@=hxk7ftr`gW{ZuKqBXZ>lr?ywsI%CO!XlC7fin9J_hf-I9Au5$yP2|V|JOc`7x_ui*7l42fhZrdYNu9mx=r2TYC3L&CJzZy z=cKY@e`@((|Bz#qNg@b-u$%1=O%BngU0d8Mv7;e&t3$P{R2DGm$kM*=a)b` zoK%p{^u5OZwg~<0o5dAYc3?Sypyk=nMCYnY$Kyysrw|!x=+C#IpOn|(n-SV$3 z!O^nvA{a^dY%W?(v1sGW=-^q+cn;>cU-g~hXkXoYLatG8g*b*Z;Kt|9BbG29u2*xH z5?prANJ&^@Cz z3I5oOeMuhku{k)u9w}8Fz!a%{ytuvm|t>4#53~ZJXoYFA`-~^n$T}VQe%!Z zh^p0wY&1C>Af{2zHPjmOfSpD^3`!D%LtkIs#=$l;nc44IW6_sB=RfqT=V`>@HPChYZhff4dj!qIhf=MG0DguFVB8p0E!~PDxr9>P*mKj$c(L z?~WjHe)e=PcbiRReFeYi1g8c8rZg0OR@<}A+902TP-wx?G{|AEZKkXVHSPG^$<4yK zV!_x)ZslvZUEgugRt?K#p%3b`A_3<(1YSp>$uzin*`v8=J9#yU4)rL!NmLO zqT)&4OhyisXvMa9W-98+-a=YjN`7$J0FE=KCHk6%Po(+@<8j|*uzVFw)lO!4pDK(O%Az*7Y6)_3$c-QCf5Em^k{3*Cr<3$B`z5}~pH zjVqUp=OCqJy}9s%vISb^0urbS?*JLe%hPAU#nqy!-Asg&mP~UA%~r(&cG(lb0%2Vx zZtx^W(R95XE?xEAJdF+kiUVyAM?7ZEF?d;Q7Han6yp1SYWH@O|wnzT#0Y4x;@6P-* z%BXEF9&WnlrfwJId&4DZlo#f4xa}%i=389X|3F|cZVF}Zd$l_(EQkbn{|U_j+sQSR z$(&^t%%W_2yz|S;Nw#h8K5VoKV0)TB0hm>nfNz8sG%`w&zT=9QhZeJ%IdEpm?DZl2 zScyGAc_Cgt1cgRGs46Kjhdn#K!v`N0B}4iKLe#tE+?p8)>X)vjstPrL@b#;oqrFAK zun|y0+<1rpe@723|JRtwWs}hKFsZ+g*i|~^tY$JXZ6Yb&7Q?UtiPFre*>!0{;o!tl z96S#hT)9!*(ry-2wXByHH!{qCU;A!*Z9ZOLewWUk-xxIlDI5$;0KjfGMM^nypY zAjveM{6uAKD#%bagRSPOAmulYoaqs_TPe>*LJzsdA1&Qbz@#wBLe&pYl&x>t`BiUU zR#~XO!Ba~o3~FaIQ@CQe$_8*+ksF;1S(=e$MIk7L^$Ii(6c02r#LVotcuiAEnJc^4 zFv%8?l&Y>3YQ7n$uk)L(^HUu31}`^?8g=v@;5blc7mvPmNNI&7nUc?Uc#g!L#w^lw zd?^qtUfmjnLrGW8NNIZ!Iabc;kA=JdD*=I@Mq=`ti?RlSf+l^>nJF=E&eIi9fxquy>Yx0l;h{EaQ$f5L#mDwB0+#)afrq<7Mm-H$H*z z1KL zOph#sc7w@ifSPw1P3NQ_{p78q?wLp;s$&SO@ZS3Aszj|`*qq_-;zG2IQTDw~v?o3d zQVEcGxDCN+P|~pNKL_U>;Cy@0&jkYwvX&pg!8Xpd<8X`sbU&vLdABL+F0neTB(;Oqu2LfjI7hf<*`g$gOF5c zB87{P+B(C}!|o*|rSzC(gTRhyrID$VHbr!=*rYwq50+~x+bUf&=c5%se&cklKW1@_; 
zUBD<`SDN#N3H(+_!MeCa9s zv31CuL+Oo2ji<}wTHeyZR!M>kSs zar)fCVNSJXb8UNhAj^k;5q(S(X8J5+4>&VUL9&h@6cpxlbWPcQ$X<4HEQ=Ia(a`!i z@`BsXMu?o21W{Je(#3-{VAknS_*(eC5@ zJ%rFG3hI>|+m^`S0UGL}QLoBkqtADz+ws3?pv*Ddxve!Qmh_m$Qa`H(5F;r`n~cjN zXXf^<m&m_BHg!RNVJd@AniZ6?0%n-X@KvLX9ybUuUKvWa0PK4bu5y~-?R-i=yr)QYM|L~ z)^CN(&&w`@duVLom9BlQ_~0hKbzMS_Qa4`6%s7E( zG>VPpIai8)zkHSw!5<%~FGsQbHL=yaAAR{(L@hq;m{IPhL;9CDTTmCYokWJ0oB8MI zh#9#$RVkXO*Ik;%HAZhU@4G}EJ@H0j3{yrUAR;3{TVcihYj^cMXqJmMZT zx*aA}Fv+KFj*M6wuer`!98!(Vh}7a}s^QWC>b8$tx6lWba$2T@P%0rNca*cos-HE} zb+qM8G%H0zvyb)a&|1~_nvb-M8yYm6dl33ctm39^4Y)?5MHczR;Pc!BnxMfJuc(H$ zyLrhfpAWfHYtn{B4Z!nug_#5Z&NQ#Pac?K=X6z_6n{m{*&T)?Vv_|6Hvp0X$%lm%a z8pHEJLCAI5jS`VKo@bs{qhGX&f?}P@P6C)oM(DoEFC>QQ5T*nlFy|-0|CxAZ)7gGa zi2a62pm(0FR@;yNJ(X)ZhT3hWkAI0-r2sIhwAM++LJLjFZO|#?okef1XDDCIXx1J zq!>&17ycVDudg(VV$w3Pieg`d(Ine-HtKxbVHMUttng3{%TSAb!&nUb_AL`)ortXc zJ5PPp`79pow0zN~=sW6*b{6i#6qo^gTvJwhNaF}075Q8u) zk_|-HRw|M74wRKt(8O`{juZKYaeqYnVA&&#<55s9s{x9jPxTN)yo%kxsE1GpbT6AlRgdN;011zhuRREt!(2%9; z?ak06^E>3+z!+@Iz^Rv{dt3dWhoM%%-1shz@ujs3Ub3;zM_G1utK7F^2IEUU-u7u1 zJO|uP+R1n?WF;&wP0Md)fB3q7lYl*{ty5LBr=_JWhRW4WF~Zqnrw1pWGMel{l!lm6 zL+Ahkf5B0#_fJ8#`G6o{Lu=I9ghi$ICwh-kLad5!?*t~FQpU8zFk^VkTSDmoRyp*= zx?>=id(1w`zj!Es`hTMi!-t-xaB?<%{zcFFYgw(?YTI<9&2$c7&zyU`=lpj8H&YPx zwJ)>pWV|0eF$)RUJiaJnF3Afx3iJK3#>FCHfHfU3j>q|uHYMi$h4Z5&zjJUX)#$X4M|MvI&pPgv`XQSGGJJJ4a zRC`7}D{u6p?D;DLdRm&U;s4ggmhW=d-B`=a#H3cCk}}#~`v|Z0SK|?hY?TTYEM!E# z0YmC@((?Fyn@fVQr@;vwSs>!0e6C6v zGY3anp?TZMKX|yWo;@|93Zs(P^<}lSxp%LgD^Fray2{`hS1J}xu9As9sl=*!=^}NC z6ibxawW54ehHgBobW#4I!)KB%Q~EQI!g}PW5sULC@t=ObX*Az~5048QUWfhD!EDdc znWO(U0{)OI>83jv&l>xCd$$(py9t{n)9`k!{uqY#Uoy9HKOCN@WnOpOZ2xX-%MK#&ARW=j{mdd&g2ey8MHIboMMf@7;Y5ACyL?&Gqq8|N3f56c2~hdNlxn zhs^X#6|dNKZXkmhzk-0EASV_)h`L}dj;|J+SfIVwD;N)rc8ij__+r@~T2hv(7%i~~ z^1tDl)}(naJQoMahNDSkvws8tHNu8mPiqGCz+Rai??|&f?R$ga`b?3`uO|f&5D7p% z_%pdaUT<*N7Zhc=g>T`IAwq06I^M=PF9MOy7Ha~KA%Z%u2C#I+KP`d^1Px0hyP08v zGff1_+&ssEZE4ZAU4mNiu@rYI2oQYgXJurPzLP-T+E>Qfj5HlfV|KmNtG((1Lm2Y= 
zX1ns6t1B~r=wmYgfkvz8OW4jpR6-_4liMPULBBzbqZ}aC6|5M9?R)+gGITkFYJM4T zEFp|8@=ey+kmB#(F%p@Y0(V2sQW;G<-k(owEfnx_g~hynf1l2&bGtDqW|Wu2SN|uE zR+Z>r$K)tVX)8{=6bD(l0KCtKd7Bjv=WCKJ^7s(b-Af62!L=}gG9dk5bg?+V%Kh^boOurubz@|TaqVGD8avF97e!RN;sEL3QU`jb^hM2f}<5_ub{ zem+NHy~VX0`_{p5FsaayFr2j^CZ#%Hjj(Ghx{x(f#*`ry6bO%5|Ja>!u~hf@KL@O^B~05;E|omR1ZfRq8Y~to-`wAP)2g5& zg6lANx;}4C(;nA7Ftqh5L%-+^cfoSHj7O7)*XW6R1rfLd!Gu3eu>OHfmCeD#K5J#c zshL@^b=oYTgtg7L^!)4|i>XmDuSe%QtEWDH=FG~(^lNNCHa7M=8x))U(cE?-hUb|q z4l!|daOxg>_A@R$1V)pPVu9yMzU31j1#!3uP`IwX@faF;-U zKyV2bJU}2waF+msLvTrO2o6DmTd?3RgS$(Bd`hmB-jCdt6^dxTzkm@-_3;hnKC_iVDGmD9?2BA&|koomgBFV0h}{7DGn`4 z;@Hlp0Zet;b14Wfn4=`U5WW)O@owyU(CU}(Qh0t#E+c)syh+)@sFgEA)d#e8GgwsO zEj8QXK6e*I8u{M{P+WuthlaX3J9GAI>*j$q2}H%lc5gCNXFd4=NXNLjao?{6g*;gh z&j)PsG+`TivMNmOYH)ZJX1_4MgT?h!Z}H1Qfwl{fhXn{%y^aU>3#W*E>69|k8-KaT zsHl|>e$SXNth;|h8Ce5W85-#nD@#kG!oxd|(VybRow$#)TI7-=vXWFD+cC6l->FG5 z8YmMU-K@%Je0C=9;wq3pd^!-)_QK?&1E6rD;osZaKg44Ep_t%_uy>Z^l59DU3YzUm z4|}>vK?EP}`tMvH7Y?s(y`|(kpy#LJukItFqTq|y64Wn0(@y}`0`|p!pF~o`2XAy4?@NsOclcewHk9CU}9nQ{`-ner zeuoeq31e*6H6$d2nzJ9y#DW0uLB++zgi(mcTC z+1Zg~-}~$xWLG2$!-^f!@N1f&VZlg8hX@bvr@KAYWv-XY=XZ_JhhNv&WCG7Ur`$l0 ziTS&)j;NO9tHu8K66irclqe00fq)G~dP!yx@pD|~ zGi4g1qUJZ{Ebzy%@m&R?9CeT(D-*(VDUsXJTb6kg0acV73T}mQ5n#&T8yk40FO?!0 z3N)S=JaluB7b54ueHm&pE;ngcS#5Ss^8Zx4F7p4CD0G z)(}=UvIH$M4w5T@N(m^KF(lrSAIQs}?295Cp5ZaQHtYLB05wb@B3W8m8U%~1n>Mf+ zQ9%U@{5CkuRS|6b(w9U`gOQG`QLG=yGrS*y9K*D*e?}4kA9}yaY~p)s*&aYWSkq$JC8+mRGlN&ngtv>dkLvYP9e62?O&{Iv(rE8t{^c(^wms;$HNK2Z3+VPp)Erk04n z>)*CL)K|<#l;jHrS%T>#?n50kQ4VORHjPnT(3}okF7oPdtDR87L5(7yv(u<)*Yo6o zH}=NA36If%hs{Oo^%w$=g{#~-q42$NlMB7QBs?i!SatCdhmUOzOcUXasu@y_Ff=P1 z3+~D$KR;OL*YuZQ2nTo~6LwC~4|z8xu8ZaHlZt0}hRIy(KTmHnBo28Mec)IRR6q;A zNXLXRYs@Sqjc&0+FHA@m?qVO19SEL-=0>p#+-;^JN08SVX>ZOcRr!WU=8^wCX(puM z=}&~X?N8Sc+u8Vv^uIu}F&2#HDMh7W)@KX5Kpx;lZEZ22vM@T8XRpXXjI7I>XtbkKL{ ztuhr+qDnBm=^ETlQiWYOLs9)Kwfd2c8a6W05DN-UiYuKlgUD16STI}%&Z1RI_2lT@ zN;w$+-kJt{$ZjRR2?rG)wLA_}?s#^CKQtncdb?w`$T2JCi_Hy4-1UUIi;E>GGR|6f 
zdm}Ava5N)$W*_nQMgM%A(GFIa)jkl0Pm*717PUuPwtedw;%vvjO25dmU;4&>KA%3j z?@#KF{?>V|I;O1psl$qEbSrYqa0XxUHlne2LNNk>v|tbvTwU>@89ctSKs4U{SttlZ zV?M+>{c`2Jo(v(z&CuWrWe7*Scw=a=aphZwUS?&W{|zJ^h*o0ZD|vaa7G<&~ZSN17 zo?59`IH~=KQ`Gtj`M5 z3q^08ZgnZ6qky}np23X%b}Wzcf5*27|D#RIzq+{mXUiAh&fq#4Rt}yy-|H?gmjg)j zg-a(Gwis1B3K$5bB-*z>Q-!c2_8xZ<-ur$7Z~vJKn+1dL?@tfUC2c2)?`z&X|NiDr zxUO?H=i&luU|D(p`q37bFVA|NoQyAH=Ht_v4UoVR@4YW<77G2lHl^=l9ApM>ZU7IU0((P;pT?iho-5?`Hc}iHbG9prf0B^~!_z%( z-!Uo<1sT|%j>a#SB7VXC!^aNL!!1*6cCcF0EdSrT|c^BeyYm>C#ETJ!9%5O zY)~x{f|%&i$t%>SGm6m0UO*Juf?tR~qWnGqW(7hC_ohP=osj(&x{;afdv+Qa5~V-% z?$}3yzU}v&4AA$CzrO8Z>xP^5K2*;i4HusBGd(;q!wqVj+Uc)h#uKHdhr|no|C8z7 zqzyGcaOgg;X4;yX(jbM8RxZv|$_etyZl@dSGlZPUH6{BX=)q_W&TRig6b|5q zVYy+K-A$$Q$^-~G^v{6FTx_y{C7D+;!&puK*}Ev+YaTfI3agavfkItPT3Xulji+P) z_F{W=ZS84+1vQI((3$^ThtfdmSi_3f&*?j%_$j;Tkt}Yxid!;+dg&@UM1j@W568cl z#ZiGZQpC>vB<}F(tcLo#Gx_K_9>a5hT0gdr;OFNDCv#Y!*9YbNJ^|7>#EluPnZ>H(0Gg@z*4%-N5T7*gVsKRG%V z^D&~SQ2FxeDFZ13|E~~j&<+DI`uQ#J9i*iZLdTH`YLK4%GbHQFixK}#9{|n?38lU! zZeM1=p+XAE{qjXV`lb7yqUDr)O4Y(1KPl$aAdRJM9qEM82vUpZ(n{o^?*Y`V5Ff3%goHoUS{-bpzoPfW`EuM-87+dh)3(l##LF%v2PxG z&dE}<`{#m!E5nDU!cbPE!v*1M+;Q8w`n-EkKYFHG9~Xw+3km@I6gI7fCtl17cW8s z5Eg&fue^^iQocpKwgWBds|_pOs*ZZ!o8F!Mvey3>&TkLVw|@_i*o zUZW<#t ze<7%s&n9s+jn^GoG@dTd#)N3)y1R?_-T6`crMDC-^i&amWA%W4O8@}bf~8bShf+iP zb;Y7Fv@%%yY+aEUXT>P}dO9(HTN zfq^wA@RrGmiITpv=wUKob>vy}T?k5Bh9QxLy*kNXfHtVJaR(dLrD6KoMVPK%3Lgen zrl7>-Q3lx$#vEiM&BV-{%! 
z;P#B-M&%@bHE461S4aZ~Vd&`FHc-Qf6~8h#iGsjIdz^%?Iv%$k`#LQbJYZ&FXRNng zKO}mns*^>RpGOWNQ#~RMT-9I1d-co6hKf2t8}nK@vg1H2{MjhCQc?E4huZ(3IS>+z znCv>jSyN8!7X3EWeLy${6CTI3#MiYe$&i7hKvI8uYO@K3=P*EvxrX-N9JqW=RcB4m zn_%x{!XN0;!NAVTOU+QMko)+0lq6nBk*ab{xOn2w46~R0xJuGfFI8zxJH&IiKFv_^ z6ciMgT|ecL^sU4#f=Cuu4t3+K84~VTq~EK<;E6bd9}xh&NBcb~Fcw(?lQJdvdaZ%n z!+LoWyk$E`$WAf4PGV_-qV_P73wmO_?tVC2zHM2xh~F%d>D_t^hGZH;Br2 zu?8Izw=NkQu&L~US|!p2%eVRwLPA?BCW0w19T;dpMv;IQvh_|nfvQ8399DbEk`H!c zZD8QL9AWCu8_OyyDM)A#yMUD(LbQ=g`UmDgFvB4T1o||#Wk}|9^}|7tx|L2HPD}D2 zjYX*R=e~WUF{q)h{AYtXNB`=T2Wc0VnQxV!!XL=!!fQUk71EOulYLpA`2z&P5Y}+G zfU|(Y3d!&!LxilHAh%(@)a`E*Nn0T9x?PlTU7YOaToLJN-V$S!(Oc)UAukOEo9V^SQjmGOf0ANV4{R&7{&VLH- zAE=*Q85n44YUUPQTtqlg*N(6jRAHTg&w$P^52qV_^ArP$BWjT$cMw^pjuqeYOCWvn zXTurge+_40=gV%^C|AkY*Qs5I;UGZ=6LbG?y+T1xfj*T$Aai~m?@Pl&4PXC&N#|lK|3H7_eLndP*aYHAw*V@KEJE~*c(wcXpA&lKQ%KmJvp4gPgXr3hlmK& zbs2a{a7PFTDA=>(@^(DWiJl-XApu3c&IM1$TrRN?0#@)#NwuQ;S?7(<)B!?0iS`X1 z>`rC+<)QXYG)3tXShai?)izKD)e_`U7(<>6#F)T6smPWna{6o?0Yv*KTE4I^Ea+ja zr_QIJ>FMF|@QB#;r8}jm-4!jNd*20SRXG+bUqMJ%HclHO%Bjh;*f@M)Jk<@#{NC*Y z%*@4$*paackenYYR6`rlVFDs)zMzwM!S=`4kf>zHX~}W>@1y+v z$NU#YLPfBRCxk>C`~57XPZ=uFSW()is1SV>UE~j%>!BFCTO_$fHE%{8y?5!Y(QNDV zlVqcWofmsCmAvj`t7R1wvkhgWDcL5-|MVdh1Ov}Ej*Z4RrE>+4sXLnEmW$H}oxncj zB`7z`@1Lr~K=Sg0T~Sa zr`RVUf~MgT2$jrTWlQ)++=%xzr84|YhLl}*~Q(- zI%0TUquRhO6gj^kjsMjqD4LC>T9Hs4F1Vn2I=SRG=XJo(nG)rGs?$FiOvFDXaqnY4pHf6+GIfw#4RSz^^E|^VClte-HcTqeuYd z>7+WT|N4O31Rnc8oqI>C^RtJy{?ETkqO)=v8yiP2q=5s~9lB|Gt%k#a5}}LBO+MYz z1;+bsZ#H71qM}q(RJ7GpRin`~sHtqAJEh&j_W8~}YE_|alCtgN_G7sE2wx1{gRK zj~~*j!DHgNNg(ur37(l*3RjpQ8U;5|iogb85GL>Sc%Ix)3TMi5pCWiwDUQUXq+&~o z+Oj=mEDT^Ck_zOsB~NoOWJ4{?gzl|S!NC_Ciw&o5R#Vr00mN?+zPv&}RyYo(1NA;F zgmlMwvPc7Wfz5Ovl7N+(U;ZQU_@h9+UaJ>>yB@H#t?1@tep?|##vcKR-MC1@GJu9 zRdJ-8oE$PXr7V^`PJ*(E%0iLKAHyNO_w_R}qKL^=@3$8KsG5QTy3(n6qr=ioq_5WF zG;?RDVVI1q=6x~YKa!l*qKG)NxXqEE;rdyUq}q^(Z#1~`l>$I%FfBXVXc(>U!|^s*R{ypo7qg-L>UHKVd9pEl%YHFITrvY@C!%odJY*iyJ zfrBzRHPvKTaTSL1EiNO2{N`G;A 
zjQU)ps+QS4B0Jyqzs*v3UsRd*O_v+lRq5Ibr0daI4R{zT*;rfq7bw2y^G>5#CJA~i zlsN*hoWLr4JY*rO_r7%r$IvSPNb@I4txyy#i3UcS%YZF@%|}YhUaxDBY1!cqt26ht zhxC5@X-1cd1-v$+&#PZeEH2uQQI-`WDCol9G9(n&*I(_GVgg4LjE=bxtvnL0bJwq( z-&*`anV3JD$es#?hDqm|a`jW80YkZ{;>6m*qjl|H4)V5G#zsNI_~1x&{C@s%Xm>I5 zii(Owm6c)rkUo2+I&eU=Dc9`S7#H9ad_95tM46IjBoXyE0hAPFuSx-xQ`c+n;CX9% z`|dtq3D1UTfn$mUM3l_T*2i*XSXo%eNvk!q>ca*2`EMT{fjO#wufX~Ag@H~*J%HUG zy#EC(378YYmG=B>m^=KV^G5II&!11z|6b_ZU`GJF5ui~kG0ZX%_c&QCD`PvtesY*a z0^cyxe{8*P{!nx}F`=2HHnp?dTY2-zqjJXJm5xav{f3u&eW5X0cX>$_{^R`Bb5B$Ou>r@|1k6rj}O0Vp5AjTTV_6f7px$z^od4 z*kii$I{zU!p3+kP`1tEMHd{DAOsIrt#hnv^lKO6#%j^5_Xy!8=2DCe1&;6S?creJw z8PK9I<&?#ztmjdnU_Gz4M%U^qQ@*6E&i*rlZ^qE!8u=PooddWlv*;E4P&tkZruyXQ zkn1kk74U-!WwAik%AEXW#+7};_Ox_3yu9CVnWyz-6)<9~3uKeDIRz`J{V-Ebbp2rV zs8={IwLSTw2JnsYWP^BgRllT0gE#DJADCz}^DI9z0A3d$6tUL*d;Er`cv~nCU^56o z0L|O4AT3I>$WC6a6>v^y=}&(8sb529`=m@0!x6hjU0ybHUMR2KSVt@pUf$DV@N$1f z!5(4!I;Lgw`xT0c-o)V{O8F#$dHBw&Pcpagn za=Mqsj!=>>n9_$~L#UqIRAs^V$dQ zcA_W8i}!M&u!~}QJ`;V;2-in0##~AY7S!k;x;18EYZBKu&qQthUBe7%jNFz!FVoa_ zUY7~?&B{DGxSEb`h=hq@pQ4&Pzmr8LgTseF+qk{u$**JvHwPVQkUb#JdBT`5_3e^Y zLtq6_Ljqg_LW2n{_CHUCAdDZghpTgGK6fzs=s8|#MhOzOUeNM6p0Jmt?bkYu4X*;& zPXp;J@3Vo_s^py`bz{9H*@}NU(t**mx%Y(poR8 zhvNf<7Qs~hB868^=EsR47Fubbl5oq|Ia; zwi*n-8_tCCF-|ymF-|p;hs-OT9sy*eL-kCr#wuEtzdJR2A@C^_fVYZ0J;$Aswzjs^ znG(~}7b4yScNth%S{fPD0Dkf-po)p=3nWYDv(fbN@!=v4{*Bil=zH(sAp~8;pXetM z^QyH^4ldLZtlq2q8rM6APQ}R7bO~De19wQ!-B`C#lzW82^DlksC_wqh4PUV9sM$Uv;4@n9q1^0YB?kyO*%E+ z_99axDKqm&GS90U`0!{~rq){qT=|8Nd!}a2?3k(zbOM=06&3D^C=zzj^ZhB{_DdQ zuqhO<)}I6&Oz~Oa$p3Ou5HS#}_LQ2sOnuc~7YuYltaDav|C#i3q}cZQEtc-n1-jAy zp7sBn)^~O?d-!aZW|Iql?1txk6Fb`0^e$Q}F=b6?QOxW(Yw6|Lm%SGh!rmyi94S#R zRe?$In^-X?iy|EPb$eH*=pmZb^p0}2-ty)4{9L^o?;}K#7Ps*6bhnUNXnqBMP^S8% zQ0Y&+uQVsaf!*A+Y+n8?DK1{`I`{F}F;i;0I)VR;d-GD)5-rQ%5FkCeltLeHZZf36 z43nJBF|m1-7Vx~5_7vOO+jPsmHBP9MvyvzOP$Rl~_-}0Xg1xc#uylqBWig4-(ri#= z`=X)V*xXzmi(OufPFE6Svrj;evvgUc5#Hpg`JQx9)fHA|-|vJeU{Qs(Nxdv;bN)G< zRnDx-}dj(&u+^ 
zk{7Dn({!iI?<~Bv4C2j-7rabH6;#SIU4|nl@?Qu&^M4jb>jx-AA6-xIEJhm7u{I~y zwwwvgEHCbE)F@q9_2x;72zjOSexgNuYUz7J|`&czm)TpHG^`Hb678cw{6@<3x!>Bd2n zm5}LEV{bNf48_^?f8Whj%lq~b&gQDBFB?LQruN|d5Tz;JlSJC;{$z7awio+Gc?6r5 zH$kys3CoQ(``K8&XT2jjm5up$+K!ngm>#aEysqP+(dqS1^F^?^IYvJ51x^G$?RS$~ zRCv6aJ1l!10^ZXy);Vi`C-}W0R;A@bengqO0*DRSRS3?Bjyx`-frWG}JWkc_L_~ux zm)c+DTz{3AU1{>GlCwfVTtr#P#;tH$b)!aGjt?auki0-c|IWBKcm$6;&$#_M80~4t=3xKQiRtV@8kl^FslvABjV96l{LqlDL00&N(WXnFS5)^5;h zt4x?lzT)^77U^j+VS}5@2Oz?MY(k`PfTbN;${g zko#pXUDGHWRy_Odc)Y?nl`ldegX*A|dj$xSaM)z$>jL-V=|;_On<)v}fMnYFlw=}K zPkbAE5Bpub>Qb4_=V?5b$fJ*`60X3!{-Rv6urDbVc$O(I#vq)^!N{@J0zv5|seISmBNy2E3xj9$IkqU0ADn+Tq4qe6GxP?5Y{) zz&enUZYdjr04MctSQ5~A;?;^n0wKCvu%WBR9pfTya(;67x$`@0uhLs{b75)V=cSKU zlDtvv5rU*LRLLMcoUy@t>T(9TZI2PL*QLHNe)5&(4Osx}Z_T0FE}; zHLIfe=Bfb)s7W<C?UM@0?u6gn{9IA*{kCy{N+yCT*l{X6Un-{q^-QIe(1oW zgGO}ZS+#`ww({Y!PD<`6Yn#UO_BjBXhlhpcgWx`3h}{wjVtD!iOmmg_Wxjl2UDl`J zGPdTWaeem6nZky>#U^E_+%VUc;qv>ThB08gDf#B;8hvC7tfQu@2FJOBw)@1JHa{m9_TH&zK90^74igiSZ`Z9b@RyzaW|`1+ zB+1#ebOiYKe%Zs7->+X;z1jpu`$)no@NIqnnMsg{#w&+~YSfo++-H4^O}^iU(H8QG)dh2AkXDcOdO*BMucz8#?K-UhJf?FsrF7k}0xeFIJK~ia;$;?|i7||7m zuCriqvHTMNh-xlQ8P(AbWwd3BFm+>&<)~n#JyTCp1zN40jeS)chHq>oZh6ASeD-NEdDI@ZbMd0l41K(($whVyH_i%1r;-& z`=}(Yy8yRWM;Qyv{~1mlytnk){izUb1}9G`?ydMXJ`!Q4&*lDxyYXbY6SXhaFGiwP z&)7KTOekv48bM%{?JN$}gkuBLgJe!89uQBQctM7zrNBP?6+RcKdFFHtCPu8KJuqmgUBv+S0~@i`V^eGRh%j=(Ew8`F0kqDt~+qJeiX`x+O3 zS&5A_q?n7OjzP0Ey=ds+ld<%8rb1)Qr8|`p`J%|zT#Yk0ysW_Vsuy@s)7htbMQua~ z=eyDre^8gYaGi*7o)S~++8Gw{FgCtCfDE~;hK2rxxt!N$xeXCmv5|=?gnb1wM7-$$ z;3ay`Ct4AF?W`3LwnHDAteFyJX+vOsRpbrMxNCmfUS6?e5J>;&!*ojQj?w$<4zrW#8-4-=donfBxVBz~Fj_LP z<^0NEQ@@{)M*kksBB=ml~p6LkjjXcscFpAqq{e42KxC%%5Nye|( zM~?ioRztEkFuxlkM|e_M_L*~&=E4&uqY?{~9*S3wa#5B$o|_Y-l6dqS8jKq)Mf316 zZr(uq83JdVeXL3R<+JzDU5u|M5DCU%OfU3m*NE;~5pmJvBiF+DSb7_GMh96qIon;+eD8IwURf=C+53=UMgI~}4HAJ= z2O|pwKyhQ8fpoI?WAI+Xk0Tz9C*)@00xe_cw@mTN$eUP z+r3$vmdjY0G_{s>`R#KcPi@#aW@Tnkc0MwsUtAV-@arm1aQrzbgL!`u*A=!}0G4uk 
z|7(cT#(+&CMOUn1B+BR0QB<&}C%yn9C#lcpvnLb2pzr>HKl8}}Z!c_le0HQs$2ko_f= zPlb45+k8;Qzw?zt?hnN%pv|Ku7M~A*^YVFvdGUSedNC><%i+ZleR`c`W~v(jm-)jt zBJeAV3R4XEwTac=qro_!?9{@o&0UEh_)Y)}H8nM5-Vn7|u9yHx2Yy>UgIPZ$_j>8K z|AB5W+0a1K=iJ;};aQAqcE{u1x87a*=QPNLjxDDQ(2w7a?#jxNf5bPowC!i92v&b{ z)0tJA@e(B}E%~CF9bi^c+}&MJ&`mct72d{EQ>_(?6m)8EEe2zrXD~EO3LPuw$#Lfk zVkrvePzUwC7;ch72h}c`vFxUrOBqG=1e)!j)bDP8FLG#YX?gdPY0J{W!nA(}vCcG$ zaJbP`*|yksN{f1_TISt>kk5wDGuQsH$g{IGTKVBm{0i@NB#*+m2Sp3G-`-J2F0f0_sj3M4;ug7>b zU5C)15RHW(bH9!`|6Z->px`)W5%%GVN^$&zJsy+<$ z+NAheAol!|Yr1G&7eUG>kJ%?9G3IOxFy`Civ+jD0CPEWGp&qB_8}AtNuF*{R=;X|2qoZ zjXI(D3S3pu4{F;6pZxdmi@)IG-yBjezcRpd_EGsQzE`sY$Ua3StzANu8-u@G5OP{> zuDG<=>IOhE%)cZG`3&V975&Mc>{{flriSI${O8syFo8mTaUe-gPY)iW+GI96JG-2f zs;X-6$XK7m`@{LE_e4b%q!C36p@|XU@A2(+&}D0>C@3Oge##1xWy(AyK+lqW#UFvH zx#MD^hAa~3rl{o@7^ueT2Wdv&zc`|`8G4ie*%G7)`Fw72tbD>j)=h9)|9DgqAeS9d zxHto<`+c>@1mhaTNb?d-tCF$+iDVY!s-dM-Yu1CttvQ&F4h?s}!L!EnVIE47mWGAq z5ME5j)2r0KU7^gaeOiE$=%cmo$W+0V1hB9(&m89YG{{bVIzbE7G6Qvz2?!eH^3a2_ae*Ey}a^2dlp^fN{J~mSGoYP*&~MVyV1Oz6;<>2B){% zD#{{gngcuT^5vr=XRTlfs)6}ZjK-yvckSt%D%Ckpkpw2EDmu$-zSagpda^z$@w){L z8+dOdtS}4ZSBn9V#{*DSX}ezj0Mu-0@+j)Q=8^6aTGVVOkm3*y!gOu^va*arY%NuM z&gV506&Z;|DH=}xQ4t9x=wED-YZ9>d($717#gGeYq9R&|pAYi_#3ZCV6J+lHmXhB# z@xa4>boy;+;w4sXko>o64s5>EKhzOJ=AgV*L9VDNwn&m#uFzDkc>Ly2$~F|rGz8S; zW31?rQK9{3n84yr;Qe*1Vf>%c(`9ni9UUE&m6a6?%F4@s3=ZxQZruAyh!*v2NTo+d zM+ZoDm{kE=+KxzD#;t=>orK6w8i7J~H3%rW(j{{kMbdg@1I!Yz^AjSkWRWHIfza|* zrLmax7K-j*V+b zbPO-D4E6LP_zvP;s#Dss*`kz4LBlt1!sWX`1E;s%V>D02f(hgzarcn}SJKN=sOh(i z#^CKv&o_NW-Uti{s4;J!klV=!L-Z_``(NOOwCK%6?Cm{Q{kkjEt@ik5;fptK-ncu% zsEazCyScdur2lYiFvs}Xw|}{g0t|2)IA{>m}6UQ&WXpcg7+k(ZWJSFP{rU{+oh&zkw$QkO(*v z$CF6J0`{p(juM_{bWlXlACQI?#|^l++S*#jUAfOi*Dig3YVO(A|&cI!bUr!VeSg|_z2fJ0 z|NPm(ckXq5jwQr4)nQ>P;#SiwfWT+PLwtZO4yfsmBl<4;)w~Z2U;Whp>&_S!0y2YM zEnv}SJ6(LMHkE)jy0jg!tgoVl@>O3WjOoNANaVb4t8MO|gZ zMV8w`8DMe0Txim6U3v`<3rNFp^X8MqnU%}AcCUuZ1N`zSHw^)zpihcn2BM~a! 
z&ia5AgNCK#4Ho5&`M)txB|1Hw>75D{#;s4#%7Q?WKxV@wfTOY|jN?D)r^Xgv0UDHC zuRc9uY?CN*=)S6E9L$3*7fu9WHwGj56wY-lfTAKn36OE{OE}cLG*bzYq#FcfGxa^p z+VTG%|1>EfLima~Xv!jTj+kde9zahM%=)iv;{h9;l0T-#(!Fmm6zf`$!s)O9Yv}5R zd((B(b}n^}D-#I4)j;aUt&AXdbHf^C=~=x}{|6=A?Ym9o3z;T{6&kkt`uqctD83IV zXGmU#Oq<2X$s*0m4(xOwVI;?Oa^cIEB=9>CmM9`%B9-20o}x~@9o`MW!JP`6lc`aZ zwMLJVy(6!i&%-V%g40}f@%dlR z1q*q{~D1*-E8;9!Wa z+KiF+F9R;;ZnW5^~k!Khv%5_K;JH5q`gUZ;Wg?%1xMWm-(Mf=p6cH;D zSu0vg)f+1tDMF@=RZ1g^%cHnC@3eZ0@m+tG|7}qzDZfoVf)S z*xlk~;c)1(6>5O&%v7Hmu=ko)1CyI!y`K7M7D7F3jk6wWM5|ve*8pKMJtJdroN#|{ z57=K>(G&qVv{_)Ye~wy zA0tdSS*Od!6dGe{(5(waj!oNyBK=%_nY`3bFH3QP4NI7b0hQBq^T zNnQS<##Rr83sb4OZX0?UD}K;xYeM;YE5C~aTxrGZg1bfGnx8h4=J7$hF6*Ey^lb_X z_tQAiPWvklQJ^OAoFSpbMz(dH4Q^wwQ~KeJ=Xu)7ao9UAAbx^nLS1Wtjzaf5&z_~5 zG{KC{_nNnT>%6<%U;Ao#X|Yi65MuJ1@&iQ?EQaV-(9ryOqBby#nX6H>3idX;4625P zoovYGo3iNY6o8y#k5sfVYDuqr8kkKtyP0*7F{_$1ezRmK#n%~@Y8J2xo%0O_plTVP zrzlJ$1!BkwuoYB~hLUAI`uSw|cGbJhKpg4tjt`noQRpA3*Iz2?Kf%I(V1ZM)*cRvI zmn--vp+Zf-Y5m$=w!69r0dF$^VS=>cO2B`I4F6D2|G`B4gH8SgQE*=zEY&psmwxQs zwkS?m>o7o;Jo5cz&z)n)#cSP1kAV7F(#ENWRyf?PBCUc6y_FSN^!~dOjqD&e+y{9D z1xP+8Va?nQIWS|;J@YkuS(syvT$Gd{M4i^Z49_CZ8pD96{VosP6FJrUW(C6}YhC$a zH)_8Ss`@Yx>w8n<#z|At(sKXnMLAn)JD1f4{LgqNv~LJKWAhc)5?tP5@d4z!6!fDy zGjA8FmF_e*UxQpAgxmqsPqY$qdI&{jTL4_Y6nn$MgZqob-+f=o|4U0|3e@rba5{?Z zZRb+}e}P^H>jOiCr5!LlP8&wUZYvWE2Qp}|f81FuJULbD`qA)3pd*pl6OpqbOkC24 zzyoIvmIO=@F)29$uv$rNvWbeGuBU}T*%;?exyhvEkSDzDZ&{bA=Xu!pre+ zoB)m|nHd^e_Cot^LYrXX~%Vzm;OW^OOA*77Nf*n z?!TpGggUbgxJN=WcON^ryRLt4PqQn(c1;8bk??qH>}qki;wp#zEdz!{{Hh;t$+ZZg zEQ4X#?g}VzSf>x2g|`MzA{RNL6`NS9nJUbvxy044dTa~{RA;gfNI!i4K~ponU3d|S>1wWE+XRb0Y>?w{>-|In4Gj&g06b9lmU81s;4S+0 zh^OgR*<9tZw<-@`b$# z+lld)^Pd4E9`8|$%=`GfIDkLP)ft@sRq?1nW!2`QgH4p2v#YQQ$2OD{V+yYghhIoyLUC}HEoBy=98 z)8t`G#s>Q&65_wIUp?@G#&DRvNMpG#`MkBV+PZzWO~~$f(S2&(@7}DyW_-q^R)?SU zhXNA%MpRt!78x)n(M2(u-?<87SmKW>zpp!i>LaAlZNg!DR6B-*_kGngqD$NF$)D~} z)gCQLF-c7(OghYOhQ(3I-rI(j^x}EA>|dRk`?k-;>MtS$-j@TjzDxVR)oGTOor$RC 
z^5+gtJ+4v`I}~UgCc+OL{;ivPH08RvskRJ4!rZf7c{7^j7E4Nj^4=Iz^2H6QT`oI7 z|CqKuY5^2bX=zY1W}3YbK3}F)Z>IbGMbG8Q4!|ZD*M41<-6eVKeoBHxUPTp`Ton8- ztbamH^|#slA6BfKoF)zL%X`eE=F4zvEDS3Og7xX-yhOH!@T*V}yPQNa0Zam4gY-+L z)qKfZCFS8qrzb#^0u=Y-WzXfk&lCUTfER0<|Ce5xua4n1D~2aDVC#QtTPC;w`q0w? z*iRGJlg73-+r!(%6A(wzg&a`8?x~U8hI=OrXqlLd{$Ydw#M3t$3UpRKq-D1)vh>{5 z^pL5qj6_342>ds`plQgKw2(C35o}bpr_FiIb>`y&IBOs?d{Z@l0Cf1pAvsIM{47BC z6&C}^S-?S(uw4|mK>+t86ckuxa6GfA)4#NpxpI(n;1dv_$;?<^Pf-B=5A5Uu{da~* zhUPyRra$)wKqS#!_-ZyngUz|05F+56t`Yj>Rfo9JNT#09AE2Qd5-N3*+*-}DYtL&r7+vp8nt#+OuB!p~CfenH z^G&B{A2299OAQsiL#|U2KaxCqC1LBTzh68sJXj4>(WcJ4e7s6WGD~HRyD`j1HV>P{ z-bers)R7Juh1Z7%{yyOOTXJl0@o>`kU%94DiHq=-n#aFX6QHfzGohG?2HrlhR6X$8 zJtt+&@p@SAokiZ&@oz3x-qJHedY9ghBxFXXmzkAPnM|ei4s*f9HO~&jD=SSOrN;@a z?=#((NqMadN1uR?PsAVM&UAPCUAZ69EkIBCY#%msBlfmVD;8#2Qg_iRx-~!`n$rgcmJZK7%B-QqGf%dwVTt?(=Y+R z?P^fI(V&R#K+2F4=UE%FG|Zbd{n*W@4ho9*H4oe?3L!~eQ-sLi`-g+MwUbs%&cF;W zT_eXD{wteiw1XX0b;mbpy18ev#I$n+yvu?DuG5-R*|(P3q}F>cME{SJR|VK zZVx9n>;_tbEsdnNW(w{qCMyL=$?(yeL`dMo);Nd?> zz9Mf~u#=sWWOKYQMynrYkIsytPym@eGGYEd=TXfd)i-+XkH>8@$thQhBg>|Z{Z#%g zt)f5O?41LS>ho74G`V@__TSp0C^g>DaemXirzA+a%x5@vJ^=l`B?M?JFa|pbvLs_F zGUt>DN&t`CvH=|P>JcBMXY?p|+L8q(ZwDtS=&rkCjxPoNg; zc)Mz>!dvxEdFKJl{*8toJ5)bI_bKmb__i}%4G)y?hcF4aeYPZ#%)3$UAj=i0Z>hmh zGQHi^J3N)}hI0QsgVtXOz#Abk?sIs_##`9mRShU!7} zAtUR2AioH~1X4p8Ip;N=isFzvX~I{!UOL1jo)-+(54F6<{lXe@m3?6=b5+AqkJ9?N zTJDQGg_80*%KjUX&IG&o;&1qBW;Ed@@eeA|W+R?uXx z@9)#4oKCKP7Gxvow}+wG?ZTEN6{>9MM?9m zPjVNu_IG^H%L%0wwY*4HJM~amMDg0W$*A`(@;8ZEl^$ux6eA1XOI2gjyq^zeYb$IiALxxj)(skJds)Nyb2_SrKsW>}3-@n<@*Z1tnlMTx- zu+kKjA8M>Icx!Zm?ha%f{`_PKiAa2`^Ht(uM%$nPnkx52k;ZwHXLNv;$eah2lcJHk zuP(S9n2O;mgzG>yL~pj1%QnkKGA&F;XC!AphHp&!%^OY4H!)k*^2SMedU;9#%_oIe zrgwZpg>TKTIj!l^4?aqMOsy?Xd!Kai-pb6^OQibte4ku;OTH9=)tk?SX!`KLWVG_t ztGODhn+N>-{Jds|{`$P?SIrH@*(oRPaRa-navBlRI@> zEbeV|Z<&S@MSQO-K&h{^(mhl*y=iLlGb*Fi#LVVMX#iODhmRxe`Gv={Q&QD3nPM5A zCQv2p8j3nq#s)P1TKp0J_O0s) zUjm>KcAjTmY`zZn2&hcevX@>ziU$Mr4M-c68l#pIjP=i!q}#@tGKhJ16DhcD2DEg+ 
zuyVQiA2Ty5Nl8XqL*`{V+S&x`Y2v*y2+hPVNpehZ#@?O18@cAtzAV7D<@T@qA}qi< z(_#on$2e4|#tBM1)?D zw5s_hPEG_uB`W`^PCY}%&)@m}e=5X(Ly-QiOaBp={HNf=1p^L3YJ`AP$X48l>`mmm zL+;((-94`!`+Kj$yt)`E4cgbZA3oF(1FZ#|^6!1JqBDyrK~$pyCDzV>`EPV&$mpnc z^~Uye6^PHf15}3Kpg6JV7QEzhT#!F;S^`=cG}KZ z3NC2p5^pexIyCK~l!=|P+~Yvc7)SsQCvZ~4Mg^3Ikd_lz60VdFMs;o(=bSWbmb#)< z;~zBNA0&Yj4w1DXj^}5@Bm`#!hJp3sG5>+~i}0_?`IZdMsHFJ#pz`n6Zd_58q4_NC zdPR!Ksv12$N}MQc$3S%D@cc$zQ$QDa7sbC@LVynO3dg|K)YW9}r3#^q#x!~0xZT0g`s^~OFi(*0>Y#;FpP_io`a zeF#~1o5M*TVUG_&c?b{(E6&TqY~&aBe-?8-+L{CbFd3G?k<=ET7@@$q;|TwFLGlJG z!<5Xp5{uB_A|OF;SL!vzP;=?ZSclW`eZ|=+mU*G9&&I*fX|uAGTR^{HKHS{%_$x^^ zvM!j?2VkB})cC~2x2&vc!=kiJ%>OTj31njb!dYC{mrv0LgEZG^2I&*XDqe!FmkaOD zopJ|F___Njj5=L>S#B9^iw<~8$jVa)zm(2jmdO_VVOzLhhlbHcL<~3EBP;= z2dEuYH8tU+r}M=Z_vT&1^Yiljn@y??sTqgAYEf{}wYAC8cfeTOYob;9_;Yx8USbdNSLiQM5Mj-U-5Lqxu6v%+ z2g=)$z|oif1DdQp20qn}#V1dUj8cCr`{(gmMy6``%e6qdI-ib>j~~TPy#(x_e9!|; z<@)zvK47&2Xi@9xflA0FD_;+}EMk%mva=O2!=yAeyWa>VvSI5#$RnyEHgYyJYs9;t z_Lh5uYb4|X*;!dL8XK8FO0UIMnCt&!k3!O{Vc*XVYmVZcmL^kTYNqJ^?;w(ztucWR z!U&_@50*nKHw#}P0mqzg^%5XfZEbC!B~VveTU5|2^SkX0kO|$#;s66@XE(t`@Pr0P zKeS{xtWowEW7X@NC@kK-ef2hW!R5I1e}k2rjJf4IsvQ@Hhlfv03D%tW6CO!ciI0dL zVPI@0{BS+m(u?e*A|p;!Tm97>P*z%6Egc6gw5Bq|X29;Z6EjdzJeOxZ$V4+2p)QvI z;3X!Ze}k8tJ*B*k)fC#Ij<=`XTd#?%DH&zHX9OpY`VYTUUE#k;u$TtykpzorDY?l~ zP`}qaa|a{ANRGvw<=o}@2uzkw0L@%30_}LfT`3f+|EOtbI5TA`HaL+2Z47N(YOw$9 zEcJ%gTNF9-z#DG=i)Z?gkK}zm$Q;EW8pcQxz4ix~A!W|L{ZubRye;Ek5~E=7MciB` z?>GtvV)NH>BZO?m86{jq*~P#o*lb%OcsT$#)u}HuWGaTPu3b8B$T0J%x+(F+Kuji_ zs8h-8f2plzt=pGL1+)K?*wWYkBk~3xX;Rct+dcW0GMB&ffWEac@0BI$>hJ8u&0!i4 z-it{XlIuO0ym`t2GL5B~N|AZz!J+c>3gJ+hm605FT!+AQtDjvGzw=P2&%6^>3CH{h zZpQ+E=&pnQfaEb_BdI9c-iLi_P&#nIgM;M8iNI1*#6zRnn;_W^mFM$+uUyVzE;|1`s2Ud6siz&67#L2TM5!EQ{BTR@8)dqT3K{1@dySXbJ8MzO zrKorXu8`b3iR5;(4Fv3Jeq6IPe>+Imzc`F!qzihp zW>ZSsFH0TzrP)Nsm{TeP6Cu`c{q|RVUT;g9P!c=V80GX#U4z5()H*Uf5N_nV=2H{- zp`*-m9}QoDEgV;}T*hV*oco@|0`0da(0-=$=;&9*h(Uu67x)w}LquO+-^AeoFI!NC z1f$d+i@{pKBmP_;ia-5X|IZX3g)d(&U76DI)%nr+*|Cbcwu`sNAKms!P#YrYQ3Zb5 
z*wjn2z6&8igPBnIp2d)IL4U3N=Y>y{X&tsXy@sa9^|asLZ(d_tqPkJIzVIT)E4)&A zpE`Ep9JhTM#tL|X>pt@ ztkD6L&&H0q`xFKJpHkyP=zxH#VPERLmmW@U6S_mboE>dB&1QjlE3esRt27=E_kq%B zaJ78Z&%~FST7IaL5N*28!e2j?GFI2#t~B^wS-y&**S>0EVnWWLZOoKSUrvGJ*PrvX zONucB+R!Vi@QYK)$1-59#Qv&p+i;ZUV_Y_m5Ki0E(AEy*f(m|!Uv}Ioop`MCT(W02 z;~$dAy5yV{nUejcj;}(bLY}Ho;!s-DIYFWhZr7oZEsp1FSn69W5dNr-A^uHtNpfAc zCPz8SGOF}sS%zFuC9z&^X%^ zHFVXwSz;&}Q}kq7~@MD3E=P<~A$RAPdUSqnk{| z4V8hjKi`eyz37R;}zs{2PhY zPav^I;lEPPMTB_o%vOAzTLwAD2OJ!pyUmyD8kP~KDV9k@b#(r7)s&JK8DwHq9P=$28s4d@$I=yPBjEq_8bQ*Eyre5w7t0)mE znKKH`D2Fr6swoAEih#p$*N+b@v;RuP{nj)8xmn@wvc`o<^5ntLRC01&Pftfs&^rVo zA*a{63>=P{o4w86&CUd`tPIVK08C$T&yP)O8?E8tnBn2xcqx?6;fl#15|zfmm0wuI zwB=THM9ctYi9iy`(wKc6g%8r#yBMTql0c1i7I9MuFLyY(X`3<{Lf8@%RZe|xbjavm zRN20aM7PoL4JYSuo`r)aM+vW;HMM@|dGbOLl-TZ2j08u&sn4aSKV=6C7b0^>W)g-w z+bxM*IZkWyFZI0D~13>&W}of*zjqF zC*TbzK`4Lmg~6FaTe{Rz@#=QIpmscDN$*=9%~h zZH4iN6UDxc8#ni*Pj=N6BYc@}C3_x^zLvX;oN7RZm5$^VEhQb#Bt@qkO(EU0zkQpF zP6=p9PZxDPHCw+`HOud_W-_sykgf9>z0pANkbtlk_&oW2ED0KePDv8LvQZ>GDO$#K)VC_ldtv`6IS@iT;X=dO3*mpsr| z_VmUEq;dno%v~IY-7rv6OG`?OUEB-8qZ06wOQ^K`BAEBFpVd<2Whu$sNm><^P%OnQ z9P(I7UTv&B+1MV(W8B5>xxd8ISYPe82+dw%CTGMQdU(oX5F*gx!xQ^-iOElVC)px% z!4C600Jy<;+h%&>rTFRmE+O2|i}c%zN#w%&_7rsE>bjaOQ`Z!sjV~5#3Q!n1@`0}fz5ReB8}KegTW(F^B*X4?t~r(axNK8E>Vx9pIAXb)=3o4D`{j$T1DZ{F z&1}{si7yf<#=rg{Ez;k}v%6)B&GyNQ_-Hpg+B4^j?b~U7SR8>u`+)VvsRMZ|WC>+; z6u(6ihDxF8^tI_B61ybkuSI1~huX6+PUIf_R;Q^+d8^q(gFY(ev(MaOVLsGBe9IMu z0uf*izUTBgRoB-KZ>}y}KCs?kP*qR&h9cgJ{DI%KY&PD-3(1naw#~#EX6A^^WW0?% z9T=ay<}?YhUhhKrc}!kBtA}3t5t)>Ri*E4Av1+b5-6?KKkuHrFN!C?EdGcQi9!P|q zDTY1cvcLdqHeSTo6iN~r_EWJ5nV*l+esnuiCF4gi3%ww(q*(Xf(7Y+R)~~hD(t(c& zXUhdv6OoK(5u3qG5^l9~%#C&}bnHTbDNs68d(u@n&zy+g%=cqeDc7H=Vj>abACg(6 z`i%;3O9!#(=>@z74~R2MrnKtZ7JtywU))LeGq5fdnEWKn&lQT#1mmgd0Ngz5{5?>JfmR>^erf_i}`V6Mn|yC=gTdQu>{S0^z)C zn`aqT8|A-N)NRWwh|FPJynYH!Xv>k>7UJW04K)2o57P504C!DIXqfx(*-{_b&M8zC zJ!!L*A9>Yr6nNLS1{KfW4^?Uu60>=?37yK&IKJc#pXbjn?}HpXWW1*EJ}$5H z^`}&!Tb`&?-^^eODcZSK)_GjU^6zH5^=lg*lFbp!q>+2mD 
zi$}&dVr_dWY}CvA-b$JG7IE2pjl!!CvgZ(cXUn64rWNDqD@f;?-0sze^OGW%+|tVn zW=)NG5foOX*u=Nnmyh~jrMJYVoSsS9$Sq_DnD0=I5g97!YMReP3t;E!!O>JmL{b=2 zZ6`Q<)z$4$3LdLB=@*=CJ^s3vlXF+kj+E#^*n!2%2k`*4@2Y#L0|+^9+-@&Y`UnVd zvJwz*U_0UKNOk@3EHgqeb6~YKSL1$Q(dDeWl8p}JQ+$@HVigq?K5s6A3U9=mw4WI-U1tOnI>FiE>nXGnS-;-F zm+tGgzqZH2KWqpSK(dDV`|MAah#)-S-J1{*uhCxUGbM95)5T4^(J4Aqi{2`i0^g2H zx+u_V9>!(&u5W%VrhT>nD)$DmO#3v7oNLva0w1d~+7Z$l?`@YLxzn0{(?y*OGAAT~ z)xwraFt>H~WRthLN?QHY$vA$YxgqrRfc3)d7RU`TS>U7FR3r=D?&<6e91OJt^-#(p zPHQ8k-9?9?BH^0&3p^eUL6bfdQhp~x&4~J=9V7R-9TYkYf8waS()0+~iI z;Wdx*9$kwh_bWxY7sFPInbC)9d$fKvb~8Uq#1Pf%eeOr2qvNn+=-CD>+;6=Xo%{$o zfE$?~I71tc z7b-uUwcurg(1bU*TzXYKD1w)!B+KNka%eH38`OOF`?wnImBm7K$%JoH7YS}Q&31Qn z#Sq*;#VrM;p8i?%xntvFgY)#FmZD~zRH{smy_R2$ojIuX;etQj#)r1U)}N@+@!5uS z(=|Gj)E?UQU36DZbv|B#hBxe3q!^2xbg$}NyzCOtE}0`Nf$FeupPcZmQ+Wdk=IjNZ z#?=+{alJ)a#cfe^~eed(KvUnHa@S#^4u^%uXDkCsmX=q#!m1Y3?8q?AWhr>e6qt?4 z#@ZLwK9leKy@_t@tCS?Y74&;JPz0I?k1;r zy0{NVVNT(g*+-=pD9JxuI8(*afrw&62hDlVt8}>hp%xR=LG*30a6wXqlvFN=tWtm5 zvu4p^pzBkM9~{3HI=mCo;c`GGQ8)5Y8s#_+zngcM!C!V}+BP|_o~^u4aZ&fwC{y|! 
zNEVov(ZgeQqT?MYTt0Q%uE4?WY*;v;jCdwla)p9D*zXhyDRQTf(mT^}HTBqtt1sw1 z-_2nadM$`&Annm^H>oS^%?}-$QP<69rix5llGx#VC_>O}X(a(s|N5{CDOtkCE;bML zM95HdwuQR2<3<|He)9~vI{$xqC*$4(>yVz(B5;^hp(YJ`n6Z(eOtCj+79JB5A3X%_1O37%)dk&$ z^!rQq=IP1n*7oElxQL1hj@(>24wxMH%sZ8z{+Z-I7d14* zOl)%noBELn;WKr|y46;g0wPvgl(W)O;|z=`Ut36UyY(~WCk#?fW=_Twb70QHo5ej^ z^cd4q_b`Le*=nBjCT@D2R;)^%)L#=d#h3z-H^B%b*HN=^<8p~K!#ya~riyHU4;0t# zyXajDp@QiEMG%r!m-Err!`jKb)!<>B2)edVG2FN-M*C- z>C@FZyx&m#gc&`J;N9cwA-xvY{$J}Z_%tZuQ+4&PBQ-4M=*3d(4}xo_Oi5*n->e(4 zA9x5u9eYbjKld!jDdCCWmK1tDBT~9DBO>Mg70bB}Im;sG$;2jlV3KXi>4?GOhK$!2QE(R3Ja`vsal!9#gNp* z#0(P2O^0#hBx+_xOjHv%^}w}H0hAQmI)Bxs%!XwPrMBb}ERim1s{t?Ne5~+&(ciDn zRPlOkt?`=gGs2Ui66qooQE74g($I@SAf=4+lfFlo^;mCcwwm#yeWa8>9Le9Gnqmi| zb^PZ~Iuat$Lc)LW91Ku60^IeEg?dV>v9PKX=ZKKX9}{<3(~$Be;QXv!vKfQmZdEMQ zwRyr#X|r`=8HDGpUm?>cJo-1h4!8=0$NrX;{m(}RQ8od1<9FxJeWx|7Q=#oGIR}1J M6x8I4WKI143!dXCasU7T diff --git a/ui/images/sample-wizard/step4.png b/ui/images/sample-wizard/step4.png deleted file mode 100644 index a5857bc729628700df3918fbf43ae3f4204c2600..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 37188 zcmbTd1yEei_AZ(L!QC~uySux)TW}o)cL@+65L|=1yE_DT_rcxW{gU4~=l@=vx^?Tm zH&rvUcQ5;T_3GNa_wFxLNkI|;78mx@r%wpdQerBfK7mbq`t z4zT~Q{`84Y(B0n1#Kz2-$k@!n%8sApytR{r$jX$TM3Y^fN#0)6%+gBA)6q=TQ$fwd z)5e6`ltfT~h|itp!vVm|*@(y;U~A{Ztogo%Thh=YxRnT?5?jhl{$g^8Jok(q^&iIbk0iHDt&hl!c!-yf2XXpW}l zJSt)m|Bm%>#!q7D>}=1&$mr(g#^A=v0CcoqWaj4PW@KVvWMQHI(4cqnuyZzYr?+z= z{f`GRGba;AD|=@vpdHa)k4DBo7iWHw52XL<0>EBg{(l_XIsH3PA1PyWH?n7BW?*6j z0RHytA8jXR6|?^b#{X8^NzKFFj8VnR3FzWz@-ZIfr2jGg$ld=r&|k$5G(3uqRv&|6 zWGeX_Mm%ooRxi4ldQxAy5%gsZfeu$nvQBpu2d zf3E($5WqMsQ)<4HNH>JwCcF48u#0Hv8$4v77!0wHJ4ufh6#n|Ox{|Z%H4u6z@xXIjfb1--q~m}@BPbJbHtmr z2X_z=bN~@F%Qq%VFbH8}2w^3_KMjsBJuViM%IH%nf5ic5=*ajBf$6Q(EK2tO$Wh~P znT;k1mb%h)t$W|+qRC*&!~Y7Bi0MZ9#R4c;*p?g&y8e-_4bp%b!*a6vP(A|e)C3e* z2_tVHF9X^pcqy~UYCl)L ztIY{sl!sR%{KeV4;tVNoV;yLuJ^6)*-I-@qnty6$Q6jtu^l6DW>_Siq9_BfWIA-V7gE~wGmFQ zWZo@%R7P3_&-X)6Z_yE 
zLad|KNylxxnI7rr0NBd5wl-qX;H+7pzZ!XPL`Xtp-_^2On6LM>HO*Af#VZ$b|0Ub8 z4FJy*aSM)0vp~?&A<+Bv&(KWU#C)e6C;ewwtFl1*Y()KPV+pDXN_JMRv8e!}+n>gW+ z^b%H1VVb3g>>WmkozGQt4tU6A=vgif=Ze$04~dPi>P-yi+hb$vxv=aVs-I9@N?GGr zdPZuxZ*fV4$!!Lff&TTB!26_?W<)!&iA(^&xea5qe&>Ymfihzdl8Pbs#m(x%+l51& z>ALp&HD005vEbFB=||u(E~A^D(s7iz)K7z*hQ%N8Q#7I)JE~oFQ|h;@SMNKx-@!^F z&gf2Dc^F@u#`dIQ0bKzTy2X2>@rHkvL3n2{Dza|MA>Edt9W0931|RlNtW+iDg!S}4 z5)VpoP<8_1PO5rn8NCLF6;~%-opw3TXt4^%ATx!IMh#dOWJwRpG>2swf^HmgA4Zjh4@K`rXe0+ZcR5#%#FqIxl=!=zP6wBN+O0U$il3_i5df)_W zO>E_bhw`K0=SW?C4x3T+j3i?zGRAZ0BaqJ&Hi!8fn=Vw<-57pWwgm1ls}VK2MaQ-Dni(D7b}$B z#CB8_7q^0;M$2S{$9aBa!W5n;$ zSm*6+C)3)8QTE3SyN}7c(LggS>I(cyvxquJwP%WY2klk-)9@A(-ApFO_OLfskBP9js6v*+ul^5SIS4`F z4S${&&zor-Md#|Y?ZwFaL9M=_-8*Ok{75)E^o!UWG!N@#u6^eptN@fhhOtSJRH%-$ zUHdSlUuDW(I^iT?Qk-XE=*u#2vq&6?jPaOxRb?zN`CZ5i$i!S` zRojxh^H;%ow6_3YQd)%7sVT?fu|4F&>k^L&$hlS+cuIKYl+}1t--M7l$1|>uyEI57 ziIQsMI{8;#)ks5}#gyZBYu7L|%&J;Oee3ef<)mXxG3d2}jUXF8D}pXrl<@Mz*M&kw z(ZFipL=q}jT3 z!H8XLlv!iNSnsAn1SVZ1LL&(GK?)fm+jYyc-W|y%OAgVIWz)5(P<%pVs-P&El4eh6 zIXN~o6@nhxZ*#o+t;+1XPT7-S!{-EqAt6b4IW=S`)5Sk&#sx3Z1awkY8YYI4p}N5zWLSGurI_jXCz`h&x{p^H zia!`y5qIF!AA{sXsH4krf-iqult`-32;Q|uUdK~KLlvGISG;@T{iLx_#oHjQCp3m~ zqzQsMrg$ES&0=LU>Gn!jC>(-Y#10j6rh#X*r0q%)mJQP=kd4_aR!Ysvg)-)>>nK5x zx4t;kpx~b>uxjF}EM%(-!Sgm!d0;ugIXYlbua~aqSk`*pabH-=dakP68=;8$Haejs zQz6kxOJAnuJ)>Gw#2 z{xO|r#cF-jsF3!)5p~d%ox`P=)0nxTaG!q--bp@dtW5*^4icwFx7u_i@36T_U)(AC zbH48g41X;dOhdH{*=m0QWd_T+uLf$q4kL-MHQhoWG?aNE1kS5SQn^6eQwA`V@FiE;59ynbJST!LZ9Pm$QMx~ z@cdtD3eXO-@(Y+M+TU_Iuj9Fwvax6?IMr5S>kc9!ht<97$T?TCgHH+uv0%l3n}*8(8FCFHzSVe6QE~MeO<_bf zG)F;$UTN^b4Vg>7QbsNr?ZFO^iLlo>)-Rw{E!bk&a6oMsEYwH)@C!#yf8CL4YTMJg zG?mrPuC=dgW?i0CHx%+&-4Q^g8J#Je3KGzYJ**=em`Y(fD8Ru;PUurEmOw@00leQp z_T@)nG0cX<1@q~}w{#_VOxf|{E<1ynRF9Z!#_((TBN&ldc<`6_HR9Oqh|$nOj0 zZ7peZ^fY-$v&*RR^K_0TLVjxCVPdNYnscRSsUHZsN#<7Y(RXS(P?Qt~?piF_PYHFf0NyKe3epX9A1nowCt7k8`*4~|-2o{s%(5_$bRmDG$~V`c_!4^!3Z z{&g3k((iA!+p#Km_KX-PGC1I*yLri@kZ19tPm%ztJi`q8BjisKZUs!=rN;M34wHt9 
z?jghiB5PP&#|sjhHfB6-ft+I`vWFV|R_tjeezlSEjWr0L58=}h%v!e0OKl`eZQesm zw#}=u3rVY+h`w|)ozyR(0AaBCl9%VuW5IZtSG4?LwqrM-SriJH%1c2Gz3k z3QNo;Ij{W*PBYH#60>-Ane!ZV$Xu!S>f&bMBP@C9)vcdb;hN9LG%9A0FIt+_c#CYb zw}nKp7E}vL`SL>tEUn!}aS@HpU96$Pzqn~B-UMdHLf57Ng5Rj!98)T9IzOx#U_6%e znaTVzL1B`xZ-%wY4=b`>Wp3?F882m=sVH%WDsZYdkX(E&+M9rN(uqOp|jVNWAr?=@GWS393 z3^RY_Mol`7isYyz8}6SFf6ZUFNC$v#Qjzru&XV!)i0LS~opjX2TIKUjdEYovKkj{B z<~QJhdL`8ehc6ho;8uu*tZ@JfB}zxX*)$<*h}7aLNp-xBDyZdw$ojPr4KYoxjF&uw z%W(0!H`_g!cK||iIz^!6y;%cA$t998GwVx@=7_LSuTStM!5limS#RgfMw{+xg`mb* zvKHnoA8Pf;w5YF7qGW6B*6n@rNtdk9`rUkx-*YA^&t2T#O^Zq+hyKZv> z@k3n@NA;~EFNl_Ee2*tfD5&OW8672I4xi5o2Gi<9O7w* zBJ)#;M}2EDx{STiwv{qZZG~Oz6x&j*HTHlih}3VhJ%`uNfXAd7k#k9|I$J`2+hKm8 zA+j@ey+atNfRWY=n_DPSf|F1y;BwJ(*boN^FR=reY0f-s`!lQ-z4onXKYX6o2_`wS zD-m-Mt6k+BgzpR}C=s}5r2ne<#&8IsR6DgTKvNW(eq#TH0r-KV>X?i~8=x z)@zI*Ijld3i$qp<)I}&=cUXGw2y6$RMw3n1-layZxh60c&C{fX)!I<$FicU?aNv=z zRo=mBXib6f)O)p2&Bm{rlxhojAYJ~)9@l4ctu7>iOw=6(=3Itl-UR2 zt=;tgNI@u17(rAvTKrwNle)bc0;Wz_uf)Nhe>@~bqQg$QW5=1aow<$^MBbV}Gg|PwM^fH$wKy zt%MZDiforEjXAkPH3dd%#+`(MTasMD@&=P^7M^45Qj12hb-G&ZubcTY9Y(;@E(Q@A ztzdZ5QIlnIsbU$ZS8Y@s^v2bPrZ&9(%N;^?x9}=uJ`^ybY73xErB(Bp z{S;(!_#!urnD5qfNBc<169jT|gv+>dg*Bd&sjD#eavw@RL}b;x;!%Vo7RB5xvsXKG zp+0K?x1TM^+(Cco02h*9DHo2w`kK!`#r=VZa$7;~=_acaL zAV9Cw21Sn-H1vs39I*X;7?QEVQw|~H6tTtC6+KdxOti?j8o_@0;)*klnNFp*>&ta5 z>*~PkV~*S>L#`-V7GVWia$@H|ed zz*$Aks0|XS;RQCV2YbtCPGWZU!l@L9`pIa|EIfqiy_S~^-=~!N?z=3)V_ZjIz(xGB z77GIwiqPjVfen~TeJ?RG$^gL<91*P!JDbZ`koQ1Ht^0!G7(qYFg5gj@Etlh%4t`?) 
zWW-jFI`oK?jAWaYi^f;f2XJ&Vs_O+YD(?ttDL7PGpyB;3VaY^TO^fGMm#lB+IW$FV^e4q*5!onjGdr9{LJ# z@{P_A25(e+!b(uG*?Nff$iWNy&LSRhWDf~PSsg7*CpiL9D<6m0cg}^F_qvO?d{0vmXx8zp8H6;>|gL1PU8wm8^Q~{HPX}1z}z)o;( zJ%r!)@RWtsLwsRdQT4+sLWo8*yZ%_UBJS;z`Hxfiz5ebgseBv%$)otY&LP?pYz3pj zK8^o=!1|(03n4=}eM{ZHROwfT60e#pK4!&M#U^{|7p0*cX|?z?wU(!@_*lU+H-Jsh zx+59SH$ON2;}XQmr*Wo*_fksHq~^0?g!<2Y_FJ;smp)Tg=39h-6oVr>?Z%1g>3d29 zd5+s06+}B!nw4tsZJ+Et3MlzS!`8Gh-n;V(-ijb=bn!@_Cbwfwes!PIcKDFSui`>$ z(zW0iDYA}2xY^0N#1t(utLVGNb}Cs9+tPqdp*-TR zas6d(h^+p2RGKhr(H845nPV z>9Pz82WpM$t$q_P@^K1WiHx!EHKNl~l4p#VSaDX$KFMt`qXNq2cvr%iq&5-d%HA~Y zE;LijcHu5siV+&YqoX@GOjYJCO*IUk^{J(5-S`6J^AA;vo2FnPkkXH`JWDqOgJ=YvJw1Bro#tH4gs3@z=BS^)O8R)B znk(e5ejGMf6wcO5RkRx`@K~}h`J@@#_LQwq2JVhLfYytwhLsRB+iPz-!!`xld|d-z0l>_ zD1MK{2nijZU{Q))H`*KC%g0oEA{r@w%E;Xtk#PGZ#Up;#X;*l2n~EcjATp^Yk;6RK zD;2ZR&_f?SBz!!Y9uVli+HF-k0U(qvXe04~OdX8v5$by$A(~JMEbc**Jk)V@-26Vj zVa8GAAzCIwR&`$#0yUQ1DETq&kx2wWIo|{}YHk2HzWKGk)E0KK%+XMj+jMYjf}`W)a}1_~ zY08^kHG+(0-A?!yteNI^Rg)>TN3WeP_nlWJum)2bOV8GBdk4}i72uFhA(zb62bGff zYeQnQOkYKdJG(P-zU6KGIO3R6Gp@LQnIXv8z zhTKX4EK5jSq1!%JTCpV8rSQy7d1Nf?g>jvm;vM!#GzqNp(fh^r$pxR1<(sCR$_cDD z0oEcG?pl2PBI76M**A(9-A>MJXO&O^i9aLHE6Y88J$TzyM%C~nh9k*z&`!$I@Kb-GYBoO*I_+6Io^V?tjpe?|X zgMk3UUOp&XgcM(3M=Nx{V#!yXlOQy~10jD(YCJ-d-GuP+Ey#arg<(Gi4VR|=svIWO zzqcsTzaP+*eZwrVAt^k*ENwTKqFC+Nk$M#MN)L6HO>eq{b@2_d(!R+KSbOOst^76&RS>Q>-tAipRV)dbxspQ4wAIzZeiWC@#Ru-Gulm~4AP}&HRzXMLl?YKw%=M3nX65sK?gdI zA9m}`vFq^qfg`6HgAKz&-&@V{G<9Or^{|s=&TCWVsiFg!Eo?^ov+wdARVLFrzo7zeAx>;_jq_Ts$-($TD84;Nj==Bns9l zz9}OUscCIoOX!;WAFl;JKkU6e&G%CG@f|gi`Gg`IH-gI{ez8*5OfSN?AqW92yB;W3V@RlqOYTLR`jFCYd70qy3ONDNVe~&{jEq zL+#nLx5vlo{1P8BCK0R{V8`2tvyGaNnxd=Wee=jmE8x9ezh?;aVLCo4u6o=}G2_(p zqhGx4U#ivcYhh;OeBO4^ts`wX6NqNFwd6uA2HGAPKTga873Afg9iP80c2u50V|(`` zEpNQ`mwQP>4aYqPR%Wpd|0zN!-mhd(ES?>}Iu)>{!Ur;@W^QGHiiKXV$) zj<1{34>1taP)C(_KJLcLcPpVpVg_W)=~4ul?GPD)PF4bYF=V2PpcN^er-S=SDqAGP zQIk6|8i>lC9Aq}7aQV!9bu4H6_Ngnes4C_4uk3>Z;n41JEa@}kfKOwVi;H(JdE_5Z z;yJeH#65^cr6fsUsXI)DA 
zNznc9eRFPEe3+TK9cB*f(1_1hg=f)8?-%rfq)=2P;}gP({T+0EWhr~?M(`T7&M@P2 zBABVgW$x~%7DgpQMFP=H`Aq4GR^B5F|M9gYDhgh*o%i11o({cG@l6 zR>irJdk7O!p%TF4Ske^Uk7}a!#*)+bU5K&6C!*1% z%3DMw4d3orBF#EI&b#a4ceHAMQbBv?4J3w7Eq$p!mS8fmH9helK6RCd>oYdF?Y~_{ z;#1=DTPZ@Rdi^uCcj|>C(7rPV@IpKj1QjhDnveZ{JJfL_LbkQLmQQBF%%F?wYpJ}m zZ`+Mx_cYf}KVR!WQdv~>f`?z5okq+VOO8!R&M3KoG8DGoCecoaI6C*mGtCl{PEg|D zEIt)*f&L?5G)1d4Ri#mZPK!yQzLKP|K$k!lLlx}IR_e<(MH);is8m8opyK)*U83k- zulgHD2F{J3^C;8-5l;6_qSt4Zi-=Y~h9_Df27-s@1oLYWH0eXe%ls{7obT#czouYm zREso=d^K`~vYIg~x*Ll2I%TCUz;|2wjP75FRsfZ7LNimVQuVIM??aO(l{JC_DUQ zL5kWA_KQy7X#{&_DO@Lxf>kGP5`_g~BNa*v%W=FMlyujh8t&q~*IQwrn83SEnW-;y zW}sE+gSpP-Tqq*kb_JYfhfB`hh=vR!&+j|+&Y_`+dd$}Xfq^di$9eRHNc*W76jbv- zxG7M^PvD+(Dmpbf*=|1ZO^<<$@^ZPUOBirm?v8Av`OA=Vps$at&m$A0NP$J_%Z6-1{p1o0cl*$_i+m#V0| zV7$ZHG6HB`Ng=s9r5xNqGN*fOs@%BgQrS_6NVL=gG&SKvVu`HV_%k}6aTbA6XW2g5>_Bu6cQFcCKqD zu(FLMpB(sC8sjLV-j-~KaE;Rk^~T~gp4~xq>)=7ffX5D2+eSSBoR_pyk-omg z4moYJ1b2P=4tJ&M#>u}7j0Qjj;YTp4<|F0>_V<5WpFns zuj_Ud;JzF0@QoQcoDM2>6dCA|D?Bhc`-4^juv%{zU0mJVv(VZ&MaS#XZ3$3MdON6R zwtNPp6>w~g_!NhrxnzplH6b0#mQa<@fJWq=kvK5MLh6 z9AK%7S3`F`o-M%CBe$T ze44u2%X}^`xVCiG^74M2>;@oaMo%fzZdnm z26D0Ra7Hgs%g2p{V*F`he=l(x4PBUSN9JxlQa*tBW8bO2^VL=ajnOCZ5trE4>AjwA zW~>vqo}LX7lEz*9(dYdmR_Yt0e6{?_t{cA74`ZMX_)Vn>i=xOkJLMw=F@x{Zm}Cp} zO6**bWFtj#rU9$ENl`-wZ3UcBEnpjtUKznLCOWSgQp=-n>2Jx|iE{3E?d8k$f9{{j ztXN#ZnW1pnxvsHtZ=8jUUyV;oz)v5c#ZJUc5ls(JFEHJvCi!HFuUFsT`VC7+1tN9W zt~P0WXk2=3jvYpwR+lD;J;uE2r`wQdJ+ zs6y7DBL1hOrg!+Bq(a|F{jzAveIEVP{Bt72t%McaB;HSfGb=@c(AcKzu&SWob#fCG z^g{j4o6yJYnz(o1;W(wi@@g^>7pJdYf&bBFEMb)s6?)Ei9Zd^|9V-WG!d~`7deeNh zv+w3;VWyubkSI_}a^JkO-jU$J<>Dv%h$f}hSgi$SQ8@$|bx~R8-FQe9rCM13jyYi2 z$sM%AOd6cusixZLWw=^3dNaXki<6$3-=`NpZiv-4_)~5_JOHMWQTOo_O18x>znEHj z`JR9ScT9&+D{ksy%*#$*QQDxvMg;BCpE)4NZ#tbKnyPhAO zcrm(s;|=5KfmgGek50LtPpfMi;P3d6&3gf7X}lEB<%=T3QPB*{6I#<3Tla zll!9>{rq4u+7Mj~#puI*JOp*jAZkDQrXx*{vzDJWr4#ji+iMs2^lBR-Sa@^X zR12&-F8o8Vi}LON+~Y_(b|9GA5|1Ft%Kv7u9{g1S?x)xBo;OQ$J3@r+6Kl7aJmFu) 
z$cTDuHIdn{Vsis_u!l%q>(q@3K7y?kBzPVqWqip-T2w9G`70Vme-PrLU)=9nZ6(t{ z$+vCmxW{qY8ROn!o;~!@qk$zgyCNSaP48Awaz78RM8J~9Z*F@t0)t#51mmT&cIOZ+ zpHt^o2A{@dy=jfXnI2`@UQXxyLQ^}UsQVC7F#3d)^Fr{E0t9VR$ZCXR<$0MUE@fjY zj)J0-M6R;iGkfm3p4X1geQ1KN@!0+bX%EXeMqIy(>>G_**y~Q66`lNU9`1yer6~o@ zG#P5$;<`O(<-$6v$hM>0T|#B*eb}YkQK9DJu{D{D22E0<|EdfSH}j@`>}!{S>#u5S zaZwL;#UEf&oS~GkqZ>s2$ddpRmjrzc!<)RM6Cr2s?=R%6$BWcXkld@^z`2!1z{Mu= zog*g?L)Rt|0!Jm_gmr-av^rciH~rulnu7dSN%z(!DuFjSTd&twPA%Nbmfo+rHY8K? z=RXa*MdJ{wv#B=1Ct8_wZ*;X=%qD5Ye+%lQX;#O-?p;!;ec$9kpg}H$4dNqDv{T z(-qX4?qwtRa8b)$YvFqB$*PiGNB2WiI4+q^nA7T{DA;`B3`Cmh_VuSUf;4*I=l26z zPjhLR^gdd1ey(?Pe z-yy0iewIt_iPi)IO6%xo$y2`kNY#pRC9SZq2aiJh!UV>wYD68`!pQ(a+aTl z!J7E|XHZk|x!~R6!{xSD%2rcy=OX_TCx4p~_bP)Kh2(h=C)FuXEobIjPbwb2kmsQz z=nb|GiN0P-p-l)64ZHoeu1{RrgG};8rinEAvZ|Y>)#1$4Y_$x1J{cK-SZI8Gfe4D! zVL?RIr=6k0MLjv&v3Q~W7ezj|1$CO~#6+wNaKl;vKfyxJZjWU4Rm4FjOl)ySn28zaTVX2$Tt$&`L>9g^0_7@M3Pnaae}4tmC?x6{6JB$$ zD>v0q4!iBN+wX}9{zJ#ROystgM~wgsa5JauBRWmrZ$ue#PRU)K`JPnz-bJ?lCNEE9-9-z8K)k59@RUgXgsfe91E$OpUSB|HS9vZeFDg- zQTiNi-IBAoh&A!&Ol-xSAMEM4T}FAheV0As36PHDItXsS3Zdkm-ipaV3J3&Pd5tIE zM1X3(z?i|MSCqe9`F$xRRQz5^vwk=04N`lJE9)MDsh}s=eYbQbWKCMj8j;Q(K9ei; zZY4m6T;vm!%#0hxG(3jb)qI<-9=;!B(=)qH4ucAIe^D8;C_fWbnX}+v=`&<3an1(x zH(-QQC&QarC;;$C6sU79*7QOyt|s;zZILo0%bhakhdv#xbbVv;-3Y~8W=b7n2EwN0 z!VqzDH`mv**u}8D2&#p0WAc%k451OMJv;1yd{%-E8POYy^4(XC5i(B$1a9{DCVlTdorO@PX zcE-OyZlHCQ44$ErisjW&DeVEGyVx}2Ea0MxRaTwsn0_J4jZ}v zeeH4rv1PFJenR(_-A&23*w_tzcZ&zu>G1q@x1&go)ADOR15aJX$kl3-fp>ZXcuHF) z?3SXnU-jhTVWqoz=-cZDE&Y{eWaz^{z1@$Ki5*0Kkoukh^(s17z z&*(%&hm6fpWm|$|ri%JIJOu{bngv{7R8vlmBN7i^=qhd?dvXL9S~(!~@rp~#-WvQO zX#MnE0dN?Ktg~h1lznw{Dmj(z5(c}x#*A1(hnTip&5N@SB(&mn^}tyTjQSxxOd;#< zfjjh?VZosm{GWqx&whjiN2)sq0FHj#lc5Zi#VK+t9rfQ>+E>T(yz?_gQE8`A|$?lSAg@&V+%LpO^fx?s3MH#xJj{zVG~!My>D&JR5Q$Rj^y7Hjz;mdJlH4 z9-H_aB^Q85@8K*pMkz1;uC2)fRVO7g#A*<-^gGi+#t6KBi;Kt{u)CHpyOXStSlm8>`Z~QZ}I*e!khbBEeef2$Yg4WxD 
zxo8xM($w03gnR$F>l1Mb;pvs=C0^^$U|LzC#zX_Zi>p^oXp=6%7;Lr9#_NZFvHfpFkI|W9oDQv2iZ-M~|5(XIF#JV*Y63lp5~W?rn>OpS`!9hsF{Q zQ@?MYd_*GQ)`7W60Fm}%a?i-y0!AdVo+c%kuCS@{4I8KTWrat*4?S5jF~Yeaso3l{!^WoAA?1zbemYWq1Wu8b={XZhgAZ!lo1MxEhI9JMZXjFud?iLeQ0 zrX1zqNKP5MS9+o%y26-C7^rooph5(T7>+hpe8Txmhs0ZN8A8UQ#D<9dutlQzL8q{% zgt5aQ-Pocg!IMHvV`&xl?g1eKG2dy=x4=4o0gb6?Sv(}+%M6ABo4U`W!GchRusa$+ z;k2Cji*8IAp7}*v&xJq=Yl#mR6^pM7^muy8-HexouKbe$dc&nYbGQU)6Ek4R88B&X zyNP6tEZ^*--j#PVyAvg_j(*{F3z^Hq5mxu`L$vE8+p4y*x$E0O1bLSo^J;;(opIui zCr|GHB7t#Ubr89kbvPJyl6XVh0>S@-BQdU)L$Et*#L;!|N4h)ErzscTqq zJbm5-aX&QLuU|7VQ@1Z^*^ht3_8l2tTnPzsNeEni^{?C&XiI=SE?x)%;G~`3sPx8iuja1 z$Q3%j$*+rSWTY-23{eYtd>%73&q|l?8mIGNUiSW>p`kh>L6|cF?PF&4=&{-#OJUvL zEl%hD+qV(CZs%iCiX&e$l2iF@52Dv}RnYKt+`-`!qN9-!L+UZkx9DMN%qPnVyr%j% zI(X`d2g>d8ecI^ef;BQ^f~7B7I3DkhcO*glM<3fz8RtK?3Y)>}-3LJVoR$~UJ`u{` zfwn8ayM&d0nFP7aIxUnfER;ocM2#W6Vn}o9+leK?0nJ0s*AgyG@w8M9H5P1u(N>4i z>34A>?FGDY4(y-m-v~5t`J5Y9n>%)JvbO}BPS+{9%GlJpu+&2<2&`p4l_~&BZdG#F z5Bup#P!stQ)8i81z;a$(lHFxaX)of-W4`GsN*2Bu4WkP#|8jD^-gjpzEQ7{g>KmHrEZ7r%Dz1v!kx6xO#9Hqav^ zOv)0is3b!eu2*E4OWY_6B%Bu|NiV*;FY#%#VlI8U2is|JP5$4sG z7?PN&$s`CGL4_KI`0*nrx81S)Np`NPjab&29RZshNjc<)J@R=n*l76r&{xA9Xz{ag zgZ`9Rx;LEGQ|m1PZWX7V3wK4vv`^;;sVOdcV=bz8;XYqP0vU*_BNj2a((;7xWcIc6 z4PEIx_n2(UUI*M22i@%_~tyT>^9T zSI)Hp!uqaii?V^b7>>Xp8B4#QZ^C8l@ zHFz!%j3Sls%rpdy8b4omC%if6L__}!eRy(90-D6x!|TNRI=!cmPhdk#Lq@?H%NlT0 zBZS#qC?RBUUHad2m|C1jv@BZ(o<@Cp&Z;AWM&v$&XP}!#S zcL8wPKCMnpdr))Zq}i072w3744IMlgW@j7?PGo*`DF5P!Ph-aDsAk|se}}QH(wi65 zbXJiY=Ib+@kpRq;D?E^%bLLgBBc`70f5DGY>6UH;20ACbFr|jao-SYt}v%7S~{ zts5IP>Af@EBoh_o05D@xyn|{9ARG!hN~5a)jEXb8D&E^ghVOpSxgz;8*WXvpj=^T` z_a}?|nO)a_Au&A8PNq}$@~W5($=UaVP!@t?7KFQ@Ec;r?fqFurfhb)Eb8grPg9t$% znus5+fI9X)u}x~@%arw3t;*GR>+K`j>pQ>ZPW9xU_mzZy661aS#bxhGK|-=5gBv-k zSaiSH4nIg|_bbeRJ`13GcKoZnFfyWNjw9EI&&j z&rpH4gEWZe!cH&O>LffYb5^sk9$paK$L228*+?-qu}mX)h}CA4=u5^ugmP zM4w(f=yc1=9D>PMtY~CL=LgYpKzGD~TsjZ)eykeaAJKb#uX}ErwNA)tji7( ztoY|b70OB2*Dg2elE)`y%MgJs$93+_7zKBo56?RdtzY>!vnONQd+ch%30WQY_)R)e 
zGV%Tt1uo@vx}2H1Z_5AWdPa}ngwy}S%KX1$UH*rZsi&7_(SeYLg87%J87&a-FaE1A zWa8&!SvF+oEVF-TwOhT$)$ow@T_HX0jDH&-=7J#`^8Vq2{(lSiUxd;Ba6tb}3l8`% z@qh729kVyaU+Z(IEJ^=Dz>)L59VamP7q|4k4FB8jzlo^-%~Sn15%vGB3(>Vb`m>aU zGs$D-=?Vi(;bhVQL=b_((&(2B#DDp%B@qImGnlnFvmd0}Ut%ORbhUIgOxU&@x9(%F;#&vJ7c3mhQ=my9D5LjmR;`~mE}v4=a?jFsT}Cae)oG2ot$C{2 zjxc6=?hR$WZmdYQzdY%NM9!+h6TH&VF7vvfJDFiHZG z?>k6JMg~lJs*mJjw;_e9jg3v%%+R&BqH2TI>HUhwF)gs|zBlS?gk3eIH4qm!b=?RokSshyR%OJLB58&+cZOz3U=v`nzXrmnh%y>c{}N}e!1$dr zvvR%8Zf5=)Q`(kXTj!3elttf>qQstQ<2mW02L`?@56 zbN%t#K!y)ytw+~bkO#N+1JmDX4GjL}AQ!g2KXd48fo6z5&~>N8wZhAul$5~j{ZM5_ zD-b@672K=v;=jS@_guKL()KKa$>HVwE9r&wjS3kv7M2jv3RqVKOBH-CxK_21*c?YG z|FT>vOB@5>y>EM(hZ5)1b6a0^tMa_I0d_#O0#-dX5;)g8t#!Ii8^@nbdm4}g^(ANE z$B;)fU$<`oUPo7ja$4=?r{k|7o&Srqw~UIbYt}`RV8LC2dvJG`#+?w{-Q6`na1Rzd zID}w<#wEDBySux?UF7|~efAz>pL_PbpgPd&wQKH`F25(f9m7kd31@FQ2OspFw85*dOb7tejD{BBdu9zw%`D ztfOb2m37?jG9yId=UGfrO*9y>)faA9R+;&6YSwEh;LA3p(b<5FHg25oR$EC|=L5TN>%FdFaBE{P9A;IY7PT@;ONx)5FL!4$+Z8A0LsFv9Yib)B#S1F^F`UGP`DegIw=0W@))u+bLIJ zY9BbHOcRCl1T5j76NN| zI#=s1&tNK`>fRq~hSpMVgHVw|+HN9flt0dU3FqGwm%RPfw;`T7IGvdF%WP7&=rlqg zORz@9$SG~el;2J|03{53^(V|fxJdW@ZOc&(O&BbHj*fR3w656Uu(1Mq-e*~)?@Lh+poc3;9AFP?TMFG&{TuNYc}@yee$is z4-WEhDH*w8NjU-UOZTDksgDmC>CZX9!$Z~S^p&4!Nuuq?5lyjceyiVM0=JsJ) zU$R1?+4=APwCf_2Lp3`LM6n7kRUEAmJ?`K5`P_w%ahDRV%o)RJBWpvYccdSfJ?EO< z1>P*}U4;;O9wrNV?T^gJr*RqHD*j9U{=fhTk4Ll8GZA=i~dSViNBo!Ed38KVd=;c}q>{016Bw%lMCc>~jB$cI zJ2|me-|Q%lMiBixhQikDb#wfp)r5mjRVOIF73rm$hx$Ms0x?`yTpZ?Z6)3Fr5lWk- zsZUa&_EOsN&13O~=|N;LF1u4G|>V2LCtE%3wvJ{|~}vul^>Vk@9fe zHt*~HB2H#_+FCRkD9FT){AZ2^%m0-L4FARg{|=(U|8+pazk_o+!SSLVZFm5O2w>T< zZ6H7Ho8!X3e**@re@%J#*N1E>2jhi;t~~iQ|3MaSRq({)Vd?+QkO=+1f*mCN{QotF zVjR!D%i|Ri2s2>)d$Iqs(G!{Nh@hM`dz^pPh5b=H*MX@Bkp!*HE9_O?MI3Au6i}}p z{C^M;G+Es`->ZtWbuW{hdN6nXf~0zNq5ocz{}%NBudx~bLMyTU&@*5DL4FbC%k@s? 
z&VW*cBUg4BRph5EtJa=yfBpTM57q9PT2bN~6wWo#o5oF6e=$|83~cKRujS#a%$9Yl zb}Qjjw?33t*y}(Oz>EJK+7T=B!CFd~;nSn8-BYDe>CjNNK8ir<;5K<7tJ5q~$x?JT zAeQ>bN$CF`5c!OM5Lh`QlX&OrH{2T+e|`bBJ*}wBZt-0?UZRYrW~Cj-jQtV}7}AGB z%fJG9Q`REHtP$)mkgGFanASOM(I3Zzw`CX{=-amibD@0c0Ps+bg3E!>YN?x=@8ey8 z;&(S5wC$04-(kd=_Uq*5tM0))s3pMQu&v54t}%s>hW8$to^14fsZGh<`l z5Q-#i>5?yxPCh1M>oMxuU%Wi@-K=i#(#|{{&td!Blw_&XZy)0y-`~Qg8r&Cg0C7jA zD!7&2x8IWEcu}i%Gk<1cLYXoekJW2h)=YU_%!RS$c=l&aWj=a*iO%8r%#Q(^SK~JOE8EQb>N##)u8@VS2KZVJKreuPE+cB@S6fqB4 zZi1Ly$czc0bYkz4C!_thl>E-*-h{ypGdiXI2mi|o22{F-DLZB7Pc{+P=0fs~ z$NK>iL=9@RL75)cU1U9Y=~A#U^!MV+q2^Ec!2ne{GOYRvp3$4k`els?l4-o!pKUZvR&+@nkZju)X-+#4?;SegWwAon> z+YtAAqdU_)Wypa&+~E1`r{J>swXd#x%|SdP@qys|=!~lG?cVj$17#!u_;k9y5K;|k zhOurBI!SdE|NJ+r3=tJrW5+K`q}+KVCM| z3%Jo1tZvic*u5yQlbT}%FZto}x*u89FEuX7@hb5X0~Y6-`k%Q*m$zp5Hs1#QOkTM?V)T3O#o#jnHXJG_7p`Z0 zu)MuJ$2*y6x(ORH4f;ssV;((c=(ArK^XGcI=34J%Eyzso*K>K6?@mb*+s*coT}$hB z^a8f`P4>E=z{~xLo|f-cf|MxB_%*>VBA>3#R=%r=xA#jCi*%AV^*9oL0@S`}Qo1CC zEpkT(9%b9txrLp~dYSLvjv1P%eG*35*5&l+uBN6hJxZ%R;V&*&(Lwan^Sa>I5*hb# zWBl4$-h3s{!Gcw?WDOR@bf0@DnX#Mmc__Uw)n>QpqqPgb3w}y2z4u-?o`b8kK0(afH{Z>vNo z^m~a%44@LWDL#*3+}z$?Qv1#HCjv++HfZbM6{TA$*Q-!Ha^sa*xtdAtH_IQLCwD6& zj$8SQ9H$a!pU-;q1l?CMHd}Ttv9#W4M_*4Bp?08USP(pqcEy+P#}E=EWi0(fLkc0m zw$OaA0n+xsAjL}(RYwuq-H9^D&Y&0*6I=|4V0AOPLt``e-CP=gh&oO(3#2Y_^+ zE*M+uw`p>gO0PkIeR(#usR$Y!A#F-}XH#1WDW}~lS#OKqo@B!%Bz$Xe`AE%_HvdHC zB!Q4F#pqZ?ob38fPJ_~#Kg|U@_B{m7+6*Vyd3ARx@*Y*nm3(&H1>qJCjeSi$z6*E3 z+>NV`kp(|JtizVJKB^PpipO@amo7D@C+5RlKOSv=>y2X05?o=AV*_LDNd}j#tfAw+ z4d+U1g1(XZG%E9DeX>fiy8Fp4svN(q>o;{-j1IM)V$K!f>Njf3ZuK$6oYn0% zaDI+*o-VH&KMV(v=ZL&5x9$3NCE$p7YDbroe^7$McIj!lu|(+el0dm4|69$*Zfrd* z%bI!H@64F4(eKCa>AeIr9w><5zT3chHfQIT`uv#c2EORehV62t!tNwiHj^Dw>ZSIj zt9ewxY4;DKm**Zh$2+=O!>EVE)O`KH&FVpiTqi`&C2F4Exf@yK%{6u}&kWFamf!W% zi%duY+kQ;!Rck|1Bu^KZg4I*CG~f9XTceAGnan{p^5ett-=(eSGy6NgWf>`oBWraX z;YZr(>lroV@OxQXzechn$IrVQ^VJWa>XBwruE2>r_&p1b*r)3zw||IENQg#b;|WjH3kH>MN6wAk#%BsW;UPrQtxRt;B^N 
zoSqje5f|n`sUfDg=dx}jVQM$qWOO}9YosM`YmcZh4__e5Bl?tSCMh`&;QAO(U7(4Zs#zV4K}^RML2aLz@s7_`GE(m|!*ie8ONSwI2fHr6OF9q_ z&ejTRV$zFY}a7OZD;rMhhlLW!~Fx?I+{Hg=I`Bw4@Ygb@}!bX1kvR2F|c zL>Av*E9ZalQw)7RtQ~=Z@U?+=-)*!_;eVhp>~l$TYkfZc$UZPn_$2P&oeE6ROc4#~ z=6P|AR}YOi1`T|yY^@MOFEE2>L9DI`|CZC&mjI~=kB`Lf$Gg_;D~>&zU$L}? zhR*9T3!HWw_3!+I-nD993!M%TBwSqo6jV(w^F5Avk(6v;+GZX`xj8&}mXU3}t*xE* ztK;vm#{Hf+HpKg^H}W*Hr|Bi4yx*kp>D1m{ew$X|3Bw_TY1$POwkRTKdlAx~ZpHI)txJ zn2__2qjS=HnVGhA-+lHv-qbVVy(USOZ;fxcTUBuUM$J!O zziGS`R2HqD{6)1~&HI8IgYkJjyeuFa0*07;E+hGoyJpVXNFwGqE*kN6-c%HF5*o%V zlqq}6906L?&~SaNBSZ!uEwiM?%U%uvSc<1lf4 z9+k@?h(7NSY38!&?CHu7a1y_Ck+Bd>7Y-K={m!9nW2UqGXAqFJ&l%$jQ`)-@fu9GZ z@H;C$I<`njVV$RwCrP}iHo%1v0cW%k1_s|vH$RQ1TJ{rD<2=E0p;fXU+$j*x6^0p8 z%WQVTm+U9Lm$`ZZ=-6i8S)XbERO{l+)!uQl@-$vz<$J$rcUoAuY&g>2uX)+rm|vBp z0~1|mvfq>C-BOu7eB`LhX}sF{a#?v-RaoY3B)V1nX%FIF(D`L4^l{0rERtP5i|=|1 z(~ke4vGsXfVnw3wB)PO^;RzRfZpb*yxxwLj%ukiv7mSRD-|%&Fa*{PMk)Pcb!Vl&s zDykp>V-umN5ry|s*l-bgS*d3AJKE>dTsdztPFQWZ?mTA}|Ma~GwAp^vU@eSH(R2=^(!Lu3`#nDh z5_zuj-uRde4>yOz%blTji=VTaYew<(Bg4C-d-tt(C2vo>d3@>$yNZd3fI-4{$Aq`} zy}(x={9YudL4bt{eHBySBSBPOb(Ndev13mbSK-^>uk!*~s_v{@c=}uF!&9WCD8V zo>vltd=_QAD~dW^%P4s9+{8m$&W4zp8AuWBk^Qk%e&r(}TaeQwV_$8hZkhzI2?Xx9 zB^rC8aK5%i8n*sCz@2WoI2&H_+GMGxrZ~FpLsv#e9}xo z2L<&ANvAWX*J|1`;in#-WbA$Dud%MS9lsQ^GF$P%W@9tW_VJnk{dA^#dQ<)S{(<77 zw8Q!7lUHFrd3S^$2G@l9!PAh5iHT*DtC09~-dS$Q)}?|B^V3T5%ih`Dh-!MZ=KfV> zaO5xll?-;1**}-j>5eK%Ny|*O+B-J~S63%?_m2m4?&!k-mL>A+n!|oTn7cT>oBqkU zqSa`C?z?pnqSw00JK@A~?`BG`cQ~KuU}0y7O@I|1HabJKO5l4qTZx^_(6utSiVat! 
zVsmLHW-LDf{{&R`j@z&nZ(Q$aSjg-1SfQ%!S!_vwM4-e0V zBduet4uu~A!gnJ$3a71A+Bv^<#bH-fRZ8)U-liq6(jg50ml0|L=61y9#$TOFxEHLxJ1U!>NH}OMUkKPePa9_ z#C)LNlUNgqnnGu7JuyG`uj#iPAr}g0I$b9&v*oe_dWe$_)z>l@*Kv+g7hoSVD#D}| zl7^GFCCVfQy{B1JXb5L#9p@LdcJnGsuGYY6v z+uo&Y+{TgR2&xjH8%ay|4-IVi=!F3uG?X-j+Z{hNw81|h6T1)wJ-?O97#>x*BNuAgt3~4K}t%xuQ}$9tp56hXUPv){n`cv6;)}*aGn^r zRDcLP{PXnx(}jQDxNq46Lx+kw1`fLULP1KJ(-LeE2rLX(D)8^`(|;cS(}i8>;s(QP z>BMk}q}N+uvMHLfB-8*Ff#yyT^k3gwMBhY>n$5q}P62!8rx1!mV#o}Z+PXCD}5H)Ihnl#*hj#NEiDveEA1*GFZK&HNT0fA#An2)n#Et2T%y1L6OTvj7(WEl#01Tz%5AdkJTz=|L07j2_7m*kTnGA+z2 zenc;991SCGm?;BNyQd2h$)2!Glr}-OgVuuuR3RY6kW8Bxo~(Bls9GX?sQ;?%ocEt- z678DmuY2QiPi);4N~T!;+5#vySVYP3a(9{W`ih8YwX<*hXVquV=CVSLzxpgHj~m!R zDs90%-4De$58$NI`w+d4?+gbJujvDJZ7R$+^ zuV3M77j4YVsc30M`%HG=X#0kTyG3|d>`Hj*bOAYBTVOhtt4Bs^YeeNk(@pV}TuTgZZUX(w$D zfPzQi`cEG_$KOKY!3}#BJdnP{F&~|ju0Q`?8}7F{T_`SYlz90JnWZ})VMvET)!h2V z2GA0fr&?{h)X;d`V0FIPR~q~V6rQ9Ire0w(kTe=f?BcFZ%1dRms$iT(OabxXEvi9$w_G#pgkU?ESuxxXPcLyz@~WKrb0wDJLmc;u|0Jk6at4 zETbYKM0l~iG%4~K-HW5dfT*oM9u5@vEj-Ns!YhE87?TNvXWW3 zM6`sltPtca5z88jqw90^d2TCMk64R%nm8`h7QrD1N(tg(dvmk;5d-1X~^w=$(6*UJxs9*>at^JKzcepD&6R-A%rTW4>FP$z<+zPZ*}J6jP_s)Fru@SRh-fYSk2GQnscP_R>=YG)V4N4 zA=lW%G`+OAQoS?~dVA4#z&J=dy2$@?US1v!La_WX-@7Phf6>ATOe`!@GqZ@32hD18 zrO%%+X__9-2dL~^Tvgx^QL>S{zP*hAtJ9H5M0;cdXt*sBgyoHF>bQV4*j_XZLOG6DjKR?G|2R6B91XVMP zwWlYK@^~2fM-)b;Y(=UjT}DS}k?vR$QCUiKJ#N5>4X^U+LntdNM~WML{+!rlc;?RM z+1#Sn>T6oXy}lv(>K*dSDGVIoQTmE^Q(o93aM0fPj0um9=tgn<79acn9mkoIt z(4{O`5dw+bz#TP;Q{BhK_`kG@NUS zz2#-XY*71YzF!YU6q@o>(b6Z&>$+I5>CtO5COkYm>&38d@$M#92Rj4hI}@}4PnR*5 znq7x>5hDe%Qs}qQ3YKj19QRQ0<9)H2))L`hC??64e!lwiwt@iSL7!0AEOce~REg^2 z0;3-Zhw@|(jUs6cqQN_vj)pw?WHdCIACOKarl!xkmEoW6G2O7UNcs0W@_5pu+FZcZvVm;k-zqPpvO?mu+Cuf ziWKpOvHkUznsIUB1X-%ohX*x9L=w?(0Yj2d)LBVmC{%XE^AI)Nc2QJ}y!+WzMkH_Q z1{=jtmmr(F=%XwZ=y9obY?yav@3Z&o&9bmY0KG8VfN# z3$~nloiQzc{{lz#ILGKqE&XgVmP_!VlG&$Qmf1g_q+eg;lBXJ?Qe35jZXAo0Z7{=a zg)D+lEcksX9S;Te3Tx$_}v|>OPG8s&2{B-v?FxVT3x>Xf 
z*OuIt7fz1>k<9;OwR`r-IXdypk$_Y(y0W6R!}0K$WrEL(6$+6s5fc|TRY$2O71Vo_ zX+a0f+6Pa^r2Q)?D2iY~@b2oD6zX!}9PPOeV!J%sP&Y>vB8`{^TU*6}PyHr1$@ ze^|wqlhYz1wcb}hF+-4vL|`2w zXN9fpc`yFT$a%W@Llp|;=)EamnHEO(ks2fX50{4;6P=^-V?AGLJT7|=I^AWf=dwF! zx~`DuKC-g1{?0o8^A3Aau=e+PyL6Y3iY%FjzE_1ZGvy`tGcDc%p~QJ_js#%!@zq=9 zW$IB~;b^k@%N`de8@&Di{0?q);GpY?6x8d7r}E&dr*qFd3|u zXFVepC^@wSB2THPz2 zJ~}{eai`+T1YywE6O*{Q_VRF&U+*Ex#C+dfxnud$$?2jZZoN3`Wj&cwXA#>1I}ggR z*>KMbTARv&CnJW=Qx*Y>gfvHYF^6{Z^UnV7iHTA9dx#a9Wu8ek9K!(xwR}ok z-bV3!G{luMokpf?A%j0=xKfW?RB2s>UU4BY1UEiX+7K2I zQTfb%S_T3>;4Pa)*$NQB(sSm;5;Z~dxq8{7piM)>SHEsQ zznj84{IIB60{q^PQnun(n>Xzty5;31B{Ix6PItHGz>9S9t)8MUro|&POKq(($@`mJ$4F*8C zHbXxu7l@lBWZ`HnLgLkvIwZz2;UmF_K(CAKhPa8X1oKK#?zoi!$q=)RHa%F}s&)aD zrE|tw(n8eP8UVn2R?-|7U1YC$XPxvJtW;sYLebRzXf;W<4Y3UyJG%i7Lrc}l7c5WC zcwl>(*bRB+!?zytkEhFrbQwy?Mdi$r+g_o~iK6~2D45@~58YF#(kPh<+cWBsw)|Pb z9G8~-kpRV7cGs=aZ9CKHb%m(%B@=1il=p`}9|UX)8~!kNa;h5GlE#Y#pa5@u(u`_p zg6%bA@lf!@wKtZ_l5?QUN2L{G4`QHY?-QAIOGPiZFwuTBs>$9YBzC0JfnE~iAVgj0+ zGVL>soXZq-Cp>UE^Iu_|@;=|J3wNFDchYEDptbwWksOgtx`2`Du7>;WT{RUfKbgcq zDC_yCFRD^Ny+|^?D&o$;`XeEtnfau7eRmCgvSB-M30Rvm=wB<6tR`T?Y0t_ zbh06b7n7LqBd?*rb1e|r;9cQT8i!413XimHvkT397Z45~^m`(&yYYH#-ET6o7?Bo# zV7jCu6@FJz@v@nc);O&>SwH@5?o+AE*5J?Euvc9~H+`FCmWi90wPIcw>iCQ87rKZh zZ|dnDt0}3hJ{d9r4^D%Wp_svg}Z$C}E60@03{Cc)qrc3E>ON4N@Ju5y>*mkKWI(o@+(Xi5rRJ{|acW z1c7An;+DCl|jo)vW8OUku` zjQ1X`!Ih7ehbcA$>*sxaT<7n>fUuEQ%sy_2&&j;*lFUivuOPO!k}T&sVA z^79r?1~mBr$Z#cE$hEsfiMHLPq?*P;gYVkweWj!-)B2lB^_3<- zrf2kkwAA@!iZndCkDGr0Wj-nRyi3MRpK4RRZZN~!KFuJWrO&IRcL^tXVCkY)5f>^E ziVApaQ1E*SdPo^A!e24<6+)FLrK%AkPWE>>sk`UGxad^})eSY`y@pZa6ljl&RS!%K_<; zv&C%vp$kP3I5tfO7otM*c(<0Qo>tvz(M3Pydywadmx36 z4r#usDXy>gD5|-7?sOuBx=2kBB46a`R zM;E@hNTNO&#TnfdshLs6!!<3ODyEq$U&!mc`{T0ydrc@fqRY?=L^9q1;J;tG;CGe8 z6=lpuiltjEkPTYSJj{&OxJN!Po*-M_iy`*AHjhe=m6pp-B-sa;Ghr3FILF#Is`R#C<1< z!ctYikiITsmC(0YyTXi!H{obeyv?Q(PiT0gj$qmrsSagnxFLbLh;oQ$Z>VF~Fy+(M zM3V?NRY7X4wd)WIQ|_RRFHeG(eY6<@NE8$5CrX}0HaJR4*u?F(}=QIPR>c% 
zD^Cxqpxf*^E(ibo>vT$v8US?AoT>TDl<9mI!bA0}ieoybh`Hv`g+A>Sve);<;zP z)pQ%O*wlfY6>5=d1=XrjEpvR#%;yJ6@Z%B1lOyT|tTwajp#}g9|B3$lf8}^}WEwUX znj@k4V*vVDiL)eP8qsLwf6>E#Nkk2qXEW(fpwgBR7Jj>u@Hc1dIRg3+0jd)y<|s>viUv-K`8&$yk~=4=;%1~nh3oK7T=?vv-&2bJbDGAA2xvP1wmDxyUMtnS(ba+Gr^BKTK zMdGr<*>FoI(Q9#doNwwq?W-|1oxX#M<9;Qu08Xx^0Z>|WVSfPA3BbV_Ctm~O*?tkQ zKl#3#Zw+)c07)2mg1+WgSh}OTovh>P*oHqe(}2?%6QlbQk&2pUtl`}#>bVb&%TSZuBgs`k&34z`9Vyi zuUot!+=j6CPc!uv-yU%UmeWkR!Xp-u!|Ljy8om=1u4Q*g$5CS zAZDXeulP|{r;ZWq>Y}d{3J*|x<0b@j8mVdt`VfFXq?qZIq0o@`iH$==4w3YL+QGgC ze(f0|+o1V_9>BFx8)#E+hEVgLR6H&4r8(({Jk zb6`1OJXdtrHr_^icU#=4WTWrG6{e#WfU1CP7Le*$j^?aQg#C zZ4A}0VrN76J(gCjZ6ay3B;h(K{4~W?M)lXyZ>HKU-go{GFp2>-C}}FHs?)|9VMO8-qCCM{j^k{EStzd*rbg_2~MSBy!PYt=GX+f$^T0{WIuVS#c0 zHSqac3UIPN|9~fYU4`>G{=&nOa!d|#pXl6o_QkV%*4-9GuURZdM!5ynWA&-C>4kXZsG{m-8?5nX6;pR1cX zzi{Y!pBe+K>%;XCIstU{RHomv$C%j{Sn7z-5)#t#(9Y3)O_Ail)H&hrYI*3gp-`j@ zp)Kfp)CqK2HL`i=l{uqOITh8_&YQh)>kW}Ovn0}|bfyXSI>4^?41cXIXUFgh%NI^; zIX>0Z&02Hl?EyLO`NaiLUuI`#@5OrmK{}tMdhy6rt5h|2Z1#-d>w6k1Dxrs{zkE2i z)Ar=d%mpHFJ7XK0YaO_sazsdaTHzDcL(bPhU$o1pUhZi{EiFH)6^@w$yud5lhOSwv ziaBX1H)ZmG)9|7{cjO8R?*!p9ip@e3d+%P|H#G1BwvA3RxMZxa8-7J_h!eTwd`*Xx z|GJ8==J>TrdU}k|X8N3)jvlvIB`WJYGLDXpoOZMA#4W-Azg8y67QIlRSsIF=TRwxm z*(%8gsu#&Nx3=DVO^haKU&&&i(-<0RTygb$Jm?q^5=kj~aP7?fb{%j@KypMuju6s8 z($b+7>43VGW7k12`4sp{rxl&#MScH{1N{7DEJ=&0s5D-VQOCyF?9uh*rE`n>Uz<`Z z2ADp8LI7yiYzxuI1)+I>a&PZQNWl2_3#d6bT>pG|3y_!%#2&FYRP&3AhmQ3C$JpKy z@=uknOxDx0LA8Y6VI6WEiV_1ZXy!m{Sg03=#W=Ljc%AldayOZFm%vgy2^cAS6T{)g-FVpx&?`COG5 znR@8Z9}+YYh^=+|utE_%cD^8y!7e?cS8u+HiTi%KR4YVP&az?j+Zw%I-EvSc$ge~HHDCY1+c(*r)-;hA2(9~D8Cs zJ3%QKu6%k+E|eGn-KRmSp9X8|_&z|rCR1XbBy$ur?TOnpauOXtvd_pe@ z%RoD@frSS_9foo#{*IVQ>-;8$gLVfY^Bxi9d$)9#40?mmSsX=+%WEat6MgJW#5LZ> zMJi#kaG@^+Na$R(;@p&=4f+hfTmf9WI0QvfO}PZgCpdFGG4|@YqF^3UV@Xkz(UuzR zT=OqVdg*N;h>);4yqTD^BGM>NkmC&%)v7mQuQ?C9maDZskntFHC?X!vAhcIWijj`$ zo8`MmJ)w_#YVkMC2^%c2W@pfnq8XHE>h{BCpn8!6VGg!O1EuVOsAY*u14%ht!tiON zD4tYZhxsU126{}|n0D2hI1-T{2GC@Wf^0<4D)pISgE^@xi4GBJ25B+@$%3$Y!Y&+I29-esCcilS 
z1K%sMA@d9y!!!sYg&EA?Rns;U+7^=P(V8f-eBafwYPNfoP$OPd1J_gd}&$GouE$y7eMvd%rSEhFAWgzIJVITw#%7Q=`pE39#18_WuJ-5%C9AoQZ{jpHZ zuD@sEkvCvW0q_SQ9Hpv(jKpt}%eJ%(t)1l+ACZS5W7i_;4!)ode zRQ0|-oC6Hhy~Kx}UM=L_kc7TgE(fVVPu{QT^OGbhA4yC9FMR1Srgp2kx#{`%^hGLfTe;6jVAC&J3bl<#!!) z1)O&k9pD(CIl?QE#_7g!&oKHKAL8UIT`SccDj}GnmQaaM5~rse^*Rp?b$W8uaPYvnZLUPu81zmdr(-Z|-OW1=Y%EgL^wCy!P1mUAGUExDA z^77pbM)cp|(?0UBiU=aoma6e!e8(b_UD%t7Z4aNtvA6()&7iX;WN;*@Pv0o32(!>3 zj0XaJyHCZ<*JVV`M-jxp{>V@XGY=s zk8pZRV+-~?UaWt7^lsVwy-M8U+Ol23YQG-?t**_l(d>D(E~D2;7;^cAz^okNwdug) zwxT>~f@=IDYOwSBoa!N{N!4hv9JaF$?7iLh(wprD|2S{`7a zd(Pvf$&X)5sGRcSnN$xcQmFMTXbOadDc4vy<02{G!B6WnfY2?v79({GPe%U z210L@dzCa`;^5#=6qx#K`-pqdt0yHqZ_kqf;ANoLtIyQC

    <_cY6ceml|skCPjXt zD6}c$yAb6$OM>JDpG#v3OV^!Xm%chgo7kBHOg(ke+J!r$S1nmqLN|lwDy?4B1oYCL zuxRqz%|n?B^DpnKNvHyE=U=;A@B&G2Ys0f%yMF$23GsiISN*qLMmY@43erZRqB<@v z@k_@@%W0(}idJmQm6cMaPTnH#R)m22<&WoR(mk0AhXanLHT4-Yr6kaN*5fhOjp^C4 zPiJ7X`&E9DoYKcT0^lidxlCDJdg(9c`lanigyy!5fmic^&>>D*Q>M3qU``GI>!~%vXNcWF#kPa^X8#Es3U~G>zR_A;w%FIayN&| zYR}U!A=2{cCH!^=G7pLA=Dm0c=cT;oJs8GU)75L{meP`P&A_2B%hH-T+Z{%7z?O{c z9G^V83KwGLe%mqN`u4oZU~XYN%IFxMb>F|VQ?+57*SQ~k;_T`+_tcv|^=yXg0-v{O z9#UT?wH|5Q97I0ah$S6JU?dbIe0v)&4uw@F{IJr<$|reG+XmU+jeK?*R*boALnBm z`91OpOKjJ{k#CKnh?XKo}sof5+IZ*o!HUHDhUGZEjvvQW8I~=>Qu>^>VJ>^|7(_-h9U( zO(uKlK+n%>V-3Af0uw_W{~7z`JjOI6w_y2ttcKr^r19DZd~y&EEqa(4C?T+T&n>+_ z%=x$lu1y#CY+$g~$GW5Eapj_PQRO4i!lR1H@l=>-RYE^}78KN-=WDh60NECFfV*fCgj#Y*t zmujx+-X-(BI)v$qdk2`*i4 z#n;ORk_H_Fj;5}=J3e4$Nhw)X!-~hZ8!H#7%BIMdEL}6=nmWR+Z;6pq$pohpMY< z>i5z~s-Rk6w=klT^sZhB+ueL|BbnRS|HaGrH(bgf@OvUqeuUrp=vXv-?L8Z1gTELg zmEMmOq`1{(WfnIp4p%#v1hNIX*x?D1R_U$QR00XIB{HYhGHbYg<{z*S;B;r)bspV`S_hWQsk;+dM zmingU@!RndJ4ybk}k#vD!^L`Gz+X!(GhC_L|-^s+tO@^KMqy?BW>VwCKj$3zk zC&-&Ve=E`Np=Um=CnkfXl4*XtyMHNF;-%|u+>PybG4_Ob^Wjz&heG zW66)}n@7{Fy{#t7&Z}I|;U$m<@V=gZt;2CrZdy1PIL^Z7kdgP?*Qt#zt*Y1adzqN! 
z+6dtO)G5R7(k@x5TDuni)G%9jS&fmhl9g_LKSMOgnj&kpyzqQokP*nLTquXzBM1j{ zT7uWJ4g}|(OrCr32%n#Cx}#bxR>s0to-D-Fe)Vo$PjWP*;%79*Y0gA;dV4mi79{Xj z{qc__85b`Y=X478+FD_cI2om9nEnv;oB~X^&rocx*6~{L#?I-hL7S4vvQ+cU=5{By zmlrV7IBc7cjz&X-uA{C0#u&u?p!gbvmrEySOxqmbLpf$9jNQ zUZ>6)3Hki-BaW~83mu&VR#U>n>};>yAu(zA4=3=>jkkuU)7b6FP(7kwSs_-GZY9IB zdEc3n%g(27Xqy*98TLM7G?XO>BJ$_Ba1eBj*~;X10g%4%2a{OX+G+S~aaQ zs4}NQ-Le{&x&$S49cg-~ye^zOCZz25bH*z3Qa=l^>)MF915^ELK!fpxeveqx=unLdwQn`>(F zyI^BlCA8iZDiwn|eC6?E%xbIrBYw7Myo0jInnwLyk-*e@{As+X8CDEKy@vz-P}Ks$$`U4N9RPM z9h!o4Z(^7-Z@0Ld6vrlWYO51=Ji{DjU!{njtvDto0w{veUc`$*#-^_2WYBFAV)+Pv zwGoC%mtbFP6^D!@*-L0b$(}IA3m?;!CaDCFhC_TWhR2reoT>_Gz1@uRXS3;rX0AGc z8y@&5fetTtySJ9`CqXs#qenK&Y_a}H*@&D>_;`eSPt1c`%Of&-6Q!Kcd)GB9;zKTb zsNVhyA*PZV`-;B()fXE=OR?4sCB#Gff4w13KKoe{aUOgpZQ^{7TuDiJ&dk=#L-F18 z5q5e7*!&|<&u&R)1~Lj!7D4>O(+Sy|d;1+Puu&ART6D}ZI!#{Io^Y6RwWu0aaE@uVzz%cH?wk-u7o13u->j(*3TlDE={4CD=0w9i28R?^&MzLSDZ`-ezh~ zRLPQhCtsEiez#iLoS;|Wmr!a1oO`=wv}i{+Qq|O#R40;~`vc<%q);}P2(cCJ&t`j%hN_Y+S+=;2zrl_yp>2SR;!bn@qXx-xAw#jS(PtA8X=6JIG&%+y!^~ zW{61JbX02vB5LtV%oGD34s|#S)|hLblaW0z~w4OM7Mx4U1%Zq@Ik^1J z&z@4;sr<3#5!BZ6z&9IEzj6B2A`iT`axM9_HIn9F^``!1^!S_}9A81(9vA1TPDt5d zYD2?=8?$0R!VSz4qvQ}#ZU=0H^z!mc50813( zfEO-Cor>_f;=8VFt5!z+K;^@bSxa?MMtD$zxrM$!h`bN|QQOny_pJ`lRgHP;(043? 
z&ghHvfZl@<&qt5Id&`h9X&)T6-%u@;uI3tqM?8mWWgI^4@5tM2%V=wm(Ow}hj>NXw zphxe45GQ)Uud!IEw)WA_Ck3hIhHZCczCGR@A`l`MJ8cF?sfP*hZObISz64`?T%;;}8rd_ClhWazgO*>@f3 z+WJXUo`#c6_a(zrT1(cefzw}xM7(?f>6<8R?FbW-PefuEcV+6Vr=AXbX(xTbEC3c3 zmI;BJwFRu@{n{C=!hsdh2zq*8!dPNPYh@_llig--8JF?d?DtsgY;`pji**1MI5zkX zYbk59*(eNc2b;GxTl%Cb>1FFE_YeA+?Rq&bEycUQgN#%zSX`Twy($7`csqSc?g8kl zJseO2D#6w%wbKV|u!D3m&C{pTc;RBvCy$KA?Vmo^!Emjc9GXG$^88l7!uym=m#oC2 z2nDdo$BXppu4O?7=k#L1={@_IjxfaRo3}q#>Q7fiELdcYHI3aLwaWB35{XKMC<=KP z^Ha?V$R?u=2m^di{VwT}IjIg*+9Or~-awCqhslGV06N17P8=sayJemSXm}>C#7RNU z(Zmt}r~c|LqV3}HM1L|#`q9J(sox&Kwiw@kMjBpkmp}5>sn*4$GFWOP@VK_^Mv$L& zoYkfD_Z?*=&bjGyn4F`i+ZX~L^C1GvSrWdK$e(+>y1SPNgi{nsfkguZzzqPje4f%d z{I@ip(Gdt6fw!@l3db_)G?P0)bBhz@+)Rew) z5-6^3NDchq8$hK4xUl%hjQD>$tpAg<@ezt-$F+|AVC&jOJN38A$AIDH?B&!7x%=Qh DtdM;d diff --git a/ui/images/sample-wizard/step5.png b/ui/images/sample-wizard/step5.png deleted file mode 100644 index 036a0c832b9475dc3972858c52ea89cbf282d551..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 41363 zcmbTdRa9h66D8VBqm8?}I~49t>;OO z>|trl1tbyRC*pPE{sgc#aWW)wv$nEvDV`S#y;$mb1FaiMdpAz(r?lw+_ZuB;e zr2j<_HE}d{u&{Hou(cuj2hq^T*4c@VGI6wZb};_ThbifQl|N(me>e1x;HNj-@(vcC zNinn%wKaCOHnDM%6y+oN9AN-j0J+%#LTqA801jqmPBAg2&#Mrdh!B$qySOMPD=RY- z(|9hL{i=7qhS!8;dxX_zxvd?Mh%y_Ix`hqU*46Q!%R4`{(w575-=Ae=Qvoo6q&)@VPK>z|Mbt`I6BuDJrDmwsM*Q z<)uBp{BbdJoABebPcoeoRTx7K<`)?qCABA7Yr+f|eZ8{QcWa9?u#e=%gvNT@C4h zAtcdcoE9btaTn;&0`Xl+u_Ti^z&~>7{+GN};ovlY;!`XYyU)QOpKS9f{kw1fLdzj@ zM$IDL?vOzQ$7q0tYUu7zsX6~a1LtC6gCc?klK^{^QL=+uY7{CEm-v<+R8Cbx!MUX_ zraHa$kCP6dBe!g1Pu zQ-Q!2M1Cm& zx!z+rl0c10AtPAg_s_5d8WEx(nOnOa4K7NRB=%*6Rah=oq<8|(zb4?wI0b3QSmFRf zJO-N9_VoWE4*d}W>#!&IcTUp~^N{~`7PJ2wSfToVQK9`o-~Jg#Qo;QfCwt}JEQ0@c zxD@{11pO!X{`ubxZ`&F=>+iA{D>pk%?6iY%;rZYuxcQo@NQ>%w!wa#9$)9-MPJ4RK z%!gx*SiBN=4ExV9#->Txc22oy`IkaW@sJNPS*{!hJ3Bnq?@+RW7O)__^{Ch5eELiK zXA99X%RxI`#EbfwJ};3O-l&t@FLkhkjAJ8x2i$OIFSdniZ7bsj;yBc;gwMw{gzJ7u zcwB_*&eNKkZxc@Zn6jS>I7!D%Z((s#NIBw{gI)%Y=aIK_UIn}+(P~N 
z8Djn-e53bE%}zPHiYk6kk?+&XDy}dWwz}Sv9_DOy^bzV+HA3)-*Vu~n*v1zvqkNrQ zfP6N&ayLql#mcbNeIB_uXh2>or9OBXh4e2m%Zh>M?h0Vr_BjFOWW)aT=;EZq`z>sz zggvNx^n|^=qkXXPdgRXY(jp2=^+jYV&U~2JLXr$ZO(l4dMx4qt-u_ARM(>X=(Qr-I z4Q2d_o6Go*q*}H1^)N(cv3~A-(sI)2ICF8boHap~h+pM!mWo2>WmUc&Pa}*OG9csC zD3LSRa4d){0~!UaIv_~O7O&jp-fXzXGZD-wYQ z1V`<~g=)o2e1v)U)=|B7#Z19Y{^{BPGY!WS9A(Ene#RSnP;&TO$f(AeOZjHpV26Z1 z6!ImL_gTatDb3f+K(2U6!a=MVtR%jXg3aE?7A#AKC+P?}KsW#2TIzt}c;&u0MGt8N z05M^#xq*0h4;CGqEmkU}fp}B;PG}pCo;&zpzJhhSL7`}UEXugcNHM&sDKOLvKu0YT ztq$uZQwo1~*kPNCwIq68fVAU0UpZ3A>)JBj!_(TAn!J+g0)@38TFN~w?-h`Way+V_ z@OCG2Unvq%-N^`^iE{?62o(2c`HF(anIv4m?kX9Ta|G*FBklmGxi46!yeyH}rJXB5 z>h0EMN)A%CMx zc82JtrQ=Yc)gBXB;C}OVeLWI0t_S?WSCE(fO0heygd_4DFEz89kMEmN)Xop}-GZeE z7+TmQY|~u6#5-xFR$g10koIiz(l{aYjgMOB>et*_3N|vkXX$(U20O{Q{ z3Gx-wRsYP#?lvI5qvB;OhV6z{(2o=NrbxD{D4apbzL~E3PzkjCs<}@(L4*TYu@P7% zO0y2P3ug-yFA2em?0{dNh*-%Ijn)gryk=<+{+*rnDoW^Q@$;P>nXEi`NmknVTfSY& z(TOQQ2WMn1u#yCUt2yx{kL#aW0U9y}1Llb(fD$~( zuT?-zHP+%ryum9RV;#qI98zka=w_b;YHdYZA^Vz7^K3AmF{w?g$X9T-7827`&a%*= zdSivS^{w_p}$kE5)9eFc?q$CcDaGO;LZp;y>c(sKej9Um!Oq)a-zbZCy=x&C{ak zpj@M*H8ijPt-h*%FTON$d9!X@x0NQtaE2y%-*2?k43Uj10M3k`KPewo7A@9{EKL>q z#6ZmcNf?kv{WAw+vu5V8;&01-RFJHVc1wv%LG!(m9wUUh4Dw`^_-u%6)J(djV3s^qqr~S9cyp^E3U=V% z9{;7XE|_zwn=qrBV@Z=*mP3x2&HLBxLj3j!Rp^IYKkuypA!i7_Jr=MDy@hFGBhC_i zKNv$5Vr6~{VluznDko%3T!Df^C2bTsRdY|emRrN%&??($$t)tPFWW$}?aSgmgf9hP$pTWh5d zSEZ-VJ-nvDmUqeuk60Ne3>P@7%0STCvZuRPy<1`tZBDBMgXivRDm`$8dnrdWMz z8RJ;FPTXo;bG=hdKe~Z~&$PBTgK`H^twnNr3yQl$5?O|I8xGCb7Hnimuo* zZ*m^f$ptdiCFVWzxIlccLvWc+dNzpPOue|FE!%+_-BA~w(9^2C^{Q3Oq{w7h;G#lv zb>A*Pi8({U0-$IzT=q9YI9w7MbFYumh)|@ zcv{HK3wWNS25iT!305s6_5cd$Dr|?PoPrwD&@@HSaOAM+h3Zzuz@$~!qxwDY++&f4*HLuYU zSnFS5cIK_CaFbh3PMReHLrhpI6!h8E#(xQDr{-9xK~BT3>@cKdXmXR+K1I!fIsx;h z=p&|PA}n>=e2t@J?A{dg&zpM5>h>-pmj~C?+X}*dsf8G3WsNO#Ri|~QKCvV0Z5Q8= zPnRfH%6TQpNUOT?&R?H(jpo2J`R;j@nCjvQ8XoSu)@Pe&0| z!B?$iqYn5`doz+4X_A#X>2iH*jBTox1)o1T-5<|k_OgVHwPhTv-ZQH0Bw4`9xkAbD zINnJ|FzG1eMEUNZIE~)}X>JUPFxgq^Y&n;K3J)H(d&}4|L;B5})|1{vWq0~BN*2Y} 
z>E$!j*!Al2V95nL&3E&&^Ut*{gjxzdE$lo)c{iDo>8r%j0l(7r{i!p2e$qo%hj2x@1ekNy(8&={_Yn|pm5-rDF`H``p$ zcJXkLDFFBVzXeumM)Mph#+_vZfn(fq zwC&kE5>=emjm=zHL?&v=p0rx(IVESXZLWHqgD5; zE+8A%<|c2q$eToGQmXw)>Fk2!m?E22P$=JSFm}zj=l4#fuhUm7wQO-DlgGm*YgXYx z1~Vg}R5)w}<4;aL9w9UycRU0rp)io^9$RE! z2*1IjI@&lUpcSfT3g{{0hK&=kZ!+g?8&BJFW^4y-bnU&M=KKAgjx!`d&W2IK^TSr1 z2jS7B#iW@G$_3kZlC#WI7)aJGgM)t31R1(I4B;Fd9l?10(W>CjLhQjwTgRn5X@hs4 z!c~?5a;DRdjECgOCB!Bht)P6-heEZp0EuXW8O$er5rsj}B%4J!8j;y>O3!M{-XjSI z9agac}}$N9*gk_NPk=sm~`Hv3f=MqN*rb(PF5I%hI|szh0K2-lhChynWFeeSELx#h;v z3LV4;s!|m3nHGh*0Rs& zI_juw>+GR68d(CfsC#0s3uZB?CZK{~ORPP7;@E-4RoBF|*s=-BwNMP#eQO4O*eE5B z78wQEBJZMJ2UCTInBKD039{{x9||%O8uC^Sh{9DGg4OuX1$23*tIh17t~2AQ378$A z=zZ0U^WFn@;}8glh%lt04b>Qrh-nFo$#IO{ItODpf6MQ;O6*T3S=JI9!vY39wV}-e zaZZt`*05r6erR8p^!oe($||oWo6^wkAB<cYXILm%S^@OWl;R( zAMfNwwKC@&L!^|=I;k6HX8w#Pg(X&YQ|J5}n1la=KZgu#(2mI5a+lK(jW7@Z$Dap-8(n`J^xMhogEtHi=EYwXi(3w+M2bFc*J+W4QCk}$`f z#xFL2$Z{SuZCwX=g!plq02`{|y$oH4-e8AWc7S&6HmUoYZw3OO`domxRT(2qmm%n~LF-BUZ%=1S{r_}T$C*P4AM+kbFGy*$k-NA%5NDtwxBn=h;haWmA zf(1~>Yn_|ba41OBeh)P2Q&0x`kq>G4JtgBL()#6S%L6gKAJ3+isiM%E=amIRGAT4d z-clJM=8VXMz*>;480p*d>tAftCg>CUt8&cF-%fb!i5UzkRVcU{=FaYWBcow>FQNnH zwHHd#Mfb4v7qZ&7F|GPYmTE@uvRM$vol54l-_a)_hDm@Dxx(B!s_uFnz0J6lpm)jP z+|65mDr)+PVowX-J&3@)@S9U#0(**F^43IV7b~`l^%0PXB4O5GooiZ(%^#q_NV>v1 z-ay%WLN8T`#6AjaEhiu-w~w{<@2zz)a1_J!=PhO6vlOK%_x&h4Z0AA8tY9S?>EP76meo-Z+xJe7{W5R{!s$SV0XvNFr7U` zkQ(otQpc1M`)W;*%|>178B$uyV#_F|Nzx<>yY-pjOvDwp)Mt8$vATV`YApGnx|!*I zS69&*9IsffG}5x#7#WNttMt2xeuG!E2L6sh%ZKvJU*KmbOmq3eD%Egw6Q6G~^^8AE zb4z65-G9FF0~4!u1#HK#yi|7UW{Um^M)c?8s*~WF)7Dg$o`Bn4Zks2KX<^D^&Ub_s zut0=tg)3w@$dY^{F=Vn321Y@51#|%9{J?MB_*^_T$Z8_c7Qsz5-btuCEf1UU`$*HFm8rH7bKyGWis8(+CI+0{!zxtTd|T?{O}KO+uC zRMsc0ez_^#uPl;zygrt3i6-aS;Z6!7d{lM@B2h`TLI7-%+^Ff3hJ1qRlE2i|&#&p{ zRcJ_-R%m7KVvvbA{IQmdYOTIi!!^kwl?srgf3`VkRSJI&?4`1gu3Y2Id~{=lt{->y z9IA7zV>{bY*JB>c`4oJaGwk04&Tz5z$bfTzVrCmEQQOY#*hG{|DYgW?G&+_@obYaC zJ42A&#qlPv!p6%nOtt>@vAB>y*g*LQnXN3sgTCd9+>r-{6I>@I?f#r7c0d0~zrOU? 
zULvRdGr~T`1&kLx$4YL$j_0#Z9V%`jjxeie)&kXTpFFcQjigaQRT*n_Lg0zzvXY9@ z1J#ind+M(?$=;5bzf49O{5%JVH#V@$-#5r!K6pfv7+{A>;44-)ogS5Mx-+(|O-X9m zM{#%%petQ2PGQmcEf2k0fOCxF2E1{bBHvRY#9D)Tgvh3m5Z$mI_-jI_jRBf#2>Nfj zK)nib4!JC5T?4BzJh@p~ze&=DU9BGn!92S1uLkdZI-?5nkdrsh@w}-%zSU$fav@`5qLH{1gDDHd@_a}eSdP(!(!1EjHFpO?{68un%5-A4EZ|& zjk$3zsAzadM>`4{3#-w0#r7a}B<>pWD!gLrEdqAUQKez?uLJi}>pMos{l^nj-4zWb zJ95qo?o%=Ow$7MTpA0|&@g8)o0z~n`!wiTdq|d~%wf-rAB!aYzMADkB?VB7t+J-f# zTEEXO|8#s|&Qd(Q_`x@m$_q*iCbK{^8WVJi1uZ|BcFx79Fy)a{x5bjbDe+eHk3+c( z2?qgFnPafm`4{lXUsPdIoz#S)P9tw1n{x7%7VNt;uF!gU3)J_)lcUR?DZlx)(3N?T zwyfyiLg&GDpu=zP6qn&a{YH-#E9*rb){kPoc#$|fnRkjVm-?!TP?Ii5hpiXUi^IFV zMZsh<9HR{lgcGBifRSH>4htLZBFB*=eu}cLJmM%Mz1}EACZAEYaUU zGU@cv(3GnsA}Mh9d*}ks&i4ufJgtDa5tbinH$pScmMn%x1m;d@@Dogr_e?q2&yPK& zgD$L`9+Dr}Ufxy|tkbV>b)c1U=BwP>k z#-%=9omvoV@jm9qL`_&GMQu}Nj{}?krd~U+sKc^e%>mpzuJ+w`@H!8;m-XIjDnwjS zpW~*ToGk5d(wX1nySH8$8IvolTe8|bH$j^>HIxpQjaW0Q{XU-@p{;V@N4DsdR&g}T z{-a2gLaY!kiJ4%ft3-DE2-d+E6fNY(>|}!urnhojllpjsIx`>RI_RU0#2U;@O2^5GgopcVDr5G0 zw{RkMukUM_5d8$-43h1cqKW(I`Te&$mEzT<>a^`6AIjrbYHUyl*VMu)I-En{>7T8i zMjghkRGtkzyKpA!JlG=C69P4d-ugwXd0oGYZ3CFgi=(ye?B5Pv*f2)t+JxV?o)+7O zyY!oGPF1bB9XaFDy&FT{6oFY9oVS1Gc>abiL=G}*QN1c^DM*e)5a|3e6}HeZpay#T!? zgY%S){{fa5}T*Zw*~# zjek5X&koPMk+00XvCPmC-0-YQI_Y|^&|#ZDORwmEjD!Sn=uEKBc&=TF%P(7C45sBV z#RwOIOAVa2lMRA4$rCF+u11#Qtq}Qc&V2=*V9ZQ30|IS;FcbdnLV!`1V!q1!5jgaH z!j#vgW2Go3PdH_@`-?-F5oBgw;vmD9gC?w=3cTSTK7o%n`)iqgiLu@ztm#{UhWfO3Qiw$<|mEEjLOw=r%RnJnk7WA zu)h|y?i2D={;a+)b<)-{-3xR_?-UUAI=5uZNYVE;QB*~}_+CTZxSNWIsOx^;M|@|E zMN;_YzcMs361L_*iUj@m=5tIZ+5PqE=cT8I$Khh`)lRx6qF_Rs-sZ>K-Vwg1mZ>al z$9L?9&wVb0 zayVu8)4@WUj^iMxX}5n-Prg=*L8oIk|8SMl=EZtwrnqvzRGRYYZgU9uymA_~(1OX0 zvEE+x93~tjpO;d4-Y>`5oO@4nq&$BwyI9y%wp)+CflzS}bAFrM;;}8m<1U6BS}m9) z9nrI5zp+SALcsm9J9_SE{L*nw=xu$(XromwICRur(%Q4jw}sxO#}8W;EcK{5s(M^9qm5*G>s8*23F6xIR7@i-YfiUnE4ck0H5^F~ieq(JA$SP+(2)vN{q343`3G`FWeeT80}K~P;0L=$3DechZ? 
zLpS8?^ci;-j*m5z>mSuF5ri(MIL>T=ErRsOO!zxi7{&S4D9v-m4~r-J-WSwszyhCB z?=GGn+b;w&${;jlMm=3RWm?8F{_Wr)Jgr?Ck>vyr9Te zufw66vjUOGN<7XfIGsqf%uo8XaL9DHHtv3msC6-hP1dib8QX+Kv!((&PZ#BueS^RL zF9*-k34b7J;=$^vwwPJ#(rxHwbyis$Q5wwc&zCs+!fP>$7@JW{Oy??v#L-zSW(Dr~)seC9(HvZBUOU;Qg10*XHlis6eSCZ)OM+arASA7RI;x&e2_VsAN2**3^5tbt#=aX=DQx`(}zh`?s*7jv26z=a-_gNqVI$zS?)~9c$sADp?zq*AS=A z`V;=~>%N)#zRG^GXB3GA`4K_b%L)?Yfl-xXDw1$&{FkN1FG5Ioo$Y72A^RCpdbSkg zdljKPrGii_nas@NrYc+a@$JY+Q2dzF+PfDq6rBk1$)h1Cu|(}J*z&aYUnsY+K=_;j z+3G^F*5E(L@sE8`ux1ITH~g+Oo@b?T2`#|8W1nWKDhsq}ru8+Z4wd3IQ1Z|ZZliOk z#yXv&mCd<9hWlrxOpvVs)ma`^g`7trnaaaTtJ6G*hEt6@W{Qp+tn_T_dQr-#DaLgc+g|F9zRpwX1U%KO-rF{Zvc^0 zK^K;GLY&y9NED7)_PZ@xr~v}+gG$C9#u(^_snXJptju)0YkH}M|F8C9pt;mJ32N-Vf!x=pg*qMF789xkUzTP z-eV5oe+9LVqmS0u%WGArd7GSWd73bnPg_Y!k8VZoNAs_~jbD%MkEMSL=j zoEKHMi{uPxf^+HQNb7Yp1`s%u`PzD8Io_D)x-0ZU?2Weu4xb7-kr zO#0$QqQ(bt&Dm~~q2ehkcXkIcfzX_*XUYkfF1L4Xkp4dRrfsdmm-Ac-18)maFKIYM z5`#BkaMSK+W#Kh0AQJVqujpQjtauKoim`lIqg_lUl785H(Rxs%8A0f?aevzy+%r!` z9oKisfCh3%Qq6wDr<*{(CSi{;KT=xrVK70|npgYs_NVFmDJtk;twBcltbTvxWK7l9 zK5<;>gf^U(0Di@lnmt^{v!MCvJ{R3t_mW85*cDY3qo=Bfgcc#&A8kJ7APsYzDnr+) zc=V+!75_QA>i#c>a*ZVdS7m`Rykx|DHpX%YB1I-6w%NM#&;bHfYSI8#x?c-ha2o%aQAAwiiN(8#r@B%pT9XsN2UR|+b;*p z)NePZDw^I#U@!C>d7vG&u!eq)l&Yfk-?G=pW~}9n9+fE8q+qA15{ksG>CxYm{b0P0tv?rc`RNMf zzY~)-Mj2DbFHz&Fk7GPByAS_}bs=PgYMewtrP~Kx9?J~JwD=vwGhJ5EN>J~5ENr8i zf4QGrnj7}66Mn1Dkpb2DdbM}PqxJ1gQ=39ODY#*GmF%(`$t%drQmDfv@TOktH!tqb8RvnTdOa^%WOuOuT;d8uPc-l@YZ0sayN( z(+H;+BJ__{O-+iJ`z14e^)6w&M{ewmaS5c_(@W9D5aEvF>V}o7Xv^nI*rK9j=DkCa ztdg31zr-$pNNOYU-tUeyc>dx2W@MONARtgUCm&K-g^h#f#-H5GUuJl8^uxS2iCPR8 z(03bgiat7#>nNng@PRUve56cKxmm|XC8e*$Nr?ql8rB2r-))}C>ZM`y;4 z&6$+q`8+1O>Js|haiwmUkc+*R*LQzS^R|Ma4A0M=et%8`sey2Jb>nHb7nPG zCK(a8BgD4 zM9w7kxtVDuOMM@=M@|@iLn!SW>$hXZ+=5_nW`zQ2f%&^*QU&6Ab#sTO((9IMy`xif z4Nd{ZjnP+uI$w>)l~=wKNtU*E{5k$Sojqfxg4WTiJcjvr{s1H~0ywOb#0N9e$$&UR zGccJeCxxnx1kKJ|5|N`9p0mry(CWJ1(Nw=0IWh|^%&J2=3Z=k<7=PJYu8)en;U>vU zu-Ps%!gq9ILihbFx~e2GqZFtRcX>*w(UjV$h}T7ISQ5CP&yaQLH?3L+qX2bydi7Vi 
zvRSjnLs7P{)f*DuoC@+nv<|)U)N}|MbRJV22>FJ*U5o~j;FfB0b%C!855NH1#)mY% zV=13AxrX@F^&S9n&M$cuRO_9@H*OyEjM!l~_-`jHak+bIw8W3+i*-~3e#(QXt;z5wyo*t}g0(1@tL&gRvDYL)3qfQcO3+RP1Q12fHPu_X*kQges8RFq3VtizX7s7(FPGen*?bLw_v)$b=^oy4qkm6?eKqehMS zz?>wZ^;-31M9S3nL{%4G%ZE6z8zpuy2T+;aL>WMS~7@A&zwz{|XLhF{fsg-1JV&&M<)OgDu;?nS%irOtI$A{GpD7*PMjXS%*VcZc zX&%U$prK}Edu?ssw@9WnudI-0ED(81Rll0r9todOUvtgAKWX0eZZ5kfN>%mco!hlr zmGOHfl%vv*J6qauCAgn&N3*M=?)9mOGl`nn*#ak}{XK5lt14~wb`s!S#f?z4y2QvP zF-)M~Z$oV+r*y4ua?KKLl{Qc-GtB9@CY(n#s3KS@?xJovwsEQTL-Ec8dr#UyjW?UK zH6c#ta|fF%B6p#E4&J5g^%|Q?e{&0krHqk{qisQJ7K=>yYknFY+4nIZDL!%bs_=@o zi~CO4%Ua*p=_|c!NtcCTVEK-ulXu0ga4KX-=IDif4hvel;cxKJNObRVX87~N)V*5% z%37?%@-c$2q0{|-#la% z^1BLF@}|-()xzdjd{w!^U6Y0{ZE=gmeTCAvmKz?$kMNn-kFcUQvcps0_R&(~P(+Ny zW@S>AwUtARxp}e-J@z0+8#2f(c)kwj$B0rQA{20_(le5tC!nkYP<37pe3+Bmh)YgL zq8gb};va(bo}4mZ!lI@)gf$@L=57YkxV5{2M+rH4os}kjJS-lVPEv8XPN!p@UGRMz z`(EETbl%O69?BU>Pip&th>r9VfydzLYgTCyI>^ZT|W=!86IZ6Or@Gd9cspBa4r@8_H{yty5tF=G0_Lv7Pw(Q< zw!}*h#h%i3fqvrPrlv<1I_BE7fu=*Dul>)M#0BlrypDCL_DO^7Y|IYkRyD5)xMi=x^J|3czivIV4=;rgyh3OT_%tZQN9B@5D*%Yf=%Wl;Q z_1>b#RxmF8B)S_&oakZJFg4VpjX$Y@3k3a?*gf=C09Q^8?lXzR+LJ$r2nY?oGI@iQ%U>F?ONY-pBh0E+W?pZMl9Njw+cmsll(`eD+?r~iM{Z{84yqV`6w1)t`5?X%C(Yur`t zOmxmTD4U*l3te0BJokcJS%xrtd(CH_IMQ16cj%gd)I6@YOEK)aA`tx)j_)vu`TPV@ z)fQ|aDW)|7^5Ut;aU`*_9v9&x7^XC0?u8-3iIOg3e?`f$rR8cv<=?FmyAX4LdEv57 zlG|5qhZK8r>cvPbEYWSyD+f2#I03Ax@Q~sCe_et(&c9N}XJ&4!uEK77K1n<#cBm&s z1x&GATJ8pl=OIr7c!qot4V}Or9v=2Ho+7zKeJptqa7ceyb>-A`RncRPuxzwizD|Q8 zWVIC4x&|kZj3w3LqU?L-SXg#BCCLG}zn@BvuUqD zH33yQe&X_q2=ax2BinkB8NvP?26J!rE1r^%vHa$N^z;}sjI63}+c9sbS#PL?+kqYD zX8UuzMN1qnqmuwOZ@Un**(rp0-aYBU#da$A8we`NZYYOQdh6&LhmejJaa`4DNDp-6 zd`Oj;?m9-vLicatf+iFPDKcPX98O;AD?tx^sLLRQs1^k|BTv>QP4U$aeyX_0{V+_$Sv^kpglx?G^>k8x=bXX=7;$!9+=q$FnqHhUmn$g{UK6 z{c~={JT1J|HxtLmi1MUy=_>8nlWX7#N6;xQ-?kSkm~aZFqqQ$a+gUkvIJqa4#f6z4 zdksm6ks$oNl$8t9$#-!n?ljspBz3oe)2u?K0v4J3NN-?=VdwF;9LNW4Znlb}0E0U= z1BTApSvZSmEbD+ypSxd$jVtPFJLNhsT4bGn4I9Tn3>h}V&HdK3RCkRUr?_nmtT|p5 
z=eez340b1IJZM_cvMZHv5z?z*iXw|dZtZqleJ+PnF>vWQ3+p+f`*HKEFA=6j7oMkPh=;J zu|WCix)V>rySK_0zua}e7tiJR7uC1&)3aV}NvF5vAumU`HtVP?E2tt%?w;d32MR?N z3-a*r7&CHdKC}NKhBuG=DZQCW@TfpbtD(Yurk|uT%qUWw z=^IWGA??6Hlcq{P#(qL!eRc~Q7H1KnhN~A#hl=l40lyMbK3u)1(9osi9l3NVMXQpM zsE)>U9};0u<|$;|mbbLGY`HmQGl~SFrEK-`nx*8mNRH52QCsHq?S6=?Jb=&Cjp#1D zvtx4NkP+JsXs`s{oc@}JJh-AM9J8Cw$})7i^YA7PEsgAHBUKAj8agwE;>e`o>_7r_ zCh0ELrJc5qf6rzgnPld=2gjZmVe%vG4Os~~-|AY%DYXO@;U+ctV~DRB-b=_Cl3a)T z5Mv`B4C%cnz{SGLg-K36N$7)hK@F@M*>xJ6Jw#Mr>7ywW^9w{_fZIc-> zv)Ks6Tk!pjW-ONYj(;B}Nm-g*z^$6Lo|4lZ$RMg$E}=JXVs}d1GuWXCgu^hSO-bCb zQXWa(Tb_yBJFwI~rIRotxQ}l| zo46~)35{5Uu=v|(;CnyQ@UBNJtcON-`8cn^&@=TkWvbb)@0CiLH&c9O?O)j$NnZ;?xEtI`-p>Ia-#PZWmm@gy&z$$3deuh651=93ScHuEdKJPVdT z*GieDSgV#1#DhB7nw_*l%jB(}70TU2J+a#WE7*g0V;ypE=g6G~DrcE>CPU9qG|zxQJTgoyM#Q^?niia`4!r!M$$8a_Z=g7;8_$WzO<4f+17! zXib<08I4A?eUNJnxtZUNubz1FzFcNqjjA|th!YjA17C!#b`_hUNF*Y8v`fxLnr20Y zYxXjTexnCP_)0R{x)H;i6EC1EC7{bJi@T*O7GNfj9Gqbe+c`PYcT*%QRRqIEI-YL9 z7dPsk`<-D>CbU%hdMv`VbII4O=h5CQgWvwCEfm%a13>B)3Nr+|(*AYcOifLV=u1+9 zkpFL)!l5Xr7Je7Hut;Td2&=x;io-=^CF`kc754GqPLt-&CxzyM!2)ZhRIlpFQum{D zNr_5)mYL<$AoP#ksRBL(N@#>2cTvXjUI?Sv9|MKaJ%i8hQ-cyM*oA>`vs+vKUFwaI zomIGWDLU2X_x2g>QhNKbi|rUIbjb8yRy0(!b**ORo2&mCR^NM^WOh!}7t~IHSAk+q zwd-QP=&f!fOeh(ZIPH|;^gXW@C7{WC`>J@8-f+X}dCSYQ!&&nurmmV3FdF=rJ@=N^ z=u_YPLI{iB+BVTEbd5GQCu679%**VZIK*T|&{1r=;_FAUjcdOhb0#^*wjC ze8^-GW*LE7oT*Ge`vZeQbF^ilIw(FUoL)e6J*z5G*$isnZktCZ=8xINl+-Cqk=3%kEIG6yj86jZ?)G>C zZ$DfD=Q>PO3;!l!H0Ht(? 
z4gKq#zO?i2`xWDRt!tP;=gDBGMSMBae#R&V&4*WHU$wQO6r0glmOiKj9W4@O5Ya*D z_?GQkbYaXMK?(j>nGExj(>-D0dLrCZ>>t<_vlR{ZOvq_=5r);Vsi`QhS1)gn0YX{K zBezv1=wkK}X;%qHtAdaW(LoIB?A*aKWxv`*Lr3fZlU-!R6cy_HRKm;NJz5dY4QYkm z>eTeeI(iG+bNhV7c}aB2`0j(}s5F}XU&rqnwMLT7EH;}`i_if=s(eD@hxau$jP5sD zsnmF4jahuE4vndQbKqNOe%WB*w2mMCuC)E6J&jPAkL}nF8-&0I$wjp~xpTfyAj|@3 znCPCXl?u;>jW)+3NA3km*z-OQ@L_m@lyB|O=-d^p)0Mn&X>eH3?aYT!>lA(bi_%DE zWW`uA{~TS-9r<9%vd|nC!?i_~ejXy$U54qK(m%c`QE(#Fk!hifhrrQ&QoBgWsHBb- zrYq`|tJo)=W^Q$}&X1K2wr`}ra>m7>xYd!dk4#5E1BmztHglhD+@#q_vZq- z`zgD8b3@QfH9|?e4m{zrq%5I;0Yl!YX84>Hwo3GevOnc$T7^9ger!Cb{ObfenCL~Wl3 zk^ebF`#0)VIGT7DB{$q@D+bG0k`-%T$hM49!>guCjEHhXy4BvzX*Tj1lWFZ!~UN#)Cy)e0L#q8 zZ@!(i6H~&pE%EBbZVR#=qoFWPk7foxb@WEa?BxH=_{quGJbEH+{4P7#1Wd4n~t1Og7rw z^UOG*jb;mFmzS4^f5pk9%TslE+%6j;EVsF{ED_VvS|;#b934$}9*6FVN-zx)!3$D& zYzJcApRGdex4&MGMtch(dyX=8Jd*T*u6Kvkb?p18_-==UuXkxMqUSQXRkOIQx$h9V~GcpiWtisjZ8?-*czk+9o9Q>9GbRN7?lkGJ%D1b$caW;k50WI5NYVd1(9TZhzgc1Wn zP{`155&KWyhg$C!7o_IcP{@&lVl=@?Q5qD^AePvp#LOIi#9cUqVoD+(LbokHaN+^J z67_0(>4DSs&U z-*UfR`B`2xA7mCQSr*H)3;o(AYv1c@z?Vd-hH=aOEeeqU6k;|nUGOBj^_09`CU3eb z_UB_SRV?AA>1!%52Vh8q$|CKXv9ZPay1qXSHhyeDsU{|J_}rvjV&xkeGGXOT?C$M3 zJ3DuGi(%@rdOtfoKOGfi^`sEKUk2a)57yo~Dz0wZ9z+T#+^ukz;O_1uI3YN}-AQnF zcXtUEAXtzD3mSq`K(JuJA-D$VL-O6%@7-}@Nagz>)2+1sAA2bRLLBy>!rfSx6F<6zTc3ytlGhExg$D#G2Hy zjF8>z`^gQ&L1#qL2{jl2)lqU{0#ASqI`z80+ZLZ=J$6HPmMf77D4s1zpkanU1H2vH zJGm6V`8_{(6>a@)1tE#{CvoqKDE8Vc|mPAjGzyW2J-~M^3Wa)gA5j zdymFmI`7t`T?W3qLAoh0kM=aR(2u0G)-tq79Wx|Pav??BZ6~6KqWMf=_<-^OY zs#|T?Pub3gCX!_WNa4#ChZ-{Eo{zs&pXYPr1glRSq80UWPvIs!fM{5W@%dr=;gH&2 zocjc+iiY(yXXY25U(4WYCRwfGQd6WoE79;U&yLNpTi2ufm>AU%6-pY%@{JN-f+Nr# z<^%?ZT1jgsU{Jwf;-~DljG=~bp?5c+u?gA=amx;r1eSjHW>};oI(2hBC?+CG51r%h5n?p~<2V;0>^ zRYT%#rG6P3i%dFUY{p7pu{NZQTOl!u8rR_`E(NsX zVP|83fiFdlsl38AYF46tI6(>aSaaOn6W7zPqj+EzFg&YHDT!@t4iV z87GcYqw^FOR)YEbAT!8WpRF{eU!g%E5|3J~q6w_+_N$!2&qN;=u;MMmhhx|~Ul|y@ zzrvq>{cRKQyhfj|PrtE04)Aw!I{o+zIAx!QyPGCL3^DQB9hyCXH$pQa=-x0HeTM-d 
zlus+%`|kcI!1xGK)(g_r)pa+882p%;f}0FHTV-+msAulmEh+#bzM-NSOcZR^N`PVH zxfb^0$B(x^ypVn=V;W-CmzN{?zG`t?e)l4$`^Ea&8WOoDO#UX|aoe0$#K1zf2Z z$KH0j_2cVENbrE2;j!Pzjg5`HYy7dwEMykbPFzvP)sxG6{P^(u$jRC|FjGdbq<-pt z4%j$AvcuGO$Q0-OE-7DT83Ai9{Ps@lnAX->gqa?y-29pW~7A4$0)i{vNWHDPz2KH|4gm&n`-!{51i; z#BKoIUOVYIl&A`X%AV;Q819cp8l`pTBTsQ;X6T`bQTQCxgK)*pImxJHH| z@*pbt)W3xtC{04rUv?W8{aUI|=K0En!CnqO1bW)_cz?88cl_SwW3Z}T>SP4CAgPx) z$t6UU!}U~^7AKpC@|++}a(TtM7yWj*|HaJL)Z%)ANM`fyDs?`=-FJ5h_g*CRXMUy( zxzi%>#E4uRH3tzqRM_OcRmJi#9N8&WRPW0(zT9~3mqMlUB4~P6a2#Fl#F@lqV zn@^j;IGUc(e0>4r(R)l(lZ0oeCshd=cUo@uXmW}sc%JA?HLjTl?h4)uJw;`9U5G&_ za90a?b4)*x{7k1dR13ps`OTt(GnGL2>T0!r6gl-B$~MxT7{V16Qg(7Taa>J)67`1k zxA&k?gI3gRR-5r-3AUo^a4aeTgM%0NKLZyf>9zq&*w{E>@`>3nXHfP1t?v%!*F^%TQjFW`Xl5Y+k0zZZTyy z6(hn~d%y)r?FCP)$#v^5=n*L@h2Qa77jc$0A>{`acvs?F=|!UDmikrq?);u?B(dhk)KG(T)*(k zwG386$c^D-VWW6Di*9m3fjF9~Zu(5ZxX?b!?Pr^@2>Bngvjk{}R4Y^E2)ppEsJU&M z>H5RpzS;P}NBOLD`l+E!y26S;-NZ!0w>#Xn$BggR30I$ARY|~)O{gF!Xn03D&m8$* z2}eFpb!sX?oD3=Kk-kP5vmDLFFS9wn?ygIkj2uRh4=}2x!rbSG=obd6IOjN3(WXcI zTzQyHx-Uz&lLWyBxN}e7NRmD}_Vv#*rGbuoL{m*Id-^b|$sa#zF@vV%pW`yLmZGl= zg`&SfbSi>A42nO%Vy=giIJTaR2_rQaLAis~Os6ri^wnh2k7gm z-$y^$@xHC$?^-nxU_m;*Iogkb`In{+QvAfq4dNan@RkQVJJ+lCR|x1vG@Z%~Y;k^=x5v z{6>4ePkSOrk3sEAu&KgYQ)jBQJPKugSXfw-4}8G^6P({KcfZ-M zM2`SA4_SybXb7cb$YK*z{Qb8lkl+{u?qC0vBr<;yT~%dG^QZrh9NgvK?oa>LF6dwB z=)biK`uA=^|CWyaE4BSsv!Va9%vV}o;jZ!j?Hvuhk|7eUTXuY_s;bI}mrRR{9{1Et zX|VhqA*f_BGcPYMD@*VlG<II<4;>L+%=J&VtY6Ng zR(x5W`Ql@f-3)&kJelF7TX^OBoF`n+r(l~S{X+<7ppBnB-pyqnF24PIH(X7hg8a`M zFr`6ATigaQ2Fpq4@$e5_{3pFzXG}Ru{*^O5;ho=ony?OlQ8Z%O!?b^_Lc0P&SFDR$XY8c z{%?7$oCnDKB|H*S)E$OS-dw@OEBiXrBK(IQRX_U}cB2TG5V!wW z=1^wv#;f?Xtc?zR!GGPzbH*W`{gSw^tt>;0MGFFQpKrEuPzhLu0B~yzl*V8FT1K@( z>|ov-NoCUPPBE{IQ%+u^2CPHDG+304dmQnnWctTolCK+ZReX21B!wJ2o6>qkCgzmBDxhrA`B?umsKCxN^-n; zxH}s+_S#I_E(q{9z=lbtzGPrV{o#8yst{H9*0Z|@FOD%_JwKMD7UP1up5%`WXao&2 zE1=7&uNB}pb)l!_>Y$_ zYquF~oq873u3s~OXC!>~k*SocU4o|}ouM4l-0vVhUO9yBY zM5GM!ftjn`DZ_+2q-#_MH8uPR3%MmY 
zMNax~M@**KCG-Q|!^#3Z;C8^M3h1qG_MnyZ)qlPVni)_~PoWI!3BoIkQQDb9KqdHm zF{|HhN_4pZl_&QCxsL$D{j1~Z$NTs1`>`4|a{&}cJFf6Pog1NX%gczWpO zjE?d0)RYuWV2l|y(Bm!z;y*uCmb9|#O{{44HJPrhtxfu(zPh@)u+W6o1?bk$z=FBZ z($nyM#+FP`Vl_mp`!!$AxY+LlAgWH>cfPDMT3%Q{!65fLR~MkTa&;zPqNMK9D1bfw z*wXSANNAgzo1I~-sNu`*8RsFlxe0M3{QfuJj5~j|@4q?t4r@gT2e5bpsnJgsqV%n2 zmI)VCJ54=Hh6a9j*Jjn#^z{#LNC>2+vGFYAYK4%;Gwzx#;H4(+OeGA6$KQ85kK1aD z)b_jZ+h4iBKLyj3$;qSo0#?IBo&1`;Tz%&zBT`d=BkQZls+C3&FR- zdxBM%+_f|5CqEAyRPRK?&5hd^I=De>eFEYWVIE)Pf2vApBOukxz=Ym& z(Qy(d|3*z`?`ewpGm-*=nLKT;jz&C>mfwzL3bUWi-T&5GHFt7yN*#+lc=UYe*TF@C z&DhVEG0c$|mbrM-JvYRil&|(^<|A+V+s3pbPV`T>VUicRPFbLK6tA z^FI77vCO_taSy+f;P?CTzUkI#&M*KTy5EEghta^ZDzK9NV{I>_y1S!RWJgf|m+9r0 zVSB89nA)#L3&W-B?l3H@3Q{oMUBJfSRf#v(Dq0*o`uco%LHl;CEktpTJUEb>$B~l0 zn;)#4&{mt+4VxQOO|P2oyLfeZe^_Z%oJ#Q5S`^R;ja}y*R`g#WfS@PJV}^1wh|uYA zk^8rU?;O;gO=A;Od@Qmp5NW%PD$j>S;{S2n{NqQyc}~hEPN$8 zwbGcO_cFx;cuc~4XDY%Qjth<&SuaQlT|o13@G+!Li*e@{)t3Ut1*K08tF)o`_-9u? z{CvdK8RiQdsZYT&Gw>c=#a&M^5SS!9_zNEBihJMBrLu1qXLX&YI6Su`57$Vyyxh-7 zNV~rlXsyVOYt?r0W)YKQ7Ek@9`>7liC<2TV<$q!#m=s{x5uNhEn%5M-8~Oh_-&?sGE#BT`s=w7z5_iu zYUth~&?O2-x`AczK5g5p$ApInjR|>w$t@!o3Yb#M(WAv-r{C|LuYy)1Vcv#En5~CC z)!dB@jHF9>ek=a4 zB9SjMa0g8JLc=Dv0K8E64@wz-J{bAQ#j2=3lG|jy0iFD2Fb@~%p zdkTZ;^m(oQ{&#QCR8)mC+A#b&e9)9&(UX~6b)P9zyR+De&CWJxv5&ysz+xb2?oW|^$}*!U@9-MCW3?{<$J-FEd=cQ zqr;BD*YCJU-f|=t0jVrzY|q*$htrH+Ci&rfdhb*E`AUpGYI_70Tb!UTf@)KI+;Nr( z$E)UBANPi*A$G-?+aXZ|Y3jqt)p1*{^WO%%O!KEie{BAJ$7*i|WElJ{J5|y|UXQipr+ImaD3(4f)A+ z{Kc;5W|SuTmgYbUb!N)p_26Ng*$;H^S~%(E4P(h!H;<=cKM%h>$AMEEqP_7OwesHj zJT5pcc!cKXf8;jy`RI6!larGmW)muoT3l>)eL)?HB^5Yt{@rbLDDl(nYB0s4;TNCz z0GMDJCZ?0Kx~p^L{aH0XGoG!VKR=5FbZYm!S1YlI{Ds5H&R+NaJzAn!f$!#*AqZrR3(<<`88X?VBkZkW|ufnWK=^#12Qf> z9wA}Z<$@g$+)^RHM+&IaJUSY84Sqj?r01yPiX251Kt4${^7JpVc|7*CRCzfzw&j7d zgo+hqE*>-G=&NP&+Rdi3>JzZ)x$jIAou8k3oo(6Zyn>uI5=sFF8t_YTwonOM_9(M= zKuhE?72^8(`Y^?=yUn?5_~Q_d4k>}lf@kD9j=ejI88kFBW@`*67mQcia%Uzpnp@#TI@7`SGlz18*tgVb`=ZG<4!o)aSwW{ z?IiP(#W-7wl|Fn$n3{7E<-Y#biDLby^m7US9!l1lj9gqa!~kl)+-R;G4i{I2C~jZ? 
zSYJ5=?qm|(@ir44_Db@-H~!=`U0x!WFk=*ES65yp=OY`x6Zy>5!AQn-LTS-0khpek z^OihF^BHfj=S%w6sBliip0!RptlTWj-6U`Bs_> zlg4{&Q22wAoW5DG+npLFnIW$m2QZ|l?>zT4NAyT9>Q zN2Ut3EwKpkj1q_@ijBEuNKx`YC5as+V>OMHw7vvQBoNh*3 zQ3I?-l5vW1gP{3%&&jD@BvMMwN6iI|qe%q!kb7v4XCI;YSYsRfLb3-a|oO%*|#)bM?tHl&1^aoL2b>UJ3}}E zcQ@Fq@@?B(bWGfEgNfkYLaE3_L`>N68)No#JtvOUI zXp8*>E93!WpRXttowm8Ch{#?b;-rO|Pu5^oTZWyrOw)vyJ8DAp zuUIOcYuplYE8a-NDY{kn>n&pFdTM1T9N!++I@y1GRnud4W|R&$CfEG~`vf6sI?7m0 z3S3E6tsYQcAt1&#P{_S!>vb(0h|VId;LudLlC>@+zIA$CUj2hB3e_carbOPYP*APJ zk-g&-X+>dCg#;lB)u@Odev35eA~i4$&ja2s(O}z2x{x7w{PhRC5DBI?0u%Tum3lY( zg5$fX=pPs8AE#d5-?I8{*DT;L0FFl@b>3Oz$o&%g?XsoIN9$L7B7JjRNLzetRKl28 z2o}{PAVGuY0au6or$Pqv$U!^Tz%*gqP`)rvK^h@ozumZw@HfZ_#^(N996PpIo}aNT zo_z4;6UgkmAT#yVmD+2Hi5e|a6CuNy59+$j`^C(#*aC!k4HTF8KJvhZ%IBU1fv+bZa0T;tIn(1|9!uFgw$n0AUkv)^m;!n%EaU`HMFw9cb)SRIr-igiT+L z)k?^dUsf4kXcZbX_crNY1+qlua_YZ#INzyH?oJ$2%^Mc>-C4za6q&Acvm#B_iZIf- zr!KadC|q>>l=o-~tl-&5#|a%gd`NHag&x|eX$70X7ncVE=uE^l3>so@e+*&QQrmQ? ziB`p=hWOht(nBAw%ukwY^SX_Woi|(67k=w~Q9s!XMbmuEvkkJHK2^nGUznd1i^ppe za~ZtXyI700%15t^`edN;T%^>|Kn_e*L*Dq+nd-{(m4UCQjz)63yG=zWCj@Y)x3_mU zFA*B0;?kE|CaHH9Te_Btdo3FcT{bswOB|mthg1b$Ho@k*;uojx3Hdf1Qg?I2>1wx} z7*wH&Tm9Xd62N?7RiQsY0yT%HaQ?3s+mNTfv`C&{z&|g@OxI|yfWJyvr`8K#?P5^2 z%N2kx7+A4|?sc~&KYjWC^i)L)fAIz4(;WgZLwdg5!>;<|=@e=Xq13(M=%|wH0M~h^ zfg=SjTCmYP7Elf$% zm!yCW?f!I)qfUM43~NkOiUwl~|6_(TfkNDMaREWyvERGQzv5`W zHej8=g)J_^F!uv#z>|M+-^KYb3P+PA=tf^Ew>_4(zhcF~{`nYATo)LVQB*gQ$8~`H zG-$G(Y<1aiABM~e#xQ_P)ZMvj@Fd0K280*nefHKH*E%KX=%Ke3+8~5 z^ZNB`^z&ji{wH;6C|wm~584F)GRVGG23#QIvG?J1FQq(m6FGGflxF=5r|7|!hK|nS z?)n562Wz;Z<9VwBMK?xFYlK+I%g}W-vaUCP4as?Xw;#*|ZUUAySpzK$%oe&#dRi@W z5o!jCE<$;UkhQfnm*zlejzC@M&meYA%7Jf~s^MtF4L(35>|x_IL|(CcbSGT7McIBXZY@v60cQ3vYOg8pBU-uK&rS5G?1sOt>va z0*4Z1QNHp&jmppH(49_ng}7CpaQmg!lSz3BG682w9{CSgZ5YqBS2MzagKW@kX#|{H z0RIROs!P^9WBH6T9!)D+|Jd~bXS1AH^@!PB9 z(JBUj%4lqO4aTu3qTA4KUZ8mHmSuRK@4!|jWoBOYAW>vKr|&}i<1R~O zv8MhQ3BdBEFsS-J-e098C*NgAH%tD#lwU^0pJYhKW3=g=H1J+xIBs-X-0|`8vn^H= 
z7;b-mVCEEdp5DvMBx>XIa%ef`g2YWuPIA~qW-KikG?P|OL~17fj+=GZa2JY+juvv? z!4k4EB7dB1EvL=|cwAd6!_q)V^-PthU<4+GoG7zX$Hhwzg%`J#*uY~*+iu)~l7VPc z9W0SgQ;?QchVF^}8gQb`%N}^0VmCi=uagI7#gDt;i`)R6|8o^)rxSH$z4MyX1+j;K zy0PB{ektM};4_h4Q}uQ;;Uool3mUpA{H|QXVxEkCjD14mH13lq!N5YItg0+RO+w3< zTeu5_TC?|@5Txn7rscIRI~43jqn6G$im7eVp!sg*kmgYevVD*COJ>A2#hq(4P{q~F z?Szk#fr_D&uZuUD!y45A6N_M6@e0SOk@a;QD zqu6BF0xmc4U=)&bCiH%^yl=d>6GhT=2yh{3&vb^XV29VSM_;q3lU_JO5<}HGHW(2x zIfEcjy9nBNxUJwQ6HnM(P+Bhv&I4UZVEB%VImk$z=>~D~Igxm-l0RH}X6iLMBH0%h z+$7wxY9puQd|9-p2MJ6)*dVUlVfm6nHFCkBI<}`M*F;MTqkK1jz;BBsLj}g#Q|oT# z-BV9otuYGdCGXKsg|(%%EtVU7M%funY|1xcLPIUR92|HJIZ%h=IM_rHGogu`6^@Ul zsQ`V&t>~Q4k8{IL3KDxwocp~y(Bd5t(a5S@AY8l@epF$Pxwf&bztn~P#_kJsu)?@? z$}{RHj;hr%QU~b~Dg0;FU;2PJ4BU|jZA4Kx&*wDjh{E`ZXBo?pZe(~eB^6N-t8|z( zSv>D2OCZMBQ0(P0me(ZeA|@R)-Jb+x89rp#`OWA!xFzwTQbMF+KH}C7J{z7w#`vo5 zF)3qEu}_nX*S3x^?;TuAi%+YV8t6!8hL?Nx6#tvC+lPKd2P#d%0u^=*eK4az*|+l3 zi7265gr9BaG;J40^=JgdJ;upQ%N`beTwH(ZaMhqQkl<$$E*^wl=89#c|oNC~_mvnv5R4h1>{p zj6L$I*-!2y6UfXnckG_`5*ksL{ zk%nu4xIV7V4M{_*!hUF7BJdX~{T=&4{1B zpw*q4#R;+PxET8H*R%}Nwc6XKc3{}z&d^{KJw%J|;vG%Zu)Kju@GA{+o#L8Yr*FOfZT#qG~nS5P{(3|7qnA@c~VzWtH@pO zdb`YGAN z_rB1PZjXoYAJAuYn{eO}5cs#bl`HmCsFjrYJ6<@Tff9oM0PN(Hl)BlLWk*$I*fz7_{{tj$LT|(uv-+GVUhx6lTOAw@@NR!#x{cm{IQc)< z-~n0!1*7lJk$*3U`!b|%=Kb!X{iBM?{MqpWQ`;Fpiy>NKOpz}XbYNca<;0a|1EFcL zv}T*{-Shn8h%l#;RcC>LII=d^n<=|}tIcSOYiZa#5b5oxTO*ag-U|K!mG1QnEr7gi zt*6`2BVj&?9yPMR8D3;47w5HimYePSS_NC|@9OGW{&TGS!0XK?!$dhUAVhusA z(;R1B7vkJ2Jit52$3sfUXya`UXNFf#D)OMOo zeKvg;#-skfmcyKsM+j_AW6H~8uzJ)hQZkJEo5ec)fA4+!-Pl;&Yz#-5_h@^BCfC7Ee*p(P`#{36=2Ul0CNVL3$Hkku;asdwR{hBam z)(vb{>J3STiD$Y(%h8+PIv0J^pg6Rsf9$6!&T`|*iPhKjRul01Z|yTI@Ss|<=OdEI zBML9E%qn+Nmx!Zb)qS_)9VU=;HXWfmoKwi==rXYa!NyR#W|iiX?@dic4N=;kQlv}a zZNA~>u+Rnq&CEZWBjeJ$<%=9Wojh7ow)0Z8k^bFmUcRXJyGry)DiDgTn%uLS#>GRZ3+jT;don@doc&%ypxS1lEM z;%_$-7}4pe3GLZX#r7(?JEdkr%AmM*Qd!WN& z%=dbo)*)^rOk5dwx~Uzi&IeH z6EgmE{r%0!$5;C9#|1MuTkb;7I*cmf!*l7YXM`S#0(w^+RXIz^y*d~#&as5 
z&(?I~eocVB&In?DFMJs5yjRU@V4$bJpF3k;w+0CLSk{*#=eLlTqEl7=;gIS>B@Zfi z=Qab)ccOd`Yx2$&PAmQh2sb+%cI)QV{Tc=bJM-L!;suXRcoS|Ck5gw*n*=7SA16Q0 z%Pn*w^thc@3p0$o;vOpiG(BU+v-2z(C2noan-$TEQ;U#oO(1~nZco}51~H_lqGIP; zp70A4xiARph1>QBvH~+q`Ip8+3jjUYsups{;3igxxuQy~}~@V589R^Iu)u zoYkOMhGnx#^R{fT1ImgSUB78P&m1%6lU>!SWQ1;gpyb`d!UgiYX5^=p3U~@AYQYf- zD(7>1!1Dqt7d_ZC_)q#^tyd2;oY*rP82=}oAn+A?#Mja2i!Gq|Yo-piy*dSHske-P zNB?uCz`6a4-!CpW|7t604hE?HjK~G!g>!SSH#WpHfTK#ou%cBh8ue-{Hrb31UEw1B zC$O)mua;!?Qg>;3d+`^K7O&slTCwettJz87>jU5nC4ZI76Vc%u7% zFxXIv>!%Gp(fr|muuyqn^4&Pcy&tjk|YZoO& z8X#+Ic9{bKugA7z<|saq;T;Mf7%YF@)#P=jVrrJmbdcxGQI`OIM) z*0MjI`dky=05|2I9H7cpQfJ`=`jU7lDsv{CBoP+L-E}y>d{k6{;X`UR3ySb``?C(}ilgmricBSB?r~&&EXt3B<{I*#Q zXIumSkv@7ig{gV$4OB9C@VE`~dePXhEYk3X%mobZTQDtVUl!;2$f0nQal(mDWsa9Vu3FW zA;?9KC;-j}vp{Sm>GSmTS>F#{IID$p`HEAd%| zR2FSO_AX@@fhU*|ZtXgP{N~MEEirdM)Nwe(ppS;9YHp?VqujAvsm%L44wvLotU|u3 zx@ASa`C8R!La^F0FdcJ(L~CQd zjRM7_4^}z26hT8PK`|PhShCp3kJ5%I;qi2pY(CEe42K%Zt~OzxddBZS+$i0o0L>KdzxsZ_isU-8GdL)3c+SY}1f$*)>Dx<`?F61+iU)s)u6DM@ zfL4&;*G2NC!WE4m)TyD$yTuQAN56^rieEo(nzFBjlZer?{eiC#rzr}ng1~}bai%H- zjQV-le+uB@5l5sF`%6ql0l-G<&T))aF*F7b5|J_O(+cI852l>=H<&LG0b&59uyYT3 zj_+ob-U6G^`Cwp-Yt3H?BfN*NqG|CkV+vE=4JQR>|4-WTEX{Tul(wU$C;=LiEkB3| z(}VDOADkD6*WA`j8&V&XrL3MP$C36I^s1ZJTp6L{x}nLDHRcuoAOKkEW%>F&UN?&Y zGs1aydw{!hBcY0G?^9DtbZXgtp;mi(bg*cwjwu^C{k!psNVMq@? 
zczqr7hdeRo4@lGZB0ys;16;+`vy9c@0lEJsOtflrt*Z+4e{zpGQLwPJZ6RCide!D@ z1K>aH&fAL+-D4o%x}Pp(waf!1|9?mmr3XyvlehS7@Ldx|jB_7@q|0mx=9Q@g%spNG zCotvc1ZgeY>}HNa4OKF-xzxpZM^M)f0O97?mb53okJoNeSa?!;r4no>~_>sI~*Y%85QBgaGE+s`6 zvkt>^K^rkOhAcjYkPVGk2+h(~N4teSCRPgl3$yQ{^rl{Q+dGN+3E6LH;65Da7&h-oUl`$01%KP@>yzFa zumJE*J7*3Wp=&}!e?JMHiwkneXQFCUZ3&N`WI{wf8*%6t)_mJrTb=(ZiJ*HM^-=5g z@{qk{=fk``l_&R$nEr)QepDHWW6@hl92Qc4p@bgC;Uu+ zUk6CP|0WYC95!88bw9;shm`P$>)WgDr9X*PHo;&0f`|3RDe5bIErD?sAfCJCwP97k zn>qeB8RMU`{g0FW?^zk3r^}fDY?;sHLFK1+e9_U-eK;>57PNP4z%2lhL>QW;+KLqL z+Y`VD?oSg8nBJ_%u=5Qo)DDY@isr2t0{2-X<8WPcd5I~Heql`Tj)ThK~K3@uPteZH4)(ahE_locj=Fok;oU^#aU6# zu1O+3w=oc>-(VgUiH3LlGOiQY{JkEGfa(m+-niK=0GMp%^>$h+wMcCV-MYCA_Y(XA z?MeNV5AIDT7FOvo4YTDaSl4xhIY^zgh~h%WV>C+k_PG2VRf+*7#fD{}eZlj%2MOCwR)X>zgP2POinz`L zck?xc0Xxki4B<5iZ{#R=go8-j-&bO+p{(dn82Sg@J*AgDt!XS_}beFtJQH zvtKFh5EuH?s(_80CvX{L{>9k;KKC zBbfJ_12rwS_wp2ep~tbW*qpF4Uk45Ywc19umLr+!1!N?^D5b!8hr}k0WB4Nx0REa-o7aFO zl}*RO!eTUpFN=jh`PHjeCD*}}67TN;IfTOZxJL+2LL#%vQ9%wwW6)s!6)*$w-v`_^ z%*nC@Q1@R;ZB#VAz;6BPgdiZ7Cea{KDWBdr35|%5)`nWc;A^O>ubthze`(jdj84o2 z%6QOkar6Tw2YN065d7*TTkycX(eR3ua#e~*9OO$z(0K{l`-(gsls{5P1-b`J``*gv zFg5Du>a0wYCK^DKI{DGzqaH&16~wtJwD2@oHEIs62DLjo)*(f+E8ir-WaQ-1Q&V}O z7l`;BN&7&>PD)DA%y?=~c8fv6_x$4AoOajKJ$vTNedesA%5Lb6VTLbrk&nncfG;&3}aMFY} zt;kM)Yc%8P9s!y&4XrjM2`|XKxYixSu3y2s@br@(+jPq5Ly4uht=GU@SfEysO2sMA z9pXVT)R}1B3Xs||*rLCyGt7H}^$^1Usg10Klhl43r0~C{Vr8wNGGk_8;mLaL@ffXB zH>dORq6nidHa?{;{x%ODwCJi zoNefePEd!5bw?Tinuj*&N@+Q$JqK0_V6HD>1~=MHCtlG1q5Huz1I<8|m_RHp4tyGF zy8)Sz^LwfB*9ldj|Mg=AiVi$;jxT=UJV9H%7Ztprt9QM*!_fv_XjhEESARRAbV zQBhGrnT$+Kl^aIIPpZLxWhpfEC@&6We_uDrT`?w5RxS?gx&vXr!}@r634-nQU1>Vc z_xTMVsJ^V$7T%{ufLc@d!hh?Hz$_t8Yx&__EmTT>4}E$Ls%96zSAf>Z491$lY5@w< zAe)L#LYuHP&7yl~e+2}o*TAM*Op#-Wom^b6Yh_$fK7_s3quIP__zR73xEDb2dl3$^ z`BJM376BJpE;TlBMaDE;L{k9-qX?(wnmy}@50fISp_HJM(bAMAq$XyoYra9QoHPpl zmeziWqW@3DlqsvNnuLyCCH6u6$6+B2INq0tZ>cStHxbRdVe@`&Z6R*c{tSg~@xx%F zbD>2dMq;K?u>dSCag)^JHMo#mCT)rp8k1oK;PX8c@zob@xiL91H$aKH#Kn9Y-L 
zfn%BcPULVz)CD5lWJyGI;6$)xWI$xRYo#dAUjgsutv=f@6$H=|8cllO&O^2$qUjw{+En!xWtLdyBtjaC+D7#`zGy5WUJ7tAN2;j z2kvm4j}w@al;nxd_5>T9PGl?gLPV=h@FmG64N3B``{8GW;XcP`y2JUj&Fc8g@Db`Z z!W2`%@dPbIec!;h*ab!WtZBEVW-BCz`bsYgn^W-?Dv$$>KyK}iWpt8Jl4+GBT9P?Y z#=QQ(1+by{TiGY*{{B6Su4&-bL@n4L=|2hz7{pXwlnSKU3Sa9?w5X`3lrB3XIiA|9 zxDR`)b+bT80mJqQ<^>q6#EG76%|dEnOj_=QxgwN?#GuH?XFjQoc(l!wIx|xQI5H%> z@axr?mHO-<4o!lXUrC{&S~YKX`3iBhs%{bj+jdg5$z@JlHU>n`M!0#m^n#arjD(QM zbD(HIo9Zm+8|Ig_29=JC*qlXVjR)Lm2$UB~Rq}S8igIyMKWRkRNtvXo2|#ftmME$EvJIq1$?1%r&tb@ z23gd@7sl=mVajM5_qZi(O}+G&)GLSuZ?QQ{M*T(GyOhMJsm&w;=CHE%eJrw)PZGo4 zWcklTa|R@guPQ+Pj~>BspLmPR2@HtI-};|To1;GkK#U@ zma%=@eO*AQw<)y%vh7$GusI*>SyKVcd&kU160HNu%p=O zsH04&`D<2r_o<`7hamrAT^%etj#rrgp|8QE6NwDt%&jR8^Nt}nyr9P&Sp}P+3g9mk z^ba2|G}}e|#n8zk6AzG=7uESFsTL8HmoY(G3r?v5!Er$9lke?lv5Ufh@}sCbSd;!k zkLMro9Qq{~&djsbC`Qrb{zqIE=1pWs& z8}>Q|7_gO>m7UNKI%69TI&?zd*zU0dEiTLf&Bu-Hku-h>4{1`t0kG)Vlr3brGkFqt3EpAxwTxb&yE^WraO{6ED8 zssFAyG|6TE^e2g*LQySCV8{Tz)8?4@q9{kw&pcvXe{lC>vl zv}t*10FW2-;-1QmX;W=J*_thVPomnf_`Ztf&jhfzfbQseX?3(%@3+Y0ldreoEC9tr z*i)B_`=}U~=HXsj)1&Wzjs|y!XeF;!!dqRxxC#NzRNBUIf@fXwsg&BDk3{MMAgJZy z$bICG$TiLYN>l9`5GH_}LW(7!zW?E&CJde%>Hcyil0kZ9J z-R5^edaVS*o%5e(2n7j$3`ojdBBqAl1Z0*6fWuZJnLq-sso_@~WV_-H7xT$57Je|j zX*jwhmx+}pe%;B}42y^{GU9DVbq>2w6@`>UmWaKs@C1YnuRs2+^EtWy0iTyF{0*j% zk5UQBshTAMt0|yerefN)^4mq5;pp?lqK;>Kd1#k z+O7&1(a1-WnJIe`Y5bn%=~-E40Jjt47=M@D$yB z^rj5`Y5Bl+pjvV(*IC+ljE(c}8Yj>#M?ihH^u#oeBYfLBtU@Nmu}{n5IC z8;A3i3JZhgxty+iJ0ymK-RZRk>hchkgu?;j0H9}&Em4s$Mix;pvxp(T1I(fO#P_z@ zol{`UCa%WbY)O>}92Hl57;VXh%Gu{dtpeRERrrgU$HTvmhdH75g@3L3y9R@3W%A=k z%jF3Gy>jaBd6!o{s1j@ue))j}z1X!V8i@-vpAG7@&~nkt@@=~SGt@V2w}k^M1gjnI zm+W0?tn=wZr3eZ_&!t3wz5U#?Uz)gd18B|8A|io1{#hh+AjeD#az4}WrMHT=9LqX3FQJhdL2T|Rq}F7JF!edisZd2vAKGoo2 z^7=kC;di-Cpszc_-s({XZBhctjb z)%WSnIc_z=xG+pb+&6CX)xrWa1)6Q8q}cCMdGo_>HNe%I$%_3hjq0iojY_$-l^SO3 zO7w!;Z-V$eM`Kc^n@yIzTXM1}P`)-(U==l#|3_WrG@z?|2-mdt^4d#R_RnOEr<3oS zYw=lowHVBrMTZ{FeD@=$v6G&TE~O1hG&!jbJSY6gAYyD23|n(l#dcD;kMSYG0(i7=?Cz7Mkx;XKoE6 
zKDQd_G_>)-v!2H=>Nh&9J!QCw#bHkZBlM9ND?{uT*Z#5urx=`RMPp^VicFW?<>p(F znlA6t+A6V3XKLqEj(m9nkgK~rpU%g^$HK&9E0@upmNwt!|0yZ0s662p0CXMDPW>;% zVmu=#RPeHwa85cJ6sR zX8Q|;<$EOe4Pwy3YAUd8U-{#vj7r)K1TupVHFMO$kotAp5@HR^#Ous|$#u?NW@H3~ zQL+}%m{=CTCR1#>A7@q%H(XmFY1J;|M;t6H3_Pi$0jifKk8X>z$MrnJss$gqwfW(P z)}%rYfp+&3vAtjX6mJ>$qFRhU&Qw-BF=$rJ5xNEL(Tqa6HC|x0E4VEoA-RHxiH@$7 z*yTCb1Ivk9BPAu~EUohv8%b{*@o;8YE{3{MrYlXQ>@^C2e@wR(|H_O1r7QlIX@C$0XyU(7zLpo& zyY%KL1lmmK+~DE=g)8#^%a9xGU0SQ-;$nf<9HytI1+vHW?$*k-Pd zsZl6!SmO7DeK%QeuuTjU(*7MUb#LgJ&lkO8zVkBG$XPL!u+A^oX!oh=354fxLH-oG*RGjn zZAYaZ$Oh^t`8D~wGSZ|Gd*>cpU(Zd*Mx(yR$k4PmED1na2gGjsQ@wD*cr9IHTIa)d zs%XXCV)<{wI59~z*jnQ%M%F(&B*a_OLBo8|n8&(i>gB83pvoRSX&Xwta72=6@1;T1 z93c_4SHjXcFtv*DEy^*KZxi#;GtwC119iAlIe4VUY*f?=GbS{7EUv%4siF(inmPUX z?s}Y%$cY!ViSlME>t$KRCLthCQWo}Qy@ERJ6FfZfwoIR!lDTq8D83Ug%L~b{#gBZ- z0o##dv{tTgCuJ~?Cc}+AQUXu4zWX>o3+g#gDEdR{A-ywU_jLL4_OG7D^Jgyh>(%p%I}0^t3Lj2j6o`@1H6Ey$M!-=c z@o);Rrnuid<6La8ief)sUh#wTPEJ8qMi0KBJiAx-(gM4ce6>GEyklc$BPhN3+}xnz zde-mi;~tk#v3u0w1vE)J{0Exhv5mVW8@p?OTgYi0$w7E=df}BEg@WD!XQ{-TWG{x) zm>q4l!3uV`#`lVO)G&t=G3{e@O&f-!Yv5=&1i4p#bTl0wijz|F-d-79Zs8&mHlSk+ zO`B0g*-HD}>&pI*{+UY}4;_ydV8WW3xIoHQ={e}{yPf2hHjfkb89LiM5e=B5Z#N^7 z6cXSixzevGJocOivdUP$VDFEA}VJ!uAup_ zGd;usG-{06Mxr9SaCgM+%s1d6xRtH>OaA;yOl)k&_|9(X(HYOUWwZGKB^K2dQhb4i zHtT4;=bbJ^;)BhOTF7pxYVO&>=ZWKMQ}Z;m)65=6yUX{0%g4^!)RcpHHHRWVTN$ro z$`^AC7jjw#z)f=h{5{+MG}h+$?dUB8!8?sZd0wreiad`)=3RnV{x#*slROjrkg#)W zADn%+l2d782kT$t+a|JV5QYgjYKHayO{q}W=~sgue0UFb9!|f?I|3SV`y8nip{1F3 z{DE&#vqwZt0sGJ83dJ&5*H3vw=swI3b&#_cb3fOaEANFuWM7jjg43fB$a&1Yyw#Cx z3dCY-D0v{38gnTy%z#D(FQHWkXX0*F85YhH#8^`2PimHrzd0#zi+pnrwYiq)-?7x# z@MVmP8Kol~Wnt&;71{hC{Oh=oe33tC<@(T~pxkWiphm z9*}tVwm^yr<)Y}iy&D$d`)Y7%W;oV6&X6=hrtxeQNTyDn`>IAW{bZ5F4gU8QGm0}v zyqTz0>w;5Nkn8DWjn8H|rE>zet(2`_GH7C5@2eCAPA`6s+=5=`)M;I%TB)stqHo*Z zN#86}sP;%LwhJcAccgfh2Dw!Zu1biDCkuHXEA#^CbOM?u9Ap+JN={)k!Gi+*OuU;g z*L2k=Fz0Z3FUHPJunLf4Z2o4KUw~H35-D=Tn(FGjF0-zI$HtpnD&`Xy*ptQ@^AFvT 
z5J#(8?H-%8ffi0y?JYn{=f=17DA-dk$X85(84!I@Ms}lL9cyX^lJWFZW1;yB((_rL zt2ON|b#NKIR&@4Rc-V};Q*evz&dqco!*p2h^wwdy^f$iM7k2Kl!wd;|Yt(&(pZo zYane|4}3zGCu@TI2tRwO!%cMqg5cF^2#%$KA9j0hh5F#UAV;|jXb>!~;0P~L!OT3X zD%B9?1`TMqD=8u#OC145Wb}ATi=S}jMC!@6tLRA?tsx09s$J(|8r`r4uVtT%QLAr+ z_)~4eX(KXc&6{p-m))}Tq(VM=Xf)1>jd$$3_2deq4PJ{#;9BGIL6ocyy$Uw#R`wZ4 zNk83ozUmXUnpHoVnf0T-$J94jvD}*s(mSlbyIsI|#cPEalt;5E^=C@JO6LU~yj$4f znePH42Q33|xvvK@S4}}lP+XvQnOHP8Et#v6o*2WpFh*4`TdzTE$HzD6bAj~KHkoLG zA$KMjY_{t(HT}0otk+4%IQS_Dt{)v2ce=ZbcfB7r(P+HXFW|pNWd%CxWpR=Y@0|9% zSw-RloszT&t;+T!rc=TqWR^8?E@xoBXUE&Q`$plUoU*jdjK?UK?|$(E0T~T2;Zv9%Bf4oBxGWhWstt5`fe{a84 ze(axZnQ8BBxp4NH4(#mgv~ELLKSw%%Y>(sPqSEDd$ylIn$rl^YF-yVd<+`Oe4}_1CGqa#t@amwBg;j5C5_&y+%dgH5 zgiv88Z@e$cT3WJOr~^1f?qr|!d!fNUR(=e%@$-eE*+8;X810+NSVe$}LDwSQj8%`DBnv#4U=_Od5nlI9}G?H9h=?r1n4 zhH-1SL&23NRd{2NKi1%C>gka$1X^>p@YAH2e#w3ZTCuZM%scIycWh?#l>e5LD+U~Gd?M6rFH}KJ z@TzJv?H#4f2hMN`hM#(d&G2~=PS_#A4)ds?Fl+^tsGDu=DVO2(d}d@$lckgadU}{{ zn|ji`KJ2Yi)NLt0jHJHIsGlhxoOn*2uNix?CFBCw7jf*pT2Mt=!a!w!T zVQeOTVqj5*)Pr$;OumqkfBR@I34t>I7~J$~MK!#4LH5U?$9+`Wc0tFbMBzrysmnqh zGL2ZV3DnPw)og@`wuk+qcmcWH_WMQBAv&SJ*))-BIip!lknp&B4%O1cC*!!zgW$$< z?{lDA>FOmvm-ld{I@=T`w`l-Qe$qohp)PW0n1w~l3*$FCA^C#ziHL@KCxb!J@ezJ8 z?5+`C-83CP%B!d;ykNajO|F_OcYIF=2VG@1tsh){N*HD1yP*~&BdS0doR0#L&vaWr z>K5;`ZnU#`yA*VzTCEO!r@ttxhPRq-EA+KIr zRmFHaX&tqeqqP4s?97|I=AOia%Ge`-(@(=u?jP8$qtNSKHnV*{h(W{?AjOuCkBHs} z&VyAZ?4$wG10EaQ3}ns?Iq5o~a_l3CEcQqY?8w~1-oc*x_-gy-g}*>V{Kn#w@>N)% zCLZ%=LAFz$w(Y!!Eak-pNABK4k;JI&ZF_Kj!OS*RqgfCnPntk``Bh(XrLS8xG3Wiy8V42 zJ3Aois&aDelsvNc5t8uRY-D-%aG7M>LVh27Dj-)K^~H>BrQBZ;kQKFV>gMa~@Tl2Wkyh`N#3t zN24*02vhvRtBJ6W%iZC2k=OLx*XC#mV^4tNjzLC#OR|PENS*(=&{I1^XWaJ`26fHM z{%XT)9jLU>LB7>3RN^!b`&QRUVF+CJ1}YX#>W{9ap4JQalj>{co5}rkXK_zft6ux< z)iKR`8gK`Pp+jLQHL0CgvsfTBE@Qg5yu}WO+;7~$NO3k7{Zg}-88H*DUzLY&9&a-O ze;F|jzdb-JDg}JI{=MOi$UERU0fuMHQ%xy$3ZKe;!H`J-S8Q z^f9UyLFP4S-19(wy7lW4kPAlLFQu9-CAHt3IEZZqfd_!Jvjk0{Acyi_EtW`u14Q*( zlTtX$%4M0K=YHZ1Szx8?H2|B6fbgHdp{o5ZvYeqdbY3Y(|GXZ#iwmCrg>vT?;LvcD 
dg8Gf-HxkpVIL$k|h~I#Z&J(akwff7y{s-w1m`VTu diff --git a/ui/images/sample-wizard/step6.png b/ui/images/sample-wizard/step6.png deleted file mode 100644 index 0e681d57591f5eb1dc6f5035c781f908136efad7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39897 zcmbTd1yEc;+a^kIC%C%@cPF^JyAKY7y95aC?(PACyA#~q-Q8W6g&q67j0IspRmT;TH$+euWzNf~JB zu01-3+8UctTZ4Au-N&rI>cl#j#F9-;jiG_-WlZLDew=vL$-teysy_=2Qr#1)( z?{_ylLt`s|6Oj?X%)*wBTzIRI^l z{#BRT;{V{CLsUpih>=B@i$zHIZ`MLAET4fhGm46^vU0MCF_Zj9*W~|C#TY(=VfYJ; z{{<@l-SvsTzs>&%{Lh2`xpV-wpX?M zE_J0NFM;TaXj8*~`yG&b>~$o_N=Sqg@z-DxJFH5PwPMg_eTCuvY3Yzv(c)@B(>*BI zeJUa-q6M1vKtTPqM}%3t58{>uB?+>@hlZEg&#qFsr4CX`EEQJ*jrgb@2Vmv+E2g8v z9bl+Z-+jdk4Hko=6&SO5XteU%BebWZWF8tET-58wAV|00zSw8bG2s|7IOQn)3c^S# zUaYSBhk?t`Krxtx9B}`TxSx^K|H7RDT?Xw1!94EjW+XzL<;T9|3EQM zBFG~etce(-`X?Iu3n7Sv&2<5qtu&@e@f)8i?l(&;wKx_*#w|?FTYl=z)f7fs3TO7v(9l@z z4#rZk{j0VG*dW+f82V)CLo_utdaVSUzk&EWQE&=;yDcCuOn2>K9^TaY zH*GQe;ox37y#GoW3o!@zuTjGO&zxYr-(RoL{+3|>CR+vf-%-x8!2dS<3*mnSfImE% z*RG3=x9`UW#|5w3t{0#l6=4l(UuT;~HB&+HOi1}Q{&e>bf$9F!w>lGFCJTOfyynA0 zNw2&YtA0sk4DS})v5=&;7@5=r{)Za-5n!j6h~=w-Dz4c$*ZcJ|)Cv+uWSV2&u9f&` zM0nrJG8EZKttO2cwlAk zlUj$0z;=IP(oN~x#tnJY?Fuj^e;qNru7~>UVe!!vWX}|r&LL+YEA79H2tC@zuAT3E zw$v%#%HsLm6l0l>WbF-{=tP`vt<-P3!CD{lA*CS%rwdKOOG*-%jKhqQ!hFM<>|J#` zxGsLb9B54%-&%xr*~4?IzuFI)Kyb8u%mD@NfuNr}66HpVNatZA*O{2uJq_LWCs3mf z_}(tXLO4<)lGodBPB#BaJre0j=lm)GPRGT#hpgpb5E+_-Jr(ySPzw`T)ki@@9}ZeN)yhQ38w_oxcM+?bHJspX$68M*kwJ!7%Opyj@P#CvE@g(RG z)QZ?kkDfZKY1B87OJqH9P8-(+k{R!wyRLPt`yMP*8Ub@jR#GjLqe(0A^`O{PU_S7G zF4(4jv(9)d>GhtFtOK_i&$Ol^FvFK) zL`h(8(;4d++ZEV04NKW~=c|Fguu|w%e`h!0iE{KF=(3HQY@XKp_0T0I~6>#__#-zryCR7EzAmpkfD5;cRSi5s@~*C&sc)N8L@Aw)oNj0efhP8 z`j`a%jOz6*!_e$YB5);+Io8v-U9RGo^ta`T z8TC`ez0C0=7j9Ne#IdBnUPs>yCv6bazI7JCLI>`m)oR216dV+us{qoD#S(ea$`em-bCSYWiDx;EAIs%CS<(`j)-#G!rM zcwJH6bxVDp<@7$!!!?Gr#R67dq1$pAb7>e)4~e=K?dv9P+Jy&dTUwj-wLR6<_j!yP zml+?5hQ@J4(lz6^Rd#P-TyJpNaBb4I5U0hIw7E;hFn^3WVzfqAJ;NDG*5`+!ZH5>y 
zUkl>XN+LGulZAmE+Fa2|0X8OR=_~2ic34GE!hSDcu9Z)0e2W*TRWL@&h2%#O2`J9O zaItI)o&5CFwI6un6l&qp2-hmF|nsf;gIi?VV>)}|{lt8i~nKfd&RFy2T4&m;v zG!F1zB=3f8NOTADyH3n^r06T@G;g!n(HzNzG};YNWUWHd;VhxoH$Hc(Z1=04JAtNQ zrD5bR8Bz%*mf~8{aeoKot|j~Es5cePZ<)bj{O}^~%a&kG0Buc7WYp`@em8KqilR8t zBt!vBZ^72&;*D{s}}N?ba9RW4AE8)z<|-tywdE6#A`wJK`)z==(9VwgdfzMV}5 zjMQ}^&shE);Aw}hlM8gY2dz-{5vV?{(P7{-ou__W5qt!n-~uIXqSoHmoQy-)ierX} zPuIlG4z$hNJAri!>fR!U`Iu9%Lm$dSw&Ef|)Cpask)AfdrF@w&YN~rwyE!+b_}b^; zgM8nA)gEG31sShs%yUE`>k}-l-82-#M-l~Yvi<8@izKq;T$sX35&wmQv5SyH_g!h< zN;;k_zh~rvLrA|T?7oFfaT^yZLXxiRy_T<{t>rqe>gZgO&y=}Q1^}t>KqjizXGWo^ zbb24WFb`M8y@tkYXvpu%0urB)b~sw)CDU)dD6wQg=T@ok4$sDO7<=#YL{fK_s_c>+ zFGa5phCaUue@iiV@2*Y}@Cyt=W(31kmeCgcnua@43d$MCY{fTQcc@CpM3GkKmuKZ9 z%0apN(VpA1ifKJ`$4-@t7Ie6suoxG646lw)S5rbY3y)JTSik&JML2zc7q3{ISBb~# z1PBI@{KobwCMP35-G`%94-_IF)7!Hb&cf^WC%wl08EN1?I~L4Owpo(19)mqGaLh&* zuPKT7ARfAo9Ce99S7sRU3(5n4fowVIH_FMsX0mCH1cUPK$5!Rz@XHT}P^n=<2bvRa zT$DkL?m2uwt&qj658u;B$wh|9%pds`p!p8|n3BiJg=$=4t6=LoUZCNqX8n{OAAXKy z%zVuiznzPt^;|u(6-Vb8xa4bX*F@$S2~8NB4nxp)L06Yir``nX5a&QKz!9!ebc@On zVz5&BV`y+nW|~m?7j_0E?*xk%aFj|agG@$ZzPRsbMFWYy3gvyfL@LR{i@Azi`!TB3 zYp&^yZM?YCJ;DZCbNs#h~)Qp;#Q% zmG6oZ&H=^7O!US)K$3D_mq4~})wlVY1IAsQyysmysZgV%#7OPD#!FMXiYk>~2P~Zu zUD^R};2K_+pyU9}nhJb6j}w2uMHTet{79Gr=Q!p^8SzCJ*7n1$E`jsuko0ccNP^Ds zL@V=n2D);c6Ker$w1F!|M7;F=EFTBy14>~md0k_bH+NgwtFN1bSzr32KvwDqOTeu8 zaQfH+A5X=3ss&Ntk);3NCAX}R&C&zP*pb%q`&_g*?&zRZlflu7KG-mKP@V6nws#Pa zn9CsPXPm;vN%Ire%o!Apn@^JGjUMwrb$CI21|F(C1Q{_~p9RKVjzu<=enc%1y| zEJNzM1E?^9MBQ*ODWowP;TQ@sS=2RhYXz;;kRp6aNo_vOyzvCP6knvAFyxK9)P`%4 z4^AQr*E}!DFItB2n%duXwJc`W(DAW!A25+QDR}}VecemiNFMmMnZlc==+;w_L8ny7 zTr|ryD`%IMsj)*dW^3*dHL9^2t zr8Br*29knI`SR5TtDV6rXhv*GTIJONC^@p$qzU9$&o(?qG=rh!aXc5lngEp<_4uwy zd?|7}hWamOb6-XXCd<=Sk23bk`j_PK0~2zFXoJDJ^Xn!I;;ea&n4+isW|jRhyoS8< zQG85B+?Bwi4^QI{)WjnI zkfJl&=dJjJuJ)(#30;Ygc?#CqJnu_c-{_qR-VB|qK!z_z?p2V6d{*R$5s|}SR=Tx$ z_}X^wA}Up75+l$u<${oUf1q)DyS2J>#kCVJ7_%hDb2b!e%35*pMX${xlEzyZ7lyfgXH{CODhBlk4%AB0j=c7 
zsFSj^Rfl{G4Gy5Qi%fJ#0b2p}h;zQc7{6nSx5JFHpPh`0R1()}vxm<}x^?GAFZ-*l zV@>g{=gz3+C(e+L;WliYz(E!4rhfz%IITg!khqOWj-Qf6?%`4ylJy->?=T(-@Fe*N zOM-_Q`lrdJf!?c}kuTen>w)%^ z4(IXph8gVL_I;^v;n&!=+>v&z5#<GL=>?ZgP`intMOExHQ3)4pD7HE@r%RLf1?>X0n^yzLy^m zLmYlRRi(P_Z<78-Wv%*Nd%85i{)39itRD@?mD$Kh_i#;ilKt}bwUoRy*1YWDME=wa z&3?X@JoXniUn!WhM{Ut#eDT+0rH+SOTLHq~0$O;|D+&csTH7oA>?YiR@EPeH$#;sI z5{(s$DU;18J}&Itv~~)EHFbHb%bMvTkVJ%Rg$^m0EbpC$bKqbq3ZZohToNP78>{Nf zKKc}Jdu_kmX=RDR5mGoF;x#;rCzWR=hGodE{BgQE5NgXhY@=c%6F=RvbiSrDOT>Z) zHo*n7Lg?$dW51mVC2r0*?C7G($=n>lTqgInJoesLi%e1-9ZpHIC8^kgQcP9_=C`!v z=&VEf=xK{*UREoMgeFq_!1Mh2%KO^;Qm}|QLmcnR7Twb)P%Wh-XLz*q?rF%3saPpV z@7+uScgwW$od9)M3p|ArV~?0d2!=CrT57Og-34~-X;gc^QqKuszn=;A+))JR&Q>Fo zLc#R3+|ROxc7P-%RSc)S_p2Aw1T%*>swVUAu=X8>*-l}u{hnxq?p2rPWvgTKUnva_ zcu8Z4Dl}T05ok^9{!>@ek4$nA^aMZrGhX_1QnjCrr;U9;vqWk3@E82B6ip_$&41v$5>5CcXx0;5 zPN}U_kr3^!csPYSkLban>BK9zihrx`KNooaZl14nlx#E=>9F=eRbySYu@y~hIn`gn zk7WXDt}}mKUjjdodxex&=t9#R@Ux+Y$2e>wIm}jus)ZDNcB`f{jFoeCA{d%pg;ea! zH`oO26h{1Sv1 zLOFx?dEAq>|CNcmk$ack1;MEv$W+8vmI%LhM#!LBsV;ZTGY`^wCd;Sex03E zxctfo{o;umdV_}Fixw{k)%ZtVp6*QDO5&8rv>cIfzW=`Pc&;ytA%`5Wgf>UwUY5BT ztx=ecxh5(K7CJX#TgYku>~TLRO=7bXG+MRB9lp2vE{lgVBJBF5KeSQ9HLQh)AAE;feADS?oMU?X60)3b+M(`Eg79tuzHcX}C*>24z zWT5TAQ|_ZJn!$VmW5zRq_90FCWV>9`dC#eXR1ZCXJm|psG89Mq*cAt#xg|VNbU=s+s2C3Yh?@BEF=aE@zo)`DF__F?AFqxW@7>JtOuw|ch8N9I5NXWSU^ zaD1lOn6J2Y)eR=hU=H%>Q)h1TAPKporD|el0TmT&cQOcc+>3eVc|I-Py< zoh$#`N8%oHkh@bS$(LcEFWvkOrD@_K{{TkLgAEUk;uww3Y9qA}9i+lXa4C!TQpCqH zG*JUw>`7M-t_f3V^;MTG!I!C-LC<2B9a}XtVT2e>MB4cKy25-O7AUPW0zwu4qz}J_ z#Eho5R9U@*pxDF8K(WV)C~KfHJW4l4+Nr?z_a^2NqwtV=`GXGiily;gv^8aE3TWzi^dnHM5iS?HuM zL#e|-TG`QaeNYfkXg71>Z&&eTag*ux7j+2aEX{?fuPTv@{HiOe)&b@iiB1}J)ng~= zY#73@jZ;XgAKUog{N0;NJW{Uvo02v2`P}=ITTIoPQvphq07@yWX!2pjCN(XE<5+CL z4gpiE^KiOo`EX{&i2QE%;VlXeRIqn#`}i0Owt6Vge(H7`P{!kwOG<98FzK+lBUU_G zo;AO;{vR`xiu!ZTctT6UQ+&pBt!IcpWxIKPZo5(OHnv`Eal$W?2;3!AZ2P*II{DcURz9oP z&=PWTE}iP;wJ!>|c%-5hN7Xt$l4wmi8AcvOJEfb(~+?OEVxnVJ+`7=OzcR~93C{)e{6Frad0e7yWvV>F{meiBKJ=9J@ 
z{SY>LtuHg$PD1|R5dUXvrA*|JGcBRiH}{xJkhptBGvlvBGlO$ z;Sr53z&CWb%*Tsh`&Z#f`Y)|{@k6jZ?Xzly&`s1q zq~!7EP2Y2dmoXdeT*0C>zqx0`KhJzRJ4*!VitJ`$@&wq%vzNb)EOA5>a9L^5nys3v z2uqUZ2@HjpP=Ye~?02DjU(dxSHokMKCFDfJ_nL~22@bEDNDPMgOG2gT&14{n7F!7r zesr6N4VuO*`6DJ8%3djIhn?5GJwb&Rkq;9D!|QQz4lxv$axI@&VVW#kH80t2GSA57 zRC75mnaoDXM@oeo9STFq8!d=_g)VI@3pU z4QeYh9$R7(eTW!JCyaN6kX}XT)zyEGS8@Cm?1tme{}_gW>u7d4kd;pIF{X3fu7bKl zw%(sdK1EJ5y?2+qt0xCGq=1&9ybAa(k>j2djHLLJ4*Rq&*9$z`*Q>@>OziZr&it`U z$R$q`m@-`KG}#x&{maV$d)~Xc5xB()m6IBx>{C%xSZxi&Iu{INrvk}AZS+JDF1EV*mZX-1j|4IU$w}@qXqKh>36Q> zU(3n)v$HR9YE9-rM{}E=5*N+!<3YQrkj)i4b*8#* z?VrPP8H2x!nbJAC?vLb;^Z331E9KqZLRU}B3aFTVxL_lT*jeH5SLYJ$8X#%L12 z_hBlZg#$Y)$VhT+{u$=&!vtJID}hE?xoA1&X)Z4Ato)LEq?E$Vqy&H59avlD#8;xS zTT*Ue`5`dsr0BAgy;j=nS&Y>*BOYS3)i?=hNTByQ;PYf;6gHhw>(sljIDEZ8!L{me zZViUTWmc1f<3)j#zDp-ll}XZ#DEampvxT#AsGx&nvKLgHIlm_pN!LVpZkMdA63YC2EA($lPnhUUY;5jj#p z+AjAoZR>A~ySqP+Z*^fad7~!8nCT_XD&i=PWgIP*6H^9J!fAB(figRqz1v@(U+*eo>o85^ZYC zu9U2*g{h$$vbGw)!!q+%ylQP9SIU8_TwkooLm?zr>Bywi$<-i|Smn8~rz&-FkP@?J z%PbsAj>jNC{WOfr4d)TgqZ(?HWuoS33&OZ0GBfFzX$67J)eTs&@8wHlJ?}%~T%-K3 z1U$EbW>7gCc2gsSu3MpwDp0Nweq)P9JoOeYa(P(yEG%#2qpWhbTd{7WO)26)hJBj8 z?je%;V~)(xMP|HC!xP<}HI%*8fJZXeFnwk(L;ALGo}0@0DC0_tQ) z_~EcG6RLJHHt+2tj(TFJq@3`bt9Vw+m}k0#^P<~OvQ#ysq@5)w(LfBJ4BazH)ugZz zo)8cgPNTui-FX|a=lDFjua}y@!%_dLj_P<&w5w<6;FeFD-FmZcAuE&b9?@+C=TbY8 zinFkMeO1Wwv&{;1b1Ar@YjYny(mmnSZm2;y%@3wVkM`n+4NLUQ4+%Q4#&T8#J7J+U zzRxWk2C%0}Ki(*MA9<=4zTCqs70>#nauG7xGjp{D=5=DX@DJ+b3Ovm!6O*GC{z7G} zS!nR|HT4dUFzPUx*VYXeEy7q1T*bZLv3($@crI{q(-V+5*LcX>Q2QS=FR*c z3{e@O4w8ENHJIM>q;W$;7tI&d5$D7EQ>B!pgk==k;^h|EMUxSg(RZZ`$D zj`hEbIq+cFp1(2p%-lH@;EiOJ$G(9wS?)Tat_4Kx)F?-Yk2h!fl)kuDPd^dk$Xe@8 z`4;T!VFSaAY+Qg>K{rX2mZ55l)pvXHfb;<|29IN%hEN7~^CIVL*C|II9MgQTt+baV z$+5Rn6Ggr6aZerbgTn&0lBwJ2_Y2B;qhdTcPYGYbv|5T*kXLj8E_Zv)$&Fw#kcB1}_Z2@tZ=CrL0)qJOiZ%tUC9um%_=H zk%3coCXjP&O0q(C`WSq9y_ud52>#YK7R~OBniA*dSq;RWczV6WKdpe6s3(2Y8L?n8 zY6-PEpY9l^?*_{tGvbp{!%jSj>Yo{K-o|gD;mTZ z!CcQMogxxUdaI^j2rj;>Znq6y5I}9Rf<7Pv&Cv9!%GcH)&i-%!Otn)dO+>BW;)c-Y 
z7H1NZm<_oQ3{Vatak>S!jG4$r(8-<40+YF-j;VdWmIjILu%@gZk%FPNVFL$Iqt}}GrJdT>&+?tfX&6fg zg+TQ~$)kU$uAm{4XhiC*j+GKQ<3)N`tOpE!J@($2gn3|a;=9Y+bs(ysCGFeVfFAK~ zLk{y<{9@ht!6R%kj-*YYnCwhfdp{p1{Fp)5{9;r~=*q>VC7iyp7zm8e6raUY<>ymP zS`Lw2)8gx0R%Yk?{`kCh2L_3;yxOKQo6{k9@$!iuZc(}Sqh$2s=@5A(tnj?up6@-!2XxL9?1i zBo_kSp;9^eP z?+5%|uIjQ1E+bB({w9NmvCA=lD2de2?&h{tz3RN>8Cnf;k|XU-9U$C2Ab18R?BOYM<6HcDUzEavB-;4M`fW_VJdoBCk7EW!4E^5ZlTQceEdh zFxubn-_aLT^}g!2jRE;KRofD#gJW4r^!&6?X497svgZ`NS68w=6j1lYhy2#y@sgto zV&aYJ)73VZ$!V^l)9)*NZqoPh{Y^igx5+J2iU%G=YA(>cp-VRn7B>}a3UABozQ*gp z%8C^mWB=MpkvlQ%25Krl`o0@?PAK^tS8SNqD&N^j&GV^}BSqIQe7=YNxd~#d*x*H- z%m?qx)vt#MSUZ?#HB>oz`Eo)mycC`9KtHt-#kRL+qKzb=7NN%bxy?gJ6R>5Ib{hL0 z*QOXAb__##Z!WYRqNQXxr@eX>kElNFqVX*R-=RoSCJZ5q7=763{^vP?yGLQ#CT{?W z`YV0s&4-2KL9d4O=Vc+9i>K)EqWH#EVk9%!#5+pE)t63-%`DYqz{0d{b6n>nHts8B`_jv~*GPZ6@v&K!m|ZJ{5Jx3fGAHy3BN zNjP;T4^sTaF^q-c*Xk5Q)pRpO_V7b;Q?4531(b(MrB)_UDs^Sz-~96YwT&DOC}=&j zY8IrHLY0<;VMOA>8)yC2t~*%Rw$^o{1gyKqo~K=DU`M=EsD3!1daL*y$*_I}Z5gX< z10>JTdrV8BD&SX+_KBFyhm!Xr-#V?KI|;KPDext)@(LAmcnGuLFyo@yu`E+H<*ig^ zJ?<$EoDgPt7_L;u1QfKFc**)L(zK*)H^gfokCoMk90Ws~&L(}wkMytRD#)wpt69LS1phcd{ ztNbn}!!vN+D8D$@nwf$pkY??i;Vnk74o7-KDHV=D0#*N&KAt6!P9sC*)mHvsHuQ6g zwExUP(G=a;pXzd~0d$RmT(r@oM+1N7)P-Boqi~A#gjp&iaB8m-DSQnkOW1oyhAA<2w2sZwId_b_auP}w+OmL+;>(W<#a zCePQezW}}{39Z>#=cK0$f#BTy6Yp9!61{{h`i}1mb=0D`?cTm)_iyzT@MN%Yos#$5 zZ=~m^nnQ9>-DVUwlGWUERe4l|zhhQ?iuxvHIw5Q>y-}HA+@v<{61DOjiU!hH@`rse z%$}-#Upg1wEo_LmJ&MdIU$aeFf=52xATwJKS+Z|jqF+>)IaQlkrVhMcJEu4$Czb?h zU(Y_HbO?ht)P8d-#|R@xTP-=DITmf~g0xz!TM{Th)}1Ed^b&ed1N8$uHN{G_D6GWq zp~^4i6JfF@$*(o5EC(ze*WynBlT=DRsnQ|PW9w$)QP#RWx)$TiFpy<<@2>vvS?4{yEH!_vx! 
zB-wjhiep~!Q2uqA6Z1PJa|zYDP^TU+ptnFa0=1_ThxYe3LI7Im?t_Y;loZB`BC`RDYEm;=_hi?4RKHQV7uQ@-)mX=QwdRLz$b6#N=vZ>eie|qZLX**3l zP;jP+jEL7appu;J4czucbu^XTIbTe(6GvX{UGL;7qL10+`(UG`_rmpSwr5!jllC23 z5U!+9jm6%40nFFz>(Eipc-4PXX-xrF9x2b+81^=i{yv%Llk@sBqD9$kX=zv!?LE_H zc5(NVQi4&aEuLy9VN!I2La?It5PO;zO6JC|M|~J`H7;VJvdxx4gD0jZA@wxh5m<-# zb8sz-#fmSdo`7iV26bBCMFU_fydqh1xTMzY%t%{*u#w2d3}Y*?!7I9~vA#7=JwH?G zO57Bm%R>2PU@3|GhXCH5u>_%C@98Y_r%W90701a5yC8YQl$dbf4@KUL@CbM#P2yq;;Ko!>Z=P12hkrgeQIaHFRIsCYGVqspqn1^axtCCCixqpU@O+IDBlWJp zxA4(F_oOGmwHD{&nyK5=a-ff-U!UAGkElPt*LLNA%+B}OjBbyucxAcxW#nWJpD8Tq z{v9r%H;xFZt)^1YVB6ci@vdAq8vMoR;W(kd<)&D&msjr-TOu#G>`*v(^w_@_cqxXj zODi{B7?TI|@`KyNBO6APYa9yp-hPR8)*R>(s@r*9*RaXOD*_43B-|z~m?g-OaLQYC zim3Qq^|nv-TWV3g_c{p_+pd1Pm(yQ7?^b^=3Tu8%zPkTrmVE}3I%ZZf+uUJtYtTmCIUPL6WR38HX$2ul&tP%<{iQ+vABiWv8w}z?meu_MJ-}c3l;Wp z$du5Gn_LVUnxu8%+J^s0;2uh9RQ5uTP>GHFf=7yFS5G{0-Dtlaz}j$SLwa1~78mba z|1t1HgSAdnw)QS6h`~#Ob~1z{y!X{8!uOSTU<0*E%{#*x=>RZTT|%xi-WPOtSEni{n1 z1$AYP-<5pJov&7$ar|$VE02n2jnCZz6v#7d0|4Q1QLLQI9iDH!}7*0;Ja ztiK;Hw8c`An3Y5s_>+$z7r`Q&Q~F@pwb>j9Mjbyt@%(}UGM;{&)Blv)a77xl0A5^= zAh&~c**A)5bWlI4vcgv`Ku6(k%G;gCk!eo%h6% z-I4X3KQ+#AIMEt>;YWF+`lQ)c4F9oGZ#GjR)7S#+3P)>G?@0Je9HmrdEO7fS{K@Et zV)|HY?+e%QrZyQRnM-l4wub7VGq@6LRjIHYBLuHO4_k&AKc-+J(HU3?Z|wAH^|`ZqO1#uGTn~EVH?lQV8=`TmqtHyI zHRc&&aNz`fWpiC(Wea1)lsG&xp1TP17EFU0nGrbK7Vc0V*&*+5O18pk6Ibe z)U)IpeT=Y1=^RXs>TLwtleJ^k0~9~W2`TwPD{eUdJ|vnrI|l_Ro9 zS%S`^5JizB!))&x;aQ0JezmN_Et635OJUw4rdb}Bc_{hU&cAU7rzgDHIc;UjbEW;v z(7Yo5KCIIqcH}Q7e(un%H_AF&P9XIq$NHDI=`gI-MIGX}K_Ncc-Q-#Uewe2XM5rnDqsM68;p4 zME)QTl{d4PO>uhIZ3x7{9sfcFhfkqUX4}v{0@H2nXv3lZd z!m}KBODae=E!Stkz`*dh;AY$&;;u58fF&(Xuds-TGt$<3NW*}wZBW73gN2$gcKxl} zK%w9ieYIFNBQpiB(tRThal&6n{v%wi4zyA%9qp)pKN(U=4FD*Pg7~8Nn8bX;I@6M! 
z$sUMEyPL4UVJCIKEcWfaGbk?cx#c6>f@k~lHe{-m2uTLYEL9d(WaBH5$^6qvTK{`X z3$y(X;YyqS9OqOzd;;MsNZ%*rous!1rkCcR)7X}p4?O+HP67>iEK!@uYV=_uXiL5E z>*@hs`l83N8TZD5J$i{e;5X=oTgUEAJSrG9MQbM-XD3Jrj^@)Vk&!6NNh)^^Kyt^) zsqcbmDi-(-?_|pH5-R;k$+$jEEAuCyQl{=NT`_6B(bCvSr}O zAUyk0L#AUao+pqR~?##ypNe_EhwYaq1O`U9$-iB|352qk)AcWwIT;hi zjV`tQ>03ON>V)DlT5`ydUw3g*07x_DbnF@mpU>G3Gc;T}hq@>vLX~1?()S;cC{RaF zLZ8PX>edkWAU-OMymco`wg#`%k^c7*?l!<4lKdtyI{H6PN$6yX1uY?2ZuwOadQ zl!XV)TI>65Pe&@bT)(zEiY=fu&l*0kBlWFFx6Lk9c8$Fe{L22 z?$;5Fa|ERo^Am4L42k7II!rj8A<*fa_dJjEqyzcxG>P?^EeuAmHCRskW4RH z!fAK=SGuD@|ii(Q1jVrc|CwCs1a+p)3h+drtU!_RJA_PkRu_6QT?B+LXy6wfE z1;8rVad2=t>RpKb<^BxTWNo)9EN>H%H)xNy{{5fD*B>L01AL#({_-h_p#NW-$^Vq8 zfO4}f|w z5RWh~L;vMy264o?JYcQ-BWRMy6z%kcHBhba`5L)-jHS~ z9Db0OwZeAV{R_JunHqy$2l|~~2Z8r~(HNN<15aWOc5`2NAaJixI@!fr;ZZ#ing{LK zcj&k$44sCtnB+5)67~)RcH7m(R5+-sFTV9Kl&5M6fkH2n5-+z~zI2*3BC1HAJl9s2 zGp%-yyEHZ{gD>w|jkdZ}OsB+N6S_izP*BpznRB zTCzv@-KQ;*Q^6SI>gsAUj>CEpM99w&>rqWxTY#!uw-p1Aoi;${2ZRD$RW>$@=@0#2 z6C%9w*-9gDxlR(nqU-wQq7&(enCbKj z85|zmbeEbPtpteG?80u4nu{BH?#{aJ$D3V~)$eA=q^;?y6Z*gPu8x%Wa*p9;=jIuP zL#*%zX>+GrGN20D-CP4K3U9jX!(U&&(}zzHhoXU|%R_;{1{9tMN2L?dGYCQzTtuSZKIA4Wc-q0e_%^-0}1G*DWMnoj}--Lit=FbOKQu^NjZC*;s0i2U`8%a+zrey2Q{v~R;5ocpZzbq! zy;x;d82{_dLaChO$pmaeZtkBM6=Nr9~#2>z%qLF zen6>kD2`m3+nVZsaQ2pAbu8P~aBz2*;O=h0A!u-Sm*5gygF7Ta65N7YaCdhnxV!tp zzb1R1@4fpz=iFEFX9ei4s_v?qHOH7^&b5C-x2Aa@Qxgblt_wVgDQw3`1~>C#T`ma? 
zfaAG&c^m4yN(u|5<>cO;VVL1kO?PL`9hTQ`z}F~1mjq=1Hf1Vbftijj07?@h9aq=4 z-=lav;?#ThT?I6UxVbqMY%muqMETsq^TVkazNWmatWJXy<)imuX`SH9<7LxHV7TmjGE2>YgGu2*utRO8gQ&tXs70Nd#A;82{ zQ=K)jA5Te(Y$61^Lj$>wp{Azh<8WJqP+V5_eDzJi9ZEiHGro<60rBOn;u+oJ)p;Ma zLi*C8iQnZ2_f(!7!K7;juk%isUQ?rd4%fE#JEIWbHI->qQ6a24F2EfSygPqe@uy`1 zuiKO3z$T5E-p>#)={`GnEddf)UGbpwcw|M%YUHoqlwkU@r(aD>2oryDrhqJFqZ-BP z=s>|SgS!gwTT~!V+=-OeZ3oeN9JybZ)oGfB13Uo88jn+t0rPIV zxb2z_@9SNrzcD_##s2y$nVbya-&@H4-9ggRKBjwuBQ4j0!1(7Z5d){a!0<4(`M*hE z|2x0k)L~-8o1l^*C;HFm+^4wR##n~i3y>i39(m};8mz1-l!R4CAi#&tmBOWe6_QV2 z!m=p>H-~T`?T+nn=xvK*V}lLzFWC zK62;Pm2z^Q76I`@ip2L5QHU&vUBa`X??`|->W!kMKCYLl`SMxpaqmk)QIY%Q^&Ix9=ak_4?dB?5fFa)P z9Qc%lGJO~v%)JWI5mJD-$vlB<6i2_fNWqEl^7LoPr741Q?_j}@Y zrD?H(cb{!Dn0(=5%Iv<|M3lL5bl_{E`05Gh)_+&@*t~8l^aPK3^8ojPqgMb(NsXy+cG+0|fFPB~8T_XC=C(+W(E*XdDGcDiSmaIl!2??qau-nuLrqnkl z*mPHCNQ-uhjXkxyZy1UPbL1F_(c9(Mn59%MHZIp+d@s&J9q?^$5O?!JFbo%k1YXbu zetMm#Y@!~>uQ}*(c=$hPpyc-ltk%FCe6UW$sqo&EvqeFo&T%FF{1oU3+m=0kT^m+A zX8@7)ruTE#XCQ+h(Q2T0M-M4E&l=&4b9wUZXf0#W=wF@`B1{dKAbN8i{u`1JB+9kp zlh$2*&g{6CadIme%|b4|H(9|qLG}ouV1ny!(S*4RA|UBbmRWLiK zYyCh`2}5v64VOtVKp^Ch$ETh5ck68yUmS6`<8QzaZ zG<4(hf?ah;oKw|7%JC;?R!tHQOxfHk-iLj}KI3^>kgk)hH|4Iqs(!$fKq2F&v*~gN z8Azp&_x9FWhHbm8K0Iu!1{2GEl)rx?AI@?6L^0y?{vBa1zFmf`4!0XuT#YpX2Ys%) zcTgPZ;FN+;i`7g~1}DT&1@uS&37WCx75dNga^BJM*|qXyZi8cb?fXdtV2h*qww`-B zxKq1J7Q`2~kj%(u+KR%ig%}CgJbXGo6?U2oQ(0du~=2 zagr8M{Ql0(PFY#m{#H{>kAT=7|990=TWn1D_~CUH=UP#55uJ#10fboUXvhn6ECDf! 
zAj;R}wvCRWL7B67A?8Oj85yhkH7RMiV9tDnDYPn-5)g{Z;$uQ=!I06z7{^#T!~n^8 zxrPu}86PJnXEU9UZ)f+W~#mJ_?#frxeyKsF0n_Nk?Ng+(A zWjK>{b;35f{0s#?_9AaSdl-G2l2J&GI_bTY21?gz>gwu@v&wvA z!{hmEy{m4?N!HHJF6GKDl$^^2asjCU*V4v;;&x@3{yC1vYqDrG_9&UW~E+!HD4k(x|Y@J+BHKuk=G@0W2~K zfSuj_;@G#c8C(d*A-agE0%jTfAB~B*Oto%O>{~r0f~d?bVH8=XdH-_UJpjeuNyhzm4A#91&<&8Do-qM3rRmJ z%>RhM|9jj2t?R#QJN>7w|5f^_8V|%hio$b!ecj}Cifn*EOe|#5i`eWTMF1egdW$h4 zP0eO^#}{W<(`F26c|&=zxX1|Zm*<-wRl9h#Kk1lYMYUxvo^F@+ukRTMxD5Q31E%A* z*4@^>_-^tGf#lv$K4U2Gqdl_&HIE){jzOv<>kN90m{%Q~bGi_#2sli-z(R&V;UCX< z8_*jZ70Qbw!i27CcS~%becO{RmNO~2;(n|<6I;pPwf8$*!S+GeHr{qgyLSG5W<^Zs zv>$A%MjBB8s++>_fal~ zvjPAjuRz>>2f~UYAz?F&*1if=?_`6;9OBM?`J*#TE;`Q5Ow=2^D$-ONA^6@-xEYM{-9% zj74E5ArM}{WjBv%eDgyjA$JX=#+eBZ^=%+G;*BPX2XIrOW|OP!oE5|;+Xn_n1QN?> zfuL%e6vS{JK~&R%_OnJ04In*X-%13PgC9qm7#1@9E<=NjjcsQ*`T53+*gS^FajtaE zwLbLiLw~11oi_PP&Pl#1!zMps zQuOD^rC>8?zDbuTT@Hcw)I_ls6%fh zfzBa9!`hF)2@QP*=_EDh6#`kTQLdgu>~p0Q*HH0>7byn(K}jum!M#pe1gWeY9N5%& zbQOeStjf9{R6#Ort1m|{j~>%|O3;Dl%P2O^=5jg?Jta)8P9v&C6piw4hz*a`i};zC z5KzLaAnmgx*zVhkS08t{LAppn)9H9?%OXes3SS4EZ<5f@?0jW4=`GO4=-zungcbvi z4tax;Ox}#+T=Ah{C;77gA>bCgPo!v@Ta>Ya1Rs0c-=4^k44nRbe>!F%mv$EDWfvYD zellZssK>DemO(wJy+M5VEays09or()reupqHcbj`we+r9`M3b9_uuXyp2EPUx&1BF;l}*{2bTjo_$M4%_qP=~j$a?)V27 zJj6}2h)8s`$gFmaRYLMq7`ju}LjZ}Gv;!(Hf6;kglKx14|1?4L=@ICn5--WyZ$4T( zIxi)_lGw-&#dlggfqI2(?n4$B3JCeUy`~z;Yy{p$8qd@pM`?3%9nGF*JgY-}S*iSTO~G4bi`;&GmQz0K=Wf>hD8}(5I2pt@5LZxUxlh z6-}_tTHMb!&KMvcKBwkzjSHIx3UrYg)@2&c?C^7ni2@#KIvgV6O0@aK6TdyqVzE(bnSM->6U{;#EytkbsMP2kqZ|joxww^8d>_m*6S)%da z?2n@O5K&YRO;vFL%%Rm5#AGP`+Lv`mo(F)hV%Dr!g9{#yVjy|T1rT5eKYskEfWAPK z`V0l1MI+^&u2d7eFacBRb2TB^RfS3j<&>OEFw<3drznR#7#D{=X#(Ki5GF4aVjQg} zn0F|;AJc2MLW*Zw&QU~AA1X3RFrBg^mL*oT?n5xV?fFq`3pd>+poH+!#LVtF<9y$OI#fJQHuA z^;{5;?A!6x0-Eo|8rcL7$SPU>oS0k2lKC@){I2)&yDuA#O&r>9Ri_{VXdoX6hxnES z^{0IDfqdD6z1h*(Vk4C6rmF70s@|FYuDt5Cwt6ccJ4Y`|M*88>3NMjU+t%BLBblbs z%7mpE0{iuBCL5&0sAP6R5MNlX`abZ=5d2@B*Z(?G{{ur#cBanNCS`S?XQ}ntHXgdv 
z7e7k;-hUM4>sN&S`+UcF_aA+mKSTR(zQX_9cfvA36G<@^L7*IzEA1Pk;mg=OB#c^bNB1OH>8PQO*d}?AXDmE>V7^x;ppod zsHsKo959Me)&AO=B`AI?x4yEyzmfln>qSG~`5bJe*gqcEO)>Q_uu=G@K&`g<6!BD> z13ZWV&WqgxOos4icx2=|&kY-z-S*_-osn4G9bo15JD#1P4fU4R$glPf|H7!6Do^u! zzgJ$E^r4`n=$GA#!yx51`8kesLFbdMvDW6z+LUq2F8109n1(lMzt8TO7`pH^SE;l@ zXO;@5n2;KnT)F_^!QY8rjX*7^r&C%J9RLwvDB)9qtP}%`hxsBSYzyI>XNze*#saSQn>5` zt(ZXmD*5terl$UN{&WnF2%O8rLY3&)hEH z2<;$;B>ToYbv2mw3jh>KKjGNv(kMeAoT;cGZdOj(dLg%mIFyT&0EERi#Tf9ZD7oXHn(lReMAFdZ`gs0garz-Mhi;N54JySC@M25=Xo>WdGwU0kGw1=CSWa!EW)3UoFpZN%vu;mOH_EC zWBjub^?qE<+{^Jp$Mxj$$FFh1iMPBy_e*6ubwze+Ej&3PKRi^Tb{MnZUkk?06Mp1+F&0Z>ntONZ``xB#7#*v|scUvcph<6v zdu01YRX|I!(6>|Pi#AH%P)15`Zd2VNlEN5wn}@=Cx0IyTXwNfD z-@hN^jrcPtF2liM%0hsFg}>23w}_VXZs<0zT7N?&N!889p2*?&=JFDRp&(i=r5pyG z@Xfvs-)PIV zzNoCskXV3q$aO7q%+F;VCSQ)g{v)XevovFK`+eS(e0pD{E>Hi8S-grO!DSUWQPcd) zr`(V-qvOSP;qun$F9P$40XQY#i2euP6sc(%~}7&izjb)hq79I$^7*fyR~msbB+c|4yD8{qSZ} zX)~|ZEI;bD+_+n+V*xA3+c#gpib;^qrobL2M@i|%vu!zmW4HLwhp@ovfT`Fme$&XQ zz3*=q2fX(*yeUWQ^}+OUPBIl#P~As%^RXYH$QU>{zjcZxUk9C3u3l10%xA~(-nYuv zIis4XZq++YKEGUqZ-CMJj`|U2&`Twc^|5iTVx2tQ_WPbU+bONzaDDS*-@{}f^1*8#NWTDAeQ5TpEII5N{%ylWLD?k!rL_@D3Fl);d~)Y^2a6qyjE&KitM0=y9B%DniFgCZNVh(D%bgom z+tgx;Z*t#sZrWEOzlAXnqR8CUWWet4o~T=j$m4Cg%*@(Ai!tF~jkh*d{#t+}Yig?W zCCBS_T1em$hmb^kaj-)mB0)i_YkN^{(byzgrRh_*qe#|f6XQD|l;{ns5i@@)&z$Q{cIw&TTP<7>Ci| zJYUjpHO&4c^J(`csjj#qcpMuDS@~QiXfVK21;?=xJjx6Pexqibd#nT}4MJ7c8#bRx zZw@>izAq@L?lz67*38x=s$1OeUTUfCG>c-XiWvO{UCy-f6 zjfRPy{@Q~-87WIJZN-o)rF6tt^Szv&Eh;d`-GOb|0cf9}vC$xJzSy}T?d%1TAXHJS zn7PBt1Mia1#;$g{*I&TSKm%hC#b-f!7BYkb{_B)JeeKsiSqF3z6xSGR75LhkD#7ce zS$~@N*T-Ks7H|RAp^6sC9=|v>%okYWjR{r!X%IT};*cNV{;xmRytal~gakD8uMb^9 zP`M3or<6*5*@%G8pAHIj{%-EyT(9`&v2t0_JN}G2=1@D92BN6 zza!@N{&b#1-{f}OyxTqiB;L2_w>)###Tx%qvW=sXF)~{33`V#;UCsFVmCb6p00aVE zN+U^cZ*8rS(9;)NjHP!pXqKvPP1f2jt{K}bH>$-bK_AzEkCKhn%<1dQ>%Eb7%>Y-1 zZN;HLk7G055Xj^Y3qd{J?!ZZ}5~OdMVrfYAQz>$x;l`r6MVRl%<+Y-$p{t#PqJaY= zW`5iN(cW%reB!{3{(@$q)jJ!`DEMN$J$4f~kW`E3eCe|2wv9x*#LN?{w&)W0d|HCK 
zYGIM)bAz$W-geVg{upVu?$%fv-k5%ztcb)Nm4N9Dp^+BtO2PMX+U$KS9SE(Lt=HrV zFbKV@D~`I$@{`3H%q}#<9G2r%V}kU3Ig+uyTVEunRl=wmgd^8@c#;4z;^n02)K}f; z=4c+w;tLykcOhV&;wZaoocuq0Y3&wI29uFuEFH87UK8G5vGNXDBT&T}@*I7qCZc40 zPET7(TSFlwFdZtGcN26;ZR(}=i^{r=0&jQ#%lZnGtam{91b7ApE!?;VoS(brDk%c|eND6}(9mlct> z4L3)lNwmx@Fh~&Wed06lIb0t4${ZOH@oYSjSL~UOd^FeO`CNg%yoUoJK!{N`V>>b> zz$VLmcq`Z6GQlz(!nd`5?dkFc{xWzvW}(AiXOL;)<6~V1HqB%3)&_W2l*B~sMHh-t zNzDE@ys^7sl@T*qObp~^Y0NWfd>HxL;oZxQacoS?0%=lk1`eCj-(I=_=_FKJOu_!0 znWy2l^3u|9Jdv>rqR{V#SKT*}&{`w08NWj0hJZ&3E4g6F7c#~9d-{iocz1VjL&4ND z(>8m$ox_72(kjj2qN272}W* z6$NhuQ%2Sg{08M>3FVjqWexs|d(W__0#=dE*a_7FP{vGZOq@@*M#M^y=5C5RCB0L_ z3bA5yNq%wfZjpskqx$gO|DqrQhPZ2~P;=a0SG*c*ny8h5Mre>B7%h?rwY1U|P9Tgv zGQR+i_4n6zUlZ6i-+mPp%}oU(&1RgI4r4&cg08Hrj4J`fsnbCwVYrd z)R0DM^-D!W_t(>Td+o0QHk!YI+UGwIK zoE#-!YO&5YDsWe%T>Ly#Kecbm`D_4~tqtDcph!pPE&H)M-L1T?7mj_3$Lo1y2$Of#)zz-2;$SO# zf7z_5xn+>Z-BpSSWJ-~WiBqiW#|7wyxmAP96sRYcj6o-2Jp*wZD&;V{~~st|g=T5kGS zB|UL@ZY$%5ZF^f?mM`gfX;&PnffOFq7D93LYh9fbo|oY30;tX2)j9JNOgPB<5isBMs836Lx~%@I0o?wui>@!Id8bEJA4&f1`Fp-y5}w zO=wX+s3V|^m*Y~z-H1g*M!SW!B5=i*BU#hG6OoJ~#-sxteIC2Oov57pP9dc>96ynQtV7^w>Opkiy-}?yIvenj&w-2Ft+4 zPgv)CwE?Jmg_2eheERk_;=8qWwWxVY11-9PLd}I zcVZf{_62FzjrO>KYMW|ttBq`bz0mooA|yvc%c(EM86p1Gd@BUoKc;FlmR=LI&QaB-=Qllj4@x7p*XXRiY2<0N#_RYNV#+JRcps+{Sk} zsa8aj|xF@2I#=mU0xM{8lBtJIrIyohH1`TPVnfrUD^oCG>Tf;=1y{xiFqbGZS&p_YV1o&+r$I z!Td027boJakj0QnHbwVv&iSB?+}zxt;rIDkv~i4yVR`z+2|T2W@4eLZq~I%_zH)x; z{KZh1`THx{be1V5A=#V^>3hEdDD|O2Y%v}Ac}TjZPiCj@g?R%{$?CaKnXY3yWvjAD zJL_=lz=HcIKYsj}o#HYmm_7)ss&7(NA9oe*%H_Sv|3)DUOm%(7PWVV6*^?2l-@?^C zwU*^j=YIeaBWUC4AI~QwZMk$@RH_$`fzZ2)>XP9hQ`7Gf_2Q#CW$G_QO`@$Ypoon( z7L-sw76)iK@ka;Of|(l{>zVe4P*LM>W}pjQ#4pr$SPHNVf0_zp_pXZQZMP|iNkMFf zE(>MQ(9qE{-+Bn}rRi^8jHe9mfa-s*6s8fJAMft$%(gE*2w8L{Ja1=mC}@!Fbc|<8>)MyXTc%IQ74NR9- z8FP`sfgKfY89qZddUea$I?YF`vaQ-JCc7&Sr%!Um#`LqAIcfw$iA*cM1aM%)LZkQ6 z7jn$Qw-#?V0cuLQ$xOJ?wQ1SBsz+$biyIXMm(zvw{)tV4a^1yJ>)prA z{0o#vx8phjKbK1ub?q8UmIp4XDB_3FsC{DsiD*JTzmrsa0*GcShperKi%6Ai(CE2I 
z20-$tSs~(&zii`VB=Nw8uJPpW{SZ1OziC?lpNUDjN4sW5QcAE!(=yJ6vi?ai6A_!< zwL`v@U9}a76~kr#1OWh@Y!y zRp_J7%dLLf$=4T-8@Vtn60vyP`iC0rOPJ1D$CM!4qb|jA^a+1Ut`xawY{G2tz@YZv zDf9lmS7uBc1wDQVhe@C5j)L|P;K!je2(V_A2~V(phE#@9y38WQ*OUoLi)69 z7u#_*l!ufw#Ch3*Vp|(8*y8DZK5{?A^ikmA9YOp8Xgz{ACE zUZ0)oy3hLN_7E3=Eu+t}t$tCoXJ~y>5ALSVX=@;Q%-!B_UA_@X2&2biIJMCOTBXfC z`gJFF{z#k={=~)Qv9V1i76~Q^i35Ds6+rWaAtW*CJ%$c^qbM}$k|a^odDE@I4kfkw zi#px=uBVVD>Vt(2W%nR0VY?kKVK7)>hdGJl66{81!R4+`kR{FW&62JqEkzhCz|s+L zdFR&JIL^8+ftSF3RdYdvgHDK|!$Jy$@K#OJ>_`)DR-7y^fuD3JFwzYM6-NZ1Zj~cxQALXk;N=z3jhsXv zn;p(Z=X|$CRwY^>a;ST6S3WLp84mw`XUgA$%5Q%nnd0Od*Fe>)>!`PuqlxhQJ3!R8 z74lFx_sm%Bw_`t$KRL6>J6#o_TY4gpQ-d1F1u~Wr#Ce{lxQ4|t&=BKp&~Ew5;#iP z*uKuA$c~F7s2))9{9en+-fev0tEtY7U1DHETB1@j=N>+z5l+W#zMvNTm+!W+(!Vsb z)k_wq>1!^3myq(clxh1>Z_vyrxYM(>cd=3}w=CPad$!(TMg^cs!3^)N{9u*a=+*+Bc>25NJqHdpr)6ZyLpGotVzM|Cn1Ey8K{ z7+f~a-)(X9tIpe#B?8af!>gLOx&UR*|^+Yo)*{ z2|d-*I}f#*3677rFFdil8AncvoI!|v%Z>RP@q9C04i^-|x)8fbCZ{0xm8GR6+emEY zsnI|)jkFI6tmAFGwh~`*&RV6vQ9`o6KOV-K?+`ayfh)~{ZhhQP{a!Y>Gv85ISp2-3 znp141Qit@FuCNiPfi{$AUaoTq#8V0mSWKx!D9gtLp9<)Z!6%h;WUg7K)ww0Cvk}&z zYO=6}-OLXQ7%E9_e++JFB)FhlciuVg+N`G2BeOS%VDb#t1jW%+QZuhwMBOu4Xso6$ zhnVW~?tTm2v>Rc=FJ{E~ssp-E0GUNJ&sq1$zfE}vSzl~V#WL9oIyX60dBnRoDyZi( zypW70q-(nQl$E+DmSBroB&=ld!=yJlwBKJ>k^8ZjltpzYi77;VJnwbLWI2!C593DU zgfXHtxJ^DUtE2qc==_mL@*_%|%OrDT<$z_gtZNdjETHbjF%s#e2l1L|Ls z4@f@xHmi1H(rIp;G;G=eu`g^edt|gfC>#Ln<}am#{i@UKF%v>cM5c6JqPrtc1qByA z6c@sY+86WAJLFn$9bjdpAHXm~M|(bsC@SvJxOu#Dvso=%{)y+K;_P~~^4wKcBo%$^ z8hG<7ez%v08rgCAE|K!3-dDeNVD=)(1;e`9FNa3-tImD0)HXglJ6m-|3%p(M^V36L zPmdaRk7!>Z2o$uy__N0t&u6H#;=F3r4+o=3H7u&F^|Nql`k4DWNPmz?2Eqhi9y-UK8}=WfbL{VC z=q&~_HEcpO8CssN%D${kj{?B~b=hL*jfL*+ZWGB^`T7SSA-yY4{S*8T9iN}d z`TE3JwUbr`ihW!xH>+x{qNUv=lRw2;G?X-hE)EmFBUxuO1D>Gr z;Ri4KmGFS<*%6Z=8QBjzQjwH{+m20X@_`+S+UrFQ9_gN-RtfJ1m}4Po1J{p8OdLNx z0Qxtt|33_ICq#SwCHxyvz4N2yozIs+Y-_-+oW|}7HO1zM7#fuEA&NB@D^SSi{yt)E zU4^SY;UQY1M2yF7$N#hrgb6z8`6=f!%RiClkL6#wx0F!+VfS-)+*<_8NBcYr-XWHN zzdEXaIj}*0#S#Ag_&4DMsg`V`M;HbmJRo`yllBR< 
zG1q~1@8#(Q(0p7-N7f_XSU~Ha*uwu|XMYDh{`R2%`j}){6B>QR0CI1iH~C+C#^?I4 z;cz%xdeE0H;MLpLS3IxhbzDQrJYQ!|Y#bu(3b6mc zckuG`4-QWDF6H}wCCAx{ygl1jrDT46DONeNxEz*oR?j`=fOtw63kHn;qMy9tigY^w z`}-pxcPri@BnC+3vN?S(q`oo12b=yOG?;w#GF`17g2Dmr#GF>~gqyEU$Qyl!m%yfIY*9OTmIwuh4@Qd%Gt?I)mHbeq}z9IHs69iAyr3{|Tu zhwYg85y&F+0ZL@+Z>aO;yP_ooGE6HATn@8a1#J9`GgTsTAub1LfDeZB!=4sqCa{6A z_Go3h4EzO=YM>cBMMVSZkqjr!ks}ebDhxXUQd3ja;8~6Q&ty^wQE zjaLUtg#3Xq6N1Qdj}#V1tJ!~Acu>1g2||&mAoM*vpV=WjGBh$Fu9F$$yfn!u8Gf1C zoYt9sf*KJp>@DL7{|x^GIYdQtMTjbkvDx4SZM0__vyVl@Mr(C8^WcS_F@cyTwI`I- z=6Wb`@2`UcRPo3l1r|gaqFiV%3SfyAMc;QGmbb<53LxGQ*f+BRiaj7Xj~|#m^ zR{5ezPa+XZ)Z+KzO929M3x`_ed7;jKSx%7YW-r5D0AUd%d=A+2s>(`KXCMM3Wss7I zoeM|$8aMJVhLRTXP*vjYuc^!=?@=$73PO18-H*lG+|*LY3u_7f>EYIXwnWQCg@K7l zUPh+qI-p3w*}u5F)w4GWuTm6D!04AIA+n^l_5uQPvp+=8)#2P}{}*!OuYqIgiATqE ztIzj`<>akFLy2R6@D$KRBaqs+J-P0Vru9q~6Ht*L$0a3;0(+TSF1gzC`XE;58Ps&z zDi%PKPf^gKh~C#B$GsecTJSAC9rZT!P{6YqdS|7aG)e+@7U^(Kk_Hz`l;HdywtcbsqsnqK%3Hqp7vy!9gDeMc@T8`XFWzfFt#Zm<&eC6P15Vl>6W)`6`MN)+8UL#x{oDBd(+2-F zs(&}QX>7DV2Z^|5wdq|}mn2|F@HfugwQFn~7a6sHV>&X)hnI!@EwPr;%ufJ5RL+&E z87w;2K}kCB5R!ITqnUnQsv*6*d2*l8i*ZTw-F$N#lLvRgQZqvdj<4s0XqmLeQ?v3i zKOQtq`}3!n{iwU!fQB_n8qW+MxgAa8Tc4?sO)B_Z&$#GD+PPu&qJJ@SuuM_q2Hj>|eL$`NvK?OH zJ+MT6J1$Vu2Vid8wa_?P=27Tj$8$7`nyD$nw)awBZE5|!^V7OhmF8v2RYhP}>t7RyAbaH>n<7A+bkk zvvQ({8Py37aVVz(8&ItwJvtTW4?uN4?c>{W(pW)w?g#`}+?|ykX%@804W4H+oppL`9f# z{xaAI(VJ~@DWBn!8Yk5YrGz=UUqxl5($&+y0yg57$xZXd1YW?k36rU`_SxyAoB;t- zE_J3twg{z2Gx?z$uMmkD?bsmap-Th=q7Pj72V5cOq&0@YvyM~`DUstu3~GVQ2$bAF z6|m)4KyHCMqV#m{I*LEJdVGV6*k-ff(4fn{`hc33_21Qg(|f1$`-_&Ah9^7#Z}2q5 zzKxr6vy=LEL4-Jv4g941q-Jj_U9yMWYDbyq>pKlFBVF_(rLS9u@+jNz#DrDXOe8 zL`cGEXikz&!x`{d;Msjex2Dl>Qz>)-)+!%|K9NL32F{v*9%4Dtv+=AgS(^}qp=O*|YQMrCGtn&|t($?x=HHAE>n zWJC1KH!Q6~k36Q*EK94rz9mJ_aq_90z5w6iE}h3C+%X+*pd1C0on;7!;(pSZ_391# zo!!N}&6E>E!^IhA>2luzhk zD5VtIt)E!={3pfOGT$XJ>qG?7CCcfs7&~JqOG0Z1H~}1)FUuS=XLfd*RvVPE$%ol% z!L+hQ?0Kak;_I(a3s9ZqLQQ5mS`vF?YX zkAgXp>o>zpc%YICv5fmcKm4jGC6!V!F92hMBHP@?$?xnXp+fHy$0mh61xos+Jq-G` 
z8^S<;!kVm}S2m>#Q-v=2Y>1SB?lP~y+44mt&4Q?x6pS(Q77*G-enpsaV7aVh$4Q`F7EVA=+b8O9QlwX# zf$n)XSw|-o!`I_XWh{CFg(9#S_}NsXP9$%S1vmrt0qV76xCO1ZPy)7)@n&oLAxdd= z`{-b?YB3t3Jo$1!G`7$AM5&-^_qP}2#aGIN3hW(>TYm*S6t1f3M!?^^yR>q|ET^TYF`w_9-&}h;bfNT z+{V&{_#gXN=Jy{T*yAFRh#gr#jikPZ3HAzqXoS45kiURCTDOQtT+g14-Fs?HKx(9rw6MT_DFjb&tNOv^%@SWq{2qK_X2)r0xxdJb1;VEZVQ>D6>riSck&um& z?9Ut2OHYeAk})LZ8qfE=2|nfUdzS~Sb{2iEFPAF^5?rGFcbCU`w!c^>3ibFl>NCgB zmlNL}FVwW;xy=~`J{;f8KhF%-a3d0-3I;!JhFmvAnM^K&tsq-Rl{bw%HaxOJ1csi0OeT744Yp9dMACKZLG)uX;RY<&kBR{8`G_AZra=*$Y?nMt>QkQOuX58 z>uoVBY4($n$~m_Oefy$*Sw6qRkfziDk9>Id0~nvl%_)5~sUrHUnvD_h3cN#|F-<=l#LMn;0tBCNRLB{BLW}=&$o~Nv{vq7_2U_?C zq4Q7B@RvSRYCCn4c-Kde_%Q4h80I3?;e4_dWOLeV#i%XJ*gr*?aB1=6(0O)`mv2zSlQ&Wp?A8%6Q)vutcnud*ewvL>N_X!>BQ8;^g{PIS+mgLEqu3mat>h^^POae)X@6498 z7t0*6MRgl56E{^Io>g`z7vdy?ud(x}{TY}Hyuq~>0cu7VeXXBCUqTU_<&@GGy?l%6 z93!O93lzNFnVP&r%I%h=c8|eO%gC|7_6ohg;L5_fg5|m?{7MzPJELxp5dqp3EKL%r z;`e+1B^?;b@;luVWUrvy0o39CKyK0=LxSWnCXYGbMpJZ|!!~MjEyyu@>Z5VDW3Hy0 z&KejfF|$j}sNXrOCj)xYryS&!4*w?S{Vll{53y3LF%1+PD?J@=(jkj0w!jHx7xzpS?ojA(k z2G!G%UK%RuIMETh*gLSx!fV?UA#}|syTGb7`eGuoivLkqT4?yJ%l9hG=rs=ZqsFPf zMkrGpL#XLvHhgt$V4^w)e55 zgxu)P;$jm{M9oRjYpt+-ztkfdWs~5#u%Le-9`A6}B)m&b=c_&b4_d49KuJzdKUGUInqE)99{tL5qOt*B#MJKCD1_67XQ^apd!}Ww1>Ne>~dmFvoU`>(pAqx3|&hbaI$>N`jK9JaC-#k6gyZr_~GU*^~ z+z`CVp?s0>_nNezRX6SDbutnnC zC2?`ze-j&)$Lg4|j!wU%J{dHx?qt4wO+VQ{O^0XLIauBF93uNUAAW|iQSF5Q@p=L% z;L8S^qgyoTu!+U$SjsOgLp$0y!&jt@HNe~2fhVBBJ6HRSWMBi*@E>jjCZ|g5RjuNU z6H<{K8dBO}F=pk`W)pJ{FKlhfr60g@T;F0^fyF3{dDAYirc-Oe;z%x9>$o{jR~k2Z zkf+XL*oGy742}ysQP%Q?9wWGbfD+%;A#wG&0IO6nx(*w=Yh0|HJ-K@D(u$`av)?~O z0~%P;;aqR@Tzhvp&_*0sQaJ}dkn+nJxoh5j%QU^yyuBUpTPh-Y(wVhUYCgQ2f!0=f zGw1<0BeH@PMBKIH+T`(9#Crk0b6U?JdtrL`W6~G>;Fw``pV|vEYt8YQ|6WC}Rp9b- z2RYWVSL1$zBzm~rJL(k`G{M&TMj`nvt9wma-Hr1F58Q|ET51BD)jJoHe2wUowZ)oB z)&>_u3;K3%ojat~1EcMR?!sKZwk~>!w3{dtjSI*-tO29UP{XT79tq=)5}%{I{Dl-I zLesgGTvZP2I%>FtTF4e;b6Hwn+d`Qd5dZW@Oj_EqwaJ|pSr7=seLj`W_6x?0Z=Znp zXlrljVS%K}^xm!@C;~m@Aro|XSQz-|b97(6KS(T5)*6me?QLGfLBIv|)>z5omUgy~ 
z-!A<~du@2FcbdGQPSGf*T0Bi-FF%V<=J_>_Hr0e@ZVx@O-?hjBCO)K*Cu2+Nq~F$d z+D(lE$f)&@yfhDnV_RVuf>tei0tF_oUTbJnxaV+3)@ZC#1-2+-oXikoPM=TR1s!Uz zs>u-FILCe9bq_fB^<rXM4 zSQmNS_EjT%!f457Crnvy3_|;j*^$ltNrsw%>tuixRFs`{39)XnI>@d~-KQk381Pba z`_VXiu{b-}a>mfWAvJ!B!l&_e9AlD!++gLMmeN4$<`CzgVGoEe!4vlBnSo?W(Rlm3 zh|1fAS%EFfcL&bg8yFN6k|vIL*R(0&$^s>Gdq=B6TDG=$vXhgydl%L?Fn=iV^KBo` zVirFDzj`&q921W6@j4;p@`J6-`n^@JL)f@0VsFcTRZysi2yg7Pv5km86mU$*<&~?X zmnpbjOv(*~xom3Cc`w*~+RDhBDDB}kvgcOc&+c5gaQr?#bYDD$ zr*A@S`2^a_HaEXrTPz<2S?zag*o<|&=Th*3--=&qJ?0|m^37qX5grwa!Tw{G!{^V) zQ-F7qp&9=8Kx@*w*d-<=#x$0xK9j!I?Y8$;NPj-R-KEia{>~mInt!LNO2G!|TMr3N z;=RVo>U;dhmnv(uU$VQqOX-S3nKY6;lyu6}xG(!g1bn%*zZvw#0fQ+n{w9tYO(?VL zAWUgT2oLm&gVl9)b&mnv91~g>$)im@K{ozl)EA*6eU|itJ_FP3?&?dEBH*&wH65DoPUnrb_!r>oR7M3-B z8f4#yCO?-x12zO_oP-TmG=)gB%>weRhjHb$*OORBqSD=n)Q#8xYqy!6;!6*sc+b7B zR|G>e;4J61yV{!^PH)0(D<)`1w(1mP4#g=J{vzA=LpB@XSaSQT)Yht}3n4{+V zsjBqHwKKCZ7o+;l8vuGKfPdYHA$ON`hXwf*A-@H?TvE7+ArR=^Kd3HUs;?O#<_E zv(sei8Y28p?`r}xBV;hxe-niW3G7IqA82bdO|Q1|gra{T$B~ z=yhDJ97$>ZjF8xTl4vXD`?yA4W^lo=<?3y-A^*<#Rs*8ZG@*0)Su|RRsaLSR z%jEL;8+%eH(`MG46O(*zCA!(K5%-}E)48)(?!{co)8FeTR;gQg1qeCQFGFRILR%?x z?-O9sVrSbuV9_RLAhxOWlf2K8BRYDTCw*EA>^%px8Xc=U;SLz0jqk)}m}Y2`;~gb0 zj-%e9-IAc?{?{4F`d3&k4pXZmRgo8rrP(OC)N(=HD4NsiKg7=)*;4@LKT+?Pf3 zc-m~es!+I|#I4UHAgl>}XG?jo@x^JbHe>4F;i-{D(W8%%W{z&5dS*|FZocQ`Z#B?) 
zZ#TFS9QLXA>1E0uQxoO)NaUl-bpM@EvPSL6a<#06V?U*`E0_SJ#P=bxbUQy(2L-{% zl}EwBE?GUtQO>!4R1A`e;SZn0A%k#`hXscacJ_XG`E|XhD{Q=&(J)fqPu6=1iRoi7 z|Gk`0;unFl5&mnBqF8u_Fd^a9EZRpUx{=D?Z{mWdzyDyXuhK6PxY2aLBa?{_cu0>p zp)JvH&#AB~hMEUPf3~X;TT=(qcUP|L(;802bQESY|G_N%e+?bJNS=ko>uken`W9~Tf;BQJ`$A|)*mpMZir zB%Hzzji;Zw8G`GrVV=yYUfb&VQiTj?l|;QcS19SdgcOi6%EQUYNr=?J>ZG7c=n%m| zNoAEF7=REw2YN=H_Qthdb|e)(udZx<%_D=^!)sw$r%5m63$;X|t_=LDqjn7C1mpeg zPuZE!9+t`M)K>giUF5=z2C6VnBmgjWpZM!16929?NIjOw(4CUTRm_9x!04g}EfNi#MZon~3qs0zhE%pRrS&;Q!Kb4hh_bu_s zc!)88JO5$33Z=DfF*pp4Hvb_8;wZf8HjgJVH!DKI1Bxmh2*K8;utghZR=9%1fm#R) z%XM)6NAY+~6o`V?y;HuN}S-6D%x(-2Z7q5MaWX+U&#JQHgod(t#kX!z{`k52+ zu6W!qzRTsm5x54!!_l!ZB_)^Qpk_}lozgFK}FPIA`)jW<7g z$?Qj_18s32&92hTB!jmVfPWW=Dvs5j9YUKYGh4>-8AFech_W%L>%hKeHszJgUflZl+#))okhU@e?h?iH)o`Dk>>dF@4ph?WL=&iXpQ^@91nDj?^ozN GiToeL`4|uY diff --git a/ui/images/screens/Dashboard.jpg b/ui/images/screens/Dashboard.jpg deleted file mode 100644 index a793cf2d78067344b0ab693835126acd95812e49..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 69188 zcmeFa1z45M)-b$jkdW?9rMnTNyFp62yAhC3xFx&UP60_N=@z6Vz72v;9Q8cU zIp=-9^I!kB_As;7nl-g%W=-7p-ds&wEdVfuo|zZ|0Aga40BFE(aJ2v)dMRrG17j0W7h3}b7imR37c)H$ePZ56gxt?KpIKO27})C& zKC>{lwBvlnLwu7tClJ48(-9Nih}fI)5I?>yl~6@WmQcXT)_{Kk&(3ku(s1*CY0ZyV+0NIoRqM0L4k}5(9Pj z>w>NYfwkcjw9<31FtD^26XYQVa%lBU^f_6CgcuoF1ce^6vNAF-2+{L113!X-9E^`2 zv$3!;3E!m^v$V6H~9ec+Dj+Sfm`!EF(-v_#ZC2(}x0*CR{ z4uJe-a02w3fYSv841fj>26+!2ICCxlfad}LK*ufs1cCo*8sH6p1P2F)0EdKtfP{er z{=q^+LPEpB-Ma^S?;ac?+zlYYA>2p2j{t{)f{KcQf{TNLgG=xoK%k(Y;Njpg5fL%5 z(U8!vzeoI40Yd`0Y6ENl=Q=1D2*__%000IK0tx{M1@aAk z7Xbna1_6!;Y$rGf7$`Ut;5HeMjf9K>j)sT|!N?@5O~gP*&qvHp!fYK*>Y0KB07ip@ z--CjL00RfTDG3yb0gNaDnenj=3KODSYDx3^1>(qILY)`>)n3iFjYqOh_-Q z{UUXN&(k_Q?og!`mr+m{Z>I`CfK1DKm{Wq~- z@iAkk4^;r>(zaU*W*p3H6w!Lg+OccB{i1pN2a1K44x_K{a=Z@jbL5YRQJT2XI<%8O zQ7Sc}2LAv3$VyN#lK)THCuK&SkV_zU4&Xh?m`CfYprUE%fWUT0aq!dJPu~ z=zfVBCn#1z?D_j_0iZRQ*L1fz06;`^je*Wn0e+&3>*(JAv=}h)ruioIHPcx5dz60c 
zck~+)-hJIrVeLCMRa9U);Vl8{y*z)%{}zIg`e=ica3I|%{+jiKGKugF761%V5|dFM zNTY|zYO!!1YpR(MUPl7~{Jge11ORbXP4X(|l^#Tmc>;2jCuG}_pxla6QgB7S|F+@j zw)Ct8ykH(%G(W>rjmZ=Mpir?0)hQ$}Gb6GFi-7?ViW!&@`k<^GqqO$I%`0^COBG7uDS5j zXqVW004oI%Wz&rSaK?fN*FbvS60lj)ba(hI3q2tBx+{WLn{F8(Fw);e$bwRCxwion zfBN6);Gy8V1kcH|wIOTJB>Kn_5Shryk&2<7P9#q65A28aOy5s{T0 z0LEqx*o*p3bs;mZlbJHxZ+ZH1?|gyb=r2{MO0+%X~Dj4c5uc0%e&;n~H|M5He4S9`rJvFrNxKvSmcPM=C{2h85hF`atm<0eW`;Aek#QpFUAts}C7bKjg zb{9PWO7Re4-i9TFCHyu8sHFXrEMN^Cq_51{0wsT?E#qb~quLxMqTA_#50970+cGkC z!+humCh*MCe6Ryr6Da`2U2Ck5^9n7XJ5+9j5(nqar^yZ2_W}xSE=iWuw>bl7_qNyS zcdG%ch2;psUs9382Pz%L`lB1pKO>BmH;a!v{au8=_Z&J z-);0edx`m5{%rs$cAd8^2_KLKzpf%sz5_i34xm3VF%<3}J{lf8(Zs?=Zw@@t>>({Z zDvFVs2TI*Q)tBR%-eQ0jC*dbk<46hkMDvaJH8+c!pa8y;^UH$v0B4szD1Ma?>Uj12 zLnc08DxmeHbnp*GiHJuX2oQZUgp5T3xuo9#-?K!KP1nim{5h%U&9~fvrc=Y^Op9#? z05g!yK0P2_X>3LJwj(n~{JO%(^^fI+qw00b>9r@CsV#v?KtROn8Saz}db;uUG-vs; zklu<=n@Ch1)#sZhBH*X~(Gh?F3uuA_DbdZm8AIZCgBZXkm)qfaTRbd8_D?1!{~PUF z84xVZJ8=MM&@J;WVALq+0an%ej{Ze>J>7m0eHR7nDl*SI;um4AJ{aU*kOQd+hyT$w ziG}sr)cpaP!{MU;zyRtSOBDb{){ge#A>kimKoq+mB_Q+{O~r69S!pG@M<+g((COYy z3ZN{ie5!aQd7!d=W*ww>)9dfrzs&|%donM91iKu{jd-Ou6C0?-B^1iWzm)-jx*R_S zpC%s3gBg3CewRl7 zbv}&P9dR4p%j9EJ;W(X(U)WNT>cgn$9C6(5j3h}d$Hj^Z+q>__F%!(X7f_TX>jWA*OUYA2SS?z;r_d)infX{n0nsPg$sN zVkwv@I6geIq>PVvqDJSb0-R7k?t^5um3JQ6rU??{CoTxUO!f1{C6(#W;NgP|Gv~cM z%b(DT>*uS`3yUK-+ZXc*s}M127pnp)rIda=B)}OzJ72`gvQ@9|F8xUx7*w`D>7C}@ zK??&g*Mze_Cn05222c0=V?GcHgjm$5T-eii4{xKCmi4i2S zc`)IN7H^4SkEr@Mfy{I-K4cz-zf`w)Ru#Jub$G6%DkZwOk^aHZZBj6oUw6gBO!!dW z>NDwCl|p{_0RMYNvYr#o`q}XJvQ$=^$`=i^aOMTtYJ3ff71Nb{?nr5 z3GdvPXbbCur}ZV>9LtRFjo&&-lUZOE)(e{(8{0{z^1XhI`z8IJffodS_Yg;B)M%(l z{7WtoMbK`RpXq3&zUcmwdq}j^$6uYB{-_Sn!^o%~cOYQ&kCH%&72+rA1b^ogGne~< zwZ^ITx%MBEfeQSrvsYK$&HPGokQg3SyChwztb}9qET!wWmnE61F!HH#i139^+?_E% zB7d#2eDhd3v#`wVxxP5UztagKBc*=lq3zz>78_?35+5|jT)bD`zPxQHdESH;3=>_V)FKL+=HkjAr~~NPn{ssj9Ts_7HzoWd$wJAkwyG zgafi^J*peJ4^=uqGW_*ux)y}49dIT-OEI>;$LGC)2RsOT_=&ERV)|?j4rNK}>k{1o 
ze6lxf@*&~_hJ+9R5SQ+sSZ2=~!9RHSerUtNe;|p31NNuB1SQc1{6F~5Y5)p&`vwiX zWCVc#9an0$3wooGp_m&bpQ|LEqeKZr&x?w-o_AjVL6v;L#qaEUZkgI?itG6QL(l#! zcbdrDrxd+Q{l7bK$9%_HWQwK7*kKUdixtg8ZK%>$5_d~R@^(F8f%{JWA-0>~iEEHDi>*X*M(xA;Ix00Q;_8{!;^=QsH1- zMt`|`6yijJ4}2))P@D19hy+Ok@Vgex%})g8LpG=Q9m5mTDNJB~fV7?>3*Zb4qm+AZ z6LE=3{{TTk_UHPziP48}%-Q~4Il!(rNn?LChP!5Ykk0eQ%<7f z#xDptawYY@kmJwGx``qF0CnAC6j=vf+yYn+-)f5RE!iXdZ4Xkk8+%ZzMeiUK*w|qi zne~;gh!wd#!XBz-uqtvs`43vpHiZesJmfA+)mp~Tiz|HcX>@>5Kmr>&c-koc2{Fle z-OlndhqJV%Ld@&Gj>sj6n5;V2IwfS?dS+XOR$|Wsg)fC4&GH;I5bh&&nf1pL^NL>g zI@tlrw&bMi1}n{^te=Z;C6>m4RLzHYTk#W?syFLjkLTtj$Lo4nLS+bpn4YKE?1z+M z*@~R39E|#SWJ15!F?S}>M{LLsq<}3#Ea8jTrb2!ey*2}NkhW+`8uZd3&r~r-K0`c; zQgK-8!se|1SV4aO$M8j2Nvx0-2!vMv5vfDFnHpU2;);v5&m;snRV1@QYj`^jLFw;a zM5N)7zrsIwAA&a_U{0SdhLA_BisOnQjibO%iXxOAZ_{uQY)1U-B(yua6-75}uYa9b zYolu;e>l4ig{rO~@E8TJ?>N6mfxD8l$T&=q&NeMGV*OhcbL2B$UI8}4#>CqqIi9;u z<4x`)CanX#%!MgSyuagN*K)x-as=C9t}yiJ#f}H4f6xsvh=%gW+}Y2)`x=ld>?kofSNoR06h+en3!6>DI_f9;}7rydGD$UuGj zT0T%t{3j^E`*9*$jDOXkk%-ef)r`@_UUyI0SV(yVtn9DdtF_;)%s)ZpDr!e*`2S`1 zK1lhuS%)sh ziIBtZB+(pa+g~{dr#nc)U!lSe?ixNoGoBuDP}{PdiV)B7wY`}EHxq&R3NT5KAFpBa z$XR2n6ic76%OJd$C|ZEpR~7D8WCN+@B*N_nUZepdbJq!3iuI#fvALKp-nt%30*N-s zH&dg+nDHf?pfSZ5=y**ctT@=rA%+VFu)ip^*9K;ebG1-}IOdGcSHhjqr-0x>CoVnl6;3c;b?WwrS z{mxnd%aXwltYdIFlUP`Atro^5K|u0@ZuK^4VP3$t4q+F5Ln4io1wHZ;hQK0;|2E>? 
zCzQ1#IRYc)x)Wcy@XD~I+mYD#X|xvVjMHz%I`na_;MpP6{F3oW4w+7?Md#pV;p(>4 zaSjwf^?e8@kD1OOsvF);^mCHy>5g>&s^OP1=gIo5Sw|}_w#0;izra{@(ERYLe1Bc) zKg71|<(OVqeq>8iGh^e|j=G#FH4$#jNPrV-Xu#M0S413@OH9tc?3|C+ZcN$1Ws+6Z zhU(^tquA@s&=>E{#{k=E!Ms!%c9kzcN!Az3Q zBEI`PF7A#SmZ8YNIfd6?u-fQ(Tvx;Zz7@}Sw{*hIIzHnLJLKv%OYJh7?rIjcESooar5?6piUe)>y zQ%#K@gQ;Xe^d}%JYF+SfdqqiQKZ;nje-*5$`vcvkIbx~o{!xu%@t$C>t>WqA- z6VG-9(7EtCBB;Mu-Q{)uIyme8>@*lUhsb@i5aZxaBT}i)hKozdMI5p34Gt;pO7rOPvqKh|7GXCkf(UF)k{hq=|3XfFRnPRlYW{- zI}&5kTfRJHyUpSF=9dgJ%l2ZX@z1drBn<;4_OC|HoEyj+E6k&Etb^n~H42Jw$VKK> zuF`T8=7#F`KREl8h#W>8{lVPde-f>+Hd@M}R3k!qQRX7zteaGH(fw$uuHhrsOzXem z5hfeT&$QfrT9r*O3P)%8iN3m;e+V-&L)GLrjL&0i5?R09*!GP@J`tSEd{OOKj#e-_ z9YYot0c%yMNoiR=6d5W|+4=@mN>DH=65EYISm=D##bZkIo9vC(as zuOy}s+!_DqFj$yjSg`C7Mg>;M6Q$5VgB)y*lKk3>3hXiwW}ab|K0h&!!_G~%&8Z)+ z;F_9dJa#vx#q8?^8*i1F+j|!KY1k^VKZ&_i2LpqWf7k1k z3}s)up4xslQ(L5RWih;zzZ;g6`kw_*?If=%UT+(4sY;J2{7uBLzQ7?8T~w7w-{;>8 zd^sRl$F6t4U#@%Vc@uDzH9I_YQ!aH-6r$GAgT+*7=|HD1@d^H~B9i?!)%9t@EBPm3 z|Kq`9jJKK@|Dj7Sm=lu2^M|`;I3Gog!Uu_x1h`+CU$}66BtE)CIX~;%WHoFBm0?zg z+u!5qUPi)BQoKL!e5zDqwn4t2>L_8NiMH(~a`{(9nbf%+Y?%wz4f&)ZM7<6Uu`x*)V9jxtI2Th;IeAy%@T5zPlU4+_9DW14`YtFTJB&%HL?|<@f zVvTZbNUNLTYIuv6=qJPPa}V`_Z0awAKnBzI-|MHk#M{QvFF++zQ*>Dvx{t`_q29YL zlfT`d+`#QyJCHDCMnIIKcXX(dIj~7+#~qs6vfW_Vm7vzMa|KWd(}fY^t~ZaEuC>ry z)eK;kIC};(*DZ8b3%rB*?qyvb{u#Ua6)KluUcenpQSj7cBDZqegTeb$#2$eYc5$DM zG%gt~I+jpcCg0kJPWc(I8jb8T;tFOD5ZGuYu^H@qzGQS0!}^r!*m`b0*4VmT6VJKv z?#aP>7pU(#Fbcl{AQ9I-Bum0VhU7pdI5;pbdOE?K9!s&mDaQS3k~S<{>Mid>o>$v> zSQv5oUfgN3C8xpOIIn2Sp8Mu@&pkqWzb_eyzB6#LxSwD-KD`e&M!H{Wv_$Zn>x&c4 zBXJ(5#=er$$Bi*2Y~xJz6;P*SB4ynLQ*}G>Li>u2nTuKW`0|2*T9j*Xc@?B1RXrbw z_j{?FbD5r4RfpvR?*{J10Z`BF!(#_Q7kCsSFO3}ISFy6$1eD<2j8pIyD>OJCe9TGB z_ru992orc37Dz5^){I^+hU(OgYD3~cXx_g!6{1QQ_@qogDWXn5wUzmBLMo?-X}rS} zY2z{O;K+-9DR@P_3PS66mo?*)K0^jXM4Ab!CuooKx=b--T&!dBdUdRf7hJ;E5M%p+XLWr z=#!Q-;92^6I|qp_i8gWz0x?J}m4LTXGBPy9+cI=Hq_r*ubNh5c6LAUD`#4s}Py5PF zEy{gOGW6?-1_NR7Y%DS~PaK#{*9i 
zce6ErpWqYrfj;npSVj4`^)e}r#Qy!ZnHX-J+WWHV@tQE7wf+JTHY*9SU9q=HI~5uY z9l516Au};v8g=#)2Gpu zPuaw4;}u#a$}=q6+^G!m8sBcm;e1-P!w}IS4SKxoe&k)&cLl(zF8nZ>L^E#EGZGx9 z`>L}+DTz~%Tb`@tDa?nn_&o488nGFBUy<2l)Km<#a5#ku7BkIeJ{h#86`l}p0*^G+ zLfhvQr-xx~!dDi&0-zPIGPQ5joa=oJ z>7wu{6I{ouKnI*T;u zeZhieBY$dm3a|9)-E*q6r8rWWnZ&)$W!9OZswJ#^v1R?iu16U9KhOiwMj>9!d2_I`zEeQD3EY4C^X z9g;tcA?N#h_7x4pzqKOI2Xu0 zw8X-}&bc17_ETZq0^qf^Ye`~q+tWK(Gu-Xv>yDL`@>q!#j@u=>eXA-tONwILNruO? zTHxEuF=5GuChBD(VoNp~ycW-sfn)WLHlKw2VMYZs+JK73$osN!L%N5#M-oeiu>&Gc z2fR#SI!>RmgxMr<;3k{}+PcK-yC@4Zf)}R;KM|_!Y*esd)n39buIj6p+Ba`TtV`m3 zV(|5BFZ>kGncT4tPH7+8goO2QDT>V@`iV0hE9LIS?97EyB;K~_+??r>mzAcSR`*8F zfQSq(S9m|w^UznJBGMhFG#I3=h+T=gf0|Q7eP$6XghDstwc>~=D@3?s7lkS6l4HM7 zK*%BLx&0cKCh+~L4z`YgiNkaKh+c$-{IMa?#aCjHg^gkQV)#A@aG#Ir?1btVMLvC; zsDp_;GU33m>vUqQYwO8;_d3F&PFkF;gBRrht@XJZ&f?o?vGLBXJjlw27#+$S&+UgZ zE6WyKPSzPVPFDF(xuy0@&FVBCmHck6WjJa^TU#oquBo;^9>H+B@tf`a?PC1TH^3oi zn7?hQPZDl7hq*L`J-&*JsZns*1_G2=vi}+4*t&D-!i#OofN9dkh%= zfIKKHx&m0rmPnxf6X)?nraP+z<&aJ$Yl&hFI1Dr^)-|K3K6|^|EaP{N=Dv@VFpmpD z(jVyHRx6#X$#F7aH#K~H3PbwGO8vf?93QR#W%17kwN8&%fz9q2%bhf(!M?g zJ#W4)7)af*d6%xAfe8J^c?mo*J-h-CUk~O#CI*)QE}8^40|D%IAgFoFOjUR3Gh)H{ z3J`_=86gb%Yy|5{A2|dwIh>-Av^!n?T_&Ze@F`(7)8nBp1hC$ck;dz?9gSv^a)L-tJ*yw{rbFq zu5td=_Wl?5E5J*cs^0#umoSg!=!je}Qe7}oj@@|AfwliO^+6$dL1DbU_5Sa05D*|g z>sW*_K0f(EKxQY@>MSyO26yg|jJKAfaYNk0-p*q*PA25SRC=$mKx~%nFo&z-vV!8- z+*tZ9&e>?`r4+;jvQ8Lfsvrp+Y!*grUVtT6)FJK`sdW8u4){eyn@m03_S3}0(+0M7iHHR$P zylfjmk3(7m9ftxXCTc&TGO1hM1YPt-hEn5MnCagTfqmgUU!se+IPd<>Gcz+mlHt&l z9T~ezRPs1TI7$ZXvE*o1=NsDUg4o;d=A*+Azc0L`kDm)|p?+E)(~G>&yV-xpwVJK3qz zf@Rz1)x@_Rz4(&DmcVUjh0VUM6fb>=82EuR9KUHUg7VMn{$!Mu;m# zPcorkycqHXQyW}R6*Hn;H-z%Vyo!XR;^^p5C-PJBP z2yFt5eA;(fk)AV-^oPwu$zcFV$&Kt0)yufrMgU<1Mi4)8*?>h zJ6@{ZR0l3A>oBo-c?}nu6U$dgniTNI`Vv zAbK(OrOr3OMkjM5V2EceI1k#I+SpY^L(+9eub}KqJbhJP+unk38i7V^D+Tl6XjUmh znB%Yws}0YxVc*CUXCZ|MhI2YU{w(vJtUsoXv5h?;QLLvM$R&#g)GQ?8n5SI{9$(?RdkYliG2;<)-Fw$c)+SUR}XIh>Rthy>amv+Laj< 
zUHQ4xFEdIUOKoyMNPawZ26e%Ai-0Su_xvX;ZqS3q5`$$`XJt*Ln5e&r$hP+UUp4oo z9~Gb7&F-U6k57>Apq27F@G00K4w>sMv=k&dKK#!i0ig(1nC0YGfZ4FphOA`I4ZKHQ z6$c3SjaM%T&F1f02hGj8hDIvB441$6So9$TVJZk3VLG}GVLIw}77!(>WZ5EX9fPil zLI9QWlq}pv@_TGIZ`q=}+=zIJ{=erXBc5V7dfmSQ7%gi_&IO%58DaZ$sdUhP(ir8& zcG>vk%-Jo7 zLUQQEOp|I%z5_f;U*1^eJ?0p8dQNg~S(V%=UF|h~l2DkFkE}8Q>FmXaL9(v#sv=;C zun4qda)X(CAh9N@yIAcyZj7)_(Uo0$Hd#!D+8>qMMCvY>q+N3&p!vZuJL5#4T+xvk zSn(f{zr(aK=3Rq-4)--xo2)vo11T|%Ly6UZ79Qye;2QltVFgSyh<#_6}gy_n;=EO@lvt z7prZ@Fw&xDgaK9l=05s-!*YQt_pf2<$$R+xI; zV^|GmTDG!*bwI-b^6F@^Sa z)J2L@B4AQrepVe#4u+)$)E>n7Ih5@B0bWsx_0z#~MO6jE`*nV3V-;3c0IM$rSAgwH zhsKM>fvPJ&)#=z3;OGj#Id?g?st!C@o^N`5^+^0r2?}=N4Az@hyTy3$f{b#y0XLDj zkhmxcKLTYB43@tJk3jH&JFYW?@gd%gTZG-vO3jK&YT2Z|fGsL>zkF6Xp=ZSTpR#0M zFfCFV9x}PaHAv28S}CyQB#icN`~}7V_}?z;uhr-`DapB(+qDv^-0lj1$nqJ_2R;qz z-KtJ#-n}!!bD5rZag8YZMVVG@=XvW1d%TqYv&^@hOh!tT3yE*EQ;(kfZgy_to@2r* zLtO!AiO>StSn7>ll52n=fs%uAo57kE*PRM@%h?5ElZ*S8W1%5F#&rp|lT_0eIrGwj z;H^bTf^vw9W`bi{TT_@v;E)W1#4v4XUE z7-h%3q=9~=w0FqUkKY8X-jj@+r8cX062&>TpVgK#Ru%P#OuC{zXOGgMU|A+Wp}>mr zxp<?Mc2?Eyp*E*{QnV=?G!u$m z(qVnLvEZP!UtG%5p>5F7ig$zU)lI zQ`Vu0RVUpyS-sorc};PgJklwcT%dhHhh*;^)90QRWTIoHVu6j4qig>#MzOtP!bxZ& z@2O%Ric3lh@b`*uG?q!_(qc zy$v1ydh^FHz7gnCvG5Ag5mUH&v-VGHc5o{Q5blw`)rpvfjvY;0cW%isN-qG?_wo0x|cN!6nqoVM7y$1(P)Pzf>5!^s9Q2Y)@%Uf?_enIp^ML7z^I?G?)p_ zoV9s}=Yd{cXqE?rglo0Ws|u&QPfm^;@zjUqy7oM#>|d*Mb?`PB3?95tWGzHwO}v17 zA2^P>Ki=;1-luAh@S^|%D6B=N{~kW+YE0-71bQt!eJMF0*UF7gidrR~XH(C}>MZTs z8ssTcbrRh=9ERdNXao=0dN7TF{~*_D@?O5l?j$k`#d(uXmJ9-pAn}`rtneueDireQ z8u^`mP#cS(&(2PMx9r2#0ByQ&DBU4FXD3tbLf(n(LR0}^vMNaq0`BAGciA-SHC9r+ zE|%(xwsSbqCm*Ximfx|_?A2I>^gjQmOug+zKhwePsj_{?{PV;-UW#iXg(vF*=k(-m zZ&;UnQur0~OUH_zT|qyc??6UP#^=e=D&}`MoY5;Wtw!=6hfmBm)Z?|_6D8~<7Rqug zO`fwl(ER5_#Q^|zAFr*t->UR-sGJ6t|0mu(^oKt5nW!zX3ios*e}>|q`=lDH{Y?+y z&fMEZz22wJiZI{~dbp%2L)_zxuFzGQB=lF9Xa(GXLZUfn#OaEG5%QEjK?Pv`ZoYrL z2LC7tbGtU;zaKp38$Va@*cF-792vYjHQg-$7G@!7a~8EEDKsON>X{#_q-rb^Y-d=P zYb|>1r=!=a{wlj`*yMtOx$Zs^xcC9PdFFB03Z}Ldk)^5oT-^Vzr@DVArzH0sp 
zU1q@vckg6_z8`hAw;yvZL*!7UiUcHttm*|yXs)!@|Z6liK&GE%sb-k zgQU*@L7}XAGNTGOI7e!oA`?hr-ecWn6n3bl1^?JUGfafO^4Mx5{0A*YHnY3ndw!C?E;E=K^--?1h0^U}37RAPl@VUH-T5nVFBetHu-(CWgnt?DR zu>Q?-vQ^@G)(masJRPatC!yZ&W##eq$G2%5-Uvd>nc(dc?LK~2XPCzYXz3%$XWXUz zjA-Z0V1tCoHf}!dR`Px}oc9Cx>al?CN6a+e_b33~EeeHao`w%fd3#8pQylG^+8FV` zTgI}&AAT}xJTxZwKnrpOz^R$?gODiVZF`Y&CXD~Z;s}2h+a61A6BA`)I9on*r3}Wu zy@N+hN$XYq{FY>C3gXIGG0!5(q*vYBSB6qu@{_g6j?9Z0_4vOHd5Dka;4xAs4e|V; zgikT>Kl|@eF!M6eON?W_{d~enLsNDth@z>5AvECjiWX7HOozJtHCIg3qYTZ<2autJ zsPLk#fS7&Elo5pkY>D_Tb(BqYCar34On}dMSMxR$iYDPSh*gC!3Ai5Z;d-Hiq?DdD zJwLifj%0u*N+OO{!qi(yJoWLhxG204WiJB-ftn=4e5n^W1a}|vlNx(4Upf9G796;ILL~kCs2~&k zbG-mn$Y~Ux6v0q-;t^;ZNo;V-dEi&xoVaA??bzR&n5c^PD9iuCo>AfCgK9=IWm{L4 zTWEVi3lsHM6r#Wkv}qno?ZjsvzY$Jp&8e3_XWWNG@8p|r{C*p7z4QfMP*g#BuD#?@ zhN3ng(+iruH~j4KKb$g+uA!m!< zj}~ABGeI#>5G3z5LtHhEQRdI8mW9w?>rLSyI!FCcKZ^FT19#MT0BSBa-A}S34@?4g z=|k{G1!*jG+TNPP!o*e;2*CJbh8D5}fi#~_A=_WxsIio89~xK&I}pM0om1)e(#X0hFs4lO^A>=)%Z z`6frXsn;`~XOHBWP4C)-V$_=p5k<474dm2JK$wPl23xlrDi;WdZJW8)Js2D}k)B>4 zF)Fvjq1f5ajGt~b@%z)7O+fE%d>rdqxol33Dnzi)D-~`x0G2u0tTeO0j~O!$;WVrsG=y5V zy~ct}tv-XRJ&|bmIL5+M#9O3ZWa8?t169+PSyf)fU!oExq)GYvC5-VB3xY*M3HR_H zqf#3VAWT&+vNKW^+bX}EuL&?dP5gm;g| zFN0v6d(qbiEQZTBL-a#d=o<^C{jbcqg(=+(B z+DvQS=KkFcR1@wIOPgqT3IC*!dm32kgM36xGCzJmIU*@4Jv2Y^P^ByD{G|#Jw#CkJ ziN1MpuL$zw*0k5DcFKEioBN0Ha7{0cJnrLx_&yru44)_L>K9IE!mp^eqMT)mvcl=9 z3Rza~*e_UgR^F@6dp4`|_abuHr=D(as3!D|ma)a;7+TTE_swc%#ML)Q1ji){w`+SF z<|F=jMQl6Mt`iILI|{GT3eAeMY{ZjcoN?z&GN?Uw;O=!e*>H~D+#CHuGrpL!qF&Ug z(D$DZd!S$pe4f~7HR<%*#V7<>Oq{`rs&lla!CysWTl>;~25As~HJ86tLCv4QWC_du1u2VJ(*=QVYz0qlPhk@nuRJmr=Z7>(GO~`I;!ir{p-F1o$gY-0-81dfeZ=SmTmipLbIcfi9+$%K1Vx z@SI_o#uKoIH0%#Y?-c|mR4r%$+@0o$_3ijoiXf6hFQiNH&3^=GT8Vk4n#%er2ffWV zK~UFC!LI;n)L!x_N=~~{+a~Vt8S+l6hX%whcBuxH)Z+BLODRR{o0E7^#VN5ehlf@ueX*-S1wIjUBF@t!k9MBK~YPTD_D$D*Vi)*z+GQN<`N z0J2R7xe|G_Ty=AiVQ~@GaNtX#J{0gK;Tcx8+v^KAup!6Pkt4&~8FjB20VUi1Gn4Gs 
zhi7L{Dcit?98!1-8R0jT?Ngrz@e*ftKBBQ9{3yfUr>t9s7MtI0NX4KU_^%Oqz?UM=9`FJQ6CU$b7$0S^mf)wJxnoP$S6Nc+NEPZXy7T#JK=3fnKGd7_9v4Vi{d zi&U^C=M8>As>|w zo%NZxsqW7w5W7l~(9$hgl_ZV;<$7 zYpZ%|@^fC~>xF#cCm)uSXBUmdTCHq^oRQ|s}X0_HiXRk+856WRt4(s@TLxeM8y3iblnl>C_H4Bkk1 z1%OQryH+9Aw)^yiyUG{YZbjWu2pPLh?R#h5?~*j8dy528Lp1 z>DQnYO7%+~WJhg({tQtIqE1?^g^QaftlV(Fvke8uGLZDdol>f}E-59PmJRaP+WU~P z!s6W#a=e-gOizoCo{(y9GWqq*Q!L0d9vKW&gUh&+whKIZmbErZ^@t~j#%rk zJtbK_CSLi}I-w^eI&F<m>%iQ_h^= zF^du>pS&Jl$68C=>V--egg=6)zF_Y<+egMy&6P2s=P!8tS&-EQO&pUx-X^ZH6|U^X zkTQIXok_|4um6zGq60$^GK?!H)M2Ece09+M%y9P(=x!c=fU(>!O)g4tK>*q} zDz}kVJ&|I%uho=lKwE+<(}o8}TG3u&h2)Kdsf_>vypihA9o@u(V%hSg;A73y1~=z# z`2?rCis713T*>;l9T&5}bCt?VgPM_oKT~uDS909DDmH6kjT^lc zB?Tl2$NY$z6hoM-t)}gbREw_tM9Fc`TLcP@0S0rvv zF)0UF;HUVaH?d4K9r`Y;sN}Lzz{YLa50x}AGxKo_?HKrn6bomEQt4nILsChC6bqrp z`Zt;&in3QX!}P)L)KtS~%F(XcmbjJaZAdf39#1PN$#c}R3o#0fNFexmExvhijTW|YgMKUSMC z_A=0gqbVt%L62kivn!{T3Z+&7erwMu$Xa+#-po3dp<2}hTn+Bf8%$5%P%PL`(r8WV z`r7&rmXI^*V@i`mcSOKS{ldzQu^ z-$=)B_LS!5QT|OY`}zfg-!JEA@752DH+iSrdykUbflnHKzgoVmn&AeAd+T0-u3{;O z7525_#-LvT;-?+$u{+sz%iF!dQ0n6_Ce!jO9omHMLvp(%R0TW}Vo z-XoL*ai|2vX>zYWdsR3{j(j>Khm>=W-5P*ML#P+Dr372`zXIF`OAYOD6%C#udyp%6 zY8A^%vmc(TsyfewWP+$?<60R@Q#fLX`REBA9Px;X=V!4h@zdst`%UA~ke&kcjc}qR zp8#F`@;+Mo(-JF<;>7^n3#eEp^Wv;~A8=%~@0a3@qfh~S(@al0850SGY_0%Xk&Xpk zn6AZm4-fIasB?;}NI@@{KP(n$K<1IHkRvX3*~lqIHGbFmY+w&O(-Ox!swm`qBgtDK z??}kn#1mou{-}qE)Qp6KXvq*VtS|WNJ)EnCU=xE-YzOOTB@Z6^}!$a=+lH+Eg@C4V$*2@C7sih|4!3B0RqL@rJnY&B*D=w^HsO@0Qx$pSxC z^5fZ-vPfnKpHjt&?fRmXl4usjc4QBX2b+BrJQ{CUbgG01!fSD&&G(LVGo zMvoU%WzZfkdh&>!b5Hg-WZl7Rlf}KuT5p*5_{A6Rw#Y|!e?>;{?q7~d5h zO4WcgDGG>)s0c_05fl)`0*HWM2Nk6FUu=)MxuQz-R!2kYTrpP zxS-r{)DS0PkpDzbhV@K*$AD5NXL3;7rNPhU|DWV-MQ49Q$6bfq+}yINuC}l~UT|~K zgiGOj(|1cug!^&3=`;VWU!&2_Eyt3WfvJg*mG*yCavWkmC){>j0iRyclX+6 z@ny*%J4I7Z6Qk?cMz8)Zn~UWbVc)5n4JymgkkO=v=E+WWyt7Kwqx7L}h3~R&uUgWP zQ_dmeVe=y)yWH+D<5-nifIRb8V{yE=$_L6Z(R)j14_34q-?Ow43V^>)tdc3adQgyk zVahCtU!vi{doMqvcQW1eH%90R#iu8>(;&DfDquS229j#bs$kwwyP!8mALzA?Jd5m~ 
zXtb)(C;!M68oMHDq|A<%q7N);Zr(w$-5uT5vhZc4_oT>T*xJ?EeI_YO+3bX7I z_z)MyoscJLS}xvnj@z8+Far_aeIJu_^jwgD6!#}!*_LDL;cFXvcRg`s>$ld8KlZRz$q3uyBr#m65u@DNi71GJMDX@RTZHalrN2S# z5ifpP@$KY!KWn$V~$9n?dSZ}%oJzBDLNvH z+Z*J{{iS8#C!eq~yKdOC)q6V&&5vvJx^zYm@8na}iZz|%5l?hrlu1^&52rr6r*e^@ zE9IOLz0nO|v`xAXmy|oITE3F8pRRaj+#a{XJu5S!$aW7yqm8X8f8Wck*=G+eFX`Qj z6r{}(tdHZp93J=Qd7rZ-iBHkVXFjt1S<|~+RvUM2Se$IU@Be_hdLT&sJ{!w?&?mqs zs?sc;XF&?I*WR&%mZ+NBbWv09-JR@e;HBe9nHyo>zI0PHg}44rEIn1tjzIp*xxx1L zxg{+qOz$D-<_@hpE<+;&?8wOvyDsNe*cVT6ckOKS+QOtaSbn0$F7S>UH7r$kq?gi5 z7?D-1W(bmg1@#NRxTo6WbZ|xm(D{ts(om-XtCS-HBj8b?7n=kL} z+_P}E^!}~#_X^aE+hIXV67&eR)la~5_&dvmhg5H)I2LljJ%N}T4`f{5Yni(Til%$V z;h2tIi;k!e-0r;FO(Tzg)*9|@pTNAPoZfhF64VDpPi+!xToNNa zDN^dF0pU>0(5s`k%+ZOLC+$-NSs%Be?#-Et*|oWKx{TAy4Nwy-m27X8wyUWv3Cl<_ zOBdOULg!;~WG1OkK!?f|A+Cg_4q|e@eaJ;PqW}3D#gt0sZ8)`k!=p_n`psNMbK+&a zGNzO&-S|#ysXdedW_1svI~M$< zG&V?2;Ub0UAuSm7<$mh|N6O5#)b=^Va6fP?-qMDUY%}%Bzk$88Tlq{BTMgeVhb2}z zQI@g3FWU$)|^=n&zXeW*C!Uyt9;avV-Eg+{0*;D9nq0q6u7vW6ys`$TbkI1W4Ag8Mnc*f7bL3f9<6)uTVVRa4Gca3 z^O#H75{H74&T?KDcer=4#)aB6I`r;+vwV8TgmcnLWQH@tsJ1c06;A$|2Rr)MblNBV zcBbUADMumL;vej*ybxaKj^QgFaCUg3OZ{EAIW_FF6+wEz0KxVo*I@#Qav=D`*$}xecm3m zxzqL6bqJ1;`EIN3rW~s4RI2H?a6FjTd&=_cj-7Q$20`1;BOGp&H1ISsa=@Zn2M2>1 zeMZagdu<%ZW;Ek%fA`VA!)E4*(i;lhEy*Wfax$VkL06ihrK7mcuGax<;wGde11^ z+2dI+q2B4U5EG20XD-{``uoQh_v!>WCM`Xwwk!PvX&%ms4 zqS~b(m6p`eZS=O)iX#sTGl(#5LnIEe?$j3R=z}a}`{mb%6vLhR;T|)OG_;H!qkS8{8 zK=~f2Ga!R=l)IWI(q3Wix8Vp`NxvTvGiuBn3+(i+m#}2&*Sywn;abi8!@JkAL(e{z z+4n^EX{}iYZ`(PY(^nq--O7j8f_g7n!qhu4K;z5OM~92EtLxQCrZqN}S-Vh|+OaJR z#YL6uS9s=XUr9f?H-OO3F786g%JCeJ zU9BJK(0C^)!Io@-J-_3Mab?oEDKb2 za^%!8&8jF-vUZ!qu1})p%=ubup3Jke9d~o`ZbK*+a%%W$B~r-3r8<4eFQyEo{58)o zIjz%u-e~f};QqWgCA8{L;Vt2jC7(4X#d0$b-uDP~Rv%|#k z=3CDvxfxaC?B+H~FEJk|r%Jg-vK5YLJX)B=)#d}a*4sgxfXv*&OO?|rTapbT9J9L{RIcZW4g=?@>Ai7VkVm~g_^^OOiI2pZ zaP9&}6R+s3@@jAOw$%4#eh-;Um(UMObk!=2gkH~XLzCMf5w%6`&4V3TuiNS5K8{?J zPz5s{ekAJX)v#)Aojuj%a8Hga@I>=l)b_nRb!~z|ZEc?b_HK!Tq70teI-vI8LLCpc 
zcAq+W)~xhHn@i#8Iyw*iN^Rm5c?ZnIp#SktKsD6eLFn9Y_U_9Oj$Lt$1c4DKXZUz> zY1s9EM!onzyXQtKg^zaFt2GST1M)#vh!earqUTFK9e&% zan};dy=7%jhRkU%pZRF^Omkz0#Rj(1OYTm2_l;c-?!un8<w8)p6NL1?K(Td2DmZsz=+wh4L2r|rUWiXuy6G%!4{_aX37 zfYA;m-`|*;t|}EyL6bAESwMRMo-L{e5C?c9M()JM%TfD5Z(KFtkM?mdWpa{=I1>4E z$(o)$ry^vbpu~s1rYq3&ECSlpLV$7+BDZX5DOA&Q7cH`l*S`bb2Fy&<=)_0AE#4dF z$WDwf*7Pepj&=>saxLnyW*!Nz7t+x(W54v#P;1ywYa5G)6E>R_bB%Lqk3iuRd7h%8 zKgVWNhC{W)d=;9IWJs@D`7@8PJ!+XV%{b0bb>&N`)$ z7N^QAbD?H*PXS(E9z{8f7^Snp`eh|KCktXiCB|4Nefw71Uc9lc!z@k`qQNS)#43DC zGb2-Q?mFDH1o)UPDzDno!oSRn#yeD`k?5i(Aie<_5W$;0dVR$Tmt*GWtUA!7N-F|0 ztemA?Dx#E>k&D!H?##zkZOG>td0y9%pG8oJ9O6#pBYtFiK!ai^n~&j1vbLdAzl(mF z>TtV+ix-28WoV7o0+N%lENm1xlye>`zYabtp}2Ra;DvgOlm8CNR)T<@;UZmFm6|I7 zj{4%bT$gCZQdi&vXr|NA)Rn?-@zx^4uL1BNEJjX?a9_Td18;btY|#M`{!DXRSA+v~ zo*%o%Q@73OVE4nczrRJmhb2<)*xz=md8RPtpX;e=sKh6l$CzOlr&oHT_&-WO0P^e3 z|E%19Ow~`RY~Hq-tKbKj9Be=#%+>GPBqPcyG#~&bi;A-IZ; zege+iJ-_c-@VkW{y=jrEYvCNW0e6jSGUNC9>Spgq_GuM-6*|mZ= zM{-G|@uX&-vlivM%K31Mroof!hc;m66|UhHjxWK-lf=2pEDfHN!7Um5z29Yn=WOoV z6GhbzW%oy&zrW{+{wE-nB#40q#h^K)D6L}fqFfH)V@{BSgkd&YEo$4Pdn9)=KHg@a zua1^0Mo({ten;y4MU$}y#rD_+yg9wnu%lvOWW1mEo#bQDH4mMg4GR~eeY$vky69lS zt;baGgN%v{@f@-;4G~*S(hH+E>}jdQipvw4^?f4~SG2Ir89Rxu?ebcRW^R{P6_L=^ z#gx=VKRhpNm%{V?i|i^iaD|>>OD#s5vl4#yFnfgbxgjet#^=jhqIqMs=35`e(3%6D z7ndVA%Tww1?3Q5a2Q`(X4@+$c6;0 z843~E^?^~inzz!BV*WW-ho(%K20zI%nI&M3Z7Xk!`y@E<_>P`0f389a$&dhXK}Pja00U+aHK=uU}+%9HBC zaj)S4x3S%tov7#p-|fh!&CO->lIrmb>!HZs6g!Sryf|zb5w<2+tjD62hW+zJqeYcP zP{*A8VvO$0Zvx_{k*ad&{Mk)OqU;vHFUtoT$x4fN3K_HavUjorC>Yr z&!U3hsUn|%@-N5Wd1ny*g)ILX;y*(Hd1lj+j3OgD`@rt@L2%3>oVi_)gCdY#TEsgR zEM6T`HQF}nVeg?>b1`?!`lhF}O9T6by>wRpMuJ@39_Pf_1S+?FiDNFW3LbB7J{BI$*~0^Wd;1nTnC#4Og2`I*)oD#oNc|(%IsaJ%BoFZlaoV(wJy55&X@RWBwnWEE z=uOm}2trsu9`u&hx7Hc9q%vocxzg<}w-~U&`5kx}Y2ZiBs9^XliTR#YtQix3e~J z_C1$8LHlwcLMQ?ZDKmP^#ON0Z;bxi53x!pUJ3Is%sxH{aD z*_1@8pMd=uYUth%xr^~qh_{&~)y2g~YmWi5XGeRTt0?tlM?37)wMrD~&$wUy+!MXe zF7QIyPmqT;&zYsl7_NhiO@_T9PHLgt8IZBks;3qYuH+r=A8&RUT2#7~(-3m=!i?8x 
zdC!%+1O4M_F8BU%B=nsA8;_^wPc&Pvcsvb?elYu`^Gm;br@Xn|uw9tb{dG8aUk{qZ zX^7FDB&pBZfxU;G6ZfMB*cZe?2H}FnE58lSpT)h^!u)6Die`8Ij&$qcqg?U36aJ6R zT_9uX5Ow<_ri7~ktU+p`uR{zcmh)QQY>WE$qt9!vsEG>xKjSbnsiS3M_~;{_8H9z1 zlJ~h`2BL-&qTXh!i~sukuxcbzfAQiv4ejQ)pS=wjsFn+LnrP>oCDWT%wrFke}j`UNcL87nyK|c zv>0AfS{lD|=%0BkCT4OiTQu2jtZ&3BN4}n>y=RMG5lCp(4M|zyd%J!5aQCaU$HM>M zwwAN@9@+Gwu6xP|U&Yq7C#Lr3N-^dxWJqe%-Eu*s_CUo8e)#o?4rRYW)=DVDeJYVo zR;X(k@7IZlJj9E11tMwK>9DNEZhi#9HifE`T2+5bpeMgl#RAER+?ri9I2Ma6k%3PR z&M2FOMRSLYV^L5I^J>gGBt9}2!dopa#2K+KauhB0F$!8@HBZ_i%g|QwpobX`bRu7{ z*Az*r3?UtN+MM|^pkpG^rcfB}u}fw{TWsOh_BCQcOgNUhnAZmpZTl|9!tf3y^ex^* z4jN7?#T&!NBsdhk*%h`z;Wo1xsR=?15GI`DLuUK?a*v)^3;R#l6DkQbB-#jwvY>L} zb`b(Cqn>7s88i*kttipdckMa;=@ya5S@{H$D2T7YSmMkv{1%B0Z5FwR$=(5N9^rz^ zvjC~R@dQo5AQJJ44$smx%?Yt{W|GjUYVFoMiw@MBN5fdAA6V}e;~HfFJfIh-(=xA9 zBpDnc`73TLb{g!zep}fOfo}o?hGP2E=fLlR0voq|iObEz6JbYK-s$o(bSKn&0!Td$ zCiG*%XDyAanVq4pWS8fc##}&P z7TiqL0)T@sdJg8JvKq8<5N&o@2}kSP>ZzM0L~(cS3zn)-z!$CZRFBk)HZ!TD&a}@> zEQB(YTV#L`s3{)PS(ROimCktt#P?n74JSEX^E-VdKri*^tV86tM zT^ic8b{NRcM~Ncv?j+RK>?*=QjL4oaq5>5}?35i~wuswfyvg7+OeN?*5SB~8~s-o~hB`d@&^JwPIa;s5i37G}66zi{Z&Goy3zs*XM zVQ*UcVYk}D@bsU6TR*P0zpTA|MTD&9xwML`c?(U!Ic=qLK*{IAu^$8qC`N9Y-94Q9 za?$pY$CaChgTBmj-xfL(BG&vJEw;^$D~BGl6YI3orP7>|uvS7n|LCS?DII?m%6InV zeo*=)Ptix^K)u{9mX79G|5rWF{|-)90@)AvY_SdZshM{b+McZnU5~=;W2Kt>_b;^2 zT5~SO4CC=O_w7N3x`$}WUrZ|UT@^iZDS9~Yb#BTd*8ez(1)h?_Z~C7Q^>0>#nIULYY(&DcBij!nZ$t?GA$|GO3hu2r5Kq{G>4v&gVZY2}OD4i7{=XC4!-(f#{dc42q% zz#XmBnoP~1^j`$P-(CH9Dr4DBw+G5={RRP=VR%_A5OnFfm&s)A0)$bNO)s0YK^k{J z0o&1PzvcKlRR=FypE51rw33$wRQouS+2%uT2@U|dC(Vw0+$Bv{tXMFNkzxom(%5q- zQ(red`pl4q@d6ULQ=*q+u+3Zh0Mb3alLe0No~IMEq|Nv!*~hz$w76!ftmlR=**czE z-vob7u4pRPdR->k*`caPLP2H5w~3f*p^)Zqvo-UNN%;n##fEcM4R_(rA`B6Vg@4$; zWOc#iW}cGgy&pY2^wq9*W$(!wvA5__1=Vw>b(Nnar%7V3#z1TQn8*NA4jZG*M5mgR z#1S;-UC8P5gbZu6lwAx!4it#s?fhcV0cX*h*2OBAS%prMC|~lZq#6B5Ez@lpdjv6#@Hx_VV+tI7~izI9HdDyYoRiT*9} z5(ofRabOXEUXt>UEVNGaMX3ZLRK#}`sqEs=;#P)j*{*?(yjs{-PQ|dBD8iX*R~i!w 
zfeg|3VIBY&&2DbqVL+}NQG^sos1ouuWmv=a5fv%4OY-j5;I7@UY8glT#W>$OEFa*p zONYgLCtW;l<(O+GHxXtjcVL$VK3Hr5IDwR4>1C|FzzP@RHg2joLeh|&gA-|GLL+E_ zV1xyn;pzQBN5#FD0Xj8a@#7zYb-8Yuk1>731AywWHzB7|lX?A&x5)O)*}tfP>pOeT zypPv(=Zw1x0@n)?@WsU1OwP{$g!h>fO(<>t7>{j zj#zW2ff%yx8Gz(>)ylHPump4M_}C#&#^^JM99}MIPTYNqn`U-YW-aZzA&G6LV;nvb zp%c0N6L5NSF8{!3)hBd1FlP0da8@pqfnG&)On#jv>2>z?&C6+;h}YQy|HdVYz!<)1 zsK?XnZxeUs^*y#YfYuO<^0tEjFhL?Ql}!tJz_N$IB+})Z zjGb%2bmbGE{_{-IMK6EX1coipb6&xful=T>{ydbWzAn>n7suZq<>!$fOsyXGdTd`Y zJPzSj6-{uZ%{JTlE9O2^8x;g(Pm))}7mGgfk=|8K;uero%PMYXm zh~K(r?RDFQ)zs&b>X#WoBR>>jlQ~Czze;AqKyqV4H-LBu_str^{}=!90OeAPKB9LT z82ukVF{VVP>f)3Ee}c$!m;dH7>d25ZwDA5Ynp@8tE7<|FbWfnaKgRtNV5*ogy4z}} z(;x&Jh4ZA9Qd71F;SAibza&5vJ=9g1qD&TvZR}nJce=I(-ozr*u6#$E zB3eH)uM{uq;IIBj;Q)A4hXS0b-tzLC2RUFShx?g0!^OQ3!yhAP*+t2<9PopZcmq3k zs4CBE{6Sxif>kMh&}P^gQS1W<%gWKS()QAr9A30_IDHg1Z%rY;o{3Vs1a zmIq+O&T0V=OCh1M)cqpd%J5b{L->0X?PX(tD1|nUqCy&Cj4{8= zv-LL~zWyjkgSKOyuOj~yrM%<_=L)vHMJtkm*A^$H(m_(HUq(w)Y0GF4QB8fSz0HI$ zJZ_Gxa5n4!0l;NPc`^6UYPu}%GH7vmEvKMnPXR{vcw>3%*%CKP z#yV#XkwZ;Pg3%fgE(Sduzes-GFB;o-X1}5Rc>Pfnk+yP-7Pf2mu(-dgGYv@+e2;fw z>-B95%v}&OK03B~J_dWhDiy6uOI6X{HT>k&uJjww)aaP2xi8|fWrL_z5c)0i;tqrWWC%^F_6O=r(|6s8cv%P}h5;Y7 ze^9{=BXE(3d7#XYkH;Sx`K^|aO}+H5TJnGf)nEp}OX5W*&oWh0+K_(h0x9~O%`J$@ z@CVm!hga$HsvLjux&U*;3hlr!;9j0+LLYb@r?!`V7o@;!=aXtnBe>-QZfDviz&)m- zoK&Iif^y$aoIhN1xr~_Vn=P>-IwG7EBU^%H<=k^kFnFM2iFj{_(Z{!q!ja3yRdyz6 zh8TfmHgqit9Jk$P!v3-DK@XSA*L{O9|TyNYVqAw4~X(eWZW4e#Qk zRbl$Et>vsut&9%B*;MSV&-y!6#PCv2aMb-z=|ng}%nW#D`mSmL(G@H^p? 
z`fG%X=28<&OIw5bvSDTl^`BN+Ri*B{#l` zyQzw4iZjwjG^bc7L5WOGrlMHOgptS{p4m0o)r5ooqp9s}91MwPofk4$!D8ry zr4wlB??N&6HA;fqF9_F$7D8L>Kr&svND$e6#|y$IsMJa; z*^RVHM!JMNA=K1xk?1X@4**)*b=nMUb#`60{|I z-_+TO?tn-|%-aWdx*cg zFohL|%*Gmg2HQ-BWFtTXpcjj|wtsQokHxq6Yq4Ql6Cvf)q?vn4Jq`NrO6)BK@Y&G@ z8lr-%sDyZ}b|D95S>%5w!cf--4%GfM2bx@;xd?uL`D0FD^^tl1)0eClho8~C4LGj$ zo2vX>XZ{?R*#bUwQlppLS`y3@+2%LJ{_Y)af!~vKqD)XB{z}AfVuEi7cS2VxDK1cZbrqmsLY!tRzt|#CvG$B_(R5=e3j)$bakiyaIfEm{jJf{KrClK{>r)sx5yjYy&w0heEhs7{YzcYcy#rhszz?^1-zC09HBsMcfUs|X9C45dz+7xGNF2pS~vNh|W@_@2cFQf#s|QdOx0vGP5Tk#47L zm+|gmb@t_oBzzll8Z;;QNy?X}ZBev+&-O)4)Z{P6tOlIlX_S$Dyl>?;t*(>!3Kp^) zHE~;h_!;e$YX=XEr>OK@J(tNqm3`ayD&FR_LPueN8A*or#IP~l*!c)UMjfFI!;2T5eXzsbfbUPY`7`8o>Slc%w zv&sza!~w<=ST-y}grbK!OgvQonrS9OPhM}bMlsb0+m`Og+w$^eXc9gLGC#dc7{>3Q z5MX(bnRv0cI1A=>H0mk3Z6dhCO4Kg$JSSSxW)311%9f)1#r*l#Se>AypWtp2vVF@Q z9F;jF5Z1J-ULbt>3gOoNX=XjQepjslQ6Kq)e1oY2tjam<|JjWK-sgth*?r$_)ll87?8W~qYCC8K{cA1-xR6fIcmck+rS)3~)%*)aWm27{ z7`&uOm_2xgU-&bDkGdOa|I<)A63-eOwJiCQ%EkTh`s24$2ZDjE6#7zC({f4)I(6nSLp6MGl1Zp=pRtyrW@dTcamYgp-m&c|hwE?|TAD`e z_?8Ru0#z^)ZT!{;9gLpts=5f0w+LIZU}6T<;KBsfi7i^oi-d7>VYjI;Yr)f>e;*yC zDv*NmOcW4K2=0mDG@RHCk?}=kUF(Z5Pw0`De314036xJ zHlL&aFtS7UzakV{SJWgo;?%sfKtEOTT};vHH%eYLyEYx%k( zg1YnhnSvGFkVtfTkEI#T+DJL_rY>xVv2Zu9u&iU+5DyibpbQy*HME=+n39OPK% zsZOR@U>?j=#Ft=V+!tX_YuanLMl>I&jzmvrA8RcoRZiK@HV+E*L4jBo^kwW9#VC80~coqVBG?CAmP6ODb zWtx+sIZATH=CGftF?z?CTE#<{Uu%hOl7!gt=I3$}Bh4faS;lKp6v{ohBT(-mdE0ey zv{FNNa*};XWUEd)rkq}OeVOI*>}@ws27waG3>yreEw;kq1mm-oqueX3TA$7Bdvg+p zV7E9PiEzCKl^?uGPZ!ysM|;887|ng6p8e+8*seyaYd5dKqtRPuz-r3pnX(R_D3bT{2wXFoPR0Fy(Q@rjwvc0pZAgd)$5LZ z?se-;0fTP9F8BL@Ckl#U{&lBJRxuwwEy$S613 ztjUW0gmikUN|_Z)14|kTBO^c+lGUU#phu!&C;LJwIpT0!medNP-K6F7oDcRQ1>$+> zhHA{XYQzuYA=tF^K$i;h=9QCMPg}C-FOtk@wLbw_6}MhWqM~%lCFl?`=fwuArUMfK z-POujh3LDKxq~3ieU~!^=oN+PjTO)u?Qn^-1j6pA5qRb+!+N-X^f+AXJVww3C!XVX z4AIu&jF+{$m=8f!^U%l!aLGKibFvLyIXqEsekUnb ztko*?1qy4xiZj^60($MTh`k#gKgyPk@*ib66rCsLZD?dyP(Am>Wt9{|4b#lI!|U&W 
z6U++*@csHSaCW)xwsB|o``rIw>`PhoXFt-p44!;>@1yy%v!f`FZh-Z!sroK8>w*I{ z*Fqgr0@+Sl1ep2Y9f$;n$v9(Pv4nbH*ia>l(YU`8OT{7LxL23=ye9EV1(K!oCY&o$ zoYy-d#tMQj(1?&wF&*SqvfN|*s|W^_bjQt&(SOZzf?+2Y&edfEXJ0;G33q}#J+E1e zDzJ{&ahW6c>CTI*1GQar$!0o*=$lv(NV{d(1cS0tQA7w(oXbyq85s>&4HmNNhsOaD z3~dypm^DP5VAeMFc{|mJ)ADWPM~Y;H(i(SZucK*b?ZEUXJ|G&s*26$dB`>5}$G512K z-Z$=OCq)ds^*YrXHhuj_iZb1}KROjDk2f$YvJf*Nbq|(Ophbo(NT3jX zy8n!|SiHnjQ*DOiOA2_7u+l22LDiP=h(bs_Oe_J_O?yKMQ3R(DNR^zO+)_wq&J7aF zEIXPAK@hYg*psyrKDC^U-1eFB)dG#+VS$%??aP{G0Kb0RH9hIq4{K_}anxPZuO zVr7mBKG8?sGsq>BRX3xaM31G8ke6-z>H{tdm66v2^wTtmsCgdP&+fYf!VoA zsiN*~Aco4s{t+i2fkCxYkj=KCbJ5lnmUJ|9WXsHlRs{4_PI@*CapdFyDl-5A%e|zm zU8Sib{z7`M#);z?LG9%e$O=G~oSLX0nZIx-tN)B`k2Rc=mp1;yV%A9UU9Wxq)ItnM z*)HC9JAYbCw;#G)eZ)N=s~LK+ab#9z(zm;z90qRI4FeD^VQZIXv!6s$6>^%;m{}^w z0_8V^UgQ!Qg&-`q!vPlR?DH+67>#X51@?Xhof4@sTvi z2|?3Ll+gW=Tk=w3F6mSuw{g8xNpjwM#&#hFZG_*@zT!=Yq{N+0g_4i4@?J*?!;xEw z1^TYg6eNa5v6wB_IFIq!&bqh`+MODMK(%wU29vA~0Bhi+2{^t>{5T2E(lG!8YFX5? zs+w`9hLdb6;xN&yVrTnp=3|9SKBSt_=5}jJGY*s$S;I9>>X3%&O>sH0`b3Y4er`j~=da+j(VFAt%`Ay-RxZmiW|Zx!}_!WUwIs z7+uBDlRrwVA0(>~Gl^y2iFY4QU99%79{u}ELno0o6-KyDPey55vVS!CTgY+F^k~EL zZJ<^0x%!twem7rF`~+O%4PlCStw!ClEC8GM4};;aQP+E~k1C55O(>TOC5kl)6g5t& zPx*8)tv6|ME4U7w(K+^$7sIjQkTg?XloT1ujJBbhd-^7v5=NpQRo1;3qASp%M%}Ki z&2qlw+||3ei~Bro9yzZ#^J`EpYPykLRC)I&fWG6_I-B|CZy!@SSbCV_L{TDgnS%{& z9Ew_45+g8BR)KA;roYf|jGOz_syElAkDE;a0D>V|XycF(daHtlg@O-cK<0 z-oMAEOMo>+`4cd;m7$quoN%SSZOn5eDeB>#Y3^LF7E)BTmpxV-v}R{-HiH{6a3swHSEhF z&|mus2NXo|UvWtrK={+}g&n*;uXosh!YhP&$cz-8pp6!IKc_`Z6Vggx!T#jI*G}^K ztlb6Sy>&s`kigK1P$Vt|vo?aZ;h@^&?S7ptTbBSW@P|L0UPRUgw;|Kt>RbQPy6V%} z%gSmF6Hih&3y*_~r;P+BFv~AaARX7*5*mW|x!^B%;NgJ>;o+1o*!K5;1l;uId~!Sj zB;BWL$)Q+%4jZNy?L$3FD@I|~M%!pFInoO29OK^+^yW#|@gKYZbrB5=GhTi9z{#J6 z#Mc3`LhNe&1dzQ!wa+_LsQY(jt7GjWIhmI4-T}h+=L<|L0*ua%sFs=i9IOP--ctc7 zdkxY#XcHnZGHk59$))gF&{i4(zy-$NtFHHqEqoAhJpUjo<+?9Wu0~A=ZDDu~;ME@2 ze%Jy=7l6?gAQYb+veuzhV4WU@qiz8R>(_rsvs~y1ZG%#l`M~eJ6v2*h@4%^FdLFQv zQG55Le%JoXl6+y)G@NR*6mZi48xz=*=wT25$_!%$0F)j!T~DN8-0Q#K3_(=?)CK=@ 
z5DY z8PXD+VOL_?u%)2)CxnOm$#{4w6E=JjNCyCZ5S7sc6bWO!_h)`ZZWe)w{u`=(!VIFq zezBZso)Y6X^q0R;qxUW5EAaBa%C24C3S9Y)O8VylJwa68 z)1Y5jf^56%3gM4%QHW@FslCnQ^RPA&_U2TOJ2%M!N2S+=?V% zSvG+H2zZ3e&;N~auS*uKAlGyszuFl81Z?8`3<_S5JnG9|iac>_lhB`-_PMKU7MbvK z9lSvDjJ~l5d^h*&kLO$Eza*`JzvKLxgw#{|#szH7cgE0s4}A_0L8di4 z{)#>wz>tvuI8vaW_c{Go*Hy3j`(9HZb&PKm54msql5`mZ#cvL^RHWMDAA!N`&R|!0 zHCx>no#F5li+*ULzL~sACSbt8}DmkcFTw^7Fs;8>NKnUYCAO z@@n{UzsZPaU%@hfZ$GMU8NZaV3q=04w4W1xjs#JCPyWta^7qh}0Eo)rTej?P^O{KwtGKmb?dF5++ z)bIZUJf6t|x#;8=i)|6zY<<+XQn}tpoeur*7<6u*fb~nO>mb74czF36Gz4srzyE`L zbPE49`FquqeoguDezVwb75kp!_W)SH?~%X85cNd=6;$b8+4IZL`T17w4>L%{@7RB( z>KHgq6F#@%cUX`r+0|Eji~pilb9kLd{Ok5JWOTYy$k7js-*f3<4?b4@1F9bYHZc&v zCjT9k%X_e?@Bdg$&p)6dVn=hm8uAQ_H~YnB*Zl`nnP%hI+;CBi7u8>Z#{PgxkBR~8 zAmOLMQL8i#zU~Kp(yw8`EB<$MPU(|iZ)@I^b&o^AQRsF3@j5u%|A6vf-~J!_$?xGm z5bMO__A~Fn=``Wz7_c?y2!Y>g{3Y-wLQ^vzikj*b!?dKo*85KltLJw5`grK2$LnBK zUc6s@^#QEdpOE3YAA{rL)p>9u5D5M1{X)R|M}MHq{m`%UBmclc^0z_x&By)$)uC@~ z`n{@u;Fn90pBjT>Kt>&Ez@ zhClX{L+aqs&M&U7dVLr_1b!q8lu)05Kauu4>-Q%AF=p)#l?DgIFOK*-|D3C908Aw4 zkeIIwg#Q86u99g`;NJcL-JdEQIkE!3bF^4b@A{9VeFFYO9MO#doQBlzf<2{qd>u3e zgnwB5;}F4|-9+~OaX8ppz+Kj$zGe%r_#*@bL!p%OMM2MZ4P6O*F@EXb z!@u{2j{Y-Zq>gKDbEXkHhv~ATlxHti!1RPf*D^g!dzK24v>_mEwQ6~Ucyjg}FoQ@M# zS{q;2AIiqg-jf=8fxX=d!XRwtE!y$+fC^9k0zFB;)Z6p9O0pPC$d&`*9m&Qddh}+? 
z&6c)PtrTcH4#?6a2tb{T+e^eqB4;SGU2vQ4WPvGlDn#x%S|& zm4;xLskc1%$C)Y2v{n!AI9EBEeB?RO7q-fK=+K$A6Jj;a0N#L>p4kMAgC@#pRki4# z%8F3dt?LvFnP4}}42QBsj=X8)YUI?^r4x-oG2^jjH#nnhBIFNikmR_z_+0yn?sR9+ z-p)!T-K_Ez*U&1v#Y87&CMi?+UUWJ>5s=G^*!+{5jBhsk)Eriri$W77q!~TyEKhmv z?zq2k8VHS$j)pQ(-=JMe);HEN?9Q}B96(_q=Mm;toAM!*xy{c}PsbP3MS5D( zG$-^hoV)SvW)_T$vSDonoT96GHW3-z@?)?X^tLGLmTf*pgJ~)~3q4Der%WMdD7;a* zP$OjVPSb-z1;F{KElUJdBlzOz>H3R4y!ryPbvFLwNOVDxshCkyY$5pCWzMM?(F2!{ z`I394@tt00a?LkVZDP77*K5KxcfWg!mXX^BC{HF+T$$;kg;VHZJXAu+nPU)T35`gD z#AvkCU@n+DA?Kxo%hBxPsj2XI=DT}jt8bU~Eeau3Y07n9z2O6g3KEXeL_~82o{sh9 z5wq@ZQ{V~012TaoG5RQcOHT_Kj)yZk04RJsR!q#WUkXVhs%80%W?m_K$Y#PG5W?Vi zG%zikawCjJ+{X-H)UO??UCB^!ch0AxiRB6^+^rNc1_dV{l*|`pE7xMUyrWF??KZnR z9MZR3#3zrZ4liTidy?E>hWGO89M_fN+{&J_Buk1UK z+V~t0cyKBEPBru*jC#!s>K02g*nM6V&H?c7U<=iQIko(xfg966^SyliaI5+;6) zS^lum%Yuj7oAg7M&au)lOW*;9?)9j4sa{M!AiqMdrBwGpPj-*RpxCS`lz7hAmDJ}3 znWyPZ?W)?!O-tL&ng&}zf(zb zl6h0xIM@MDGfWqGDx$l5$Dp~tJ>4lbr-V+GD7&_QU zLa0(g6f}#u7fGAx75wR{v5l~kIbZzJtboYLHckkZ2 z_r34^ejnsFzP+oaWWGRNIH^zR+D$mxHN|fCXq?^iP*-7QH?^Qp> zfUYv!b-oaXHBFgeJ#wikz%z#&T99z>B6-O486KRc0=@Ji577;vTZUvrR(*hmZ{y-3 z%EbpB*%H>aK->XG+map*K=$mR8r zftL_uz=NAWR#kyr!a3hMO_1}(|M|kTw+$)A6m^bvNHw9e@%#fZ61rRbmYnc$y z-L$?LP$7f?5k9|62<8=F%F;&AK*Eeia=NUaU>qVP@1DS$@W3*SpT>}&A|q+bRMtm!fgaTxm+<2mx87SZR|C&F2ZGWXSM5I!F0IuS z-*Q~S60aVTXR4GjIK_{splx4x&gSCAvN|J@b2~6jz%D;z92{(TQ@Q zaeX6sV)2dDOxGkfG6_JJF7hpr1gfZ){%9(U=fgl|h@>EBs6Z^P05BCTcCZRe;zi@P zt`e@q6vhQvh^gG(p4~ei3G5)e6z8cM$u@Uvxr6_nPH22=-T5n+{0c!wA3lJhQAGuP zL#$ol30G>6Z@G#OSg*pu;qrl-4(8XC#aL@zpyf>U!(}sR<3C4~M!}s3ax^`jD^0_lFkHv3){&CyZ zhpRu||0}{^PriPlUvltBr55Z!+(AD7oF` zUwGw!6uLCNNoa2q82-8A&T@L~M@hH0nkQbkSdab>RWi0$$E_b+Mw+E9)?ugfWy3(NqEqeg~ zwAn8Wo$tWjJ6iAsSoa4Csk`+WV|XEbB`^qPY+E`{TxnRu?F^wVeJ54}WnrtTFclMp z_M}rrG!D@1Pa^8*3%QdVH3d{S#n;>@_~p}<3!=7=O?5;@J$blUZ=I-k9TiXz)VXtj zHUwI|%*tXfL_0BoNbA9>q=;_=w4xjFB+v_ z1`1E2`q3Ca=Tu8*H^C-1nDETRrFHOBlC>!Sb?bL?+$WCpF`epe-lqu>ee6h)F+Ff=qoyxJ|P3!%1t#C{x0g|~v52~zI(%lCh-ppxo>@AJ} 
zsm;WTvPFbO7abs#tIKGJMpi+|K%MTXMC-b)Hvl|ZHU#c#!WuO8 z&OFxKcm3tmhV%i$t>)@u?SNyQGNLu=;k%>fm#slA97zRA^R3#&8dpQF1c~RIj`G#) zriyKTFF=A<2ltFH^PQoY;{0|ffTZW8;52eAZK+bukRbnYBpyCPTXVJ19}we9!{Nft z3SB6xIr--JQF_mVfqXu+KRKJ*BO?hVAw*$as+=!C^C1cB7Lo-Tx9n-8VlCXM>)vAg zvW>F->KxY8d#>G1yfWA=4OhUi%%;1*)H+qSgx+F}VIhqp>G!xg{FO0CzS)_btEghW zcKd#99gRVPOfAt$d`)Mrs^zm2U46p$ec1*C0I4hMQ^7-RNCaqzJn`)J)-1xMdw-Yb zIv!k=#J$(-hgV*9KzxmG+~i3?+-Q?V&9gLMeU1+weF4^-zWDs_ewWg?1|wep??3Q` zt+0oIKp6xh`@!X&klFN4f3f)qi%{1mlk2j?`n%1v77iQUTY86TfOm~T`hWY;SV`VA^~A8tU&`p8U*?yi z=F&13_2#$DsWeI=(yV2LueHPz$$_-a6-GCS7dFC5B(BsXpxU6$!>#lh!kr^=!kQf! zrV>3xq!d>L42LEA34@=v^!p{#R=ExVB}G>+Y)uQZg61WKz!xx6YJyN zFFyONH_?5s`tVih-eN<1s&>SqZs~p4wLhitpDHsEIHjZ3NCNL7-tiM+CLygx5IHS% ziUc+DJ)2E?_5@DRmZTU%>dBsyYAYQ>OcFD*L+T|p5$&j+mh#P1})+ZQpwc!V(6qCH?;2vahQxiLr9&s6)<}XQg zzaEP1_NdPdr;^yr(6ue5qJU&I7jmA2bCwwvK8B8|_KD$&uv^@Qu# z(|0Mrs`IW)*+E)n*+H+OmU2n1Q`rk!$?b>-T_}YJ-60hlWv~kr<&^RRy(GDfr1YY@ z1R}#`B1A63*aqL-{$4fi&3=a>?D0VBZc>(=^S{=0LM;ex-r_fGv|_r2EhMH*!q%Y_ zou!Bx=C|tJMOD5)CL(2rOP^cq8>frrBt#C=o@jUZ>h$=SG3|%U3&rLAtmDkrYYH2A zOg2o8k{T=BeP#~8prw}1qPh*C5WtvW17#e^8hZr4QoDu2&;}~sXeYocbXq>;SiyeU_hCVLnjBq=ewi@-V6Xz>&)dQaP>ZJWQ3W=VfCD3eLL{ zO=+|dL@N;kk&RN{O~rq-ADHJhVuAr`3Uq6pcDpZ(3E1X3()I~A!9JT-tF5v&AYXyi zLeEvy(Fva1LH4}7TZ2`jWugf*0IR8QXxmu%xx_s7Nj4RLwBSk8zQ?0B(JukgXuU*t z@HWjZ40D$lkvtU6g5q$l6NRQOM%px|IUm`?r$tD&r?W|1`yuMhZq$Ab6$3;XpLUXj zB%>eeO6kIF7@SgOEeEG(fQ%Z@kvbLyyzB=XXKe~SuFfaLoqf=eyh%BSQClHSpKHK5 zHCR%I&4A_TT#-{!FhVMLA+RZ>`_Gm|M1e z{3h0u?r=VWw!7`PdOZVUy6d#pvF+W24fED%&YsTTWEXQU&dW>=;t+9+@l5%G2ZG|k zUwkhugjGU_KPnvH71s{TpI6e#qoPN^JpXIAe^2NrM+s9oYL*THF2)3!uIrtdwN5&m z#+Xe{BG>e9JHQWmrfT{Am(s*d{2?MBf+$am*vEJFxjeRr)!d*_aQhj!UHE{3>yb3- zMft;Qv@vTD|H~}4D(sg@^wpf6@B8h;`7h0sYbu+E_m<||aeol8#^o=H!qfuM=nwY) zbZT6X#gWw%OCT_J=BQS_-g~XKL!acIx10{Hs{!6}VGmdm;s1Wn=6`nD@2{5jpOoSE znf-;k;IXiR+?lvTE3;9HwfM5wHIh2{n{!Am&8(}zw|8Vr>qAs&i) z0~Nb=C{-KWCt(p?RJl5cj_YuZ0YGAvAGeox>Jp=I$i5ix1@(bVjVZ)bx~`do6rC1# 
z)3Aw6;Ojb6qBiO2OFYyyX-)3ZY5VR2A&|S(6bxe%F>+pW0O_8+c9|npTMd&xKumJ% zWp&YUoAdYpJR@z;e-j$HF&H7RHwN)96r|vy1yQRH@6>>(rR#NZ6kDZPHVIkvKnaNJ zLy!!pKt3(I0>k28=b}{3wM4lS9g(s`ZHW>lttpM;@1N&pY}OTD@ekHzE@pHmMumBf z3-QhM-Ylgd2}~7C0Y~uYES-4VcRi7sE@{e3xC0L&D?PK&OF45|KyH+d^g)~Ig|V1T zkb~`*#D4P&5#V9_ox)aeNS zNrBZKaV@n_W2HP1K*Prr03X|7N{6PN%g^s1(HS}i+3iVhd`IvY9^4ZE3!L)Zh8;`^ zIwJ9u!F=^ONH%8Gbo}Qhsb0Ey2OYHJ%%J5^;FA{QX5s?$QXw$0KLxlu3q1+h9Jp~R zJLQVZ1+P!;Z{(?O)uGP0!~lIJM5DG#H6Ip$gO>YHf#}R9+6BpmV@<3rnWuS9+7+_h zPFAulXFK6+`WmOi13tI`L5R_7j%!G^EE1w^3a!34P6F|Gzq5UypTPq^2QYU&kaL6@ zM5ZPT^fuVByvSs62Z%EQ9(EX`;B>U$=)%4#X5{5Ua~z}pSx;jdU^uN}fNU*c-g!Or z#!gfX!tpUPi#j99zs~~99K6}7+OYuU;>G9ayWS24dLf&ZY**&xgs>_zmAjgAP{k5D-O~v1d?zuj>)j*Wh7l=5AH>)yKrh`mp+>aP zq%w^inRAVwH9E4qpcMFK>GAWgErioVfJspLRsRy1A&hbs;6}DWdZ|y^_~eWv8|F|e z`0j?x#{dvkrIbtFg59o8O2oswF@SvNN#3P1$d-Z4li0iLEF-7x07+?h4F|aK^zJ9Y zr89_|RU8UNpQp2e^tXRd!4qBAp`FDUP@{iZbhz3sRHH=A?ZqeI4F7VM zu+J0H%NLW{4-{!lejd}5@(z`c-I`=01>`>Jd~!$jd*{tBjrKL)`2Qd%ERO2``{D@O zNCuiTren)=<#{iCu=r=I)hox||8_OI300W;-7xMzQroedrwnlxHr{Ue_rUyj8UBb8 z3Zz7WH%zL!MMa|t009oIDuf#Iq`_+(V7c$mE2Ez;cRpEE{k}tc0>~qTV&TbWu0Is* zeFIf8O5qC5!c*-cC9Oe-e!f7%vouLSO!UujTPISqBxeBk((~dw^mXuV5O~p)65GR@ zq0Ri6=)_FSy)jeV557i=9NqnaCrj7+=%m~DlvRPScgg6KQPmKk zp57A~sB1vP;fP9io{2I^sV6bR(INN- zab0|hr6ox`R=QBC9Gyt*_o!X~%P@?MQVAKLZL)JPDk2HrfE9b|qbqE`C|Bq%3V{ zr}f6jqPgSA9m(&VU0xz^z$471xOI&U*O=vPc%={0x(tg@Y-=f*``d8uhGm;^H zB8*%~`QWwr#=o5|O!;@h^DBA&MtCosBYAX{yY5qRxt+89!d3B6pRIi61iAvxX+DN7PMHc!FG&`QDvJLs5ZlRd#)%>8V*`>+WBGBzBBMUAQ*DOs%3j(t zgzbe&K?m*ko60c>Kgp#OcWk`?l%@Gd#u}!fxhRHryDnyU7+14NDwlY?WKVLC@|_g&RH-{zRzLFDx=$FB_BioQKm0VQq$PVS@V!%%Vi zZaJmotV&QH%AD+Ny|oPA0HTQy^H(w+7XWS)6118zzky`OsMi6^PbQ?42hEu6$pmozPXBsZ(ix*z|09iSa&&Q^NF!V(L`F9(9m_+j@ubx+ROpelkI86bG;IUR%*oX< zF_2^-2ybCXu1gd$5;4?v`cT(1qqBPx2eK1Xb%4f;;5uf9D8kUNT*e`&C1i<&tMNi_ zXVpSt&hT@nZB-AXE!%*ehAjkHyN+UTLrXMtqh^pwsMR$%1IDCigFLdFCE8l=M#X4o zl`FKo-Hh2CD??A}H|TXtaLAVOBin)Q@#uQ?#NfgU+QU z#@^(e=hKuRCk$cV9n-Af!|b}%vQ|mc8`a4S6E=kTRBl>iy;IfX&NG%otxQ|1q?k!4 
z!`NIpJpYDmfO3*Aa*zr+o1IW^#=VmXVBz}I{%$GkEw>LQq#LDRz)*vkp(#k2)AbTm zgx-T5Pk)ToA4H2rt0kP2%=YlY%FYJbOHv&}B%SyAp~0P%(WY$H>S>NSuG1{DthR!2 z3PEVU>JWXUcSt%BEASXokjOp#GCAZ!vN)xWC-zRqX$_=L;ATtJFRSeCYg5NuE#W|b z7=f|#kqDJtL2FUk$h0Qrg3>LNsI#%jqR8fas(FZ}?~BaJ{QyjNzR)jB6>oBi6y2B- zA+R|GH`S1PRAAUeYk3$tBRUpEKeGk4Q7@fpd9E6`AI&1|3HLC|8nX}MPvV?#MT(+; zu||FvL<5l?LD$NU-4j-(cDvG;pvS7zw5C}U?jBCRB9HY`Km0U-f=hrSTaO=}6ZdGY zX9fr|$?!EyK(b?NahK6CGy4iBbrxMh$YNLEI=SS0E^=;@u)$Pu(DA9_g zZn9tvOpSbq0ZKQzZ-u6-A?pMa-tjDgm`4IHi$Fz zAc`(ncc{PA$*GUw&|$Hh6Y4GB8cD)k`-T9Gh7{`Jrfglqu6rJHlB0F^kkqSYZO~R$ z400^Dv0dEOp=f3%oh6_m7F`~iE}wHVu-<;B_pAH1-?UjM_$vLIBTO`OXYbb|E(U9V zJ^uAnp|fyj!>{RAQVFwq`>QjUQ>k==d!%Z^+r!@!_?UFEDGNwu@dgBac4o(pEj?Sa zX7<+JeRs;lAyg@HwQIvj?{@~<&!7E|JtIGhkxd-|154JZ?Ru+*wWER!KX;G|S_iCm zkyg<*Qm5Y<kKpe2#pzoG?xW&#pT`f^iyD9M1%*Q?$iA`D-!ztJ4|KDxaA;m* zj5D>UgV1f6fraPF^07cjZr3gexkU}zVAFGK?^)d)7jCDb3|H^I;=41U`crQJ)qH&s zzV*7Kjb+kUf+dio6X-{GMxfKCxe&9$E~Hsu?rSa-7=jEXHN)-3|qwirG$!cHxt(-J~VbSP!8(sIg~&B?7$zC?3&1m{)XJDIJ$2ID#iCCdRA0d6;P zQMgdEqM<)O{yHAGDPMT?3_a&24eB7iA^f`oSQ^cX3-%&LAMc*K3^>Fv>cy#6MKuq1 zx@gjRbNtFF&T)ohg(4kEk3p1gL~TKXB{$1Idmva3wf;Dr7vg>Sk*NalhFn`U=9;C2 zf77lgCrGQ21d19g~b7D=*xjKS0%D zekXk-A`;>(PU39Nw_3D{PJ%#zNl`O>bPT>QXip0gc_frGauN|vgr^hXwNBR?VlIFV zj!>H@Ipc~SW0q<7xFhE_yYCQPO@mQDbv2EyxN42f(%1DUv96+Y`J77)ZMSyMWrQJ8 zYT%`6RP2bc(sOw%L|g5CzBIfY5AvmYFish{Ne|yC3P)~pQkK_^i|3{G!Z=9*8d#i7 z^zaagJcTLmvx6-_TR(3T^rFAp=|Z)F)u@V>1=w1czOM?~sGESwNyWYvn>y4lBd^u~ z$@1#&IzHLVI_DfHIL%Z30+?+Bo{dA!VVl66j~TH_RZSf}?!jt^k-1qB;&uc#r4G8a z&T4ysTsqfiw>pq7dQGCtiqav4_Mb(V&-0m*1;@7Q!35V4C3R(-9rv}{(O0_^1E?m$ zf%J-!4m61wDhFfitiTE0kq0$fs?_+Civ@Y=N?kgLlx}i{khhl3Ag<*cHO8F!zAk)Y zjnjiIKN+5$bZM*uVDdnWX^f3+S{{;1=fNA~2Z5gj{wU(D=X?1(>Mqo;qnEF`@c<7R zkK3YZG`-5abTx10<)7#GK++qUP(5&yprIE&wnyxJF-q5{8Ad@a!x*_2d$KNwgd}!I z+-}ZH1Mq-iQ56|_CcKo_;qNPw#4E7T86v1?$l_KnXs8i$19vdg-N%$IpXk$Q*Y~T3 zMdBQ#W(Hi5V^it04JGrBySRh%i#T`ipPc)~t0eE-{c)A7JVvnyBytvH^TQ zDO!FpIHgOXq$|43D$$mKx<~4T-?H$(Lz^L_s1#ft+E?nj==dajO~1u~s#m7RrVB3L 
z*;l%A(d9|_rGBknMZ#2HMQi?4^jLV}*Sv%${wCF(eK;~YpBSxuIQJ`RQBaa(`ExXP z=rmq-Nq)M0m1Ow-qZ>LNY|$KgZC;$-eA%V?>5l)F5Y$L0yEFc+ro==`Ep+}ZoN;yq zXZ$^+*qHv`5dIrD11jV2E6yk>RyYvu(%or1T?p?uol{snuK99*Mg{d^ZIu;GxRoGB zm!BM1HOz0SM$pWWaG|mG3t(gx-UbXr)Y{2;+B>E8(XS-4RzD)_!QZ9lBxrWiZdz3B zOQI!149C;L4+R_owbZ1+$H*Bl9gMZ@Q{x?TpwX^sbj>+xV(fe-yTb-KttU5 zQrW8k+c8NlNIu}PPLcynxlnCE>XZn1q;|GL!~|}-=@>HlX3ZGTex-+51?FR@v{M<$ za4*QdaXW3%^yyM`&gBE$6i8AjlUG)n=GbN1!k!gST2FA3Sp`&r)2L2EePl@2=aIL1 zdz~R`1u57f$u|9Dc>=!*c<>WaAvu?KcSIk8Xy4$-rMkmZwkpPT@i&xVCtvn+!wE!} zv>0%z=G0 zDi39N=i)F@G}~$9~AYGQ68;WL;u0A0}0$N zhMc-p;O+&f49>HY%_#T9=c~!n0sd~$7{mz;j~EO4GMwa%in6xW1&o@!BDT3Zu%a=i ztpOIs1Mb@i?ke*1T0USt3coOiByWCFI1bJ%wL2xH=DF^E*ujVMWXh%1X%ErI#5Lf^`DCc}u}GAbJ8KK@*mKcztfq~;YySui=E>iJoX-w{KdOJ!Vh=x% zk+P#$Rzr$x+Tdbf+AK`_)%}V`zT%0! z{cD?7pvBj;UlTn1mgCoKgZQ0BA?Xo}YumqROPQKPjKysxd2V+O>~%|I-+cR z8TZ+7No*IceV668;2c;beatWM=o$9oIL+td7F#hF-?`jt-+5JX&-{N$cz4OZ2?Qir z#cEjfdHY}H6$46~E~lM}+-OqLaj7B#HB9p>q`&BbI^z+Vb>17JTG3&9q!&k8rzi5# zvj%-`UC};9n!mE;*gB+t^!UZaiKgHK7Nx1|0eRvP-X_pR`+MRN>o4N^WTMl?Xm=p0z#BH7XHZAk$o!+m5&Ft@t&!0RV2#SUr@rI|V>~v=P zvNRleXdCmy8hlsAn!G zrN7y6Yn~%Fu^NQD9*rS2lkJ2Y%uUp1 zd^+tXr42{k(sLq`wb_PkewuUG#Cs&QYHP^xp#kDj3ZI=^!{8BYylEo~!i_9f?)drM z_Or|MRJl;k57H-F{MoQP=Z=iIv%zPL?4Ax)_Cfs@9Yq4=m{p1zVd16V2^qAu15Rh-Ze#$Oz`BHx zL>#@fT8ZVGM@1s@_TIRNad5KrN>hxjGE=O>97Q&DK)_E-C`%qLLxpBQ48jfF?J>R! zi5*cknEQKt!AR+TWTh8Td`rIbYC zP)fcIMKI*)6t?FJDN6zq-qLM3#cklqFz{p(XiT@U@3}0M9f+C zJtFE;`!)PZ7`zLBC^ae zBk3y(a&w+nC8vp%(Hv}x-FQCVtk4wgAvX8Q|K&*by2C(zF$LT2L@mb)AQA15^9jfk zs2}F{eTD72*&zm#xO8xeJFO{Q$Ic|PEN)9t=`0LO<4PU^CBEhRl! 
zpfuGr4P6zTJU|ZLVaEO;vRJSlv$)BY*kiBjBV{?B4?u_=#Rv4@HPO{rB}-)0dKR8i z*CXkClapteGGNNk$Zx{33EHS?QL3cQn824!gKlpL3>34COFqwY&WXC$WK`~TXFBw> zmi)Gfw71LYGG)2?_@dHyHlZ#B&}uEA?>X* zbm>#eYN=HVpE7>X3L;Y)pvx;@2?kn*U5C8Ssyj2cGk8r?$LM` z3(XvlI5j5x@UT(H;Wf89IV=NK#e-rGIjUw0ZX(8eCAiTU5T3b9k` z=i;8D%|e5cQX!e#ny=pvLxlukxQ{~X+#v}S)BS@J_Syf--mZ55A$}Y-7RG>lnqTid zp9bMWAG8DQ@E@x5E^lOeo#hyvL92*w@~Ozb9*_%(KeLfxj z;eF}htN&RfBYM44sW!YojH-Po`c(;|YC}8h@}>jHYn%r$yM1uhQCS;-cD)%8>Kcem z`vZd+4*A)U=Z2|bC$kU>x$U22Rb^ENZsYiVfdX!$rtW@jl%^6U6&fJ;D*?8A-0KC2 z;oPCrp5He>eA6EY-u^C0ykKN$aH({Cf&Ea}2NzYx%30Gxnz>bfK}70S%@@5HLBMf@ z&qFw&YX9qT?Jm-UzSw*g!IvLd)9<{ml>5re z=t)G-<>Af)bvcWk%F{Lfj(B{o~(5#Dq2!T|Jp2E zdYXv;Lxob~H95`^{1D}FX;$&GDHVVnh08HfFhwbtjjx*N%)m=1YifPso>*YTcMGBO zw{YOU88&=1dG+B`%f#4v+K0Rk%#WLF+v4ZD4+SIlD~?V2;A_QO!sDrsHi5@2)ExNw ziM*8kMo62*hWIDMKoj|_n}J_|n~t>h-4mV38i%fZ{}cVr&!iRi#??TdC*Y2Med;w;HdE=I^C))F`{@DEK{qjj+ z{yz`ixN#`rXW6CEL(Ei(wz*Y_E0;0EYF`!dQVJ___lgtcFDs}JTI0x5Kq053yxz-? zZjX{{rzBpUT=D0C4?H7`kUbEmD&)^o{i{fCq$BGd6bM;oa|1>G-(n zEoAEYA?s+>R*6TcbI%6<%KQ8KG?<{tqKq;Kz!iCq8lzaMDU-Cl5?l9OpM`uNRjju0 z{5@r(JDs@Rap4GDj88AilzVDbh%tR_CTqm%~1G37YWDeS<`iz z#Z?1KrCSRATvR@*9Qp;&P-Ke&>3{_1@Z>Ky^pnlJb^0=;NN%$9bHUc3$PX^@j%Q}g zmNlJjC3nTES>O0S;#3!((%dPh9o5)dF^3lai7qozPcj+&eU zC(A+ZOeHagldgZ`_ie+Ry*)Wq2;kPp(Qq;=&AGoZzcTsVD-=NbK%tcP_=UM(%1vtu zhe|!2!d-N#e;;JtSg`TTN*{CU5;nmb)i)CSuTPfnE@BJiUYZ&`2n%W+=nOBi%Zs38X8{8TG^ z-M;?<=zQcXcf~)w{zkUrki&`ev9@?j-sjis*Q$^Yb*e~0h=*!9X^c%L;e`6EQ1t2D z&YuF?xQ(Iua-~f7XS284gkdyt-PF{NLIxk&^EQUj zbEDfRot*!!{}A62+0{V?hMmkwd-eV?&o?q?w+vwTQohiwqMoFXf)%{RSEvO z{a0nL{=Q_64wEnq=Nn zZOufklrAt*_1d><-wY~Vzmg3~*7BF)bYKqbd-(+zI5_YAw{lf01~of3J<#-hDXG5~ zY=0|!c7>F!zdq6{!FMfRRx?W%b1PO07bi9!GgmeaR(3Xku$YglnT3Ou z2Zgznjh(Xy=)AKBL}6zs0@CGCVOMdLva+?4_k&nz`l)JJ_&HbzSc1evDTI9leVkmK ztUSype4HGe-35I_Kz{%id@ld4W&=_D(Z$0-1SIhrD}|m4m_o`0VnxBj%FbfJ&dW~0 z%fZUQ#m&dZ$4tS=&cV*c&dJ8X$-==W$i*qh&Q9^S1bW5>v9uP{ke2-$uIDEa(BCHI z?d{F#&Bf{hv0>v75D@qcgOiixxd)58ud|1l4~w%q)!!jVTe(|6>|8zUT%0L>Lo_pY 
z@$?V@JyZJU6r5aDRQ?U{zsoHrr{DAXqqn<-F0K?( znmiP`cFvYA-tNDJ^XFWptsr(@R+h357bl88xGQM)?_lTSljdX>;FICtl9v5*)&jf| z`~uSKvb<8fyd0dMzm2v0U*Qq z{l8BzaBy%4@CYb~h$t9nC}N85uPC@P2&O)-GT~08Z&rE-+DQd>m|)0R9hFpT!jsNQkvBG1nwzMD**Io+F`Nb zumNI#8v^w%DzMS7ITz&57_#5u0CQ&NaWGfn^ay}_AZ#EQ73M#y2>1PUk+|>IPj4+d z()RJqBD$SJLuCo~avW?4it&o(vrxi@w7p#4WPJii)F)&!S*$JQJ|25PAMe)2uWNb-KR%w_df#Eh`Y)^Q zcb(rDHhC}XziRfvWJS919`E_=Xt6)$&GR*&W4-7@E9XOHotvvz=Lh4*5Q~?*n(ehh z4-2PcKiV@+jYDFB9@a0x!w*q@LZy*NA12*_U=o=iP+n0f+u zcFMhRwz}6f-To5b?ce!;I3()$-rdCezIsLI*qv>q6Nv!szH@K$fS2RSN(g$@v+g{- zX_)nPeu@8|Cc^ONJmUYF#$VF-Kde;P)*~DRu3MF=x@K3HM3Hz$>ZFa~DL{1dplIzj zdhJ$o8Z0b0Jx9JWpR(rG`*(IsEblD6Q zcBKQ(nqJ|T213f>8F$k&dCLIbib=dun}`1DRI~;w=Yk9G};C;NltQ7%V>ZS z)k>n{Fii?Z937ImfVR@{coStYGQ)RYqnkqPnETbs<2=K^HEX0uTcxS6hERIogTBb6 zim{TgOfajK;#O5zmMd7vb-r{FZJy8n&AyDr{gCQe{O~*0D|N5-bgxO@l1Wny4Rams z@xf5Byz&&EXd3LZ0~zXBwku?CO|S7xc|faKLCX)XC&1d^o!;irJ)3a)ZQuL8#rMlg zx&xaRB1MN50hK3RmxfP98F~WfF8l7bP659)A6|RCzzrC$vYd<3%*}!w*R0i55HS68dS8~Wd?soCi;}W>9zRD^W0xUwjYVp8|uoSR@ zPI8^8j`I}3NO4|_ z`>l8NWaE5WjU|IS#proQx zN6AH!7IkC+(vT)#wr}==EcEAZ*Kl@YlV>qv{p}WBxiajX;J{joN=|kXT)S~hlV>4j zJeZzLD^*2n;0sJu807@4YahBY1x4hTxI-?g7x5h8kXK7scp>H%POfYBq{(@rG<+;vNJX+23e+F zfH3edmI2EtWTL92%E0Y`1@6cQs>X*4t2`PbgPlye_EXr28z{oz+Oq!LP`h`s=^O$< z>E@6^ok>+NZfLLrDL;({`=L5dD7t#{L!Gh8Sxu+TbSZMnu~RklaGE+s)yN&;>X3hz z!`sRi{SaRxikTrFnI8h6r**G@;2#CWbgu5Lac^wtyx!nWSy_-=hwv^;6$OjIe4VZd2pXKagl_FTV&%#h%&zaI*vj`k_x|Rs@~7Yqwuc@llRl zGnNE^z&xjKTAKSBSE(7;7fIVVw=aJlhRZ9+3Nw|)gw5)On89EkD#MvEL<_;gv%=Nc zHqe$J$>t?9;%l8*HOc_Ns$`sbF*mh@14Gx!AhU0gk#nPZBJ?MIPX3)^7&prf1y6wQ z=u8p=Vy8by9{L|^^|MzASA2=6UAjj()UW9>=e=LtR~|p2=RN@*v;BIag-@S_V;rsm zPe2+$`ii_SzmaAa+f_jWwE-I?@O+83URynA@mzmfWnNpXm*xJ)aOn?2dUgbY=a>Ry0RckING*WS5swWR8lMfWhB^Ck4UFxPM6F> zL64*ak5QlLf+Ty>tO~beO)9`n;dbm*tf$8rd;%ETt3Y$HYFBz2;GPO!LVggY@1nP= z=+QuaoJ6}LN&m~%$3Zxg!mVJdIFi)X#}0%6@mtswNjhLh(B7 zI)+)DaL1`S?P5^j{B7nV)TP z2t*G|TI+MmQlSRPI(t+Qb99ElB7n!?e?XdPlp>jEW252EBr{K>|Apr?*gnMqrxjm@L%(y5m=+GFtX 
zUY=QjMB6md+m?Y+VAL=`yEDw2=C?oGQ_rd=%$aB_5TuW33u#=%wcF#>>FFiT2n$6% zUxX=+WMRf(K~&R>XEZMMId(r&(Ecd!FL=Auilw^v*ewm-w(96j1+2LT2?vgnh*;$>xnwG^Lck9yA%GH?j775J^@CB8z%p( z>MfGrK4!RbISuy&I9Zc%x#9bB$>Anhc>?fszv{&!T)QGwMHC_%C!N@7Agt$>WeTXrH*&aSHDOj!5+5F(LcH5Y+z5RLL!@@Ylu~ub<};C?RS{dStz6c z5>5vXM|(g+0i01VW7MA>kxMckL%-6{0t&9E~DRQ?Zbq_R`~_x96&2 z|dStkx!X%~~Hq(u&xNoQO{cn@ft?IgJdCAypC?DDi72^oA2PJC+!Bi}0`5_FMz2(u34F<9HxuBbRsxD{HxivxJ0kQ8AD`!@74-*Q*;edbs!F zn3s3M!MD;tK*S&>G{cruQOEj?FTFx3c7;32kV}kw3NUCSHrqZEa@W9@V%o#BV@nwC^c~l~Wms;~Rw-Z0E^f$h zH%A}jqKKM9JWegG*I=!i0cfl=Rm#Cwj+o&c!NTK@zHdk)b4 zukIUS4Byt0GZs^^x(w8MLFkHs7Vv0LeTrp}mc{#$RUj>XTYxFe-F6h^7ZP>6u!$8K zKD_!&v4Yk{gEolKV}w>^1|9~RU7xGmsgo(J>%C>JXHizCzIL;da<|!=QiG2d372KQ ze2urP+UaUjJ?2S^%znPqLcA(-cI^Sg?Gwvd=n^1@LQ}cBX4E&lJC?mp+WT@$OMI^EP{^}_ZY`H1}bdok7$!Hr*aUxI_ zjOG~Dhl0z_UzHYbramzY)yQl*K>`XI$gkT5bxeqK2!(f$mwrJ>*b>7PXjdi~Drh6kh9vOI?HK*k8JZW27s=|ETP!vjbZInX zBbmoVii4zDjG&n zE2@ej5Mi{nbp+D%HJ1PY)q+?DfF7IuC=JE=jkye8*%kG}DwOI8aOI#r5TL=^C8Q~| zZZK;}j{vhBVdpKBH|@X8a_UR}tnk+m+Sbm>CjiEqfI+CpVLlsI?teQc~&NJ0Msuv!jjIR_ci~F$m1b9`Icv|N~_Kp+x&B+G0=)fvt z-0;yc6V5 zLotQl-epT7k&C@iDS&$(Qu_J-xdFXieIUKnuWH7FqKu|Vt-ktm_8 z6o`x^%XOZ5ExdVkDF(4GiZ}v{a~~7vt|p~*-`75%WP7|7u{fCcyM*O`3mBzUZf8L> zcJRT=`%d`XC+_I)Kb`>j|4}_Hi(t7zRgO458FdlCMc`lbO5l=F$C}Ad$VUh$BET8> z7vPfEp1`IyHYJDZkU%E#ev4GP_u$5ihOd5YP|WLV)Fa<~)#e5?o26g7TQ0!C#PRtg zlcx@nohFcTX2_L!*^e`BEvNt#G_b}Fxe+Sf$%?|=!yG2)X)hLN{HpLB%$3xBR?)*4L5v&kg0hh1>yt`H%z-YqC=VW8UGR`$q zQGtD%*5AL2zO!Yst*_cIKhi2I3zC6f=W)%9j7-wfW{8Bub%w*B>P*$>gsU7u!)4#R z6B2v7E#P^_f_pHZJz({-dSOb;En&-8{-0asE{NVTdDgF4{M6NWX*2a{vn}5EZH)9C zkFe~k=ygYL!biNhh~=xsn=`76SB2A}acbBZ+pUIVLtGx9A0$@<%sXUL3L((f5fQOamxBlV=6nh+Py<`mfF9ymuuEBUT=?s%xh z@2ryF=(v0x{%$WH^nINAu4?mB!oge;5~F@qhs`2Y>{(VKysmy@lD;Gmt^*~D`iIYn z!%w!g;z(W``y{QA3RwhPaK7b~dK+OO0SwGVm;!=G?d^i$WAWxgeVTR__At|f$IH9Q zfCnv!Bi5Tr!odUN{JapZ8hBd0m~{hwcGtWc?NcY>W|K!oSe@^%-{CFHp6^h*22INZ&NxDei2!jy;yT$Ju7SOocC;; zc{_hTzvk0?lU-gTvdOb@P~!FOn_yOyyDs%Un-IIG2T^e4176?W#We>b)<&8~^ei 
zO;jtT)Y!u3q_k!)(9S)Sr)77eM(5gn5~;zlqDAFhwG9*cX0x%Erv^%~2Evo@2n4YKQ+ z=F!(o-g=exLPEUXD+iD7SiY6yS3|B?KNV$d{bca%AUt&UzsfY`$`!OCZ%iK!^8;m5 zE1pC+Yjz1n7+!|5r{>cr0{~3;r`aspZZ}EI{wcPaXWa%Kc8k9-14yXIx;=FWfYQaczG~9TzBfCbU#E0^FmDh7GQ^lRo`w772TJyZydUE>u zoUnf0kY=`bW2;oBd)T;gr@P4s;xJnrWit`)@-ia}IK`4j89oTfPWj2JF-&TOLR@L% zTx>q)+US0+j;#R0q=|)^n5tCPAr0tIQ9L&sc6ZRYXUr_ORj_kX_Tz?-=5)OXt)Wc7 zzv%30-Dq9^ird{_sZ;H?t6iOHxY4!Fd&-5W>8u^@o+~_NprJ|QG;z(pqVE4Wv=3{K zPj0x3)gm`5pZ=?VJNlJRyN+N9PPRB!k?Dt--O(OvFE4$Z*yv8IX8zevr8ZDZ%)>ns z5+ki%rZKj zeM~ZZ8@}N?C6qnc$B;fm2_HHz;o)znn_;^mUO?UuI}f38)d~#f2X+8HfafeXTO*gN z_gUwvb1SJ+KELbO>gG6NY4_C)6{@;zwUJ5j9!}}(oVYOWzBI^tsDA=Ln@0^)`J760 zHs%g^Us6Ka{daO2vJ7L_eajA~bhce)W3_{oQ7fX$E_k$Zd_hjtu-$O zG2^r7^sc_x-F|kI|Jok{f6slm_dRXEw1{>_ITcv%d3i!2B@zC|^2BVp=dd$kp;>8F z0gC5Lp@|bL3PI68{HVTRk8^80_05U=dZn3G%Ynf*PB1=;1lK#^@(Gaj>v(vZ^h>;z z8Tu!rU}whQi#mHomG3dh4(u0-C7wO^G7RE~H^lp=la;bxRo|&`3@%4^R+DZ7UnH9S zWMkd9w11s0Nc9zhp2x};YrT8SAcFM`{#DsG&jcIFfet5aEt{n zZM$%=1LUC}5_%)3KHH_{WgZ=D_MtjRXV?j^s{gyKm2*>T?a!jF`l*@MJps;ZM8}4S%PoR+bVM;ZehoA> zEe*+1c(dg(@`CJWiDS(k0cTnZHB;Ca#U;5GR+D+Aiba`GSv2uN$$5i?>^ccpj1dVA zNem@?sf;5U%C@T(+(%2~s@4dpha`R1uCPXFaNl`G z3Yjz8%sR@3yDrF-@hevpW<~)HGcnb1mf)FryN?kK7Vy%exGWme-%&Qi6HxZ|sXsFI zS+RUGr4Ear+u(41Wd;gU(7>y`o%%DXh?C-^iZ{Gl68j5wo|HtK84!2Rk682Mf8@UF}iU5vfP6l+;}9qHAw_O^Ul zY?Zi!)=zz7!)O;}m+*=RaY$YM>&fz{G?7_HnC&f=HuZ#%Gxh4`nV!)6633=<#dsRI zOtNunquE^gma;&hkn`gNgQd5JHljl`$czEgX?-`>FrzcS9V z;qyXia}qOq7cWD53xrxn$+s49%Qs@-U6?{hu{u)8s*4?DOB7!Kc&j9MQ) zK;LZs>}9>d9i%SyGm#^R3SdU%(xP5#kkBO=*+XxnI#-ntSuJxXqwGmE!)6J!zwTEO zd9Sq*$1z!1nUc9m`jEyYiXI{@y!Vc!n0TxqV0_K) zieYh`fp>a9iATfd-sRV9Fc~F+_KK)}pvq2#p+~>vY>9!hNm$9%hm@3}VU=Et=BOgZ47Az_3|taQG|jvz2XB^d!{=xDwd5Cl77zuI}7ajv~U z%%W{jq)W$Fl)v*T%0*Y*vGwiaMz+|!s7OHVU5~CpY~G8|!ZAr6WfEF2Q=aY>uWqBU=Y!$}14D%tJM@S~UcMA}yc$mi;f_fIrQyjUMBAl)R+%Di^YsRe zaCAoS0s|8NVV$81) z&p29!fk?*4O6W1CD@10|3X4j-bgAi&1PFgV0kDVjRtNcVDp^v6Dwj>hvsS#I-y8Hv z)qpOHeV!lKa6;EK0Wq;z&vN%#j(Q!ZZiI{kqTHaAM%%8e;?~j8)<~rzw#feMRz1*K 
zY2HF&)j90ow9=_&z4-G~Kf>k6PYJUY&x)T)H#*S%0*qn82va#I&f`;tSc?ak-_9rQ z(t`VsXe!5bmJoVK5j)7M7@tcnV}3p~Bul`^+k1C3`UGg@tOUiTW`#$M6p{5!4fCZ( zy#-T^d{qdJtkB5B1VD(t@N$UTJJ{&bS1OcQ$-YPvZW=OyD!uGAquk8GBgviBv7>cU z0M8VfQy7=9J7S&BpQc}fSLy5L?d52N#?FklHEf_m-w<#owJ714HDM-b z+q{R0oF=A{y@1kq?V>7|rPdO=O|5LEmX^CZoagUYN2w|*-jig0Wl!G*iA+~AFDl|% znJ|j5Q1QB?b+wPu!cpY)gRHFKHSk{XJ8Ke|$*y*Pd#&xE2jB@@;`L@^N}mVk1)B)A zvJiYB0RS8UO8~sswzkh`ae#j<14bZ|HH?)Oc&tn1glyns_EAut%;Ct44#}w0x#Mw> zuNbgwiP!ApCRP-di+?nQOI(Dh3&c=yJleyWu_vk+P7-cazLnUak|P&Y<``DcOQu-5 zs2C!R0ToffTEj-ADOZr9Bf?;v88rUPKFH-bO?eSflQ~T@3+!Ju&*k2wdAExzK++(b z`z{6O6E>ABErA0=QXDM}vP%S%F!=Mt-|`koQ(Kns+N3pX02ncY0Xh3i8U(hH3!HoN z73S-e*4q@IU}ht^s#46z&=fO_KV*~Dkde378#kl8CaRDwH zVGa}^1q>GeeALKc)x_GeNZg=uEDF}N8xu5YwYVsBOodeCTr${XiSL?#k7Vf$9-3Sl{u^w?WcuJyl*#%4Fc#lR=u-2D3i0s_ zEu~aOxJl{yH02vMNxB$k0OM?oi4tl_SP!B`l=9klBdFx(bVi*Au|mV;_Yh!|P;li3 z=}{wKgKD@$XY|8gAU0L0az@v~WPVjo(GQods%*==7%u*4?PSyzW4e11eXl;qlR;0h z7Df18AeI^qW{LDB3vz*#)541T5>F)M3S!ko3-WeR#zx7Kf8XfPsXgv7$5~E0Cg7fk zOYfqR-w}y82Nt{;|KNHxWp3b-G19lAcAH+Rl-ZSRV4}V$F(f0-A7`E76-_Z=#4=Fv zvoMh`Y#j@eZqAee6NxmyNu%8+rs@VBd2u$&Sp<)XWca)d{jF68gJ3qE1P*KzS1M;U zIt4%z_VsxBoj8j7b>e=!4C$E~+{gN&Mu)(6S++ts=^jbOQfyT8sRj(#jXcsozrko{ zzF?5nz_u|XLU`cm1`mIpQoO}2F)~pD5xj!}wIMwPopSs%;~-@yFLEk|Su@N$`s*fd zc6?T%ls!Fb9_%2SQOm=AGNKa2hr*vby0t+zV;zn$k`!BnKb!^o-61Hkrnt7SNw}*> zJdoJ&R|QMgGW@3fT0^iL1GC7@T4R^idRr^ylytx(-L2T4(Qx6#;0Vk^+)GawpSV+) zN&4F&nrw5zN+Sr8=tG{B28RY)nn%&#QfQ3#Np{8O5_tLK1FXZc(t6%+gIKUwKsdOR zhq6XdM+J84Pq2>5%!`Dt#lmX5Zs9zrNQ8?j49-Ry2hwQ9g`vO2tA@o91S2d02dy`}y z`;LnOkPmtU(6-b(1%yOQPJLy~w6>5oZ9x&aaId|bx%-Sof-*SEX~sC>BsF?`MjD72 z!z}DASH{)H+tQgk^A)l;Qm_V#KSL$KLr zq_DY;1vKCq^k48`AjLyE*}36T-A>M|aX5Wm0#9@MX_Bbv>(xD^i}pf;Ck^SPP(04j zopfnaU=|MZ7Kn}#1;Q<&F0ZjSsKT~zNGDvcRn_}gEjda#A(Po5(#iIa;2HG7pT)86 z2ejWx*hxCYjdI*qp-EZ9zv@!^>H}6%2j-Za>H5aHxIuQxgA?O9pLB&dYiO-EVAgxN zKN9dBz(nnR{uF7Hp#DzW z0~u>1QyN|i7Bim`W91V_!y)axTcoVn&ve&5Y`Ix|oEd5Rrqs~r=?3*!t5psi0iL3X 
zNYXH`3Tw}*{47nFMRAw6=3m=a(fdjEHktFk>~WU2DHtW~oFuKL$C7YF>t}=I`K9r- zj@tAu07a$@s8X3L5fbQ#b#GSCGM)M<$07isR^yK*DiS+uea`!4GS<Zbnik3Oh`^Vn!s+^a1g7#7Tdp5F|VvBVO=5U zpUhlkE~o+*g(H6#DJVTmCd&Zm8Fi00uxZTWnfU2tISete;2kR5N?gF4jmsD>)|YGn zV|t9CVK=_t6Pb`aX7Dde-v%JKnK)ThH7a+e6AByP64}}e4w=8)<8;O#55XrC24207 zb_NT61lg3xW3&7ybFX3q!VdzNu#6`5r-eA&r3AB+VnZfCWbPc5L{!X81v~o|4Toh* z`z#|mUXU0FC>fbh6PHv`sgME$L|{bA{59U6?QXVqpR)ne!%SqhTqG=($!h*;l0P1@ zckdtCcZM}Z%sAa|8CpHNNu*^=ZIKgIYMFv1f`uC-u51RITHuDg1b`9uz$9tGHw0b2 z3hyuAEFJE*ac6!L&)roSgky2Ut*-Yi)kK8<>oM#4NEGI%MIs(O!MO6CM&O6Nhce~O z(zL#$;WbIV5zs}MaD&-uz0B2naA~fOwId;~@y?b40<>6qHBp&Pu+SX~o9E;FWT*Td z(TT8S5$#z$gIn(2iATL5>p@&B(*9AKe}!Gc+v&zsi4|t;Ikd#+V0&0atoIbpU%^i} zFZgPDHX3=+AZu`@IOIv}Xu2wncKPB_#*0rXw0P|+1tEtpx<-nYmJ8q|bHu!P-Ru$M z>-GuL>Lb@c{gkzZTPz)?Cp!^8QszW|80(55duULiVrB-EMW6 z_yXEhKlH$?onX2gr+b1>AYKsC_(EAHc>W2%Rq_P}ARb2mn@Pe40+ob%ThswYGr(lI zgSSWb6#Hnbx-44CnxaO?rTZbB(F! zZPvt;-S%2v-?s@a;2@-7KOPq`mULXyi7^E6t1^uhDJa!%gm%6_o5F!}!yrNh6n-HY z1|y-{6z|$6$|juLmp?vEIwMCB*@qdL9ioEm6%>OnD8VtzZ9;Anrj;EC}@C#F8 zmPfGnZhpmHXWMp*z5@tCXv{I7qukp;)<+>ImGKqOJ1DSY=u8kSg_l*wi}fliGh+Z! 
z;tDjOeB4-z*`HhFwzy1I+u4sw8nm3XRPZ_}(U((fHEwXmg-kP~JLV+HTae))n-c;C z=8!m4@!?(@*&+-D{*hIbw7hk{=Y8I@lVXli4DJgAV<*Br0YuWC0F`u)8&f4ftMnZW z@qaCR{rBQu7?Q9ETW?2hq{*0@f)g+r!MW5@K0ez2cNw8yf|CxAAVRn2o?_K&&5JO3G?{gN%9c9w~e+%ra)Rhim3 z*zMFG$weg-SzYjJr6YqS4&+Hl7D5cg%V|t)qjf#s8NWgr@gcOeox;eI!a5%lDCT)!_>+l2?XOHD@&ObW|MiSnp@)}u51hRS+K z&{RlTv?i*gpdt-@hI}9x|0E%u&_pdtdW+3`-V7l=hHHBZ>u|uLti)op)3y@HaZbki zt8|_?3MK(X6b6KF5Q}GqEYnwYiYCX%-9Mhj3x~78>5~5!)UNn*tsp&A*Oxw~Pe`j4 zJ=jL8g5H#4Ok1Q~(G*Tpx6y{SWLmr7XAnuNCP0i?K1L2LAH_mWs}irmTn3=%F-Jkf zMj*cDg(E|)8Y|yIokzQh>>A9(C_RomVdP9gE6x*D6b%OS?&(Q6f6S-8 zPzJYU%JbOK9!?014IDU8-@N#yZ_5y)rUAs2S#cr9T zf=VxmNF7L3jS;a~Nm)H6VvqS8sd4l?*Ci`L_M$8>Ck>e?fCvz=Mlq($QDQsDZpy48 zIdE+JlVu7nCVbmTzy4Cw$MbboYS0S<+I%diq6?cIiGB^jsjBvty#VCoVl4WcbC)Y^ za)8MU>bQ&_RqLb!8!V5-{n|LAc_^^LA~&zyDT?vX(O2O4&HtNC+b>wS#POcV}HL7n9hY2<+F*br=80vY4CZfO?QT-uB>du!`m3?vjDU zY6{}8fiV-eB;&sh^%ilFC+a+35`1P93duIE<-eR-w|vuyh^7{@E2jyH5T+eNnN5_yW1k3M}Ol zfZErM3QZ)w8qu<#K^dFXV3gf?%TnRH6L76CTe^vq&s;+Y4A`r0311HqhgkV%eRSuW zg`_x=hjx9H&Q(aqVWZmMF+n#*e;uxBz?@t?uN0@pLu)#xiK12(Oq0)}++*c0acw-F zF+nyVZJ{SkZXWVp8NyJSg;t(Z@|`NBM@Jt?#-pt)^>s2cV+}~C&+Y|vp*S1ny#OR_ zyo@I~_%&f4MyQsag&?8oK?cR4 zzfy$&ODNV9E3uP?)W(JsTNUkGjjtp{&G!&~TEQ37gJ?fXmL_7AtkcrKb|hGTP-{w# z3V4|y9BqST>xmM>k)G^?xaatW8Y{|pS5Z}QdaN4PK1$wGgZ)u~#h}&h5ccbfu`X5= zfBgYRNLgigSN0o#1;5sP$t7Nz8-)c-tl&+AULU~Wusg;cmewUTu@Vw1pdL$ai_d-n z#YOikh~J$)FHBe2H7s)AZRYour>+iN=Dfj#fF3NNRzZ*P-E^DfAbN-l)kiqbDVy0!A9_LqdCp`oFD-7 z3*spPrqPst@HahT?oXfim8X z4LQ$uw(M#`GE*a(;}OxoL}=GOdk&GK2Px7b3N6x~t2P-!Qw9hqHKCq~)P-op)0fDr3A9f%wg%v7=r z)&|3S)e9AIrB4v4^jj|e1zd2Ykkg9y$AGtPi;^OP%pG<+(3vC9g zF2Bf$=)csh*i;hv^{PphQ;9cy(&79OoEIjMZ}7Lu{Ld_0^ReBg?+d`T_*q9oCUu&h z%ec{$e^{{TF&t6wELX6;%UU?;*p`^4qrSCDj5q%?x00uYi{4u&iG16? 
z^$U?8T}#fTqJ|+|Q$~B_nZkmTkP!$?hlKAys*<-*-x9afzh}fSaMvQW< zayou7S;qbU6TJ8|%Y0n7*=;jo&|l#Gb9pCW=+uL9_}MiI3yeH_X{qUl|2g360nI}+ z=SXH5^|R9@AbNh;`o}Ga=SzHZY5!9{CWrJTvji_ARh((6eK` zT}8v!oPwaazl+k~14oxte~1Haj1$esk2}@d>#w0_q)O4kh7VqKRoUxccFB~j4 z92%Yg!}V-B-MO`@VMuo4*Eg7ly<9gaXj z?c_FNp7r~hZ7Eg5u#B4YUEC?M){cz%yU1%fpf4#+X|uPIOVS<92$f0BY>pl)qw%#c&+{EvAZsIFx^0@BhICrmCH{(|=-=waTREqATabPN!lLuHu zsWuI8#?X zY7d1%YId?~mnQUtvpR%d_^w}7{mf(8yPx~OXL$Bq2U@YIT*;)~+IX?!r;ZnV9ky?j zqbzJV^@?xgHY~$R-vZr_o+in3kblkdbJFOVAd{7Jm*}@h{87@egpA!yE55fh(0PR! z7YBVB_QG1_w%z$kh0*Otg{?NoCXIJSg4bXKLakH8>#nS09_`Y38)vf3Zf%*f!w?bH zSzknzzQJwbCOyl?c!kNmufFW-g^6Z()Llw5bYAf(9)(IEIto2h$c@vf{dn{yZM3H9 zxU+#F`}!-Y%s18~zDVBdbhKC9Eb-T=!;SF^`kfP=ld=zYf;#g}hUv9c(Q3{3;BkEJ zVjGsli6yfoM2@Vl0=UcR^=CeIE&k$BSTd2+6<@UG_)`OuEs9q>d})Xu433h^n<^Do zCYyRRa@A{JaSGtyzv38g$awIG_^6K`gTeVTiy{=U7ROs$x&}=SAk20B= z#n>Yk4Bv}xB<+rypsI^PpH%RGwo#|-w90>Fc5MX@$>6q8!ryl4AK;ot9lA zP}op4VdpkVoO%U)SRL_kiEbY6W_020^*<&aI@OJjo@BkUlc_3Ob38C=%s&a;(2v=n zv&>US9<(@8Kp7`hpo`=tfXz$fr$!@%4s?va;cM>iH&o6lkLRsQy=oKWJ?!_cKGpv+ z`1<5{xw2(qLYWTt#VaoE8-Cv;fk5lEH0NFh0c2&UfmMqN9OlP{7be9Ulzj(y)E~pO z_#A*0gqj?33Ub=xt~L1g-G-RT3Q8*N3QBK}9abm2T$o*%N!1{m)!vG`!^+MX-7O4q zj?L92Q&Z4Z?WVl+B84jA>-dqR@%F@JW)pM0akcHtMHY4juR{rbs#~8MRwJ~uH2E>w zq@enF+#Dbly)VM5{WdF|XY#RCH&3F zS-fL|lX>pk^pz@>T(%8AOrv+r*%LbSXl&!D=9i@cQeB4B3D3))o);zwr@|d0dar(k zI#cl?1ywbUk*C9Fgl^7@jQ9^fKx0!iPIv}%KjfGXK6lIb1g!eGvbbB8GxaKsHj;>H-y3C&0Z`NtB$?7r77gw90_9V`9}+2=*5&l6M0f{_UVUYD8wH}>8Fs;zI|8clF_ zcY?c1kT$plcP$RZ2`+7MCqM`gw75fYui#FLJH@R9T4{SY<`uu2y{mEM)>TQdZuZr!mnXmbC&J;f6%wKFM`VzBC z54xL(%e*Z=NIT*t*TYhh(Hj9^i^H=?3h|=|94OZKr$gFL7g^d^JeupopC4_tI|x@g zl{aMmSIKC(JswL>7c$=-PU9@|-B5DpO7%kKMT_B6sT4M}>e-Mzd12N)$i% zoYJmGR#u%ztX+=z`K7^1lVR=0pT?IwRQvqfmqWqL{8M`7{vn7iW7n>x2$|r;64f-y zTm3<16-}jSm$0}SYX4<_n`f1kjLq({RZCiGWbl3iKGhJuY^yhQU#JM{mmgK^2L%PU zTg&rX-I=Sy;g%92JwLumm`f|j^10-=yqXp4|ClVPp9gKYI^aG^if9nT@0FXvdP zaAHD-Ly+Wichq)UE%mD#n(+2prOH#LTe(ye^>tV7Mj*-j;n9}Z;pC3=vBw{PQ_kdK 
z<)YsofV8i5?#UjXm7LdcW5;bpX!-aPGacpn_4%?78%t)o>)mwv%Jcapw%DBGY`GKu zlSy}Jivd)XXQNu&O{yQ^#xgd;0pzfH83386RqGSsqTDu*+3JG%&hLo%x-XM&?IL#U z(omv1UE>+dA3WAqK|>YHSG2lzsmg(YRyNf-2a3=AyfjrEwsrEtQWQsQ@KznqP$oeRul>m;M7d(f#rC51`Zd_xZ6w zL8?(IGcUrgwSJj5`F92Nb2l3w1vUj=^2P7pKbaiZ+p!h9aCn=oXkx{DW7XZ( z{ZgK9*{gCfxlqwHdGd$+4T(OgN}1{;@#sU^4y|xxxkOinMQiDnnPF(GnB34;L*-Qt zL@7t*K13|ov+dJN*DUqCZl~{Zh3Ydwy%l)qQEU&o8{|LzRFH%H_%XG8?O>ex-u8H9 zs@c2K?!4calyL&NY{gP?rKL0k&2K$sUz^HDp{(c+0`sWiZkITz3A|+%XuG2Xo_Phm zKG!PIjg6S?e;X8G7WA>JW|3in?25W2 zOccYDPN#D8x+x-IYCIboW|tQr<^94cokt{Ke?7!@HFUeS{zFM;Q(?Y#o+Zs3GVi6S zcnRFwg_8GBnj>fG$hi54hokyBjLtI7rJ|TK<}-{;Xvu4K8q-I&a*4E ztNDQ)YX(=7>xOp?-+7Qh!Te9>E%Yy!y^6>cl=XxLkHdR;KX`Pt8B687uqnCDk+PPT z7gFzKqx!g<`o%!Pv(VLx+Z>shS2(?p=@W*pL6OA0KKY2dU(K>!NsDSq*>4yBjCd!D z@)7OASv$e4p0$rj@4FM#2z0i^=*jGdmm6I~H04!%Ha@u~<2K$KFE^W3v|lz@cJJuc z>=cu`4)!u8Umu6_`%-41GPEV=+*0arg61u=s~b%$O=xYXAk)4nL346RWjfs(X5Q}; z4+dR4q|57DCl;3FxC3vBNJNe=u2Obi2_1K|cUik^B)qV;h&YHwTiEL(obTD!H% zaKdcyuK}$8suDrivpb@zTj(D^d3b|}k@jD})KfX>h}>uBpx)M0V~WM|(B;Nh884P{ zbHVqZiV*KPTK;5yL-q+@vZy3tZEtE1y<&%-4aWpT0b{+!67obMB-w*udJcvXskd;V z-QCNL*W6_bI1^!JYlfHJ%`Vp~Ca_*Nnx2d5Fe(ny?971~U9v-pE$o6U~b=YfePVOiq$7dra^1H3#-S_j;=S;CQ2#_Pf@mUBDZL$8bpk`^4GZ ze68Y<1u;0uoOe<_2Q_V0eOB}9#bfYSz4jmv zf;X-VF;N}_z*=6M($fEYQR>`z`$}im(&5%bbcw?AsZ+&+Uu~jgaeZ^5y}{E}4~9TG z6_p5>A$uVn)0`DB>mkxOib-S0P-x0)&EYZX=`f9DILNTN!ms4@GoE2-%8@ZPcRk?Hi)v-C{?=m|?bI8I2)eeW+GuZ2aae^b={OY8LyCx!%DCfNIx zp=b4I0jp(IdcPhMB;S3UW1HZ3U~A^m`m~Cy{$|w(vXlV#CkRs8mV*N|U+e3DO731C zf+J>=?#>+5hMbw?ybN5oulexCc=G|2pr{*1PN+6OS}f`fBUbmi?Srj{p_bCYPa+fN zLnGf0HuZ(P<=2y3ygw;){<4^hx+(IcO-v0wv=d>bWo4)s>>)UIjN)|Zy11V8Z{AL6 z&OvJuy_LTVdj0@fr`%awQDE)oB@l0OK;9H5r>*mw>S4JDrqxYq!DpMDXE$-JTL z&sWCu*z#cUk7<@I9^Xtbdku%GLC)_u2|dY(J8=5EH->X4=+#~0NIWx+d(4>ON?PCv zG1|%rb0aI5;gVBm8wZ^mC}>$uGlz2uBY@t3&_Wr3KN7a>Ws%kzS@k%6cj;)# zKm?avYcrUgv-xup5;u^T$TZprWh_fq#h&KB2+_6Q`jGlIEIZHM&_H~4-6?bhj1Zr+ zZ4F)TA8Er{wWv!Zi-F`PDN{~TdQkeJ!@=c#r82fdQLAJM>x0)$d%VNiyk~Dk=_enI 
z7x+-~_a`}3Ba`#kQ!p}!+9u0OSyl|*=$*b}I4D$DIL;7ZnmzRLbC+T29wnv8?lv;2 z=Ma(WKunf8JG@iE&gM^MHsa2wC6kCx3^suFnTRqH=I0+Y+f~KSFsBIn=iAiFVTxuN z45?^H=dH{ZwqFo0tlcut=s7s7+er5LeFL+ZhauHQUq;SnTUMqi zm=v6GJHLab(5#|eAfn;+7*t~bVeNoTLLC_uJktc_{H<9M=>&_=5SJLq36Ovvkj)W@ zb<2w+V$I%ps$=&Fs{_YPFl;gD#=YT1ig>ZMf<;$uA_|!tIErlaT;w&oa@rDvfF|qu z$N@b}1YDw-1dd73N<$GkPVM)Cb3B70RgY<;I7 z+|DnaqMSH8cob3D4jYNc7Yqa0uv=FevX(g*)6;N0`X8>@^ImfZkKP2?a_)!#S&iTV zmmNU@vWYBZ6tNcW`VXD!vvvsvUAQ5sf~Bj$Gi5u-L&5bDZ5i0P0^US2)41+}H}$(Z zRbx!DLvWHK69M)K&G8%~a?coiu4fAwJmA;j$f=QAwm31=9>E{{%)-_J%-)K|8j{!M zYE1zd0!t;QAFR+7+}kWg#*wJNoif)Nn4(glgOd_DksTcc{;f$Z_JJYIvZK`ekICYx zWh(l`2)vP*Q27gRTHKxB=WjoqI23Zdi|S$zv-HbLI-g|p2vYWT(fVD&=mNzejU2a%W7EU&fPK=LZ4%TMsD<_uE zq*|3sGyxAx?36jmOTFB0WN%1hLzH4{NmUd92IrfIh0@C$dXh2<{uVM%wG=>77{%Jku1z`k?D=}k4X4TjNT#WVAx|4u6-fAM6vkHPf`R@Peif0g*@tC`JL4>4rbb$?)L~>{%{39eBqbfDH zcJ!oF2XsO@jsM^S@ANLHW#Uxh%6Y4E#08li#AyZ5ap=ie;3q1jrLc# z!#er<(^rC3WwDY2gcols((4XQWir$M^)knj$|cVfkD(T)9#|jcQTm@Q;DwO$)t4=+ z*{*yFViprF?YUFM+1vhPz{E`S44Jn@dc8y;NB0N7C(^@uRQfNUy<=5PApC7QI{BVY z$AOi4M15yelZUXf3LOHk;ssEZZ2?Hov!U(n&ja*j8~OfT8!OAxV0!&E^YwZ9AHY{b z9BUCY_X^kk0(cn@On%e4`XWx>d~5yk$&24qIJR%3asA|@AtDeB8P?DABHs*ta~L3s zXNdT0Hc$1=RncgcYTLwCmhR(}!fVkTTR>}SrT)O}ruS|cPT#RU$IgkR$(Sv34p+uB z3lEJ3ovpg^r?ew6%T>zsEn)aCGFxhB3!0kY$f(}LPc>vrMeLm$>^v=zkWG;vtP^)+ z>7%c(V-^0un%XiH>zxw|5yv7N$~g_19e%`T5SaZ2JX2mt5lsYv5?4&Y_4Q%nAlhly z1C*NFpje}oZkY532FoxeFI%#VnNM>jnB+sgq_v6cNKC47{*nWT*H#4PSnJ~=_)}&u zg_esC(XsQ~k@F*v3sKoJ^KgidnOcvB!Hza3ySII36acWWcH%l=F>_EzPc*ZbY~v{p z)S())EFN)e;hj*F962)#pA5_;&qB(O`LiM37-jn+xiR?75s@=GDAD$wW7{iAeExRCA_ zi(r0nZP6|PZA7hZ{b2R>6sGaO$EwWN#~r?+CC8Z*e8X-j$6hYamE}SZsX=*`%Bl#h zk%Fc|8F$1cQmJ3rgPju1Qv8Z-AiS0S@hjJqO*h*Vi?gVk;E1~hIC0bgIgZi-zmGZ2 zN7!pL)BBGD7PviPu&k26x1(LdsRB>H8a&M6^gta{%ct8k{rR%%0V z^2>D2P7g=4MjS7HHcW;iT5ROIHWxc&?HiBF=x0lmSF#(*OET`MDYe461j$$eJtDo_ z$z`oNIjwyhZfbC@!>p?<$jOEiw+N&x+Vw`>OR(JGp6CHHCyPIdfS(_FXfxM^5X!h@ zN$;7eCXg%6DdVl(Uf$Ib^W@$-9OFvb=)OFDA^v$F`8?B~++xqt@m$(bgDjg5%6YC< 
zu;BTb?cq`btplerTkZ=5{TRhk$g!&Mmk znUu3WZ>Nm;a`%nG>Q5GGk)@fO4DJLzd~0EGOva62OLM#n59Dcs;Ewl9B)r-5H_iS& zf`?Ts&7bF~OtiSJW*rf(KXsGn7*3VsfA%CaQcTuH!>{wS;K8z?LurEpHGO` z#~zk(+dF5mF=&Ojz%KBssIZ~CBhX2ZQ8^m{Jw%FzXYDCEwRle9+KJkekvX+@+&?V^ zlUfT734#laRw4_RxBEc#T{wHS=?iL7Gfwo%XAAj|j}L{wAv3%z_9t7T#}7R5b97uo ze@kU_IAM}>sF?9O&2gLeP!FG+^SxGRDGFkpWXyXEseqXHKX^+jR@q+p7Fm9E2Imn- z=j3vC^daVwmlq{WUuK3V&*Uh|3-cwvII;k=9onR2%#`)2Bptn&6pFuI@1@C&VK?&; zM|jgw$6}||3y_Uy0StozZG5Uvx0g1`}04OXam0IRunm5W$idnX*z|}Cm*s+N+frMW76UvboIvbhM>!hvp zp$sa+n^9=P4jcqgg{vB|iYUi=vjywZ3?8+sA_C zGGPRo4+UtaHITQUI4?v49&oNs!%zQadv@>`nc%4G^iF&V7>rOXWpu1q5^lcv?4{=!|=0C#6^iM!J;=wtoNGkL!$RXRf0DAxuqCVxmcu} z<2(rYmA74tUDtq|1m!VKK*E;p&hb7r+ZX72&;R{qI{ydo5KY{9{ugn_=r4SPNadKA zQyd|m={~D6#SP=$3q*+KRjenJ^37nd`;ooWGE2ThC4A_4ui*Rb3yAJ4$)`X42JQco zp+pooB4aytfADO)G(pahvH2kK^Xr3mD$HWUtNyP--+w-3hyU|HvT>+N(3WqB%+GK< zr)u`TOPRNfzZJ2>7Rk0l7*Ei5R+gCmuKNGk1`!8=RaX&{MQrt+-F8YqwLrLW=B~xD zGdW)h2-As5Fmu!LMiED? zwtU!B!b<$V+?|<=ex*_2jP!2Ge#3DvUfV1B291!tEV0N$)ASkl=;zfp1m@rnGuxIv8cN5eT|X3rRv(Y znUsWu0I-OBT2V1|RrQjB5Co^)^-6n+uuE`%*$P}N@TJw$0+J3p+c(x^c?PJT}((RZ>5d*5$Bsbe9dr35WVoHDp&$9z0v zuxa}8gJy|2L*eS_Z#lU71)A2voWrr*&|YZP$6qyu)?_j@+1RKrl(iSvWGX96#=~to zT$c*J1c%`BIOZ%;!?&lN8Y=IrX*wn)%obuPSFmV18=Lc6eQhG@y6-vJx~wZ`?h0x` zJR@G{eAL_|Zn%h~@AA6{9EL4(Gqa1&u<6`YjtGfGDUFD7{ePhIA*@#^jqRu|M2j1guYp*?zt0P_5N zF3yde;y=b1cPG6<((Aj5q+IxhQ%(EU1LV3&IyNMl|K36-gDL;o(NkZl%`=o|_rHBC zY?IEPSq=Esol8A`x*8Df{>gsJPCg=cvHCK%)Sj9R7SpaPgWq za7yI^yJ}=C!}hSnW1Ei7Ce<%1Jgc>Cdq2jA=(myW zlb3`)a}Xp{ZRA7<>^T+ol+v$#y+p>`3*zMQ`qNqs?ezr&bb)8BGtqA_?$)<1O=+~{ z`}7Bp<0^lPvOXDClnK8QZgT1@Htl7u&u%+;*Vs4N|Bq@zW!pyoF+U)=!hdffBdk`~ zk9EbtXNXL$)W7eCjQ;n|o&P`9X7mR0jOtSq(5Fxyy#kL=xr-^w{f8hGS}*b62vP|( z7G)vfiLM=K1F`Nsx8Fk(uc$$IHH~kS&kdFMl#fLLHW3vbZMvswj5?N7<6-ce1dmB>wEjqV$IqUuE*?fiF+W>1UufT`Gm4k3r3}`bT{cvcQDvvbbQeYa z)&!6&dR;EM%-~zSEiH+oX%mTYNoq|rHQ_43jJwk{Rr$Uz{KU#w{Mj5P9$v^<+ek&a z4IBuxXqX!>7g|wjkCLxU;cGlC4WyKsZJ9#8;pDv##k2ad9yw$VE_h7C6)JWdzV%`% 
zk;)ZRreqHxt_bIQZ%aFHG>c<9DqJ=LVFX~L%jY;r=wMNbaR~hOY{>gCt5Ae05vdrQ1+GsFa~)#0_4;J^sP76T=G4d7m5Oe6Vph^%=32;ni^4uj>>ttpl`$eQ-1)qxWDWFN}LQ&?W%808_Gm4R+lcf>6FBi zsN&c>z;oX5>;%m7tbYJWA4_>Fp7#iRa&?!C(ml#)d8m^{HGKbOOB?(0*`3fq{RLkv zPtcR%TK~5bb_a*nTAdtS)s)=+6sT%q9`-3|F%rWK49snU*0h;rhZE^G1;$SH~i{(Sq1kdx>trlc-ZVLuWe270xU@EnPmtTeEPd?ygDbt zJDr=<%1+lS(c~fl>F`9B0~<<`gS>$4>xFMZ*)$Pc9^Gz*54KP1i@H;(J*`c3>R0Tw z?7{3UZWFN09Pld+$<18SgWkT7#z&l$w~+FWJx`gcDN~Q8+I&pkm*Wtxzv+uoNNMW! zywi0Y4}W#hF!l6nhiasr%haLx?ty&U`}(_IHzCvMSAktu?H*Ez80H%r%iG2-2hF1g zJB`D?)VC*k^?u+T=a=96Cg_aq2{;!uq1{ksclamO)@S4dvny%=j`&Gib@`K5TdhmC zt1PazvwLsp>X(CCUs5s%-aa>b{dlFDRopn~)2o}OZ*|-(x;L6?Oaq%zUM^F-+uL4u zviW@>rKFv3G#$#_bEv03rBMGUPxdyV@VVm1`iQ0+M<;U=TCg!=>Roi*jdLbn3a%!J z5uG;oW><@Gkcp}JxSLwU1+S0W7unw~Obgy2&0z22b(qhOV+LQa%#+Q&NiSscRQ(^o zwg1KI?=L)u*Sn_MbU``KGKr`s%hPu zQ@9KWFv?T|88?%N6^Pp$`(HA)jp`PY1eeV`GxArc;R~xFIeUG%_Q{v-Ln8ODmkRS9 zUIWsTw&~pseVyJRmUoZNA5}?_7f3lL7~TIiP0`m5&>ffcN0#2FA@pb+cj(nDFbwM- zKz~%GDJM?^SGRsekybuweOyv!+rK(Z=i`u2KEjRB0F z%guiPv)xbo^?ZJ4gtRY8e2g8E_1rsqG$z<|x_-HBu5jVaGf%$Y@gQjLhCWgB9xDyD zI4{U?Tw{uwVgqP)z}#X3P2eFIh{(0l-<n@>QGRT(Gm9*31&K+Hujypy`aL z+j_#!6uD=L7#mW1cBL64g-<1uWDSY;wE%F$85zTh+qSsbJ(ty&jf%+T6%m)o^3+9QdP1(=s*e zty&g#UL6y#W*e>jMdP21V}<0GXE!U}+pW~^f|uPs(EEWlk(*C-U9feqn?V>HEqiO; z(DT)bhR_dlOLaMNfg|oddKD)uT&baZ`Z!kui`;U)`9->Xo@PI!t3IUj&jy$1hv%#> zPL#_J*LR;F{;M>eiA}H-`eF3%Kjx$VpTC;fRi^(fU`52__v{3ncOQsedFGpD=`bvI zBvV^w-(zTAc27+njOaX7G89rTvIh>Z;j50m4iu&(`Q^6#`lH?J-5GQ`i^|t$lqyiC z?7i%prxLuLxU9KPr;pd|=hqr7D>n4RLz7bKQB%TwiI&(&CU?pY^qQ3X7*|1EhGx~$ z$hYkA%d<5$vT&m>C6CgM#h>IKv_3wTxLTJxjqg#`{Eo?o4}Om)R_5sv06XM z4LtX5L%%h2Rs5Rv7LZfPIGm2;5nI3+)-e9+N_RuPp*tm&S0^X*>tL#fU9Cn{KmNk+qQh2zYtC&AVrn*Jh z3?AgD%m%JuQ?mNCV0Y0aY_J=eHF@wfajqUGw*@sl=kU?7drY**8S(sJ0izTh$`BE9 zdQPDJuKH-nCY;}0Gz3>GVc4Xq%b{7#cYoRv+Kg0k@oCRn7#JO?DR}t==@2>KODVy* zL}beCkX`K6_ts!RhEjyZDJCBHyM^QtwN*+Tcc;wKK}S-patH8r5}~81T}j&r-Kxvix<{ z_)8m}>8iY^y6V!$55;?vGo5Gi%tQ*-K6pjTE}O_{Q^!ca3tBXfp_<;HSgbe*&KU1z 
zk91>L$tZ;lT=Dt2HN|GTZ_ZO!njm&etPh1g1ycuMlpN0E{zzDZ(Bnb9=F2l6G1-0F zmU6tc7IZDd5nx*=y>y`a@q?A5RKx0hYDbOPOZ`W+SJ&F~CJ2KOadm@uF~q|z-lRvG zo3E%}fD^g?A&2oFV&JXvQU;q6?%F!snu;#b$bFs*6xC=uReO-)l6^z+l-PYBSlALq zBPY~W55Kl_Q0(z%Py9Xv|MVS_e9@CA5)z_qmQu6&&cD6Bd+y-Vtk{gjZO?pwDxlT! zQawQ?tt{mT?o1F@Y1OpJ9i!c<2HB|%5ylh9oK@X`KjFv2{*aY?p#jPh$oee@1=Gx2 z_gLvrBn`!`UhQD&Z!*~I`7GCOOfD3$GNovhT9dq@n&0Op``x@se(6<>$~ap8`fMO~ z{>6RQYnI_YTvE=xeTLPdg*_reJrLi+0oUj!aZ}WsWa-c5VDG=8{pC&qC zDP>0#G7F+6a(YV2?nr#j5rTkxdw@klj`Ery8=E|#eT%tfS7h;V{>F35uQ(*MeTwsP zAMdOmeqM*yDn2^$)rCtj9k4--@`D%|D)*HE015@M(3jC|-y1P~u|sT(ThIo=*~hl` zPS#^;sb9RU-F~=?x2%UJH2Z6=3@QE#X&i`XKTcn9UmvbC@cVhJn?k@;B{$AaRF;yj z3W2gIqlSPxXy0Z7I+1HS+0I389?2o_)KDg9dl8uh9(B4zEEuD(f}T$b^;fh=zoX|t z!u6EsvL~cD8{+hi@P_#zeu2iKROdPD%#KD$QTS1U7?L*CD_vH~fJX2bFl}XzLNNs867!2kOOjHI# z?NRXKUNsGG%>uX;@`#!e)Yt6_2KX3A$6BaLuX%5k67NWbyxOxVKS?l|klkjg*{;{| zfPPo?vVZlCxdZq7a~123V>cf;_Zf9gxpLBHQb^;nAlD$4-+jL{;;WxZeO{C0L z8X@q8&@jh#MV(|!>~hVkjhGxp7KV_J%=TR{Rkl+3L!25vw+AtR_NFxXe;F#JXfsub zPsO-d7(=ljPxblf*s=57rKl9 z?HwSk*lExSovc? 
zC0!$GBfO|&5`cZL`mQu9ApVRKsLR2t235{CiBgin0zeI2w{-*P8dH z=~f!I`WgHEVmP7a=~rlM62rUVNvH5^CX!x3!gj&k@65-DOkDF+7?G^L%O%uEgI={m zXfEQV$f+o(aw+MZa z{uw)GLYIF4!MfkFg9R+4PG5dGzufrYZODo8{$E~7JvU-9pDW4KNY>)42rx?a36Es3 z7NMVA9z7BhnB)(|s!LA6tR$i#BfJx4#Hm*LRv3Jm3`O8qaNOm9f zYLPh!(bDYgQ;={JH2>=UC2>T~o~!phv@{C!a_7_H<1Cyf=lHax|K3f2#Bqz9Gk0Ylw85}MJD)myiHYP?Q=fwC`Y7Hgh)A2qeDRxc(U^0+|r57WX+cX6&F#XEupt~8Lg(A9p(9k zTAZ2>lsWkbp8=Hzjg+h&|Mt&)_7||?WH4O;fAoDq{~x;HLaPjjM1s@GJqga%pv>(n zz+D{ffF!oy&z=$L0&dktMr_x`u1ouw!qkN|K^hn*WqgzXYlzlHyut~WZ|};3Gv}`H zVG*|m!yq-P&1#b@Ub6U0KxoAN1Gx!*xOF;^VNIjb`SqU4ckWWjKLB@4OY@>vOx+Cb z6+efO*ShVEx`>A>y>@IQB6QWXydKY3DsmnCNk8WMpdYyC|L6OOlkVp*wF*qzks(V?0nTKZsusGb%%CFD^*Q&OJhC?xHI_Fl zWLjFREBjXnC=^ZBN((jIh_yj&?m&XxDQ|-0Hy?Rii>5+& ziQ>CdX1Xk~oM}v4Ei6PQtev?-nwe!t4w!T(#A1BEIq|x2ZFv~VUoIOcu?qpii)O@Y zeS(h#?OVISbr!H2=E!MU$I1u|e&KUhTWln*FVe@FS~5jWuc?a!aZcXDELv!h9Dpk` z)h^X?7*^jc(dyO`i7bF?t=L_(q(RiNBFjm8Vla(7so*2k?xvJ50fKmmbT+RYOKq4A z=drW%2?t^{RL@*$atOa8u+se~Y^!}iV658Eaza)DRI9=(Fd(%hQf5z4nkm@B$ogDN zSVDm#2W_weR7}u!Vp=>JS9fT3K^V62_rA5aO0D5j2b(rzr{IY442TVig5wB`F932! zvuzT_ML*wW8u^ktNM}fdh40Jc!MswsMO}56G$puCXc5#C4eB0eU?ArPm=9h!?2BGU zhOqf}=Om6K=lmj=DG2E1noFLgXR)^@^PAb1pi|B9loIAA51i;IA-&8K0NxFX-;>Hq?z)q8$Rqcv#a0X2Uof;o=@@57Io<%0c`C-w)Gmgrr1Pn znEhJz0w!O*F-~R$vjvZC^$`btQ04>5?H3OFh+!-IiMGt%rh*~e`tsQ&Ik`>M#`RsF z87>)&-BGcTH&i&8({l&ycT)FtT+`}=VnkU3k2pT#cxyYwQ0j@{sfiBPa`K|^ww;+O zF%-WxZIksd4}VM~hwmyBr60{0SJHQl-#m34bu!d2h_h4|qn}yytj>7@#itC$p@>iQ zd)Z8d^XmBWi6ZfABOJKm^+F^p{Ngv>xXj!}ta^j;*h3d&Ge6^UqYQnS>64PyeR`6PUV zxUm9a`9sOk@U8fz%CTPYqyRhV3>rf7AAlXEYdm!WwMRRvQS!v}z2$ z0+)_$C?Y?3@>>?^YK`3g66`B#R!i2%Pp-u8OQXlGQt~(XJnjSdS?dP@vIH*fkCbAQ z@z=@4jrY3c?XT%Y(9SKo{%o7Jy8Y|YFiL}8bUrnFnC?UeF1@Kn8FIpGXZnPzQ_~nM+x)Mncdg$xj{XSN@x@b1 zGL06s4(9XKsfD}=;v;dM&V3q!a=+04jcp%CK0jQWnVbm}vu%qF$(hyAcgF8lYken! 
z6P2>1Z0~yJ?`m^Ir3T6*l+2Sh_sxRrYu#pL`A+gEOetVQ9@EZy@E`TIYEbC04+UDM z`h&zdp+&mg@zg}`BhY{dq^WE!ndj<|BfxI44aEW`jGckPUUNb}ds9zAhq(*k$vI*x zuJ++U^wM5zF>3E_cUvjNpj0@=45bayVmhHWN5&bW5X^X2X?&c zJp4Mm3!#brOev#Fr`mh{ezE|jN;2V6G>;kY44RtneOTQavxvI6T#Y@+w&CW$1Ue%5 z-@Sb#WgnbcE!3ST`&k`qCf=81&lN|2g9)q#>(A}j>q$x`xV**jq$w}#V4IKi5#x40GM3u4Y4o@H(Ig<44`;KWJh2OtUd)Z1)AJZOWLhzQE*o>) z#T_M{M&w^Wx7nT5nDlwyM}{lgRI(^Fn`zUO=8V_48pA*Uj4R(;>WZqMwOP<3#ntBF z4~q*>*WpYXnaxHBKiQ-^!|V$p`Gq?a2NPPs$_6gY-#(h00H_i1qNwylMe zgB{xdL4<{u4X!}xf<|JF8<>!)A&0i?GzERZD~>TD}+xbW#3Ush6!K>wbdkH~xXD%zU`LX-9$ z)o$r3s}i7JMP^fM&)b=Sa{Re=aHqEb51p5}q_o|COG0>$k+4f4OJ-z~ zfU)x)#WZ^Kn>caI5#hZwv0dsOHMBJkpY;@eXso26&< zXT2Ur;`QrO>X_B#ZU8Yo6)IJ+QPn147I zVFf(9p=bR&*h3XDKp-N?Drj?UQE@lHx^i8Dd(p+7&?P47X+J+D-VYKkGBHV8 zl2@jeAJ(wWKU&K1AHrsFu>y^!L=* z;tApjNqL zBdVIL2e9c~OFFXAsTM+l@IvU>x$ke0#q+0J4r@uFFpd`(F@8;9e#M#H#3)_w(;W;V zYTL~M6f8YhWLg3AroSiOki?yzS2HfJPbODmME%YE18Wkc%|*@&e@D)n`v7q^O*QfjFB)D{6_gc~2(xFy zBZHP@11a%5ZdX`&S1}dUn@n*54bDj1$Ct&?6NWoA4#c=UVM^>v_+lCh7i$S;)S}Cme9YZ5Rnb+ zX@!#0T49Fu1DCK?JJ})F^R*5Hcb}~T#Q0c35piS{` z={3$g5lbnbx++w%2ybjxHPi1bWkARK9`ef27>3^i00MWomB`6@q1fq*W-l`3&6^OR z>(;)E&C0d;9Z7V;#(=0b2u{>5dDnA(a{?LYhs*?MDqR#02FpeY4!T#?gsHIAeSu>q zm7jWLnp9(B;)dPr+&=thnD9FFkTqF5cG63BWU zz4pL7iE$N{ZFSF}0T~lEt%u~X>nX$gA35mRlaeKcj77;h&mNZul?!)dsZPIw*0h=m z=MXSN1=ICo$?t8KKIk#)BHKf>=3tLV(hSw&;qh(VzG1_BdI1~}o8NcCr4bsroGXB* z@ll)?vjd^yjA6=30E(nL`6X;8wl}R?1 zJ{eH;KD8a1Jz9fXvz>h&$1@xrc*X=uMhJm%d7IZ3a3OWk@ZH)zOL3HbCE%D@uF)Rj zlqf&Gah|M|8Y-9{Ozi~B5eeuC0N}^IQry5SisyIce~iRa22jQES^zNZ0e}!8$P>G$ zf883(em?>KD%3wMDa%!D`YQ`5NjjzHSq?LpmME&N4r#2Ko@Pv{(ax+a0IGY@wTZJS zU%Y`dY#W`2hkv~r6#b%&0%@p&KdipT8lLCF`0wKX?(LxcgCYeMCeCegNu{nvQ9Km| zk&6-gID=sitrUJk#%od5!jD$ao}tnUHKN+jO6T4pCj}HTpl~ z{02TBF&Wf4WoEf*Hvfk6j9ioQot)x(n-`ymeuFZ3N&_Qa^*-9A|Eb2N4>`Si0(<>N z;#&%&BnI41tHQx)muG`jrF^{Ax#=epYS8tTnk}C*{GUgwej&*SSrzz+`7o&x|37+$ z==sa9*yZ--gv=6pR8$ZWG1vRGP`EIZA zP*`T{1!>zP68+dWRFCW%1o z$$-SAii34*)t!F;a?{V23jYAQ#OvGt&q_=?duC#(@xUM9nAdv{VJa+72a@c6x*D%E 
zuZc)V|E#fJ%)4omy)*^q434NxsDUjjq783~CyjU59E-G_=m09OtxBZ4QZXzx&4TL< zE36d>FLD*%2{I7~+fEL*i|UqG!)DmxyiG`)LL(Z+O8j8YF_Cyglj?iBN5x5XT#UBwUBm*D!c3kUoP7%GdEC{bEquVsH{|I zm2dq_9twqj{U7YTbx>T{kyzzyd%@^ zx_f%AC(RIMQ@^)-BAcbk(7W!%AKsD`h`#T<`>F-&g7-&sYSLSJXsquDXN*fg^#33?5AtkD;EXr5o!X)l$N__0#%QRYemqdg4d9{xg2Cgre*R6MTFudwm)fq1cN>^LPv%KEKq=zYl(3Ihp| zl6fUk-j1l1>EGFj{DJ3mzdXAo)1!7TOifG&z5ZBB4;OmbXt~NAz=n7eTkiQgZLP(n z9km_XI`&&Uhq}8#<@0NE(069Tv`P7oe~xY}baJP7nNo`B@`u{SZ@F(4iVbh?$AuW( zczSf#sah>a24eB}hXji^zvIM@s!oI^q-}!Rb%HvJd@i_?%hN!5x{oDe{3Gj>*WO!^ zQ};s-b)h>;xtA@_rn7bu@z#Fb+}!gCwu|hczks9c^qQL_>QX~S+woi6XDFdmKy$Ee zeRZuB%ibQPX228eX#l|Dn3hhtr z=t^Rzb%@v8#Ze1-*%cfdx~d3pD#rs$YTGAocvv`!7|dS&-#x| z;mzJhT|`fE^8JfOwa$(QW4|4V5vB>dThB#vMve}vmMgk&D5R|{fBj3OY=&O4=tDSc zj(YY9jw>{ZX%SVy32z%RC!6ZvCeu(==4Pe5hR>&(@tly>t5x~4r~ym&4TM^4o-vg7 zgR1y0v})aWI!ekI)0I2l@qQiJN<7OM+M3KDye?qBUUuRz&0yzgylTk3qI)6|LjBUC za=OeVnOP56vZsLzPZTn1<_uR$z9{d~Ax|G`b>Q#LuKi6Z-lg0}O^voIa`d32^kG%p z(jon@DlZowvpMmcj`$pX7r7Q&>Cce8!*r4%=N9z>mdOGFVW-;G3`%t`yN?&@`QI-~ ztO=HPTEm|s2nq!BZmA>|`iBKCR=$ZmvL6eyC+GVws)tku?K~H$G*uh>+(0nfp%pW! z0`~!AsQ@O$ZZECpZMMdJ>TJQoWX~zvMV-R7-P}@tpl##>&yRxq{{pD$xLsq|GWPw1E3viE^5Ra zbJRsxLv4;hOFWo*wdz^){_fQwoiG_anGk_v?7mOj4lV)=zDsn3xmm%*MzrS`?Qg!h zY#jL;5ZOpg*17rPSi}UW5Z(~9l2=Q(3pmjOCT^mU#_oXp_cC7W{$_sxwZk6IEw!Dy zgQx1wCEH(t77%JP2r7>EB2;|-$HuB-*KS(;f>rgsyIBAon`U1=MUilZMOWmJ!h@DA{Tt*)2tVsOnR$dA|M$2Ft&!qNRPp0QlL$03_j^*HY6dF zIzX|bhk{ip6y;_T%ZjfIcZ|IpZEkNg<&bwFUFtpfu*-1x!PQZFcV%`K+$~If>xKkITzOGP}Vk2&&Ef5xeqEHY1DEFuo`$LjYL z@Mm@6uZq7ER$X&G})cP~HZJCjkyHcDeMwuy(fA>agr0()uBi9fI)1c&~OTpR?%{3j~g`oSOobDM(t2!*wi>5xSA z4PVQJr4_uXRmL=w3}IgAMA;$GS>IcPcDRExt7e!rn>v^!$>Es9^omVEKzy}v&~soE zvSm|@J2XHZP+V)bn!N|7e=e$ol6*_^a61lD!sZ_bW~)+R*}QGF?F%LrwAoqy!TT)U z8r1ogl_@C_=LXgjo-zBlfa!j{RoK_3F&(ZQ<{kw#6kB&(lTx#9|0JhbRaF_?hsL6? 
zQfN2k!`$U}FlqhWQ`i~i`Muzk1=9GzL?@-cSMu9{ZaJ=@lz_nTOD5p9G4uCWHPc{X zDfBVr%wvYPesl!J{1kS{-wUSQTp)64@Kp;$+J6yqjnYh0pyU~SuZqZ_j$(01cVOBh zf#wU>6A;awFF%WiJ5|{)=ehybFjm%7r5Lc5>4x#L*x>fUGvt*<6v`!<@jDA_k=vz` zXP$_4p%y}U>kUb(>SEnx^6Q~g6Xz85+HB9?RfN_WnFF|cV#eI?2JKz6=u^P;IwPE( zW;u_;u@X~PDai)k5QS)f0=)oRPBs1wcTXX8FBVH;-LiJB-e#C2Q ztpAi^RW?$?TTFRE2#i%FSSF}gpaZYlFetwOa3Ne7e(DAozefEpqfI0*8qHm^*XGli z%K7GhL1-MY`3ap+X9S+D>-m`II{8Mc3Nlj=)5W@)PjGVGi?*hkd$Ou`eS^R3oBiq| zo0To5J}A2^a+YB%(;M%MUhzh86O{1F#Q=0Ft)MhEDDlL6rQccLSx4FES-(sJdAT?j zuPlhqC4Thvv{Z#|zRG)Bz^`Z~M3VLtx~2cm(5+Y<`V7iw1PRzTQ}6`+Eqmd-YEA^y zy6(CW^}=jG9as|bRw1Yu$+#D-)Q%jtX={SedM$NTm-JoJ|H>I|+j7~>pZ39-6Xq{z z=ALLK?P^g2t398&G}&)#^GA8}ma)b=zN=ao_0mL#L61f4220qJ&yk)|Q#eO6TFU@{ zisF@#@#gc?abKB-Gn;T0+dRF}xzd$1;t$W0{IeNUe@(6mb#1%!( z*I|_SZq?Qf7`%MxqF-+`%9Rv#N#J~VW!1)9hg0rZxd6eer@zQ(?+uey+#P7K$%hrM zi0-lFw-G=j^7fb(78TB4u{-TEFbsb7l0PqU@-@tTu{+13K&r*eUW!nO#$k6?J=+Om_~^a>^JLQddh z!5`k1tn|)?o#xeIroty4?;^Uj?b3o1&Ft=8BFW?2d~6&MsGy_mLxn2RY(b`BUbn3jKMH}y!@s>yq;rTwk&;^7&B zBeV0zZIoLmQLfWsU(=bq(3h)y7P|V?6d%XF(vq5dso+e7qv2U6Y`_ZtIBp}fCB@YA zW&m$Ea1;;b>Dc??OE5c3_aAOrQ$qqJ77DCjH+L1 zo4xMpuy~S;D-Oj5yx2q6AoFxyj3NNrZj;)j$MI8_o&DvPlm8; zTyHa#lO|&cL(-%ez39wc658>cfbkqJ&(3bLr{;-3t4fe!`}WYnL@0Z*1EQmy7s}OW zm^F9iylme56w6g(o_~G?n|$j8e3e9lnMJe%Ym3;_QXNXRdk$o~aC(X!%pey&fjQS= znxcXj24IC8&A@=*zxmKavlm$Q2lkX6Vd`?KX+rtb&q|+8TnJeB?))#gyqZd&3~_k* zUVz0o4!L9bU?BZT=RI~&P*Mm{$uL0LS=v>Q&cDtfZqQQ5)@aEIW(yr{fjnpyxE>e1 zE;|xxWg|+RJ?LxXN}9B6x35`##W22#+u4iTRGAAH2#!(c?OEK&?r+rCuxWT_n6@9X z_CR~a{?-rV*pov4N}FSBUd+w=|k3PHgJz6?7 zzkSuevKuXuzGLi=M$7B>zfIK^o^u1TG4AQwN_emmQC3Ub)CI+q;AZo34a7KR2FE(% zc`=yd0Zg{|Lu@_EE8Hr<2a%e6AFPa#@ROcnv{(7S3{gs36M}xG2f1&KhIbNq58Aj9 zch-Zx48}utSL)vRAAj4j8Os~8ldvK-`ZO`GmwpUsS=?C!^gN~>8URh^!4uqYb}3UJ zSJ8yJ#x|A#f#p%|GID8v29I3XrhKXS7t=z{#5z*j`#RR9-J;G}ox{H2I48+qj3b?A zf8?{W!exx1-e>)z-R4imULd(E=i9u?W;&7TO&@L>8i{^40bxpzvGYO!Op_%Qbgsk0 z@z6k+MU zaxB}Bl{T@NtI`Y`I(O>leRN~nXk1j*F4bMbn&Jq~f{GP!z7mh-=jnkC0e=C6KGFMG 
zeTYyvoNEQqmznPeFtr^FY2TKzLE=SwnJQyr++oBGTim<=E!pL57GL*jYXSTA8%@^HNWJL-rMl4`{-5C5nwMs zE?}XnKDqB;RmqPP5Ff{OkQwxtu-4}F4V*v7|@=TgzI zqpgYli4Obe+C4jeS(%#DeQ8B94(Ap32UG?T>O?dfe9V3o0Xna=^i`{sqARoS!f5%j zLy-wGvyfW)$@D*h zg_7%ONjlb1PlNG*$hN}q24N|d=-4%>lwqLV7$cym3l8n7nGKyf|0XG5nAPOq#OhE@ zvUlJ}3!IW0p4cP?6&Y#tGE_U}qJB@G+dv59a;bR?#vS0sYFD$(#yzzW`J4@OMX*w9 z0ZjW1z}6?Fo)qHwKR5M+{=pD>_S>8DXZ7pfCW`sLP1$k(C=!>B8)}@yjt-hq7?8Aa zfhSSx_4nFL#CY(GF7wT-7C%tP@EpPBg1qQ!i~OVI<87hY2VB`yH=5%y66#XgopxU< zS$p*(2EO;vg)aJrZbni*8?ZPVm~4|M;#%wBtf*MUdMWtm)542A#!&2-VsV#N*r}ph zZ|uDY)Fj2epTMQ_YBPd%YkI0@`qErfuy?%pZrSsjAHi=0onjGddvb%qUXQ)`rmNtk z=XVqsH3FU#p;XuYGGS*(yIwd`U*3t%R+K>>%3Ua)X(b)715YIS1NKblNr*msF5mw! z*W|Z7m5;Pn?@vbFDQ7sQe(dukB;pqulPoIrRxZ@6!GNZ2LLAeQ+VfYDWEv>p@yA1d zUTlWYVNNj$1;fSsf|>e4OcX9{=F2Ea6PEhuW^^S_#NRCYa+!_1=d!^Y37)TZ2z;Cf ztBEXRTAGFE^*rPbo2}sMBAe|!k7#%};&}9?vB)d1F z2ybxb>%v=f0jG3zGY ztmzBQ)Y2Xi+N!D`lv^h29rUWLHK<*1zgRb^s_8PovRCWJYmhp@9%w~&A0-c6W$!uR zf*z$-Lg(ll+A(PNSt_zy_z5@ui~E6i?^Tm$J7BocNToc{g{ETyT=HmdkYV6HZwWF- zjg)$rIZZoA&ko3Y(uiw2Rifep|M#ZJ0mw5H*PD% z!+u5?>fihGY%knh=u^b?7`RT1SX9&dllxJ#0Lx8N1nSIU^njJ9562+m^h2F${aJ7G zJj15&D8#e$;H(ZT4mh$DdcnTGfl?;Hz>hM((oT%l;t-UcCO00_>c6{61Bn!4DpVQ9 zS1-_9aB9a>m!GV8p#~OCn&!H#xna}8>GpGBqT$%8NzZmvnLT`O`n0yjJe!s}lght~ z_61LhIltkMPeRUHxPUW|wGe*~*_S29{PV1p)V(LBbh&>4zg3MbUN^G1NmA97!tBUO z{67SAG?62Z+>!?~Cy_I3A2wew*}%1`I%CtbcxJb2p!TeXU(eF+J_62M`8QdMK~g0% z?`rHPdj%e*M$fCRT|EXu>F50q7ANL|Q`scx4OF3c#FKqZ2Ys~s3O=u9=|3YL-qUXu z!U$(Sqz1hIT>@Dp)=;L6f?+Lc@%ah$)>#frSm^$UP`0;G7`W9h%^GTfIh1JCvi%;M zRc0wu!j&6*QY_5|LF8yxJ3O19j;3pNE~189NgNJR3CcN&AapGLhxS>W#v&W|WCzH0 zXo_?qb24U#CB1r^ffVtrsPr9{dkecQ1OJjhCcvN$Ef-sE5%on^diX5JHlUXU-tS95 z{!@*i}l! zfFtTAPzj{cJsTgbU(G? 
zx5VL*dAhTs2B*aoP@P2I4$)Il>XK3Ov}rL12hj~=Dp0Aa0&uYe63F{b&@NsrE%Col z-^UAx4clIMnN27vN&2i>PbF7WDaU<8HnYX-k$JNwvSKS{CTLWINrM_UUvbM9Cq}p0 zrM-3$W-x37$i5>{Q?M;}W9jfNN*ET_g@EY>k zime0NMfa1yG>^m=6-%1YGYC-%K zhk2eGQX--052{tZP92sTQ*B&MN1vp0*%?X9f$iV8xIv+w6v`w)@$Vh=@4DVhSaDMo z?43H6&geyDqQxV0RXROUYr?ch9w6p^B?_MWEwrse6v{z3*cX+;EHb9;v-?%M0n-I! zQB&EX&Z#4YSk!lXam-53$mbu!qkaTOrEexF*2Cuqq401w|LAQ@1(VVwqaU z37>y~I1WUQD=3;duowpa1z>Bj-zIef%thKk7pk^mFWz9DQm2rJ(35lP=qjk)7YX=@UBwO8N{^?f8x!7!#Dih-j$W3K5<<%6^>07z$ z;)?ZdD1^rC<0EOG7Kd+FOBBDr@S+v0cs9twU$3FSo$r18P(VS@L_@##Hcc+*I8T+!%5U&3!;u z7oy|Os7w7Ed&fVv)Npv5%@&aml2a|N9=b?`e@OTmmPdvo(aX`sOC#ZpJ!HYk;{5RH z&vR)rD>G7G;+(=aX{_eg_y;xkt2k78Nvlg&bXq-@#cYxcj?`0voP!3qn5)QE5#iM3 z!YJe<@>&e|`6~`re&KmBSVCw!>rvbMl){m1&k5x{;*^N378OUk>$4|X*uRo_QR_2z z|4m5eH~LpppflhvV8P|}DeNzxx3fW&#-(p1^PfP_|JDJEG^wh)w}C{FAew^}sG{cqYsS&F_Vs1Nx-N)yVJ3m4vgz{2HAWS*164my4zCavlRH>ZzqAraE5q}C%MwyB{b zS16U=fnuOc&VuGY-U|t1Js3TZ=ZfCUe|BaqP#ua~yC{9n%(H<;Q<`F~yN*^X*oR}v z+*L|uD2Gc7sT67h&3%wmDRLPXs(?8sB-%`dE)8LG^)9pTm5vxpx~_Jpl4szSA2vXg zi0g!cKH}KRv&#rOEPV}oWNm=OD^rbEUUPA)?XS@H&vUlB z14%)%%G6aDR{&(mIe7~G!{eIHjh4ZU@%)p!R+SUuGklRUsoQa}gTPQ*(G;MPAhojM zLio~2!Byv`ng|xK(NxEo;+0b^gF=@vGF4mL8&2=+xH}jXI|e=}#n3(I^r4IP;E#T? zGwKu`;yX8G_9~0dI6U|v53Nt+4TL|V;y z^H{8cfNY_Tek>m96u_H^Q$E@}COC9ay}qVWL_G>DfU7Ec!R1S_i!w^J@jSDQrsa9j z!DC-+yeOueOgbe>BPKE~X4R$RN*$+anlKhmDCq|kB|m;x-mYZ79x=6c3WoO4Q`E2esLmryg`6p>(%}C^n7k^x?e79FCcAKiUQ*T>U^KY+2C0 zfkawP_z$f{>ym+nKWl!v{Z@TUPcDJKkZwnG+Gw~8XQH#En$1;l5r;#zt6Ktote!1) zRm|oX#UiOE82@9BD*6iVTe8a;%oW<`_Z(vH-vtukJIs1BagVw%NIEg(P4m-VAK20z zE?W$!z|Oc+{H|;;qpJd0vu6T%EZs<@F15+hwI{wYTE=(Cu(Q?_CN&Q;hfTFmy1L=? 
zNfVO0SE-xwl27$xJI3I@b91N4uGLZ&sK`RX!_i6`7!4({ZbHxbe%MmPv&p_+pKVhANTYbnLvx$pbM`4=zX*bI4i(!434sZMdO= zl6NOb+OVnIFO4~=V6#rSS>ZwR@=He!VtSfrE-?&0=8S;CQ;MvqCp{XOe^c>Sv+wq#tJ;6rV4GN)0B zq#l6|)_z|9kAYCuVG^_GQvVHYCXMFpgP}3Fe`(ACmC#0u1=w$ajWZ`4cw_3aL-Wn^ zr0TGxhQ5r#ngpmpA%v|9P|C!i&*&7@S)Nu&I})%?-CtTJrXf^M5H82Ha`ziQ3P)o@4 zm#R^=>-95!jiY0h1TOkSj#1KNFwkoz9q}Xexj^8&)!WRuXA~7TCl&lPHI&j=LAjCQ z0r)dW5nMBhSxd6QF?-VO+Krt)Ql5|q`}8Lp3U~a1F#an6t=A{kLt<5ir8E!~t+)C6 zsWZ+f80K68jRF1WEAowrarMe5O(Gz;vs*$d(Nm!h^bpCJ-y^(zkQZvV(;R#=%gXV> zh0`)9K^V@l!^yd($4e4-2) z26kw}3Hp3}{S-c0NRbYwp)vWUpL%Aven<|DkfO zouJ=xL^cd1(-ak({G>3E3y08{c#11VVd?>o1}{hX=;k4Q_eF_Ng?}d8xx>XVJ0stW z#zNGZhEmtMDw|-7hoy*=S>1L%ka}7&iHuN62=ySZsF#O;=do>LHr<$=nT4)}n?kt% zO{X5XAgc<-^W#w8P#uPP+PAHOFT0i1)(15O2m(BSlz{RU^Ri)Hfd#6v~guOny;JWBCcG&&GJmVha~+HJ(|?0nJq< zM$%r2x!)8G;1jDhEP;$!ibMg0_%9V+_+9FDAs?Ftho18lq$U+`o#B0I&D)$y z$!F5@dNI-e8^0a#L}2gE_y>VK`yW_#FQ$KE+5a{94=lUY^uMv}%})P;WpDov&b!6W ze>2LPUjCc&z9#=~&ins6;Xl##|F_Q&_|fZxuh5K{G(Z^p8i&K*Ab~i;6$78w3<{Oe=0E<)kXWChent& zP;;o^?bTcZn!+mYD%ZpP(Mn9EXWnUU)UR7lg%9ezq7@n%oBSl=xnfjl-95BhVJ~}s z<#H-A|ND37?zFWLwM_Dlo0*~nM#DxK6|a0WXfYe?8q^owiPsSPmoFuz^<7K;*osBjT zreihr|0QtD{}s!YVnenElX<2y;_J4d0)7=X3-ky`_+fp3`P;U5G~p(y#9ds~?(;|{--rqq?KO6tiyEuJD;Y9sUCm%`LyV=9PfIZ*eWG1A+ zZaw+3X97qz0gg%A@$CwUX%@@oNO#k-_X>%9Nmf&mL?K;|PqL#yC_m`Ct@pQz?#S;F zjQlT;PQ5Zh=RyX!ZR;KhA^SF~FsYRJS)wvb`zP44s$YLpc&b*s5X!{1o_M&XqX!=C zqw!en_HZ<>?whEQmq$eh-&X7Y)Y>*i+AwBK3=IzYwcri;HP;Mo|G1ISKN-)Fo=m=d zl?vL;f5Gu^Kc{0%;3PX;e7!xMIUD}evlX`NAZdpsiatP_LhTj2_4(0Ks(Y6X@n3p; zG9WkUhDGv* zS~Pmf$cTSyoILx<|FyQVIpCImp}J`A2Y2`o^@&K-ZK175nY0UIWBzn}1WO5<*7+Nd z@sRWz>zjDN+)8#G0DzbBqJ-bn|30fbD#zXUa@Z=!WBrlvHQnA|qV>@yS*Gyq%U_Ur zb}#7GFr;KSv6Xtsz z1^QT#iA7fT3?Z%QcZ=PyR=sTQ3ZaLgS9Z?El09LGbQExq{^t#CdjDKD&*rj$>=eVX zxrJdBRW|fy9R(gGLxHLjV$>5c-eF&cxn}KisAKJ0_S$ZtFN-q)0bk{b`x5!Ar!H?k zMSKb^)w_dbS#?imne`a_KAT$xEej&Wkrr-J%d@q$?80W-3r=aZRCCxm%!I~4Q4z66 zQX9pn>7j_s1&NfrK&T;i7YeR8-i3fp$(@^9a2D50V>_O4n(R;4@X9?f{ 
zf7VtItn7*|)LNot+uXHvaHA30sOuvmCM_y|itGpXRkd^1zHtt>&?+)2(x7aR;NzW92$FV+=6DEGu>2`Ur(_L5O`RzouF7fN+NMmmWC4b>xK*#)`@pA~TN78bm zm1u!nS~gYj)8;*~o{)DuXMjM4p8Xle%pbVa?V9`*`8nIwU%*Qi@nWlem(v{OcXLuX$+uL+^%}a04&mY+ zMW3lx)@PEQgV zA-V;r>A>(lRpF0az|6v;^z;3&f9pbN_HbIcxA@4()jCJR#w-bo)!IqCG9)# zPmFs1SEgrN_t%%)|9c!GpZEWw9gdle`rwBzug=3~3CVRH#GIL;um|zf|31Y}elRBg zY=9ppZqC zJYCg>%pti;CeG3;#)|n%q0rELC$A>QacRI;qgG}SKH?9;&(&RoxuU(e;=XOykt#@k zsb|v_sbaw-;1EKu?iu$ZuD>)kHb*cx-#1HoWeOcPS+_XY)4#K$Fat@&B=^D&YEt`k zRMJ1E)Z@?Uq6DJQ6$aZp+g?_WIUfv^#1q~$JvMT3l&fstZX`HT3nGEG$302r(r_!A zOm%^3usoGq4M6P@?->{IU`Ub=?TgUhj#gfl{#EEFq4Hj-_uC`WB9f1`Zbtp+XEq)> zhM_8;MoraSn%J~;3u8g5pmr5>mA8I3gWnkzhE8{Dh4mvZ>ln}@M}qYx`rwYP{eA;9 zl>CH=DqS)(qD=lIYA*uzOwx;8h)sEMVpz$=qQf~Z?aavc>fV0&o_W*F-OQ``gvPBV zDMIp&?rX#!@ZT6&IpSj!ALX-$x27!M6T$oJM!v?0UL(Anebxv|Rja`{X)r2-Jvm zDp{zAhm|HxaypY&D?`)zwCvNI7C6I2W?d5T;u~def$`y@CynSHab zZAgJZZc>TG6)GV9QH%kxWm*>DR#|?(Q}{*XFnpX=M*;^{=}>x@9W@~n^08brHn|4c zVy1%NDn4U^Y1C7QTrpRDDvugVTk)olT?*_0o*B|4k zKo`+AgZC@>AQ%vEsG1Q+P4e!FmtzN#jrk8y#IR5hSIpnqR5*- zs8nQHCS_=x1V~PG$>>sl0@*h>#oAwPOuz<{lW>aEn$66lfpxkh0sYu4#WIuVxVo+Y zyo>-ygG8Mor4K*mo6Ms`r&P%q;0-l3j%-$LkWBrOa#Np}>f!Q!n`+Jhk1-BDrueM^ zQEnl2wB0J)g4f-nZ~0vEO01E)R$2}+IWCOTk2Ca+6#h|-T`rj>cKRjRFeRuM##WU? 
zpI{@FJ1NXV{GLTdX|K(TP>U_EvEh%Jm2J=RTFTFh5@;-pfs`6dJSnB})&T_%+l-Yt zJi*{pd7{h-3@*zF0i2+Q-RQ@WboEQ zVd!+PCS@XvFGF&dK&4GGMmw4 zNSR094&!sl_N%g2CfQ_K?Nc>ES+dFR1JGCRT}E%YG`f~HsA~^1@SV93xVz-<85yK` zGcOsv{`&SGHSJ4{i3uH&s(|Y>b~5`5taIbA7%FtzZb`1V_)(=2t>h=ug`^MH1qupu zGq3m5nDC`n?p$0rBBmF7*Vt`qBM#{?^z2)=<|pv!>+%o1zX&OS5Jf z%Q~x9;Qk2H=Xsv3%(S2KZMt0J56nu3r>uHlh-!AwlbDYaXlvDUUaNs(zuJo!*@W&0 z)-2Tqoqg!~89sY|i$x0a4(Ykg8+!eAC?8a@QMdCKkV<;x&!?xQUVG@DRB-dYT1-h7 z!A9Xflx~0!mj8B0wlnZXXo>CtlZ5o6TC2C@`CXZDgy`7e>MCs3zulkzj8guz$k$3W zAKq!2(p*xnC-*J8cOqAcg)v|uzk|TV@;UNO^da}`GPlHipS`$2_GRxMk5R(RSm*^4 z-+PYhRsI2o4v(c~iDybn51%w)lhZ%)d5ym91~eKCQh2fFG|S_5{oecw(E6amn!KNu zs@yx}XdvxhhN}#Lp(Z;8dwz~qh;bJ4+4-e?4?QEGgf<&CcpiNVqW8MyKRxBozxuSb z?DiMnHr@LD$%J!yB9t@wHQrJ~t@h1mEdxLOCW~PV>lOu-(0Puqu4PjpXU&(YqFI$? zK^!eZVlDuI($00w8ay#Of-<-JnbM)^#pL~#T|Z+~`fBH}-43L8P;b4l$h=c?6QS)P zKF6GS=Q`B^a_pY3HSRI)m6)Bbk?`K5+~X&(eRsP4jAu+{1feyRz%8p)w?1{AsPL?L<49$c9G?^(@(@*M7UK5>$als*pT_rTk8i_Y=JhafdJTrpFzSN)C=D~2E z$T#xOC;X3lu62)j5H5tLcjg^OA!U$ZzI=7ki7j|rnQyAfTM6>_EC(FM&M~$!FaIoyM*^pV#TDg&}|HYTCAW zxkssRQT!_ZTL;uC{omse1Wrfd(6|zS|Y;E`>ypXzy9`3m8#=)Ywz? 
zk1nNx3GnDLZt=)pl5o98qMJIgU>Bz4xd%Y}(J5D5a!>$hRG~#ywsc8b~2TtZ}hJj|=*7PE2_5f-#~& z-`KYycZ5#PYDvxeh1+&Qs>fHQ5zqogCXAI0Qr;O5R1TeDWQJlxlrAR#rz0R!eCZzY zXZ|GJ%DZHdHtmlu9C*>Jq&RQbT%^b(hV!RNq_mmey9PW)@&=_IRQClr_1d6%P;|4& zeDKS576SP|$N=}g2A$0jrDD||wa2~(XuX7tB0upgBJ*8}2*s$V*U=DUYKrK_TqI}Q zsDu}0wG617-T{9MqB%QUSB&zfn!<(e^PUpFA#y2_E3g)W&vnQn5mwwralX~?=g-)T z1Oe!iSfCXddph!RL*ftG)-GL%@9XDY_R3+GntU`|@?!M3S>{2w1}K_iPKhLDH!ter z!x*H#BwJu;mB+=V6N7>aG@waMx}2*!TyOlmIQXCq9$poBB8)nqxi9)R;;4NBD&cyn zLxCKn9i{!O*+|kBLVMfI6lI?AR|>=oYKBgyViV%nX-->)pfX!m-8W)i&RL|%iYx?l zOJ&9?s{*!POK0+JYRG4b23&Wzfl}O1E8~J=ob|AYKVuLJh2)R#H2N5w_(-}Zbz7%# z=enoQT`*{jvd>~(_j_A~t!#=Qu&E(9up*UDWg!Wq(GU~3i-vM^Oq?qsY4u|0OQf`D zOAVNhiL#8Sx%8So4?&TN{DUX?y~&OO(T{^QhXhhl>K_tjqA;&Zkt#%I*YH4T9MA}> zpW2!v)pM31R-c}C@#&F96ztKb^~;qwp?ga<#Yr5!7w)CO5?C$>s!xHCHWTL7L{4HS zf!!!5OogHChSUPy!AR&Rn`8neDU!XN(vIBy@{hv#q*FMf3BICnWz_6}Q4 zN21K;Owa9n?}!y z8T?nW_Q@G`znl>(jhW{4y<9Hz#9?(U(UMKh^1xv$l^x7K){{1M4vUpA8FhqmdvY=9 z2LaU9FIS;U5y2!LUWyQ>a2e@z=4VMTq(3GNMG2fUw>>p<39Ks?kdj+5V!3q>IDl&a zCICQd7%)@ZocXxr>8rg6V^G6LI3Is~wf3SNUr7sBn`5SfW`*-cr|o?7=Xo(rjtbp0 zBxs8TtZo^2zDqwPl+mTTz0pgl1BS7^Wo74D_8I0IrrRvJBybd_lDxs?qpq@V&jw}T zGoo+ z0%k)`cHwmWId(ks)kZ2I&60pmydpJ8p91KxtI{U6zRH(XDP=1C z0<1mxiME;Kx4AZs|tFXu^FJuHm)5WjSf|sU&>FVN4*WCXAP$$;1*^m8@0uRsy_%9xce~Pql2(a9U9#0y83eNm1`C)F@>wCLZ(9}$w5tF4wXSY_@ zb>;<7F;qCLO)O6z3C-r^xa!dkp;MVDYE;!gGf1Qtl2k`kjpUIsHKrgG1@EC>&qo7i zgC5YSp3fzJwy>TwUa;8*vw29QuL3znV+DG>pvpxKAHg`dh>DOR%*~`q+H~%9>05MX zq13^ma3+BcJDZyh2|>**w*9QIOFfV}P&K>L<7=aY!TIJm^8_$I7at0@K}QkdqUs4h z7CpYTgKzLY5hMyZMVt~QdPwK0O0D0t2$^TUk?HlU2jZ|a;Nq~Tya&oJM~;|T`hIMWSD{Wdlb?>+gy{tCq|68p%Ysn_6rS+yAL!r?~lo+m}g5nNn;Xl zN)u+EKeO6Tb7muqw-|v#gw@mx12|$6RhpMlg`xDtvu!A3sJU90#8UlqxwnpyEXT(5 z4_toXDS=#>^F_iD0~eNRi8L8<%GIiBhe9#q1xlY!8tXzw3104t@EACW1(P6ppvi5H3gp00q|aaWm4k*zyhTQy>vk>5X-wIrkImz z{Pxon{82*8|B<~{pg7Jh){Z?KJFIsG3QkK1Z27XVX3L&n>rA3jdxPs(N{~juKd--) zA>rR^sl&RWj-8s1tO0b`a?OlO=Hz>VGiFT_LbbWnPRbmBHFo@_S;mjzVC0yQAK9k$IvqKk|Jbtr|F5Ua!q#fD3x$~b?QiXl4=a4QW_2DsJfm@ 
zcC;8w9q4}+T1!80F%Ppm?=i(OCoyFAj^>JCM3<3BVIfSHRt&~9FE zSr=M-;7IR&V}iK-+9PAB@GssYx+|TdaqfbPZGqT>-stXxl)RA6dO(e|PIjzg^1KmF zD!MQOB)l$%Q+k97p%XUo-cPM4KPtX!D}G^$(wsT@B@v^uB{iYUcDREoyja`IOG7Li z=rdpY^o%hN z6|%-2-pxhbK0p7SIRZ|e4mY3sXxWrP=fVISd~+%+8=!Zf$p>!E(GLrvWcdrol8kj* z%RFs25v0@RK8|b(E|4iBW6=C?lWs*R){lQUaS&@+8dy!gkCT2C@+`g*1^Uys;}>A{ zDpL2T?9>X|@BIu$4-H8A z%soN&hqD1r7uXO{gN`zqqZSkq~F0$SH_+Z=H|pE&kXy~uG2xdDefH%xLgeg$m*jeR07Rb z=Kh*U!oYRvDOYy>AMCw#R3zWBE?Bs`ySp^*-FV|p;WX~R+CZI6}>hQ*cv9$>@;*g9e;%nAe#Ts_+2z=K+!7@rQjqxywGi}X} z$m2USSxS>~;x_EQWSLa9-8Npu6Gz^HO;A-VB$HR<9fn$?Jn-YVs^3{bCV}oF-t$~v z9s0Hux87kN`Z?b?)p}OwgpJ)mEP}S{wltsKYVJ{2+f5&u3u0A)ENt(ddFP6!ez&z+ zFKkDdMcW)W4^%^N%xzq+>f4FFQRL!z+fM;Z6NW{(SWV zFiv4w5^^VU)Mvi$5D!1Z?d$+uT3KFR+EC#X}{Z5sIU~%28~P$(&1;%q4hUEf;#9;^4ZXh3 zS*d@GIL5OCxj=;gaw%9Z>rd-nGzIVOTxbgzSIZ^wOaBIi_4h((u>U*QT8QE+jrKN{ zh2Xi%kXz(R1a2P;*4~*?v_E)pw{u@jOCC0TxwCV%+sjRuvF<$GJpC~yOtb1Rp2&iW zHc!^T2^ah^fB@y!G~Vx0_&&kw==vA&?@vbf?-d=&Z-7OSH$eG2&EbL=p7nyVqGj$U zYdNop74EE!{rH9R_6C^6^7r2!Yb&_JruQrF`Ewr4dZrg%+6rvM=d!NoWa|oEZ0XR# zMPWyH%VPVBfJg(dJm{vOR;ICWZKR%Gv>M47KfT)%rOoiH-E6T`<7PF@2t~$9ooH)g z%Z~|n^1_s&$vL2@VTOIx9mn-y;D`7yS(9m0!^m(n_GBHYrtO~*VR(+gut%_1NREd^ z;!|j~{GW$g<%mEgtpdUEXp;=t-4cVQEKdtcs#B!d>GM`eh{;u@)j;xIYA{qe0Fu8Y z1O@YMHpJI4fnlmODURVRCfq&5P->-9qy4CU@|Sv`7LRG2gxFdHw-kxwqp@oL$6o)Q zeqM+%M!XT-pO+v=N3~W2Q9d8~S8*4(q$#>`~WvkmS|eHK8ydOX$#?$ z2htcEZ78Zj?$Q@7cHG#0qha;Q86fO(7;T3>9dl$MkaMlY_G?+SXkZbqwEjD6i+_5b zhkDdjTT+kA zS5xo2hRa5W8~V*Q>jIgIo!Y9!F#?SL5Dbclmd~G=``bOZf1a%4hg9}-&-Bb7I5O{J z@#y{rdh{!%ZDY!xV5G(`H`4O36QhTLut$7lxPFw$7i|iXc;3UiuQU_fElIJTuY96T ziN;dX)=KG8XJCZK8UmoO(M-#=V&N>w5c#E5N};|HE`h^))h{t$h1#Fdq>hp>VG(vP zq$}@U%l(iVXjN6y46aDIKv%kgW)1Dvkt>88U2t|E^Ajb2`ij)!AlJ8;v7MsO zAn-6Vh+JhPGQlYOVX4-c9RtJ|iKbV8ad3_x3a>8NqD`L7N6*a8OBAF_oi_1C-cmOiK5Y%XqBE;`r!v2q= zT)B*lt$rj(w5y^-zUO#Yj+%%Wl=|W+e2%u1n&m613t)=ykWk292f*n35z{mPpSPf? 
zfujN_>|TkIyJ%LeKFNc^10$WbmviEC3Iv{apXp4axPY7l%q#POqY1{cK)|YZ4P_Gq z0r>7Y!I)zFZ~DSjny)$9G&#CAywogXAw=me7+e^HzXPzqz1_rW({Bsv+IsR2{X}Wk#QBy@vI*e8wU8wV24OB z`D7F?_asR6a|StIxI56Lv+MRHNy7aQf~7qYo(FU23~RxJ{+_wV;go5~4iflhfq5+Dpg`yOvNXyH!plOLK{lS5qi3j4jYKlZ0k z+kzX4pj}CJ(!n2n#~^HXK?HS!3>|TMmC#W)mqSp&a}Vm{=+vcLtF|`#>Xez$=_+v3 zHDPbkF$lhiwiqI1QZwmrkO>8AqS4(z2f;q|kPaX)KC#?^Gfq!~ChA6=rHL>+`Pcg-xG0=Su zYQJ#fvp5BVfoHI@Qy4v$C8+ov5e0Kpu~sM-icT8fyZw& zX}y5<2&w?G10&4gY(1fTG#8Ri+NqejR?6#qZWb!oL)8B(8#A7Ufdd{|()hj{US(Y* zs!7Qb$cZ%H-w3&m-os2Vk#iWJ4?w_d0#6#Nr=ipQ=7s61QdeuI$sKlh>j8yzh~o_CscmqXh@XgP3H-- zkTr%IqAW<;(_jp5rY|xt%R~*MLw1qRO;S}7g~g7Do$$_2Gr_{wu#n_=Xu+&C(G-@K z5d(=JVU*`l6t>VLsfvqBFSq?O8=vu2{xT3A+%d0Cns^b&&oX z=>$Q_lK7a_VBszFRN#T{R9^2rMvIum08*UO&QUYe>3C`h%FmzBA=7e$tg~(EYA$bp znyQAzDZ<;xw{`rt@agIp5wJ$_`B1#w-7-?HE_*t~8dEG<^iuWv?3HY;)2I8C8KRK$ z=G33BKB6WD*8Mn?EM&dsX7y?87c+ zd1P;{?SpYVKzYZB@C4k*(AHlbdS=SQB=VZ@+p&Uk1)hKhFgb9T)I_gWiepC-u7zBI zBGtC~%n$%JOpX3^fWVSY3=n2Af~@sfY*$e!?crPaD9fS-W;>-$%d9)kC5R0cEIKb5 zrlUQNt$CAB!3kFkV29(_0d}mD?zw_QJTnv$4v?RMUrrw1%gm+aYx(uUmM4{5on)fO`PKQCjEWr$3m8PN# zhhmi`h1t=~dQ@DsB-V{4SYG@U1YaPln^YO(PK7fB8)$!y6Cu~5Sy@5eD1^>Ci-t5S z+Mp~e4V6l`r^XsSzyuT9krflh=LFXrM>%#~yiC0)+d;w+!4>DzX04%ps`{Rq&%s9E;E44eBRQuXoW<+NT>&*gqHke^M35GDrrq?FAVf zA`;&*P{IKO)53B1;?b3bfk$|)MOi_CTpt_bBXXk*Ex*MC4~QYY(|(jAP+aN4z~=@| zsQhTr-=mRgZozSf%NQlawFbOr)OsdoK1qn+*JyT@u~HvF3-r`~igU!{nn3)w|{O^8}IAr;=Gl zv8bfODSbl|h_r-Oo9yoqz62s(6#j>&d;$r3otBgt|IObdKlmiwGNu+x3;mXYx~R)8 z0{zX0NIUOT=Ig%v<%a*tulaN7|C4$CJ1N9NN)*MN4`1xVGr2!Zy)ZPUhnnVm5@?vH z^b;@qN?Xb%IBlwwtUVYiT{Jm`)hU9W&EXiO^gQ~%R227rfi>ivu zj5Ib0sb5?-hxc|N8}60Tp6@nw1reC*(P=v!2cRM?&>19G)Wb>_{Vi=r?C$A1idwnd znqUv-v(M*(Pp=DKzf9dKJYb=$nJT@ny{Bb+g!&dOQ5-EC9ygEc{|;&MTpAU$Ntal9 zHT2lW`<<##qFzw`gXPZ$e^m0v8U8#5ejNV5$`Jw6v*4}#>HgF z76~Tg-n61im-b;DsCR*+TZ3_rw&3_$tg*etCJ;=B2nm_A#SXwRBAzkh&n4cKA1U3w{lV%1a~W)#D69J-(pgx z+FPWhyfEH6=LqAy&EQmcYcQk76rP=@RNv;QGFJ;*WA+R!AR!;{>J zu3b*eMmNphC?ZTsRi|6=J4hRo3rdr%q+kOCqyB-7?EDAad*j|s^hu2a%9Gq10Obu( 
zvFMu8Lc^m757BQ}RS0Byieh-#u4pv$J?86Dn4btkPMRM?UI9H&OXTcCUlnCGC+yA7 zj@N1MvNu}GRClAZAp>^Ftq*gtX2npLWFBpSQC!}LrRVm>`?_Ku=f7G%CaI)Q{>Rm z_?*s4b4{iqbvCz>2nS*W%B(9=LQEiR0PCyQM<#SP2S?k4q?xkHC!AGfjoms3;x;j! z@R;^gye6;vDw~fE4|?fXJ@Ut{YGu!Y!)ti#oL@}Wx4dx!K`YwUvz*XMvb7;>R0?st zVh3`Etjfw<{jPO}=^IJ=bbxkvnGReYX(OD~b z$`@SKQeHySauPY3m`2wVc4fuH7M0*A_ibp!kY^I+%{XpB)N&A|rAcmdsP6@~;a{2v zs?HiMSH^|AH!F0%uF$5_3rIvWq|+e7p#6lG4Cr_71(7-svlNCCN<`C<|2g>IRUvb= zDgvxjXiGL2DiC%Lg_&`$E|K@4#ziIstzr)Synamlu>u_FVIhoTB>=!Blx!^kT_a>! zL`TAv>I$j(@pm_aP9zDhmD1=w=PN%4gKRP1MQiL=mE|x`lE+2Fr8&Lx=ky#TXYHBC z{J2C0GLT4HP?UK?W^lr^cm*UZJ#ATYKM@tJjzp{G2Ons96{%$S30Y>&2KMO0;*CNa z#}y)a4*iea)*HjHk=^c`nby6<8O?O?Piz(eH17LNhJm_Ydt#H_u6KJtyavAou^8sI}UD6Y{ZzEg(i83%*$@ZfiM|3s@>q>7KCk32Z1xpERq6YU%)_*N!yg@-I78YeVslhwWgr{M z!UBDw?HQ?oAtQ8YZe`WshK8u%)l%^pb2AYy2zyPoa!T;^LBPBL08lOfx1pU8KV07^ z@x@me_7^HWKl|KJvB_j^xjh;Ycy4Hx=Z6wt)0d^ntPN8mskn{?u|pn(=$}L6V(frt ze>gN2!Y#x2jae}NCGb}i$AY#J){kec;|<$+36e^}>539k(yDW4bkb0b4gScJ`)jJF z>S;0UPU#Z~<}>0z+w9X509Zeyjs69|?R#J)NGq~wCB6UQ*f^X};XOxi`1GDK^uK2Y zFc|NN!k66~oHc0L;i!yB#hJ~9)RoRv*)5j#ifQpIAp&A=3Oce2<>dzGs( zSkbtUFryujIW1)!33I|DuN2!sm5qgkYaw%lMTLTert2;SfL8G%Aq+0w8z;-wyIjtg zHml7ZZb9F02ko#Q-I#Mxc`;)_=4mwiap8e+c+4xT1S6r{Y`GLWo&H|QGUs;<7Fr|< z2A*c+St!znXXK#1*64uXLow2banWKLYGH6CbXp~{F?-NCZZRtAh5u^JUpJxq+f6e4 zMA%=SDMZ4&`#yqmT`p6K`S_POKQ1-6RiwXuH2FysszP9XRG%uz=Y2bq^@(34T@2m_ zQY=JvpA2$Qj$Fs)ti~!Nv|e3AviGL-qvWY8U#k&^9kT49-wOfX%zGF4-S3#q?Kgm4 z<(Z3pta&abOzJs5p;I}qFCEcW0oDQW$OMJBA-FO_;xlzELV2JPvZQbea&Ng^yY^SR zhXvJ=(=(2x^_qsAkDXyC%LNTMV;*iND}HH~n}Rs($GSz{$nvqx%bx~_a=Wz0zJD2e z?o%L%O21{E*>ZGt>FT+oJ%hU1)ct*@w~z1stTR>S8V8YBP<4k=D`Ea2pxS@pNaFoI?RPRdwsk*woAbkJ+-26 z9+@8=XFU2fL?o^ie<0lEi4Ihq;qAL}n>QZj_o=et&a9x?Blx(6F(hevTg^NC&TaJE z{|-|zWYo2jvzc8TZKr9}2^-)EiPDua4OF6SURA>-dY%!7k(@*6Wj_CuUbj<=HyK$l4O748JzZ6)!E< zQ9th{qCBe9v>Gjo^7uaDXV|oTpMS3(FY>M;!Fp=j8(&aO7#S#+#e&3Y=05_@iS?8gEJ*-p<)(lL=3VmfCf zV!Bi?M*M$pVPZAZ`7a8Mh=yKW7jv^qtMvtC^5!8yLZu4y#XxuZVtaB7G-W7)Nt*8 
z3z{;<;Tet;k1X(ajyk~8m22HwFkojt<=@r{ha&|x5mY^->7N1 zP@WQecw=ePbo>TTc`Z-A9!kN1)~M5WbwA5%uCba0NpY4rvzLrGjb#qE>_|=MRH}B~ zWb3X*+}piq&ZE0^U>$c(e8QUdIPmgywL`>dn&Pev(Jla$PVq;Q`Qu1kiQ8Vg{eaxy zrt_}B5B_M-bs%FoZBl$~QX_RFLF;i6c>R@E9e1q0F~wMsqnhhpikKU0r4k+*Rf(0z z-w`&<-8pu2$)v~HPHJK(rFq^~W)XuCVOppY@}C#NidJs`;5(_5uiyDxdEM>2>HJ?5 zV-Nk`?{3`IRi5S+5@JpmQ_IN34=KL{{!2m*ZHn2fEJ&0J3;7!cRVG=Bi%2h%w*L{^ z-P1M+Wj24@wapi|dzgv1pz|?gm{;#8(FcYxI|V9FlPjX7k!&3u1^%&QztFGmPt1Dq zjpN#Ut9*iCT8e0nJfsdA`PuRB6!%LbSn32Z?;&i>35YhdEfP1BXV;vs9!$IE zq;+iML@l+s00-FX;X3g%>i1rk_h&VeckQUv?D|NO-E(QIEGcEg=Fhc%_fo@LJXm$z z-#_GN&OMboQB*vyoPG=+uXmvv9f*sPD@>o42OBN_Kz#TzO{S_ks02pTM3C0mJ*&8; zlc{~8#Z=gn=%R3QD6nXH&Az8*3Y=!|P-h+Z^1dkq9ipTfw1>KPP8fz12;rDn@Pl@7 zpnSK6&kT-RVtnT8fTFdLWKsZLkk(d&BSmN(TMApah1)Y-NTK+i2nGxS^s@aerrSy; z2gl8vE3M9EQLfLk-#Eb&4k!1Y6(Fjb$Nr%^#5X{UWvsFXoYi~+`3?F9;eSa`haaS} z&-dhkBEvM`K$EEJe0c`gRo%|%UOvC#Kng@dk4-iFPeJ=bJ@+PI5O07_;(59kg~gRo<*L=P_$*z7lvAWe@c~6rc5eb8F26noC)Ek5>Fj z*r*h&-^T*2T)hFToh%qfuecK#xVPg* zI}|Y*8_}uqE!~)23@{+>pE-YcJv}Q_9=$B^CM>*K-x2qlYPKcq84e8;&&nRt0QP`5 zan8PnsO9uVwEnScNaJ6&ENE@2<(Y1)uxIs0+Dd*q6I-!+q+eCA(;wn(GNkR)K$egN ziIkQLUxho~Ba4RifVIjiT_jl?RC#hy*KdG^UN$Xe`o*!jDjTIY(TgyK@E$Dp$YgfXIDCwz?%s3BU z8u4}5N!bxl|D?8GKH2!X>QzzA)yepqe@x(`tqwmn&+KuP-FZ{$LdCqB6R3sXn;${H zJRHol1SEG;*GsHn2cazaAcR3yW!Hs);@kN&{6((>e!IcOFO^=seQ&O~@eL5^*~L^c z>VA;PGtm~ z<{AfjIx-CPJW>X?2#omhsY^qF9xo%@~*KFzm9%MKEbO+$Fbk3<;~^vYyD znMUC0y%MjG0gA8qQ(dzb0)s@eGv)^$U87mClf+Fav$Yr3~ z>*$GKkmaG=RU!?2xK@oU=xhxdrKmvc=_rKPyyIFmIE0#W_bg~<(E}o+5>*(o>q4J4 zz|A%*fzulokhaRP@h$VHR?mK4R~n+ZnT;}~sI6nhPc74(w!{Hgc|Tf=Rb_H9z)^v4 z=%|qX5YBN1El865Hj4j2IVCt<&11*^7AFGNgR7*J{*&@afr0(%&&5hP#*WmA@*6l3 zCd4?)NUPz6a-Wd2#C|w`?vlBV!)!*|X}xOZRCB4ho7>QJXgo5T+0@)Vaok?69ZUsU z`MET7RedRF&rI%F+g{d&QWZkr6ad*3rJ!E*x#NQ*dI9N3RzAPdylyWxMXp|W)R79T zo?DeWe=uip);y~$T-tn684}nYwCQ&{p?ptJW7-xxIoDnlR-iYlPhEtCXp#M05BIF3 z2u%gQCR7RYTKgCmTv5Sl+oiQ|1*`IeIV)=n(8AuwZCpuXO|3ijt$3GUcnvX_&wLE6 z2W~wUU!y7+9GK%;2_N=r(dRBvYMn*Vo_6Ij8BH% 
zdnGBeRgp_SlTZq1fv&-{k)U|U<7m&&o2i0v)HFvRCF6utF2$ITIqZIrK54k%-m){9 z)9N2*ZAy;@{?Yua=?>W4A;za$!|KorcM8WO=a9kNAibZ@ixY(gtxkkHQtZaIW=5)G zIpDN_D&&_w@_b!D8fsvvWV}BmfS}L|soPR`&C>rrcpR~~{BGF=?+;$j_$rti@oxWW z_GG;jCMmdl12`yLD5$a4lTeLf4~~KYVq_+Bjpu=5Y10<%wyLM(40vNHcP%=$I7mTW zz!SM@<&*|TLe$oMK~}ifNybCgo2z`VZHj&5LX5xdE-#uk{$iOtl8C7eYd}g-+AENdMmlP|M5soJ+0$I=cm0bI}AN=T2DE=Oq z_IRme$V$be{|Zh8GI=mJ!66q1AMD==fk+)ytQ4%8PK-3*cQ@eO8{nrNrg5rSW3_V* zP?<~BGH3~s1DrOHYXc0St=UPg$!}ZF(g?Vs%~v3NHE>`t*~+03t_C&A7WQ!MvP$9q znZ6lxG@5&E(C5UQjtIo!6$_^+6w3v3BK+{JsfK5)?~>`seWa}dXxp;(qUPmsrf!_q z)MTOIFlJ>j@8<2bFVSXD{Ad1@(N%422iSPot?zDOM3K@YFKXx z^&DWe&kzq^al-LVA87}wrqaPELeI$laO|Xsa!BsIXSgVD_rhA0b~QDy3d4Vljp}pP zo98aKtzP4or>3UJS3N%9qZ(l(NVyc4@1rC&Vhg6q5a4M5uFuiH;waN&uJtEI2n`~U z^Mi@JtR4{(oG(0dJ;pixD)0Omv6eQ&P5|%XlvN+;(^7-9QPc4dW)s3%i!oATaeYmk zuVXbQAFP^57Q#yzO^_&Fihd#NLEFbDXW20?M}uxT4-b||Rn-6qzGjZtPtKaiE)Q1! z*~|iYkJ~s}wwrvTM{P&4)E_xN!((sVt3X7SYq=UXKohS}zynZZ90XBflZWfXJRMAM zww{)+kAl6q2WHC9G-O{P!Yg?mPm5 zNW{K2s_^p`>Brw0Mi!0L;B6M49Brf?I0{iC)GQ*EtD_CMqWO~|w?$C_gW#0Z^9HoA zu*+Of>L&d?Y>kiyTMKTaJ#@wNpCYL5fC)N<^i5PXWrkpl)Yunk;29s%q*<>RV^X*= z*(T?7w~^~z$J6WSRCUL~EE6!%#1IXWWb-Bg;6`AgZ3f`}F$}sT;EN%Y#>}RhQoO#Y z;R$`Z!f2d@tMWm{-QkQd8`a1J%-glYHY>e*s$%&{5MZvVs|ghfxgi!yxrvB1xVW?9 zp;Fi^2M`fd!aIa4fksB*Of?c93WK$}0)ug3eral{&~rui4ICRn>Z1w}mfE=z3lS%* zeygk~rWhn7^cI;Cs+2CZN!BFg+`xN?!<39}Ch2rt!NoJ(l(^wK6ZbEvX3- zEc*DMd4$~*DniKsxac}hLB*FGqAIymjA1HSbanhd@j-tbbEtnOuIL;T)il()ZsZNr zUN+R<*KK0Fl|Z`Li>J@nG*V-)-a^;He*&*)jf35>;!awsB!zQ-qntuA5i+6 z&wf%LR{`68+TCiN9W?5KLRVLc42GDhN|ZJ(FVcyRLNog0E^uUS7OZ!wlC`H3Gg{|< zEfogXp+9Ty%sf=C^^F|xXb-VVw|Z|Ym+ux@=G%%5@(xfC9+zh4`)k0MIX1T3Gp-SI zeVk&-mOF6itUmMNZBw_ZTl&1mbJB8pYf!n#YU)9WFDqcezVo?zg9tv4~QN`O|ky9 zGo$V#n@OrGS*l`(E>8u4%MpGGg>Y|GXm8i9zVo#IajLicK9w;N}13`4wz$fUypNxu($)$LGU%Mr)G%|rn7nCa8HONiqEppT$5T<^QR!>yBXK#wd|fk~KZ9WkC5-7AvddItp6A5`jBWI}9r z$qs0#Whe@VW%w?qD;Hb#91nq^2vzeriK$B7!}*m@dWD;hvpNQ5h1Z5Y+Ib~8Ttt*q 
z`_mvbfo!JY4z>~g4nd?NeA;S+aYoA(q);!uKRn-aiF=X749YdpT5P&rf^HwLQAFLO3y;aDoOY!jE{BQY-yBziNXRuO4!X1XHl@TKY~41jfBMG{Ht5ksCr*6E#} zQAtawq}W-3`{_1gNFer$5r%FcZl8BLm8zxLa$gaA#GdE?rw7(no~BL6ZGwTB%O`EaZxcD8_4~#z4^+0;7D>lC9&bE5 zv7tXnt<97mErpEQR=f2=xlwCA(w+;o4k5SAqw+}J0*j`!RtlM%Xz_XciiwI#XWa3a zp3S>qLR+HmMxOPRjfo_o&b$iqoGg!MjQ5=+oO)KMh#PS}idv+6xVHqP558R3xhOrB zTCl{|SaVBz*=4sq^)N5bvNxz4>Wa{5$FY~dPUFF*V!;$wgsY0J2~;M{)Xdf&O`rm6 zO~g1v2l&$~YXqn#SYKJj>fJHccGl=z`L(XR00(#>{*G7ke?VXa+b3Zs#T<01Okvzb zCr81wAM0jXN4{l1@g3j{8-&oK^r#PA3v-Zt(3)vD(>rVK)H8~c{uF<;|JuAs5=QUo zs_eI+=klbQb-D*_9BU9P&P*RrxRXc$=Uj~+pehYTA4m)Z53qW$B|831F+%9rp&n|~ z@D4r{p?s2J*&z7H^wBE*Qi01p!~X2`HqPmo~@x*ytb&;{H<57F%v~J$f065y3ju4DXGyaduH9f$l1Jo&rF9&yOAd$D4k zuxHNMPm90S>J>MKRM&5;|j7mF?mdmNJ8m{ko@;z)}M3+mhKfuJ~G zwVz5HowaM<3Avi+p~CRWh3qz=LX7uSvvQTcGnWf^*ki3xnsj$a^rz9bi zp&wpU^4I|N9Z?-eo5KGSPB#K1tGIH`;n_xH^-B&wg1>9WAF$tT$FQbI5C;_cPjHtg z@c%8HMHbb(ra9usjOIY+QLUBght(Dq*|y1l%d3% zfY%!aXQcUd^P^D?Hl>NJ$@FSjW&;*Rn}Lgl5})=zU|Q#rqH!MGh& zl+`W>f&hk=fX2hwkjAnp=A0oZZR&&1jWfAVG)zTGu^DW&u#(0IX3PD37KxFBBI8Cn zq7L%HDg;W5iEhL~;kcrTU=n&s7noIiWUof;4s#4w3@v8Nj^T96CZ_o%RC5DK>5h9N zDT5;<6hq`&fofY(s)g9>iS*FC#PsPxThM%w7GM>6;@m)1VuoX}|+UiIsf5=@0I#ZcVr_ zsD9OzHRDvqp)iAj;J?w7K#deLeB>`yu9GTVSXuO}2lG91_J2wkfP7Z1qyPGk~Px6z<&5vu3V5J!q0x^3& zmrGUGS&-}VP^uspg`VacCc#L_mDtp3R_J|+yI}+JEh&jp^MVk+YPMJ6^^cj<(Mgf@ z;;0Fb6lOFr;c+_y+Wjp$XyE-Q)xp5&`CR~@Ivd)RZ1KY4vGiD7@y`k==@sS=z7d0h zgW?5OTyj|XS^XRot!8XUKPguwF^jjqCKwL`HE~t1WC7Zkscj>gjyIkMYK^Z&#M~3l z@3?~a5rc#+ql%L~ZdLo4b=1Ze)Y3LowKoOufn)ULVi-~E<9N!Hx!i1qKHx&AP|`A} z^jGgWs?yl&Kni!`Nw&g0zhHSw3F&`?CMM9gZw#I`=>i(a3=|XIiN$k=Y7f{fGPqbS z{`SyqwHwD}zh-|sqfdGg8Zn$Q^omr%f!yi-0_5Q zmhhqhMUG5mV*}>oK1KQnp&~`m?mQtRKcv1+Juwf8SoO$X0O}1<;7dx1Z%OtUYMJ&mO}GfI0(oko&*Fll)0~ zbaSAOq9EFbRI|<+?T#C$KFlZNj&rD zewP&8bkdAP%iuVVR9cl{+v3*gN-$~D%8Aog;0~&;xyJsI=c_rZX}Z-is#B**t_0b95=UEr=WRv8T9sh8BZ(yo<5De-&*HQC zBw5f%9iEA-gnD76m}iG3_r>d8C0504eetKn#h%5AlXA8Cd7aX9`o^ctUs0x;+4m;r zKa<*QOo&H2KP6crPqM&|90ajZqFKdq0FCH2fAG-eru!Zy4WYgPLfa%(HwkQhM&4hZ 
zZ&x_IsL7YP@Cz!fzHVKN!JwS`Wl(``)i>>`s80CL%DdtO1agT#G3)VeI2*A@;0fd5 zSb@V`U?;e{fOXX$wVc1det&xd@Em2dNyl4KC!*Q^a`##$HTK?T#=G*+lDqd-?9yzZ z%(ruX=aMYU1EGvU&$;p6K=*q@eG5$6gHr zjO}51AHeiOY0Fh!h}vs`KkBz&58T!j0ffs)dtR0h4#%bcQ4;ev7(OIbD1*2cv7wj8 zH$c1mJMv42uz?UQqJV~9x6xlYJN|pl9(V)(PYxJ8kIOM&-C=_xt}6H3FALoACQ_aA zuR5}nAA(21BO-~O()gQd*q>td_#I>sJU32wYpdw2Iyrlfs#8)K8>^8YcjH*q!}h(p z%y6r3uhiCG!^*DBJqwRsF*4+~R4m5C=)PHJ273nbm%!;+AQNpP|GF3Q3ufqRfl1@e zhU=(&AQ3OW)pQaZIg!1MW$*dspo4-9oYQ7<@jkiMDe-XlQxb4+}C!3C`0KwbyGQWC&ldG1jOV)9%c!5Olen5)sgj^xzIZ=W>t$Wv1k3 zx4jlE5HT*pjMfH^(x^~Gks+MLnuYtr{E9fKTZ^^@DqsJUc{*7&S4Z4j3yFLKG~VHR z@t#3M{v2jpB2Z1|m}n?A7IWtqx`_Q|{jt4?>f*jfmtgtYR+!| zM$IgXs5$Tv!+Z0zs)HtqMB&6Y^2%?lEQ`_mo@~;Qg4dpYTtTiHirg3>?SQK{J_IR* zPE;Sp9=rp?eUrP3y~}3R(RBEcSC3pKfmvQG&Y&;Mb=#|H!Z5A$`$m`ROXUqsatpq| z_krnXo@7n4&>hpvRC7Ed0CZs7W-+m(yK9_pSH_iJDns`@R>aYWo#(~x)>Ltq588VI zIOFS%>)Ku(*pceuI1++ICq6o%!=0`LH6;E}U>E=Z?&7>YQi1K>AxaOuAq`9X*Ch0` z{aGBA#MSVarU;w*9wFawOtE}Ya4Lc%u@45{Li&2XiO~YUga&yq0S5qxOaLb=j6I}R zfh7PSay+Y?su_GR3&4;5I78qjF(l->>Hp;)jA%>2oqCdG_E*kQDH>1I4?_RkaJkPR zY}96sk{}Ba^$~OH+1*^VS)YMFo6mpn78q3|+tbN}h@Hz1oSJdd#f3e@Nh-I3Hc>b+5g7Z9c4_d2UOe9h!0^JRoOJ z1+VPCS=~~m7HW-kvZjMoTvkrv0=NMfdbneLdi~_z-zcHBj!R6wqHIOq-Icr}VRzh3$&~rnuWuTa35{q_6FcEiHV?@x}t(>ywM zpg3q`XQtd&zxN2HJF6?-cB%AuPYf{)G&d)h7db0AJN^FByfADXwPeY6`2bKvV*!B0 z*n9@8a-Dzie?~7RcY=c}6Q+x`A#;NJSQ@@&p0Xx@33C`ACQbehdDJQQbLn!*Y0VLYEasiOcRaA5?Y4Sd#V5NN` z3tmjUKpx{cOa28<0~QqZ28jRm6zS(~SsRmhpNsQP4-OjEM+}5C#*Tjc{(u|}v2z%? 
z)mEN+**t`XP~yj^jf*4+8;|C%T^qLRYEz%i7a6C%yuU~O2ECV}m3hv2%3tqY@E>e1 zXgD!nqc2>k}l zz1>nr5JK-0voyXEPB1G%08pI6K|63sUQc2YT@+uo_xz~Nz2Vz|_t%`o7kc>>kLudq zl~=Zc`&kE3of%#&i{&4FnmtdgjK>stp3`XlbM~OndINkNdjsfCJuH^e#DhAe{@M8y z{02IBUy$6u2`V)$NF$#81U9Sts9l|g_$Z0YHNNi$t5@e$_*o6twfb7sTI9;X#_B-& z$;`R#JL#2H&g};+JE=|Y&I9jfz6xZMO+&3N`s%RJHj=VJ7kRfNfqs9*J*Vh4?+7vH z_E_3>*=~}_=ad3W!5v!l7zYwchnT?$J*djo!=|NIwxueHdF~Hf?{PuSNv-DV=Fd04 znCLB+>PVFFC@yR^Kdn>7YJBA)8p#&8T1p3rZ+%G6yPZthkqUOa@|CaV?wRKdjJ#v_ zu|>+gmV<=q-xv9Ib9Tk@b|ftG1)sStLtK5a?yjRQ7PU2=%CC3t{Vrd=OlfflNKAHc zJIk}VrR*2yJbzbcIC}#yKPtptJ(AgPUO81@h^7~f0`$So6VP>p+2Z`n!|(g8)&**I zJEsEFExTS#8#YdEfPVBRcFEatL(g*^mu~fTZukr0&(?2%^e5hgh8o@e6Y_s5yMb~P z{QVgBbo~Z!%@Spfpbr#(-^6BBRpWs4h)jr9f@0K1%2~v(R7xQx8tNcf#z%t)bGU%H zgARm2{<(>AiUf)L_v5gH{BiDr-#Act+=zeq>6??AoWE;b>W`ZL%Ulf0{0(rjFWRLE1?bsA zC1b`yY@`QLcL+Ekr8jLq;jn^hQ;No+E;RnXl}ErGv1D4GKC|(}TneXc!Xi zxn=ke2rsm1l)LpiIkg9h)Q0uX1^>I~(AY>LEju|)G=KFR4BHga&7csGym9bHk0gyn z56%BH)8{{y{r{XZSAA|0&Yz=gmMx>2rN=Ex0>Eo0uzmx$mE$?@CY4gYBXu0Sr*CUB zKmK0+RX3z8^fxN^e*l>XQKd6-hJ+W>;sh5#nzY*nPS={C*vmN0V~rk~0H)gWq;`a*dY4aha!Y@t|2}2%{#x$x z-qvxX&Qs5LFVIaq!jv?ed`JF^Sc=uW^=q>e?!yQdYmzV$T$)x3q<;qFsv1vhuR8mg zS*FvaPHVHx)#!?!t`61taicROs3L6gt__)corG~`d->9A!>T^+W<|#LuHmL>{pe6> z);Wfz2GwC8_sm!qc3mey6ia6FejdX2P`$Bd&+)wnpy4~OJyu4rtKqRBCj@k z5UC#vloFN-%ITz5kIx#>BVxmp=T&yr6+^>0#iU3ue{fXP{s@NBmFPE?Wly0G}(s!^n~GhV-b z4e+9DQXyAgj_UTH9LV3d+!#0dNx3|z1j#{dV7Y}3ZjE7~vBM9iGu4>wFg4(~ztKF# zVDdlMd&{V{-gVtOIK>*=EkFob+-ZYbptw5$io4S`P~6>%ySo-jaWC#ttXOd`ZT}~0 zuYJy5@7eO6^^W($`LHv`n8}zUBO@bu=JVXoec#vh`|ahL-c`9Uouph#FTQ#xvgI1s z6di6gnCDeH^=i(V>uGgvcFm)#KtX9%Aor8hFxiKeL{BC+cR4(r7Xe%9_)i6OBzsYHV*qI{b!rV|NS~aGm_M0 z_^{`$AHozQZE8oTsWfY4l|L=3jDaKqjzwUbeNv~##`k-@{#kI%wXcn9b0!&-Em+&U zfLpry_Ss9%uQFHk=?*>cfK6+GqNuXUr5|-IKPea9nOvP&Wi(;N#fcj7ztOBy=9C$D4jXjZh0kfWjc*$u&@M$K;UIdI}Sl0w61pP)LoAgVNOH|5F&@Clh|5 zolB+mI&RhF`2F}{wdd&eFWZT(a}p=(X030A94802qxGaVqu#$jKV}SSYWivUGQ|xS zXBnNpE!m}cJ&1jtj2o3`{7x*amTv|8(wG3-nN{Wj70&1=yg@511~4KNIxSINx8Bq3 
zySusgly*bDB>U8~ys=F(c*&dLUc_feWkLFlFVJt4-2R9-QhraU*tk`@bzY;MqV)MM zbaV~}BRv*tdz+pGPuJVrF>j{>UcX@k6C;BqF(*P)-x>l0E;u(CjWa5|v!8@)U< zXyzF9!a0%~o%m^Z*g%~x8(_f!T=y~p-_~hVaU+ls(3=| z2os!whY;(lk+__;1`sy_Qb54M^}%^2Uu1fm*H)u)qIDFsE|XuNX{f^*LEAtg+8fH4 z#x{Te^CI$5#buHAz8y_ET?7^rmn1D2UJ$VaxpN|v2n*F!R(q48(aV z9>{*QjAbZYFouOoAru~lm~eT8`GbTknbQQ7OYzo1sZU{gMV+U#qdCJd6H#PE@TLBA z1^NyNqRx^l6FRet*4I&#BT6?IleM>GYYt-{;sHXJmW9`#@?ulu-qKVEUV&>I^Y{x<2KZzBE!;-N+fEZkS*6dRF^r-AAws)znTdk{-ch+Nwj%` zHU8@>FgoEdQ=*biOmmMQ*7LEGn>!znAgmxlJ=Shyvx;hbl~^&tLga@zkm?$l!$@JR zNI(#YrIf-hEf$h5jt=F5S`dSWwbKNivF^r;sS^sYnSi)^0o=k+{5o@|Gj)pCYh%&9 z1Q~&G#*Z+ZGFgzuyY(}l$Snh57Lhy+&)B{tTy-*|XAWDwkc<^F5QsUK&( zEkGU?<+#{6r_G&hcfpK*aXS4?k-N(c%X*#_|89Rw4}|v4ti5pm$wA&)yZaAyNGU* zvuSO^^BjYpIv6R2c6~4@HCXy~j4Y#wGhdBoz&6B!Vq`GKr1+ziAk9(Hayqp61X&4y zFci9@2#l9&>j9~wgWa21H$|ALo2?54xk~VdA&aq?(KmJ>dWg}>17?$|Vl@`(gw;g% z70opb`CiM6R~JBFO^uUrYHCH+lg>>4VgTDpTh8DC9UvwoC<4|9*tbZ@hoS~8%z5Ud z6s)uFqfq+*dR_4mq~)8g_uhv>`g~4H#Xo3I;%pdZ#@x!Dn+_Eu4835YlBocF5q^{D zz5jYGQ@eM^5+z{A99Ny<(l3`$q;YcFz1+_Se}+FCn=f$V%Sa?TV2q+7rw;>#2N3aN zl>}3TrieQE&6%_?Lu`V1uPnW!<0+|zER>s}FhnG!6~9X!u`9zl_))%!Z;d(HjUYHw z$xb_$!Fxdc0F@+6zl;cwF@%c1J|zU?ix#^Z`!mqQG~amY^P$5oOgiNNiZsu@5Ni2a z9&O^-pl+?%QTt#bek(8yugkHCZF)=k$|_rN@nsjs%!jhJ5_4k`0kGcfLamgnABuS? 
zt9X+K2FgIWcajS6>d>k(b6Pu`a0m^j`6?Y18tN;+Xwy)+i6V48e9}IcpuIToyWvB+4eXY=jjNrLz=O6unYtX69Q?C=@4R zeHn$5{XhB$v;9a0^>3FF95cybSc)H+D4}Qwg7Hw4sZxu`;x{`SDYB)0!KC_0l*a(> zi1+qz!0Sh-u&+wMH2sTf32APbLikm=?~zJ^nBd&^!}d-DDs+S9n+yB4Q}&x@9%J%l z3@J)`)I>UcVAl|fR0a_-W?Ha%1@cc;YrtnBq_W(&M+??_K(0wxhIc~P1d*Y^^~2ratdC}$nim+WP7>pqpeTaYRbhVYyGzMSkQQ`53oHMzq zX#VWi$N0Z*Jb|bZ(_NPlHFP@FdaaL$3t5#Ogc%~9@%1|iu0?_<7=OWJ9S_j={ zR<1a}Ka1zqJSTOUkKi5KIeY$-CVx(6*Ak4$v11+EI1`<6Jw9}6I!Fv_2M}AIKs{2iQM8r&1>@WY;UR$^pWqXx#PKmGObM@c zYfYb&h9!pSy_!u$zglsVmJ2E9uUbBW`0QG2BVWEr)|#T-c|B*|1@67=O2DvUCfeu} zsVxk_UoCmrkAHK7L-bB=_fhK?NQPV-7ORB80CdiN7V$8|3G?2GOtDu`RhEnGX2VOs z2?GMDvz=E_y)D5CtWx9UQ+iaSMQAeYcY=ql_B#eb1oy`ZsU#_4(pm6TEeL}a^~hZt z5DkrS5blAx?~>S0^F?Ngm^#js@Qa4r_#Yw*!3|Lj5)vL@g)kulg@QWbxi2ar`X^V` zpwfhAwall~6q+Ai2w5)qs~$Hv zKnlMmn#hi6e#Fp&l|oWb2ZfU`<{6bRd%sI4*A+614Uz5zyIw_fFR%Vhr zZ>-=2uL>TKOwi5C%GgPVq%7*s@5YF6bxrt2h;uv8gOX5Jx_p@E47AC0`is?hXJI=~ z(J4**nqSj|zYfTHHde+I^TB0DDMj2A@&`RR`Y~aB?iTm@kwU3iUFU zs7b-_4@jqs9$Lv%0_~64+TyMJuzb@5%#mwCPx8kWa}Ta8tZ2@_HV5MCw)=cxCVJiQ zV`EYNJR?S*IoLJHF)4J|SV-MkHwvD)R-2{bas#aTP=+-Pp8zpPb@y^6!SsIwaK@Vv zBUA|KxQRi#W$qY;{02n>NRqX#oba%#&PaKol^=D3;IL|QC<<#(!w6$(P<6C%CfP1O zPaW1w;s>&4UA{-!DZ2u0S+bQRf40{AUM^Wi=CkTO&96Aso2 zxtP%3-D=eBI%rAx3$RmIHSKGg`B2jyhm?RbKeYr1{#jV7TQnt3i!#tGOv%>K6**#O z)(_)}M*uKyyUPeST%4lzi<}E$j`b9@>v$}q-6>D$HN=+rF_*jzn zbUjaAr#rPNy^VS@5`JxaD`R?Imc3}zN|PE{rBx*}cX;qh`Ltpza%7*0aAc*UcOR(k z$YhCs_XCACRdADcMW+f}Q>Bc2+ro&%ab>sJaUJOIn*8F8kz^MWZ`r6Ux@-|z-2k& z19_!iyj6ovd>C+s6j{1F-Av@LsaurahEX5Qk}&^@9=HKh*aa zk7s9tc!mm$HkYbHONA3pof$AOWuivDV~lMvg^31-=YWN2J~0IsMyLN`?sMpLFN8qM z88w|jP@cE~0Nn)!3i*wZU{rF(k$$)z2Sk)pLnIROn_PNQY>ew~4bdnMvg6{cJy1Gr zDZbfMK1-fertwpU=D==2jO=&*hO>`?snBr6*zL98O7_zx>%MivJtZ!HD>1MT+;S)t zN&CTe{AF|)B^s0~QhH)w$od(-^LBR1yz3X;XJ@9QQz`d849i|&hk-Rko*!P_`MzjS zGj7E8qezb>$95d7y~$p%T-1=u19e8Gl!)QfX2LWe7*Q_FxaHDG_(2oCsb;cLsWBX* z=-c~*-qQV<4W@kPi8_4T8(oc*7|jl|Y#GQJb=Yj^Gq&o6ieU;*BcF^*pttejOF3qY zChs$OiNwb5ffh!M(%&3a1f!5Gx>uGHA^TSW+7Az)rZl?R?| 
zDd=vU!kC?kp=ga`i9j5;`x!xSZtl7H$zz-NlH|ryrlu&HmIr2R7)7FOo-yA5J@+wm zAxh9|8~t%oz>KcLsDltbjC-gfUodSZxod*``S$5_0cf9pw`DttwGNcIZmj(jhL} z4hQu$npJ{~Dw+XV2OrmvjfLs+A|^~uV(-h@3bvs)y8@BgY`;IZUcIdy1A1d_L%B&% zx1ridiOu}P`@aWDBSV&*7S1u#VR`+J`4e6TKOihjLt5XX(Q%Do1 zfscNZaj-CLK$4phu=mqvOx?=jc^SjVlfXnZkY%KcE1JIJS*Pc~?fVAF1Nq2cdfJ1{ zSIK+$-z<~keVf1J6~(qW@s0Y7bADmbZ5m?cshqANB&G-xs&d3Y;m0PiS*F$g{7udS z{B7B9V6RwKzz^i(WaIu!i04O6waKxQ+O0Y$I-?CjqX{k|X={kVKv@=yixe)?eBnJi zE!}Ab-hVPss>sP4a*8WlU~=AKnEVvQJ+&=>AQlIoCh~#VgF#r}#z>hl9tzJD$1Q2~ zv5wlbeR@%0zLb!W#6c_GmIIrQU6!Lsa<18)F-70qU*mY>d=pd5WO`iks~SMSIUE`s*TZ74Ml0_Bp~N?Y(gt`E0iSE z6mF9;lBZPbpw9~VRTJrDvzBTewK{iP39rBTepVESM%eNZKR01c)OrS9yMfm960lS9 z^jrv%h-b+zQLDO^z^fl+##WuH5*m{;i9!=~Npr=%oBHjImoHwPh&gX`X*I{axz)`4 zLQJ@U;AFs@^?3G4wURwSCQ;|(3cIR&sG$6uu`6LqkJ2-#JN_Z zlsK`dZLR7-R|Y=}Aj^dNI%+(T;a1V*3Dc*KifeA}s$eFjl4TlgEA|EQn0a7*Cd&v8 zP*!}2Ex5jg0Iy;!Su8*C-}1I-Bg5z#EQ_iR=gI1WNJSYGgmk)y4b)1iZM8?{H{WWg zaOenDJ~zrP&x&BE6xx7f!V+ zDm4MasIGbrljj^?QdQe#c|8)83Wk+2;-IY)^4vR2T)9q&Z~g`7vDs+Q>>-QGxU)}g zz4Ui7{7IB9d!jti@9jTzU~&&W?>u|BhBux6sCiquZduc+3qDz&?B{-aYZ1CIB3_+|_T`ksH1@HdqRuy@JsTq^qu z@PS-;Y!^n!)oHbLdC%{^c{|wlYiZ@T$lWcnrn&c7rf^S)RisaFn8Up{BSv0R)LZF8 zM4OF)z`|mszoLh*2C2*#@U9Hy^Sf@3_{*Fe{;<6*UY0iH($us_Cy1Tk*D24J`2B33 z&*wMXg_~^Vyq2B`7nuq3&eFm9wx4;tdag^Kb}_@p)+M?&D&!+jSbiiY#T%tZq{94C z^>+ihgw6q-$y32#dmC`D)4G%^x;;R!(KXp0e4d?gF5xmlBOdN-Ae`ep+9Jp9SZJj3 zy0hc)tIEmBa~^h^NybP{UXWg{%Ir`sj2gljb+eB~+Fs`*Hmr{ILH)3&CY709!yreX z|4N|+D}+CZv)ZapTiMmb`a1+XT zkSoQguAFbrItkATU~#spFY;bmW)F|3j^hB16qo0vWQv1gEK}vw3X)+6E0SJy-d4c%$4C%!*7PkHGntK~0H_q>S1WWBjiCTrPzJ(0V#Mquepe@&Gz`AP$0Jqj~r-wj#E@=`+V7arPM1TL$nFp4|eD zW-$qQ>;w^M4oR}&A#MpDJ_yIyC}1UMjM>Vbwo8c`lftI}mYGbzUUT5NI&lAMfiS^T zlb?`t*2Su&F|UbmG}>IIP+t_Pi2??m4CYq{i>1&9eHOfGv9A)tzB1ya^ASHo zlb7b*QKbyxnJfSrh>CnLFbH70ZGB6!H$jGdq|I2OtMtiPu}MQUX^h((M8=ctt66D* znlQ{j(#KLCm*1XzvvQ;C&PnBuNe6{u>DTJtvy51bO5asFL`D{8c{^ld#ET$!MGU(}ysn>$4$$Dh*D-P?;44{K zD6|Azlsm`}VLgoBFE5dl*4RT?Y81_b?I)6a(Dcv@;upOc*DK>&>FjAerzQE;mKe4_39U; 
zVP&!gzCCNrm7GXq6iat6zC}~#(H!x@pMm?2RAB5tpFH0`uM>oWqSPdJ<}lyIA!EvM zmMzXE^i;#I6bXG*($%ssON!N+dJ^;r290cUFHI!YgLESMgT(=HQX+6>FLI+LbA)-h zztO$Hr=mrC0UE_^xt{N)m=&YBImAPPWxgoJy!lbFdrd_O=O{2c{nQ#<>TXNfDi*CB z+ZFgh=nB#$9x@An{m5>W9m%M~j-bgJV|fngwV1PRdizX<{d=cTO5oo5m?kkufiz}l z>0_4nX*5`gw9jdlFFtcrf^EtJaRh?LzH9&x)~}cFgoYZsmvqygu5u;ODD1wr&`vv8 zhe_%41rEJr1*xBpa*zrNjd5WP#pwgpBfDG~^f_u$JF4;WODktz9*S}2g%$;M#I%dUEZf^v!X={%C_h*1#dt-f?8<;d}+9FHV-?D zpJ?;_&amS$*>)96tciu?1^yjlQNowe-|ZZfQL3P};;boh%n_>AMA{p)M*>af9e2f% zP;z(|KU^Jm<;CPQI4Y146PIr8rV&%6ZOX4M-3L!+fJgz)jZ1O*=0*6**kQ+i>%T(w zmi$Yl@;MU?p=`Q=U|o-@45`t*7a-#KAHhpC<<6BVhu2ECa-22Vi{+Be&V}|2!R2e7 zh1Pu1w38qJ(&s(&ZCTVp3vq2}3O(muRZW)v-h8Dje@fms{lK7rqsRvq18LXTIoOzT z?UsLGA2S3ylP@O(^4wLnf1{3nw4diZ;Ll^W_n`8Fp_tW5rP|uyQH#T&vI%hw2S5JG ziTFpK_3WOLuHDI`ub&%R?y}vGcIrB_w+2-PH_TCia9K~ypZCex zr{>ykeKT-vr%7ki{<7-z?N=X(Y-(OAK^m51pO|imq3@DEJRK^E21sBw)q`K4dI5b% zlXoOL;)ro#LssWG zL}oPy^Y_o&saHepu2a;>fdgq9>{+^oEX@VQORq$yAB6Fc&J>@uyhb)BvsIUjnKxqt z*M}AT_;Ev6CVolkzccEW9Vz{B;KIDJJ{wh3LW(}bW~{`S@F;vY0KC-8@$)$gC)-(< zAv;RTXyxAiu{heNdYIkgZ1w5#GqSagwgbNDwgz4xsc4LJ;(B>}a3J}j4ldDKku4C$ zTt5(L>i0s0%8$n}zma1u1y6UUyZM9`N{25~__I|z;jNsF>)i8Ex_LU_lwxB;+>&FZ z>Ly_aqB66w3_|ivgXjGKorbeXGpYiASWJ?DLxQ&j^mH!aU9z>d{q$K32DFx(O}3ag z^1~k>kheJn`UXv6itEYZfkBXj$s46ZDi)^7hZ3WO0v$#X3|GVt>w}>kB!U*X`gPr3 zFQgfads=0;@N}x$STbx2Ch?>w!UvG{2qBcQjSu0R3BKy?iD^k{3=?F~qYkB=a5f)h zW^)wo1fd4H@9_|H>3J+7$|;_s;_JIcIqzW_SA4jMFk2d=nuQ_w%~@|>MTi;(fB}DU zFKiVpq+?wer?Auol)Se|^eR7YQvIDRJ2OW@sPqz$56U`^R;EZH`=xjh)u2`hct$uR zO@Oz_n@K|_rNH))xOE$S-&H4B1SJX{BZGpF>4wUrzYJLy7n|H$>2&C~i*z0I+~j<& z24BO0JoBO<;l-nttO+;G#zzq+6zj7~sx!hVC#|excjgIEs%E&?04akT4hg=WNZepwP2Snq?ZeM;mUKPoVqPeT(b zZpmDlSf8Wdq&!X&T`%D=HX5{l1LRGJQ`J)xNE~6}<1fuj2B=PGPUt!(y-%)Z27Te- z!5`{#px`EpyPUXDFuqH!?{2h|yE=kc_gXdn2>oOw-FYYiFsW~h6R#I}HUnv0tUcF1 z8`(Qf$QDR8*Vy}L2dto}ZrY=`=1im1H(<#@A%N@3{lr4g$=`1bHNaw{DQVDF)g;-c z6>4`v!41`IU2%^#Y|JmM8N0hGP7%^ef$hJbAI{|}IzU~h$Y|4$T@-0m&B6xbP|7Cr zrtrd;@(bz{jVhpy=Z0kzW~qDT=(Gtp37Cl$zP==Td_g|k_B`s 
zHhZv+gU@{P-522a71BXbEQ*DBZju-)+UI_=!5drr{PX+GR1K68yqlxtTO2V?C(kCZhRQRI8Xp}cRQb)9v{&D@CIYJAIklb3T^VG zvz7fWt?q71EnX(; zXMAB`)qw8r{sz;x$2Hmh4n?a4Kc{xwUx~_a_OB?^ek&4SVA) z0OGd*4^`_un~z~Rufz37^B&z;jJWl53R`b$p8rgiNT13fTMK(~Of}mb*+||?|5h4^1aj!pXO-U#M#)~cT=x>k}C}FnpHBUyRpU; z^AY86pb{FVlL{ptNuh@?i~+@MZL4x;V;wzB3VB;}`-fy4LiS=}bLO4B;2>;Be_ z(s;ax!brqnNf&n#4Z19z=4vkkNL;$|-k7_xi*!XzGxXVY$;jyWl}hVedO^oxT=iY@ zRHACMBVp9HqA~^5^+WQL(d4KVh1>d%TDG3%#kt{aFC^n^dy z;cV|3AFbKq9}sJF7&Swh-VC}L6IAG+z-oL#XJzc?0n{$$gc|&VM&t-L?CmNEUAw*D z>jRDUR@ZgKi`aVxIav8eHRfP*KI=KF)4NVYS~7F7z~1#u5gcyiuFI8(bqPq^Xmnk# zEOS~dZBJ2l<;BC`L20qa#WuzX$cU-%Y#2_4H7XG`B_^vEzd3E>D{nRdC6;p*StqFN zTFj&KJe)(7EFbn}H81<_1a~1|B2AANc_q)Bo-OVTvYN3YO-OmU8|gKU4K(Zgj_Nnlxv>FiVQ2?@V#HJjxh-6Ax|gv=CmM zBc;5*S2*dE@z-6|6ZLYXd;NwT0wLo-gJEjq$R(wnJdb&4;C-9K9q!ZPs}0F_TBb!3cG>TT9uxECY+i~;{mius0;H;axnvD}P0JPSTe zKp_3Gfk;bnS&gZ+#5zuHnND;c-h1^En_@%(Tvbe^xJQ^tIR_g>fWNcZa0n~C8eJwL zG|!;G+_eylA;(ql{qE0H8=$moy21;3^i1LV#=GJzy~$f(;zo+PXu)dg?km#OA5!8pmn*>) zIPP)T#zKhDzA7Wf4!IfwbuHcf06$G-YK}3mdwTrtSF)MwAbX_NzP!+Z#t1x#PK<$9 zsFX2AQA#+izN|hgd~T(wSmz_Qo*J2}-tER8J1#KMh@~0&(edYd787q--zZLy^=hj) zJlRIRSA1!JZM1`kqK%lu*-h{yQ%1ezokM-qV2SAakZ}?BM#)Y`oygIH{O@aMechv3 z?DOYlXtz>666&J%jhOQV&_SYNjo0nD!9OZ}J{?IppJ!$syRYKw$K+C+!{=R6hlHXN zT&q4_2ZLwYTUDhwE7YvmioGbto}D=reJejVE}IZjC?9ZkW2&IEU08g^^8`t}ab>1D5S))a7zB@537wS%AfU>=v zMo+brKtb5h&%QVS(nw_PA5&+zUtRIv7CDrEw*sPk&lBctaf|s2z+l4`N!83z712tTy{|~OmTS`^4ukfE7kc@R+iT8q3SqJuvb~;~8|g{}s31%> zCvzYnZGlceExC?K)Q>VY>5#|SJ^1}2%bMNz^!kZ{Zh+E(V(DNj_MofATv6$nC+@F- z2(d)PO@69j;5h&*>{IwzU51^p&i3;9>ZB2?w4sBU%DejtkKEL|>)_QDx+kx*iW)Ja z8QWt+NtcYmJBOi#gz;Q>6qsR--rk+LrWH3G%9et5#)_0^e%Y?=zAFngdIfB~S?Y?a zVg2nh^B17Z{;l!%lQULAeINF-qo)-vWXjv~FTnB2-N%FW2Zuwe!C3ZM4(`C5v!8U( zS=Q0DaUxebFfjiMKUl`W>N9}!!7|F+!kOL0(e)*wU6JpbRU?_|Mf+05$$;kmfd6Ri z1Bt!+$VW`+QPd{XS}>u0Dd{T+`m7L|{u=_n<;d%*>@Ursu9vl*r%hqkk>%?(t0V@^ zmhIeF+Lee>-LAeP#uj@3ep@|udFD<{bgxK=>q0Jvz?GEWb@{rHh%>XV{8Iv`R9mf# zNQ#G&@2KJK$posRl%z@J?)Oupk&m~7gKXr(148j^MZ{T0tMydNSeZqHA`CSHu-bwV 
z!i^z0A+O!3zdKrLz#M5XGFq-F;JrerZOEOdI8Ub0IKt{ye~d~I=#$Uf5a@2l=DQt0 zb(uo^j7@ZiiMdnF%q879T6f>Gf<#+eBx))S6->y80*W}sN7#<+CWXJ|JABxA)Z)=h zh>3vDawN&3VN68%@G7fHFOP;5K+HNBsNdjWWWd$cD)^)vjEPTY_DIf5CT zvOm2@BdMoBMi+%D_tcEQkA8!lNQCx$)XW!VOCKn$M2WC(uQ^NI#J z>tfmqoDwFI;EfkdMA;V;5Lm~t)9jH%nZt@3?!pSg))YBv8!}7cg=Q#VG4LF?8k{L7 zdM_H2h5VN^JNNRLqvs8RVt?aV_$R7r0n9(2ScRCm=2uaVgv7uhXu zVcGu&jsBkG&&7geOucEy-}*HFPYx=VoKG!3-(qG*>?Cy1@X%Z$bFti+tsgsLcU#7o z)`b!1@|~=?zshdA7mUb%KkP64Oy{C6d-TiWb3FZ{aZNtk7H*t(Yj*oyWv|Uep@5b+ zEGw^%HIk98utAG3ic&C9y-;IWNQ?vQV2rs)6o0ui)fM&^p!?$OsrA4H-%S7OH6w0k z<4McHA+zUQ5d4>|EEeUvgolBl%uZV~{1Q>{ts9Zv0|4DTHy z+uh{#etWUhabeaO_{y)c_92)0s@=QjMs~%YYUYxERHSTe;~FD-BWqO6PN(r^g7x@a zu_M}bavB_xv=soVePn!q^oS%z*6N#SfrmqR2%JJ_s7!QsQ@KjRg4Vt znz*BxenmBLm|`N@d6MiRZ+I^&0C0it`JU(Z!l2=&K(X4z_rKEA)9)6JT%HO=Gm*;k zwFzYI`R8^Zsry#xYI@~c$ng)jjoe2#%bV#cmC5mHcJ9t#Fd;GiRAm|UazQ{utRLo4 zr%R4Q;BxWac;dLrn!`)k4ud13sp~G+?ltF2n5TNu9dpNH;^8J*Si=|IQdiCaQtfm@ zx&3O64iU_zwJ-I6f=vj?sO z1$(pXBgc--wxOK=%){Uxum6Le!E45U!qC93$Ng&Q=!VLfDN1_Jwbx?^nak+3S@r%d z9Jd6CcijXKQXS?7T;(m^>o}U-@1~m_8#lkp+y5Ro+SK-_Fkz;=N$dW8;r+tz7fBo4 zbRnK|rJ3hyg zSMf8k{`r)eK|-P1cosI6Cyoy6WiL>^i|fQNcMS;60!aeJs<~w)+_ioI|DsZ`^$^tE(2tK)Ii#7O|RthS;Ti+ zr*EHK_zcIMPC_qq#@}Vzn^xQuRW&4Ll6fVh7D?B<5azBg?>xrNkcA10Rpz)@hMs{0 z_oz#cdfaNREt8YE6}Ub-RX-B#+m^V9Po4JNJ9Q26&Pb~)CmnoODDows8b%+=>}>+- zXqwoOr`6Z)eWUv7g4v52?we0~E1dQ~z>V9X=RI6<($d4x?Q&)EjQ-{2)QjY(bKS3R zadq_ibt>Mulr$drZ&A7Vqx`=fsLlQVPRz{1`~HfI9x2Ge*F<{0{8thn43sMVKN>ra z7r1520><0Q7;1J6G6o1C0H;(v1ZB7W0T0FG)kKfdy6@I!uZHxb+a;vMQRVpB#LP-N zdDeXo@!7)Z?gL%j!{_}!v{<^f0pi!FyfQbIs(fTiv^YZgtA-nz=6qhfx>!H?RlB^U?BxH^b~RCpmDMl90|kxH8&&cPZxSQ` zC7G zwA6i(=UwMQ-+TLh5ZOq7aVXX#zOt1;7)4)1WS7v|eyGtnD5v@=Bx^_Ic|@t8?>XU< z+2Fmy%Y8jAzvBh78dJ&G-0TiNWF&oHw(mXkQ_Ergi*S?#O<7Hwb`4g7lOIYdjzwV{qeaVHU}k$jx3uKC$5DUv0h^ z7OrJWc9Cr6(lO*Plm4~dlXPG-r~V%p+9TKdzg^`>{0p#z>?W%Cjea;@sQkB94eV+= zD`c~l3f*O|oz_)(FcsES*qMX=2V|v5zd&?=V}b8wzl&o2v`DCbslu|;5eXF*uugmA z3BMck0lGtaoJnudR;>PpM!H*z>4(rw6h2ynUM2i(*k++KJpA}O#%AVkG_8Vv 
zj;!{t7&ffYI8Bmak)Xdd7KS(iP&Npu!e%x5RNLi~zGJ<~)w$XzV)gJSO?bv)%rGdD z5_9ic1Xa`n-{d8t0m)#pc&_vi4)W)lu9rBgYb_`K3qRU)vvik;Iy^Il*C~C#Rk>+J z_L_F3OD~gQC2PaU>>}&AOdL|;YtiE1;5_f82D*$tuN;hCpe5&jd#Lox{)~BwHAZL2 zzELWg#Yd?CLYF7KD^;6t`(tS*g6e|x#4BjggF9!)H^;?Qk)8Gb0VwxE3SCC(F90Rd zWxCDwmxDI(h{|Sh#6OP$B+vh_H_V1(l>;q-U}y9R0RU$au`+gS7T|RzB@eXb*X)nZ zGs(yhRuMg^7HFBHEFbVmBcGWg+5h!OPy{X*gemeog)}<+T*Noj$!Ybr+y2g|N|nPQ z(zp|reTLVStn*bxqlHai&LK~!RP%IqG?m8rHDPN0$J4=&$x^So9G2r@@9A4=(t}2) zT?2K*RJT5Of7pM{OX@sFGf9b8l!(fN^Q6UUgVe_H3gn;&fB2n|RlkLCR6}W(mNq{* z>ECDl&(aUc^MiAj{#4rj`SllIqfE#8r2Oyd;>*bQr-$e5W%p%OMbwK1FQorjrW+Cf z7dFDW(-0Bfl*}zv9DwRzAhil&sxM~nnXmu?2p*QOBHd_c#C>`$bJqka(;r!;HjCp- zcRQ@NJoE@ig_>qof0DBul<_|HC)rNvIm*pP4E6TuPM7CE2~>XYe;`}ye~t)DIL8Wn z_vTvReF!guZqr#N)Ca#^t7+ZPk!kwSJyzi4*ra5 zGpw8s7DSLX4Q=Mjis7MkA*Q?UBvU4qs8--6FA}ykYkE#1Vn-IA&rNq*_r@!<4Qu`I zx?_K`4KJ~AEq0xcS_=mA$o+9gw8ti|250K|;a`B)+~Z3;Ir3qxS^JQ&JKzDI)BQl& zAi3t!Humi@+j@7cw>4|@n%*>>IK9&DJF?$c#@A<>*W{^ZnhJtLv@(QN*aC?tph+Fz zeH(VhQN;bt@jUGX&Ysn{(VsMn9&s&0ap9Homi0S|%UrgJwkdZS$uA?(c7qevxkG?Q z1`Cn(aS-iQ?1cJVdTY?G3w2>@)}c+H_;_u!y`+u(QPcOy+$PuTRu>}BDJ{rc%pP0- z9T3GcLIfe8Q0W#`sruyHf0Dl@iv(GTWc>M(cUhw`_L`B)Kk2{6$bXNK|0lsun+jQs z{P!ZnhF8D4X_0BMn_DYtBp2^rBj^9wxb=^l9h4chnU&5tN;Y1y=;v~~ov@2g+Nf^z zHbQ!WA7QBB2-$_};SIW1PA6}TQt2Nq9gaqp{>*QCNEWU3Y+cx&JymVicK^!ig!I^! 
z%{1Cvk*Cz~F$h^CH*sWmYLjdirQ#-z6;spd;I8vxGZv$@gS+oAHx<_MSa!Gm0{l8X zYl;*pF|BH;{hC{GsFFHyJQqIo$FAo9sRD9djEPmPZds62@-R5bl_RWE@WIwB)l3LQ z1tKsQamWCGWdMNme5?Xu7*FHW#`&{?Av!ZGa-?ueT5Y(1gP1un;Vs=B!ZVezE8jA{ z5p4rymD=8Z!?=)2RYt<(^u>s^=!=v#UFPGizD6kxeR7;f1;+w(40-#!9%eK^O8;rp zOQNhc>ehF+c8jq#fs$)8<)>dQa_u`l2Bq(Z^cQiTlfmeBZAI4m=?Z$Iirm)VI=r$y zOl$4xwcw{Hy4ihE@lqwD7YXO5u4)EQw+Y30>kau5e7?sVkNKh!cW{lAJiZA#3qVQc z97V>CIE8>2Hpud;0KXZN$=RmfR6XW-++>So^ID7_UZ{39E^&`@W9$HetwXFAQ-cvT zGMV7^6sj_8OQ`915ne9rE8DboBdQHDhyR$=`<&hwjPWZA{G3^J->AD4(J}oW3;=X3 zvunS9g~VcXJTpux{4bIZo?6Y`&do#0FuB718Q-&H2}T9WI&H22h5jx^M8P*g!NALV zh4?l9E!};EKN5yN-35_-q$0*&-`v%;{i5pXIlix|dk?OSdXaf_4;9w#2)GQQ!N&kQVsj0o6NdYQ!Hr-A9Wg=2dzR zk#RzaSj(&?PhmpaZF17D5!$7bVblTl4V-6J7r8OuIIGp8q$olQ;r-nK>O_|-1KQaV zjK}z!gq(N333RYCD8>18&i8-h2fYVJei$Y8(>-^}=l1*$(i|w7c1Ysx|Dw{KXymEu zBaMVUkP!ZS^|!?P-)0z*M!Y^^3UTfIsf#dQElL6+B)zGD6Rq5kp`N^xZguHg=S0Ct zjtkzBIZcVKra>^||7`hHWD;ZC=rW<~ZAvK-_Mv()j}s(py1d22n9qJrGcr3?3bYGV z7#mmUQxH!}^G(M7&K6bsu~LW)hx+AVLsVhdxf`9yT`7lK%c`SsP!3~d=4P*eUW=Hz4I(nu7p zi!Zg~Gf)cu2dK4w#ZaM*X0DKe!TbAZLM`yk+QaV`QDYQ;0UnozBxL*p7O#^9TQ=|7 z&Ybcz&Wt)YF02MxbSu0dlCWWrIwB}6)E=seJ@lpx;wu{h;0+(LXpfKU`MezvE6g=V zh6B;AG*P+sH@H>+Zq{_4Q}P=ewPc^b3He4f)rP-F4w1g8x~gpbMcbxL2f&vhbA0G8n+Kj`(Z8V>;}3t;zpq*1 zzN>GEO7TwhH~b3It&Kd@SWC7i9N+v@7f5;3b)UD_lKNgXRX5VbuDtc@&8Q%CCJXaz zily5)E4cAoBk>KGv0b`e=v0otG6In@_DtEx$7P3V)tT;1>3udG&Tiw_X`*Rko_BqB zw&A3*m-kAIl~J3Fi1-qE>v`Ox$y}kfm+S98m&!_#s=6i}MI}^uEiZ|Bd`4qpLjtuT zBbQ4IM|=5fB6CFeL(MSfhRBni@SCg74U&FF~CVIJP1nnRB zloL29N;z1%SF-w&FPb9avNqL#hn-cV6pN8$t{nH>;WS~!CQ5>=X#eWSS02CkZ{mLe zibjRs6sj$43h878YD`o_P&cOuH@`>`qWw6A=Cu9>+CGgN3-~WjC?xam2ft(e1*reK zO=|22KQ_$#R4ZM)k8&FZ7g0pgi3aKk3f=_wnjQu?n04#b(6*4YBrV!P;e?{H;*^K1 z=6}G7VdAXja>R?88H!8=(g|Oh3#p|G7rb0Yt5lW2&VuOCu_*K$T=dUq&I}E9U`!dM z?491_%f%4Kr$P`20HBuj#$07@-NxKSx)uD|q8rWWv?AM|b;p|poeYiC`%#>@7lah+ zm%o(5z{|)e1h}WDyiKn%qONUh>9lU#)#2n8z@!O@7AfW4EB*)`{Xi1a)Mg)-^gq~p z%b+;kcHMVyf&~vQgS!MLz~DAGgS)#7ZeehDw*+?y?(Ul44#6Qof?LA>@XA{2J-gOE 
zXRjk4cGaq`ntG;Yx~Ho9d8WGW`?{_leHQ#7SExU}f;-g)rQ;UxcqTG37d&k$9$=dr zRE_pZz=6VuSY57STrP~h^S~@N{u7wkKQT*h>8&j^GjlT1b=DSBd^?MB23Q!){{kEt z6J27ST(Yy@z~m*!A3lg4 zU3Y2Wy4XDJf|EsR%92{tC~I!^WVeiTerEU%THkszzF;&EUW1X2$#Jtc(%!bsr!Z&B z!J8$1lBGr3cOqbI;Lg#WmuYd`syWVS6XW}Z0q%N_@PU_dl-lCp{qryr?Jcja=V5%R z-N%Iog~x4rZa$n#WqJm)G@utdR-$J5#_x^`8>yR}Pp}_o1g$y^-|JsCgoPNS4$&G&Y)oLS0Tg z3j>J|m4df1NXeSUg=Gu=3zm<;@SD;ZD5uFgv+<-a|dEa!GC9mtHmj~v4lsXmh zZ|DVfhW1<3K*MLa&BiVxZ^qk02KJyQF^4~Z-_fK?RA?K$Lh&*qKyjp*2Vby@IeA<5(VK5_I(#gnX?fU+5nXN%@ssS2B|7&*^Ysnb8?;Xia3T}q@u}Z-5@>Ax;8HxpfLGFGEc}w4>A!J2>cw{uW!#_C(DnkhFbSJ-yL-S< z;&bpM{*5^FG}W(dAq=b@5xJISq;#utvE+49+{Zdzs?gH&xq+7aR>P`WEK6z>mCa~K z(XsL^Rl(s%+6UJ&x|@=T$eXpRO{m?wwM+9ZQ`MTo;PbO1@Q46~<__h$xDAmC#+P)n zO-BZV;Y1Vd$a2HQp&vRj&Ds%@Pq=(zmX3)d2V~PK3~0Y#ukq)bz%x@W;e#YqQ6Yu} z_VbiEtW_L^fxuch22As@d5+<$sJ5lVujY>*7qhw9Ar^n$=yIomEu@q;j_g>2P|%_j z)bSk(oLkoQ+pJ#RCne`sdk>V`9RR;soXiO=Ow z>>x6S76(8@)cG3zlzYhK+|uOt8FlM})^pdv>iQ)9sOXhWg|Vkr9M)ZTjqaR7Yuqb8 z%(c_B_;k+kqs)c9?T^YzaP@%c*SB;O)ic8An54wmyG-+P*Us{^?9w6<(DU&U;4vP6 zOlq0`*Rt`BsP=Tg9(iltIu@DEuXTc|xHHygzxOPb(u9)@TAoeoOOCBC{PAs0Uk=BU z@5lAaJl;}F{eT|Mm^P!~p#BBOa05oy?k0-I+*f^Is zd61TkvNXv*!t%`8+0A;zDM$XvYW}rjs5>Uy7m*>f9A7lmq~$C7{{p~)oxj&kAN@IS z+Cb;-ysxg3o?F*AReFjuPVw^k9AoC8$GrJjLjJ`i{?nC6)V7-#5j-r4q`b2v?!H7G z2?Jited+B^9rklTXV-?am*S&g^()g@`*A?Te8y*q9WUEQU2)m+ks{F!?J4u|S8+k= zgy5%f>iN-kRFbkd8%B?sDeB^qj2&8F!~xAQa~iWOar9?^s!E=`k)_Bx> z<5O<E1r?`=`Si8zHGbRtiU`guQsogwv6|D1EO~ ze5#eZzWju&fg9>eL2!!DKsEg81I}*@O--;nt+Y`+AbqpU1yKCr{@^T!j1jM!j*Euu z#MLA*VZlaB!cDB^I#hc(M^#=Z&XJ77KK~|SEfH~K1s^HMAYF<7>XdYl#L(LcjHF3S z1eoacwPcS{{!%KD#Eo6X6-&LJD|!H8zVL}X`4E_K#h5?a6mCnAn>=1C%TgL_0JN_} zq%kAcSow+F+S1n+C&z7M4XtQOPEW8WW_lAd+TxcirZfO!fiFiW8IFXvVoc*f#AnRi zGQ4>dU4u1S;Ndz_NJ$Rx*>yGbwF%<;7tD*}lcbhHB%LPAi9E={;KRC`FB%4rlvt0b z(&VAy16(|zEOdGM;LT3e5+pUxv&a^w0vvJ)OPv}^3;+ON%rLQnlr76BXhuab(8O}I zYF0BFTuXj_`WZd;fOnhX zs;DhRFKk?YLd5p;LK2tAX2#N4#<~Pfp~%1781rV-k|YNwAhM-=v_GV|R(7aC6b{R=>+Ewi+ 
zM#GUagwZM&Ct6)7z0#_r{LR)Q6HA)?eAO#iD`M={X8A!@VCNe74LDgwNP5d=Fq7!p z=y=)CM$(}k0r6`Xr4;%gIgQ{}9iysIL`U#C`E=AW zhn#TE3a8CY6Km`81H4PN?78RGIMfUXX_RkLK8HKlT+58L=w8VUSVRlLbK&9)gi_y| zRr+UGZ6uUEQhMV|>*d-=(|y5S31c z6-yIQ!K%$Hs33ao#aP<0PcY>I(8N>SH3#5>9n;W(T54|;UuZk{a(@PY8zk1I8YgN> zjL6Wy(56Z@q~}oc-9+FblDleV2~CG5mASzHNJ~BqM55Zkp;a}p`I=u_rdi&o>_c9o zaH@SsEFXH*_(|r7PXjsD8fBy>sy`R7(5pwqMnm*4?dQD1Ev+Q+(jK<- zyhxf~UKwhts+i&+-q&=ue;>l_PYoggOAdgGjA=SY_RXqAn+U*yNU>pKBIotS&DPjV zqYk~rKQ%=^MCOIQhG!90K}4GwDmm55l?H|;TLc(GbEFJPO@1+hczceG28~UBot<|c zo+XHoRW0h*$X~NRcj|k7_19X~=Uo_+!A=O)=1lixOk3zHogo|jU5{rGOofQ>&ICrR zkvqb6x0K+vLpmD0?Umap>|>UBw53SEay~Izu(jM=C8U&o*}Hqr@4%CntyDL*dFChr zfyk$2f(3W>ix#s+O-+l;fn?G`wiJ#hv|(dq<;sSuJjI6aCJ2$)hc6^Qe=|%x)^(Y; zTz8rd#!!%&HM@3~e0H|7bSN)e^;DQy%JH3U`3o>|xs16)w#sev$l`upu(z70Y_K}T zS0F)6d3A1~2H8+Y<@$JQ9fthskT-iTCkN$J#Rns5;>Vk_y1cl~h^%W>`CTbLf4Mrp zt>HFWC7gCTuGb>zcdUFVQxvxBc(LF2{eEO}&3@KYk=3d7O5m()Sx>zx4=l5#Q@GZu z(oFWhNLJuTTZJQIMK-*|wg|gJi)O6g)e-y76!)88wd*fHSIv5mmG;NmG@A~6yMU-( zwW=$p^Xp3o%CVV5Dny=dwtJM1yun|_B^})H}5W`bE{WQ z-1$i$OY`U7cCT$Q*=nD!Lc;@YD$Me}r@b_DnBvA7l%n{l+*z{kS80X3clS|kc7jte{hP+L9yJSojI2o&0ahXUYdkX!4f3xNE-;EyffVweKGL%S%7Z1j|rw9YVnfOq{}F)NDg$v zJf(;ly&v%jU6PZ|4AYd*$cjvb4yoRFVtvLu0rV$Lau0}urbqZ9b-`gpG%}LR>;h?9 zLQ(|ya&%Xqk~LG#XLUg4Iy~$POY(T>i(!bjxndp0IIJ>+20*NR0WO(kTEZVwrePuArfJ8=J|7^>s}3U!dI^dmPTyjyvPQ-Q2F zbq?W6R-ZjHr=#ZbzWWN5J!w-#GJycx+v)+}Vcb$!a2Q_BWY1@YbydRZU^Atnu}E*3 zY3=mPcPPMwR3RH+h9IGfXU$wQR`%VW`QIfllTfr_TAVZ%RX3^LcmD6g6 zpI^zoSx-2<0Hi4gR}oYfzyJXB{YQO3quc=ss}#bDR+#@YE2M*yZG%h=4BkQLz^gV( z-9=JDoO%7Kl>3TsGaxi+yura!n8t@YRw{^kPczZ!Pjsmi%smuvEFccSD)2-_B`c)E z?!Jj@mgAPpea1)Gz}QlK1}4UQ zjMa!7O9eG>gOOY|yF^pyS2{PtIq8MII@?3(T(1n&!1MWX%OvW* z7vkR%`~{#+_$RC?o%CPS??miHnLS3llPyDrL&7N^Vl_7xw7Jn*xWg`^70XrHOe>TM zNm7Jv5o7*kpZ~LVo*eTGGn@q_9ZPPN`nWphDnd|lTxkalP=V544ZwOWK;`rs3kJ-5 z=i7K~)4YScd{_&3XI$a?7eE`lo_Y6b`?>V!cKS&1b$hsIUbff;d-%X@A`Z4vrKq<>UG{|w_m}2@C^!j1S+q%HhTt*=P|Z&#}6Vj z3H$aMikV?#2gAgs 
zCR8A8`pZup&y$t`<8Id%5C6s2h9n>R`>tvo$_K6~vG|Ra8>n`nG1GaE($(IMHwC!r z_ww=|-u=3=^i6~CmDBlf=Gp3WhevAaVl-A$Sbvim6Ks;1f`%+#m9n2dyz}!~Jc5JH z#Sk~+`aRR1#tZ$WImK@?b@%ULsxwNDWS;_l`kHSV6|dhGHMiDTe(0+e3{o5FsxF|_ zw6bNiC`=JT0_Y;p7lpw=iPmCVwvk|pn^=F~wA?~xGw~4%Mm!` zCQ^e3{AX4nB{gG|&E z&Uj;aNfY9w15xrMv*9U@SK((K8*H1czWD@%0#4faw;`n(U(RFjaOdc$>N48|bLgX) zlKN9N+rhl-1-wL#0aWVu;n!5{=HGq}rK@?(%=yvrQh8lr;egeZpPRnPg)+su*5gQp zt%>}ANz{*WGs=-EGow)3ZH@?Ihbq=`W##fg-vEeW$Es2d zrl03<`k|1ECc9{yCRwKzALN%GxH6kX|KL}Rp_dAC9|ge@`;1Qf@e}n9O|2+964K&R zEJlpztf|I+wy1d?*&M?(3M4Mj0HO;bHKh8ZO1zJn@tb&j z@RgawT!PBc5#T;##+5-+xMmPD`=jrR>Ci%IQKZcylYmzeB98_ho-s+02UcfEe1JU> zQaz~Ti!B`AY|nV20aVVG$qR#0dZLV&nkZzK+oeJHDoxwwJTgB;&AF43d2q8FnUL*G zVjplxaPl~;FemXTda){Wqq2TfEnN*_P>IYn12m6J-=zT=a)6|pDKB;Z>0+f#?MD5N zG<8lxSuX!B`wYj(&PXqMAEJuR@>&Z52TSxl{0!C4E`#bhQoQsYKi>Su9{=M=h;)q< zL+dRoSyF@KtS508p{r06c ztCj2QMSSvk?S zEK^EB6bJj4*3nsS?&bNdfv|6rR^X_0h02wvt*_nVy1_`@S5fUAdy#uD^}X%NrM<@t z(Kt*urX%45-+Re%q+_giSlD*KwTA36XWyvuJQg_dLzyIY&52%xg_`5(ZIu_EY3k`) zp&5#73FHmBS`{s~bTxT^Bj@*cGge9Qk(w%T14MnZ`lzlfIi^cr&gLCMdwutQ|9GtW z`r=?E$Fe6e6>OfRu!GUS!|?MMr?)^m+#>#f$B;*5ek<8zL8 zx*SR~2{+G6H=`Zh@~uB*?VXJ#e1Z(z;ESSsbS31Z3QgDK7H14oTUKm*ad2I6rN!fkD!Z!EFR&h3sHU{$N9(dTzdYRMQ#n z3Du{V(N8V36(y9sNv_F)LXU9CpyN`eU&VIWY!l4obnt1w4KRIMRD|U~pSGCwoQfXR zoIQ>BvgB|!KPhxIDOreqi(fN7H)vUvK(*B7)4(xD%efuqd#dJsNx6nVCSHEvnv5BQ zemdStX=b%UDJyzJDhwE1ZA$Kh&l#EXxYRNk3!g_f`6!!s&c!Vf<{DekOABq_I)dJb z5Zs1`vX|4F)1%;)=AYn()grg1aiixN0EWZWB|dC1Bni``hA4&MCi50y(yt$2uKjG& z?{vBaF|)dC)H3}<0hgI>64KKZgT;-xT-^AS@HJ>c7&@Vuc7_smoNG^@k_*g3ap-8l zaujRfMPBipf*t5$pA48;oOf@VpM`z=aIrVcQ(U>%4 zZV4`wA&VUguLI`V*-adA`oe8bQAk9mDh8__9yLt5+AU)iU7DZzmY_=eO*_OqzKFH3 zvRxCyJXdNIhiym()?oyz{_@ge{x>P-x4%)F%3B8x_EVp7%n9KM;6Tg z>^&+*5lQsq4h3kcpvUU86U#6{9<(Nn$L~E%UT})lk6mN^Zb!J6V6&#vT0`HXY}|5c z`AGytEc}vnLrlbN$JKISnK|I?W%2c-*uf`hn5dBRxA$#fSX2X!!1x0Y#dpfiPLa+M z%I|HCb(#K?2W;0t2ll|Vk?l_R?&XsQ$3Gm4Eb{JgrhvJN%loc-DE#Ye0Nfa zJ`1}|DIsRjJ_<(egV17_fYE(fAT~2b4IB{Q0=Wvsn@_C3@DKQp(u=vqTvdtd 
zHvc8X`|}H8;Af=+i3I2b+B8|3pUj-)K$8-1|ZsC=lH_eoNjJ|PO*XHTgOL*@y_!hHj zIIPO*aH(S6>GR3YP_NY@nJJPMvvGJnn_2F{)eciL9B$;~)PnB-v?p0)5|dE_@3|Vg zWO+MzJK6AqY5!KQ&K!QW9<8aEB@Lo(!2j(AN3#5T$@uRis5kqxb^kqs{(A=fS7(qx z!ax6)Hp^81>50N1Gx`HRX!1{Q^yl^OB-nfp$S}uWn1I(YTx%Z>f-}qpc^mKzvj;Ap z)E@!#U;h{jg-yRpT&s?h{;cxgTc>3t8a*_@9YX^Ei`cC8ahYc$=YZIC+g7N|W%>JX z#7`_Z@>bMf`c88Ruij^*Fq3;uZT3~W;_%BdBY|a~c@+vkAEW`bVM8BRe+8V+EY#GC z9s7zorX}BsZ}4-7x{9nitSyf9R@nZms`{B+tUN{ZjR*-GtvdALs_=RfHkKF|-PHUn z*{+=XNHc3RX!o*z;aWr>(p@?*@<=y%YOA?Xf@NzRg^K31KRtws>eVky!18co)X9(b z95^5lkoGg7tKnemt~;CM)tUXuGKTK@{qXSNV}4|83FvS<Zy0=VoufpUh9{v=kRnTsFEMhwd#K?X`g~ zC1tg>b+rmf!kp$H69R4~Nxa8lCLG3R;A>lUZut!BK*()BLD~N%x*Bv$k8?r;3* z437E|4QQ%h$tPMWrp^?8Y%1z)#8b_>y;HmGr{ZTn) zcwKVJf3qQCkQoNs_l=fYbKv1H2Nk?iELMvC+@?+j5Q%OgT4#7-;z*bCx&9nmj9D-% z@82N#HTSnAl_OTP;z2Tn|Ir>3f{nYnF>B)Wkr=TYsbpdDqeaAL!G{UZ8aaN(B<+XC zuOsTn-;RrWbg7SKK5{4WMM%?KjlUIjyG#vg68i6h|L=cVB1h^^ZmI_iG}hr+r|V*j zjb`<&x_xVQyQ#9i$>03@+@SgIdGg=O=RaBah0P=zaavh&5o(|l0?Z1H=A8dM^uj(0Rbs?td+I3E%tC<2pvwpHlB?L6PedSw3Qw&lRs5((4$M?23 z=-Y6d+MitOM0fkPL}po51KUQr5I%Xj{@>%D3f*q(LmfaPj5@bjfg} z%O9`maE6Dy(5!78e>5C6EEgqvluXU;_XJ*@8?#^fcNRK#{?S=K7Lp@8cTm!)fM zqUF@F)gzf&HN>iLo<;lQLD|9s9_=r{fU3&*eN{|~Yty#SrWEsU#;D609 z^wkZ`fF(*j6taUlz{=|Q7;h5uO_)54<+PhzjPN2t&AfjJ+s>^!D;~uuE+k#4rhmC zz(uZoPKf`St5ai4hDRK)(Cj#cn*~|cWR8+-c~v<>1W@n;a@}~RV?ZH*#stENgFn4R z9FC5!G;;~v7)uX4;6 z*b^+F1YS|&?YSjYGkmTg#Su8+>jKWG5rE`KGZZRG!telZQUJf!V9ybUEnHl5I7AX3 zOsbc7z$Jy}V@bHk)`H{thsZ&0yIK6Jj^>JIMmkn z>a3yfku#lSfy4+d9$1(7x~19U!SdDRCoX4na!l~~C0Q1aM_cJhi1(vuO%$i+ySQU%5P?yN3Z>aq z>$`Xm-IT4kT!POx_5?9BfWvJWX1713#!_0QBhe_`>2#j3Xh~D6;LWH(%H}>AxzVG1 z4E*r3G6&Gro6Pr!X-ubduN@i1>|KO_63o0O_|<=p7lLOh{|gWc4mA0PWh1Ry9{%Qk zZ9~c2{PK8rB?CEPa^)Y90CIZyFTyhgJ8K=FT(dp7J*Itt`kr``6Key8fEXpJZ8YfUCm&}=uHYkgzUZ!c8MD_Wb z@OJ2rNKedE{+hj_DI~i;HQOVI56plzdugb#_D#y|+K1onxr5lvTQpf&@H?HvF-^#? 
zQVU;FEwBHTt zs&tCWLBF^fJTQG9Yz`L+LgX`W1X=HsjoT*l=2|#dK_O(0l^hJTEA-!{*9+^hpqcAO z@{D{;7(gVdd&ai~hn`|xkOGokb?st3S?(}q`Ag-8Ip;)ZkTi%f-vT!AX3FdYo>CPq zrWBSk{$8eS?|7wfP(fv40=P5jeO#QZrYc?wgNSc4M>)V3Nb~_sd;0RJ2@mb06T?mB zF~{~LshqG1wtz7_^SYe!ylLd@Y&qTgw%Fwh=w2&`%#WZ}#LaNWrKIcOgo8R9y?H5s4K+gro$ekm zj6!-w)KUszRotoekc95kiNc{7;y*axNjW(IYK2wJSzm{R@_sgZksGz&((T3$@9=EO7Wo(XqpLFit){qGqYQg&f(DEcRj|9 zT7A(JWRv6)kcMWimd8)&YlgSRZkM9hrr0yLR$XDu^VdaP>(GZO z{9ev$ok)Cb?rTAZ0vyPXR)Gt$3J-0K9xDB)>3ifJpG|)MoV#lS+vz%+&x&)~m)4sb zDj)m%+deAoPH^pvY_O3qcdykcM;C25ND_qhUzYo8P}pGtP4&|{E$-|%!{^KMVaP@0 zrg%hthd3lpEYOY1b8q_y+~`G#vj7hL6H&B1GJWse&Ml=2@0KNSyzzbpuU!+}SQ6`+ zI@ifY>5O<|GjWb&)2wDRF^%Jj3uAoJ>UQx;e4NXzuQI*iGCOVFiCvXl>cSI) z?ojRqcXfKQf^K%w^pHa56?QdZZ`N%v=xnfGb! zt+wTvm_!tJ(4M#&qWB!Js(7l(0(P)aXgOcLkG|1I61+=+U3H(9TBf*Q=IxZ?$Bq|K zq;cr~Rr1FN&$EPyrQF^_b&W4I(Mt#)q~#HF5<4`cuVeww_6(qn5Nie@PK1EG@I3(4-qLJY$U zlwWb$JoAElpiP!>>C(R$CIL03PPa)d&KYl<9A&m7{1%azN#`#0Tp)-c&Pl5D{0U8- z7Lk(C{PG0DoH~vcs>`Fe9Fd(w_?_yp=s=Te(iM)25M0V0hTt@a?Vo~4i>(M%+lxHb zGEXdC9Wp@F#EqW?_n8;37y}>BM7}YfZ4;IJA_DmJ%Of^^ROa)vSlj2WV!;^iYQU*} z#VMIl;c>H*w<1F<`hAXG{sJY&9os%PV%jpmQ)S}^KTqNXJ_N6p$L3PSexDh zxoDN4nuul0Mfni0ClN1-KND>ZRc8CrWIptM1G^^%=7ztI19oC*gLLG($2#Uqz}uw) zJXc=03J^U#BE~GyMwu7owBy$3EA0nW>fbpI{dgl4kF7>i+Q6KmRI8x_dgZpLu!Q?`P6Tw)|3x%*>GfM?dc5F+cAFruH) z$TGcic+JzEY}3Z0SC}ZvP;0>a>wOBH-4pY6*B?l_w@Pfk;`LkJ$RKGqj`!&kI0Qyj zZt<5B84wTQU8J_$Z5R8aym_zjUT5Bs+_k%?o9@^2W5R&Vq5D%Z(un;n)*=cr$AQ_x zs?Exq<4TdkAub&-B}v3E*J1t~JwdyEi;7hds-sXeD!@)C9EU5S#}ucou$ApA#Zh>h zD`Sl&`|{jvPs;aNuHn{rmhSzn`xr-yv4a8eo^1P?7;Dt`R8iD1v+Qqw;o6uePt@O&43MG|7k6iibLFRE=+LEjavJI)K zUf@YIEp~I?1Y8p=Y_NkD(q-=9rKQi3uIZ7{a`360kRg8J0^(^jf5VuITHgSw~S%Gs%R(0YAz72LaJ7{X8Z8+HVIc}1<(oB?ZM=#+qrgcGyN<=ULM2(en4!FMl+n{Ya`FNhZ)#D|d5y??F5H?;> z;WKi}c;DtX90qZzz|D#;=n02XdB}l!gDe&)cWT-H$Tj#M(YSXl3C3S;u!KV6oZn6A zATrQh5nDwm$am#@=z^B2IimW!3D%ZU>A*`I!h>ttZvS4K3=+Qz7o$iV;U=1srAv|O z_V%g3ae{xFEeeq&YsJRHv6wJBR^>b8LK)|r6Gv>NqY#J>vl2v8%9ZYu7H2%epF$Q= 
z3cKQf6>Y-TZeB(k!@*Fx@DmmJ_xG%=lxiiJ8@^2COj45!unmR-;obvx))O_y$+9CE z^JtsiCJV{z*sIHNYVGIvgsUS@kaMPwDUlP+y`y?>qI$L*%hXp{E)7zERth0$j+%z3 zdJQDYH@u0-bLc?f21NI}Nz@rRn#T8j6Q^u!M|=D6lGsd>$OY4j$< zmb+5y;*g*^x^+HlBJn)mo$GWc&SP?wprI6bYo@faSeVoy7dF=hQ4De-dX#_FoSbOf zBx~MtQTuDs{1OCJJMox(8pS5-2z%j79kOQaUP!kY)Qn26RC)n(C=PbZSYhcBbh`jv zYRTm#kyf7L6i#o}7MdB->--=a=0t@H_4IO-M_%|>x-|h`;O%?Le^gz#*S07d>DevInzo>A)=|< z37)E>h8wtS@R*M?*|Y3qTedX!@i*vrZBkF0n3C#f=*6=c-F_a&YvgRaWp~ zFBST3(`rO!7V2*RmPmzb^SDHePgQ(o3r!+MeeM(X9}&EXiubnIIIWNO9M{1OhF6oE z@{sDwC=BK%<{_yO>bS~vI1qRMiMRy?!tsD(Fy6*T@BK_`9xdb*$LmsDuW5sxlq@gk znW9mfWmvvSAcY6foW;uw)wSTe8K5qW}kiyN)Swzyyk+w|l3nGFeM2bS*)C%@$(exnI|-# z-1@Vz>RY1voD8NfnK~R9fJ-0t-NzsI@*3?drPSSb=#oXo(C`uS)ewBd)xX%!KP*u= z>b-69xrnXFMyA&ojlvWt<&z9;~8M?Y_ov_kkRb8IW`({tD;ejLn~Uy~zGa1N8+@WdhxB)k&e z1)-)DPKJ#(+Ob+*^M#URTAFIy#YzMa|Fbum4&Ca*tXU?rnR&f2AoTNk>AY>H_km|6V$vQ zw+$a2GSlFea>`Z&u`!VSncJ71aGX*{B`-j)9dx;pTCSaFW!v`MYFKu2*Qqkla(zqM z?PyIHM_I_Q$na@B#(9H3!s&i@aq2#1f6H}AUw_!uVpO41ZI!=>#qYxtM4^nmq2dFs zF)s9^NU$Z@l}mLcCPIO{sD$cWhJrw}3}1cUh)YM|=>b;@KQmI#b5Diw zrRL(}fc1{zxsE`{XYb{{@(!iC%m-x(wh$3pd-G)^o1wJB6 z{E^r7oRj}FQPl~LgSrG1N6)dz+=hN}p%l=M-@5Tmg&{TPET^|@8;?m%5QYzeh5=COXP;_3_Z^>q{CHi zhdEL}*n>DddR5 zemmvQdNLDPyfX;W9EAgy5uj=WqoG-0Up3t~6Yhe?KZHF&DAa;2II1))cqnV=G?75GEkjaGOOhaS0#0bn@eMadzdXV9)8bB;OCN_&gc*FH&t zWSKe=hztjZhV8~THLqJ`s>kNpU^jycXH&$OH54CMtvak4&V?mGD8GskQA8d61CYo3Uqk?)qF z2F^@9xN!IqiHi9KxHM@+c*3BnlM>aI8G+sc{Z*?jZH>k&Q2M<(krV6Q6Po!+H>EwA zO8?&GZAN=)v_)y@3_#vg=zmAuZS#+A&Y%~k49f^0n)C}<0rN#H&PO#CFU?m5qz;omTY3$g!TCW%&qR*%CKX>Qljr;5iJB;y#gsQ7}Y z&FzF4^+|M`Xg5A@PsxCZ6+!~PZ;`BQSiJDFRNIbES`Bd#!XtG^t>l7-(#+pX=6z?_ zl+kHx!+R(;g~u5-u}whYsI)oY&h{ zdJ_Z`cfKW-Wy}8LmNj+zo5v8o)Ddy`Ifj#0wK#vQ3C;a@gET#B(Mr3n^fE4)gGM_5 z9eV^vP+|=v#qDK>w@}nH+Hxo)ca;0fDx})6(1kSPy}yD}j{I12f*?8fNADwtwJbIK zJOO!&1SotM93wtxOI5R)SBInJF%Dhjq##=t)+H9#7hC zJB|DjiBqhlJFdu6=ww4A>p0Ii^CEr0w-FK$1wCUQoAs_hR<=_{GBk3v}+skhn z!#84&HAayA9C*JT-cVb6Vd&7#te(lxv8gVIUa 
zj&}DSA4bI+V!34gVA8;G7M*K!DjQ_l>1Q~s5$qA<-5e*55~SgSD|jR6h5GL4?h=x< zYBLFkD{MD6?(Nw0&LHdEM=eto_wF+xKghCJasmQwxo5QltLh@%6t~UkY$L zEgfqzjQtW%alK2aGmZpflp(^SdZzWRM5$9rq@~H<(B((P+}m-WId1sDC^iy5zifO+ z;{DlBGwHR}THU;{NyxlRkSjTIp|3k!BzqinD}U6oyMB+kv)=vC>?X4N$e-(xzI17z z%QaWK;Hh*xvGr9nO}%2Wx>5x(V<@-7X2!8N7p%z#+?k88Igvv;n2QG{#Z|j-<+7J| z3W6QuCW4$BZU=Y#&hA+=H2v{6vc~(G5M;ew{m8X z#cc^uxYZwa>8i2niPS9d%WmpD_i-dK$vSKE7ZG4Zd^Gq_J^K6kn@O%_GEOD#30)Un zmgig8cRuz$rjN(V4trZR{%ENkqpdA#QLB-+qHZ%EV($Lc~*a7xBbibuGiDWb1~?^S)3C-pE9W!!>&TjcPKK8N$8RZVEc3 z(A+ae@Ke(C_42yVqbwn7jL*dX11<}GM zK4E$J-Wdv|D`Qz{_u*UF`>Ns1V`iwY5xEkfWYdj2OLi+-F^1LIjiE3-#tC{?n$JJ+ zxiqO`B@-hz^~2YZLIKMF1oB?a!p$#Z7W!ZOJ%2fq)`KFX)F3}*CK9kvaROFg%pMc! zZ%z>cwCAXJQ2r10-a06*cVF{u+zIaPuE8Zp;}WcK2*KSQlHjh5JHct(o#4{AJHcH7 z1PDnWw|{%jId|`K&&=#ox29@pYW`WZx~kXes`ak*_IJJC=lMLN3u#dn@-+#fDlEj6 z4GPp>+XTOXoQsR9^#C8pYvz@W2Vt40oj}h`H0vh^d<(E2`SQdlGpb%NU%wRVva~#% zdv&Jsqyi;kBh+)r}iFV*Jx0^jR*91hdSP-D9gx!vf>os;EixUEx_XW8IG1Dszuo+aYBGX7w zNNvJR1X#8O(GYRgTt|3+r4h>YmkbdJPS@%`af;#-Qh$R%Utq}Th$@mbIPi|mm^(>z zTCasjf}-qegB*hWXp%-o6l6G%216`Lwhj*gDl=!Kq8*IJ3GNWok@@f`g?Al%^eLE> z7J6WXUf4V3F58)u2n(b6sm$YC(uD_Uj4nE<;8WwXMM0reUyWuhab(8b&}tl|yLd%$ z+i2$Z0TI)ZJ_R(7#o&%Q8FyM{*@y2z&_tM1QgCpAhFY(RiQrbBJ%SNl-0dNOyCP9? 
z!R?rt9N4K$>Pm9aMe9N`Tib~ZAa>*^PPo8^_f+O=sp?u5YZMVJVVYuCCGcZ51hOfx zb97*fer1T}uqdA)g~W`B*lXv#(9yls`#8ad8=EW~tEWa%M^ z6zA4~C>+H!RwJGZ3;ga-C6P4OTP59NzFLz8C#u3sT<0V>X{M5StspoBGXT*rYO8@J zYW994L&Yv8TC~`Zp^_q>^Pvw{a;hA#e;u1bTx>^^-}(DTHH*zcb5vC16u6MbI=^cr zF*2GkZua#w*s3yXwo?i-&5`O;%6n^ta)UifIrkWOd+(yG6eT`e0w994+T|d~sOLm; zQo(1TWH9;Q0y9jRhf+j%8S@fkCxWMd3W@tn>{?u!>WQtPuLy~${v223rX;;|S&g*7 z&aeJn=@p(m+HyOBHNSO=4S2daRy=0@cnItoO)qzsLMfMyiwHLsyeN{&7w|lr3tVTb zW2gu}<*0mR@ClX)N9y+fNdWwZ3|#p5$w~dSFWgNO8tKIN$?8Up_*g*{(THQHKZ2!L z;vIgp9`6gOw(E9*N>2|S`nzvO34iJlY(2JBQ7N~Nc87@`Z1Z0$4(dKt(UCLnd03)4iqn0oy<+tUa04lXsokyM7UE8H%z2eO9?nC7XCs?YCZpO9yug#x+Gt6REcbsK5 zlSKO6Dj}jVeP9E~G(M%AcxWh&2>{hcnnyMIM;vk;>D>xp?PE{%YY$_sF9$F)w+`cJ z%K4inDueMsVUxCMD4z?K0rPl5Q^T*;8W-8h)%-z{*r;l9=1GbY)(;#>YF%(V0p4?k z?$;Q5gu1X8*Oqn3ZRfW{=P6<@0W#mUJnvh36!Jbi>=SMaCtyw224o94$67{Gx3+w` z6&r_z>K5h`*~_^qAsiI5&do7-@>fjYi}ID9TCOv>lgu5yb(fS*VQFjqC4VvCB>I3k z#z*T`T_hlG7n?o@<>@2k2!pQ_(*PxD)Pxsn=#)8YI0KRr8aneeF4~cXu>GnnXgrTu z1ILt-JwLrecG)As%F;J6IL=3`HH&f5hYCUHwJ4Xu_mU#Sns)OrQ7q3Si18>j*&L zEa_EpW<^ZR=VW?3^_w;5piovGnvg*+=fTm9Mz$}LgBgTjEToz+DJ=8&3kj$qJ;BIr z>7i0)UWvT{f=>k{Dvs~xoGdUWMGG(HbtD`T;`DT(0lOa~OTruv9F4K5!8UKQv?6Jh zO_nA@Zntkp8HlMiFMEthCW{X}T(x!a{el~J3ziWeCH%c)^L}+Go#Ti^-)JetTr3Kk z7-!7qB?E1iA{HafutKfVRX8{D_#pI!NgoCokr%;b#^xcd>pY3@lvB{Yjw9EyrW<{d zw8Tz`f_|SSO^$(-0D#=KkJiNf@@cb+BZ3nx`$p4k5?)vHwq}tn-pCom8&tEbH7bVr~k&E?`>wI^VUUl$iS{;3M+w*Y)^F`Qw3t$-$S|oEwA*isJ8{` zRsI)&3iR~)8acACT7F8*X@TMI*eQj@uF78p2^^Kx2E;xg9;jQY_1S`W4XmFg-C>Pq{J>2XcVkXxqg@l-$qNEH! 
zq6`8U5zwt!0qGnPD8R*GJWLGNxOR**<6UW+o${K5_$ao*uHUYMfm2;`PXdYV!BI3c zWU9y*+daoU3>UT`INf#jx?>R4GP_pQ+|VrkBen1>syv(MWI`w_Y2|v1wHm{Cp(YL% zl298K$ocWJ0x#uP^%QJ{lzKi~mOjseeSQ8GP|1N31s#Mh%w!-TU2|4M`aK``Omo%10SiS7;R$C4K^WFB(`S9pM{Ja*{zk4U$Wb}>Ic)N*$A6nN zwu!XbGJ15ME4e>6z0437Zj?9~J>KW@RjiJ|T{D$aZB^SqVZ^yj(P#sZn~q@bhIR|V z)fEdl9aG)KbhvfYq!#adq;0p}6pG7f^}jeMh4r%Z4xt5X^VUM|sV1*i*Ha*j%rdZmnEun zRqP3~V8~kw70Zn!;5v}O%u$C?$X#f+lZeH-}Ovfdz>DN z=U!$^wq_OZ7?|I1p`NkBD_K_wt9lXKVq;2s0%*4o`}vsnc*weKW{h>awjb)}rrebO z08*Y!G&0KGR=-U8*)U`7dT%$ejCTuI7BGoUkHUb7J=o>u0hs1oboGt}Jnl)Q-y5X` zFA)1LH{^pSy1Ao31~Sj{E4IJe1?Rj;2eG3JyOme=p8ydF2=^nCY(g+x29Y_~c& z!4x0Ym;M08t{F!MO+$PQDhc};UV-Dv6@Xc@)5RRBC(we@6x*yZ3xza^u_t8JqV0`K z@+P$A^HqMDvG9?`sKuKyE+Yk&L4ai=4tmL@w63&FD^a|(R#t~}JG_kO$eEvP;|?*7 z$WcRF@Ir2xKr{&zhM?=#P)W!%f!c%`)<%=77LATN$g60z6UEnvME)0oE$XBNjr1g@ zMI#T>n>gi8uoX>8IQHQ<0X6s<$3r1xECU`Ei z!}+FR)dMhF!B33sC3srC%{gC7kEN{-+*sN1IZ@ei`=kvSNFiCoc&P(gS-P#b0D%~x z`bSopIBjxhMjDg3cXFjA)^?#Mjr3lGv$bNg`{EFPF9jk2zL;pRWz9iqkr=QKG1$$| z#&<*xiN?An-wFtJzu802(#h`hcA&{Zw;W?5WZBFv7vAC7PPp4^4pVLOeZctbeT5 z>qlRT{$_Ol%TP)u|CeGNnTX@f4h%o^S5~Di%LLMi)c@4k{CDp&++l{pwu0Em@wUes z3QM+q5%i78cmkD=NIydl+03x_oH0e~HW8wNHZ*0rKTW@wwqB=Q`)2&OdcHnwI{a?* z(M|T2qJ{f-qiK2#vOnvu1lj)x<};@qwBK&&%*z_ocg)JHrC$HOZY1ty{k* z)PLdYKSC;W6TQ#DZSj(^tPWWIN`uhkYMJ&3^NENJ^B#O%bdo;I9ZU(%q1 z6W(SoTuasRBR8+>^Ho!#zz3|04anTZY+(QJ@5qrKCR-lP9b`W$@|E8_Y3b(Myee!| zyc5dM8|`fZsQ+?l$rmHmN!)dbio7=6rdWz}{`5 zO>~|;b!X9LooFzx?)@-fTP4&!!-}L0If#T|78{T_*A&L7(CCB6H6q<)gX~`HfR1K& zFK><^o!v&Mr?XezKQ2ABC&)nqK7l1CwmaB++%ywR5l0CTI6ibX2K;*ao<&c<&RmNAd9$+34PlFlwMZZo6Pin0<~s%cFvfRm-Hr z9f@~lz#g5zccSyo{Vg{=A^}23_?aZ>UZ8>y?slV#$mYSgje}&y)NBjmSTnv)t#to? 
zWW4-mpXtdGe^ie?V8X)6?eWKyw*TckXetBmo&%3==MTsHHzP8&{7E(4V{^c^op%+I zT4jJMrkz2^bDwY&Rto|9=kJ-p%&IlH+(pqL z33(rqN1b)I9p&b-Z4j%kzGLQmC-F(3@;hb970QOw*U9GV27yF>NVx5za|gTc!bxL# zNA|40TlC%6)U{&Zmh3UF9h-YlzNoZ&gQxU{D}gZ@reNzF=fn=VVULUZ9V~C|T=-6J zc6+L`hH46BlH%Q1piB7Hir{&2T}<>r@gG1Mqur6~PYeba1i=e$O~Kt@TXL$?3LkAT z*~zWmbeM(J<5duVO8`1C+$IiM@3m)qo@ida+ScFe8eZac?x_AMDCTW@pT}_Q;@BF~ z6cBft1QM-P^fWn&#nIYI4NTqv3&XXb`Va^s7aXO^kGy)b|KeX&HQb?|hkdPZ`7S5w zz_`5aiTu|nu( zp=nrQTD+UjJmldyN|ip|NxAbK7&UG?=_MkJ=CJd3*&2Uj%u>Q{`sv;M0luuimmV=*^)oMGT5BUp&zc2fRepbETz7aX_)@i%!o$3kW zr!w-uQ*&=k?B_#?&2&U7B|90QJdaOVrNSlaPYid-qR_+Icv)HX@Oa7zH>+J%JIB&f zSN6_JI$W{fvap;eJbx{!FV5KqE&Cj`CoS^>UydJ5>Eq!d52}x6cGp{sTc)S(*N=%e z3$@!NE&g9C^Bs=f^oX;&Rh1ozjhHHC@0;cbaKEsY-_Xz3n%;iJI}AhDf1oGB%-+Pl zSsOLVP!7@unHT{w2Y&-72ZE5H_a8|I+CEaocJ}KV*QL+oeHoR$6UywK_~LeXcujuz zP*cfR)cg+1VXUXl$SI?9l??Z9p|$^+tq+dmAGseVQicE7m;UbkBbgy>PPRP^eW;W$ zzoDnQ%sxQE1Mx?s;2iLvk{}`vAb`dbI@r!Fv;E2Zvr@1fc6;0{nra}2uXoE@UN4s= zO8Pae7DTz9Iiz0CJA!~v)+ogaMp~fvsr`YMzBrOz$K&(aout44R+V^8ipo!z-m&#- z%DH6ZZQ}NGj2;Scv0HIZ5v)fh`su3t;!xD;&G<)03k;UH8SZji#8IW(!Z`)C>&?|J zS54X%1U=xkFZLfmSNnLxOKOgZSK6gSdF0bZh|9h1V9%b%*hQtw#)q5$RBQ3pBZlll zC+ZN(SHwK4_{suvVPY%f%=O}Q0-@CCU-^V<$=jam>W_}^gwXp}zcIyF1IXtqAoeLC^vH$RFXQ9>#>BSJ#HEpzikxjP z*-CPNY3l~JSN&_4-s~SbkpH*~-$DHxIQsmvU{*b@lL7LVF*j~Ry}@k1FS#x^LpdD; zU&KHSrS7OPfAvDhXqLC#erE8-ePf=LLF(Uk>5%V7wZ?y?Zzn6^2-S+FzlBi=V-$mX z+!h-@9=4Te0kO^#N>d&=d+5M4F2-IQ1s$;9<75Q?{YuN{2^YnZDDo9b73M1=f4TpA z(to?W{~KLXL=OyLNJjBl<%B{L*`7mOQbApWh-BnY}%JSkZrT!7I)8-wa}Z%EawS^VaC7YACcHR;}ej))joN(yDV*dZ4TCU9Iu4EQo#YqUwt81KKm z!X_p?kuWb_augb7(eOJlB-a?hD-l}W-zl7Ur@cs*U3SCd@#>o%In%YjgCXYkDWfy% z`yMlXsUAd(+5gf;8gKT~i1nLG*sJQ_2tF&7bpQ2>4?CjN`uFI7hzA8;RvB;^tuX`n z6hh~FeV6-#S#2YGK588wSWJuj4LC1?~W|qGzoe7W2FlkR^4; zJj%G(NNu3`E6<9cn3`j$KlQ$5#a=t#x2 zJ|X?zkU;X{s6PM!2^m(13!V+n9{`eDqw`}~OW};_Qg4}y(K5p8|KjzB4Hm(}KltoT zIh-p+o6Qzn4UPdV2(Z&!ESDnkB}EIDEn1DR8e$FC%aDkbne8{_&u)1)0fVo<}Q zN(q6tDsO=f88zpUBEM>eh4m=*w%H@sr|qi(M2B7MPjst$>J|tW3}3Pqn8Zh6<=mMc 
zGK2~t%aLIl&N;>B2GXE9@(n$j>q7SGgzSp_lX1lzai^rb=YD2x+04^hCV7K6yqln+ z5Zv2S?3Qh=|ewBSi7PUtXgl)KC@&NHrcTvhm&kl@&yD6-Oe?%2RW6oEeFo(c$44 z;o5P&Oyf}9$aj`!i%*ym#_J~C8EGB8!~dPZ{+)k8a`XAi_>t4@!Gyq;+n0|MCiy)v z-+#Y-(o<1LeoJ-KJp*l!Lq>0}4ZZ!CWm%dnZAJ{$1noA#h!CbG#^YZ$b; zxu<04LoZOQ#Z8#TR$ekM^GsnW+Bs)2VWaxB4~X^*((CeL$9@_V;yU(IFRA{W6#Wx!CB}T$-RSWwiV>?{s*#AZj@Za{)YlcuTV++vmX< zmea>M@BAX5%L`mlvyGVF4mMrHn}FgBg*fu9PS`B)FeCU`?kc?dLQBu!|y z`P~_BYePf)=xE42FL-?V*s~Hj-8nJ$-%(b4&zrsIS?`oe-0N_fe1h|xT8mmBo5;onLKXm9Ve%FT^AMQ+Of?pSv3Ez9r*=aA|V0K^FXS6!_kDXPRqqENbng$h5xFn*%AGP}&=|SjhsvKLEO+HsO-^N2eSnxUE?!R++82jV-dj2xd>h3HO1^D>k1!xDO%U zIZ?h+neKFbf!l3=RCTRN=)G@IYuUb@Km2aLm}nvt#)N~()?xtP%Tv~OS zjA|$>>gW?+RJda{Q-10uc>lt6tDW{+ZJ#9!9K{~>xbRjWf);D1MuM5rl7#h#cmhCL zZ#*MuE?Gluu?#aj-DB2cprpH>L^|HnLMdL}Vf%jGY*A2YXQf9@_8fjs2`z#%TObgTSF8$OU_HEdnCnWi+XknLy1JAUEU7Xo`v8GAc6Oj_fG_<-Tn;)aD{DH+C) zaopMAfD+?T0I|BDh)?H&ofFL*;XA`yxdjeGelPbcX zJ;D`rcKhT|D)2q}Wk{3$9AA2D7adTMH20V(G!sH0trcR;69kAz!d+5CrZe6dAsoKx zZYEPA5@Po4(ybsRcs#*vjAh!L2ni`dl;a%B96>-0LUn9kPr|$i&Ej->yo-yEn^rE4 zZ;?AV$U$5u?ef&6oz^A~JY_^4rVW!Sp*n>7Eoj01 zoW^G}T%NCH8aS~YynJJ_Ja!QW?zBllC>qzea@ls(nGD&`gbeI?osc9pfU5|=8Np$e z;!E=nXRa+m?%5CYb8l3BxO=`M?cHs55z3hoyB@z53g|Hs(2$0~GI;+6%W(e}U>PJR zjafSSS>B(3+I;hP*r-~qAchu%&_egj0W+y#JXZJc_}xPKY!BX)Fk>M z;yObnU|y@El*X^mY@@SoG@GUR__*lsF62<-16`2@kqV+1A@=l{lKxDF%D3-Xl?FSF zS8&KLSsbrti8lTr_Vh2)=?K%`O{e%{%A59z|1F;v&aLoY+CxBAS1J*A4C{{R+m>W) ztfLmD+QpR$DT<(O*NR#eUvr2_@5yB=`}*0=`ooHM?_GMPTV>Bh6=?cmm#e9t%IwHP zd}g}rw){}jX5sqHfqwciL2~b9lVzV+Cn^$)QKwiy@jDSWi9Y~l-0iyT^^5OZR=-E0%a}xPqDf&0)JAToBnFfC9pOlMob1@m$|!o^&w$wZ%xPEJ?R!W--v=i67k9& zqHvXTKv~M;1XrbXm7r;)X&@`keJsSmAR=2Zu{%Q`mR@n{gT!Dq&y`tC-=QPLK^Cf* zw7gbb)(YL)g*NJ{=^6(Rg)Y}lh)=mqPHO`@Q(U>+G3{h!rmEdKmc-ms$EqDJaW(Pr zH4;7h?3Y~hAGnLP1;u(E&gZ>F_l-SxYch@;kZ;x3suHHsMdAiD)NiGf8ZygnEK0Hf z1K4tAELrBrHA}6t6mGe5iOGIA(Kq8`%lI?P@lm;+IqJAJEHQ-)SQDinEScya#PJ|s zC#esEcY36m;HWrSlX_hTGA6A#A4|n(1peHESr0)`P}6c##8pE_Dyp;tpQP_S4jfhnxfWbrk_Th07LPFuEc{ytE7P7I-cnX zg@r%|J_cD4voLprD~a|VK+;y@N^0p$q 
zkBxu>o{9Vd9Uj-&n#lmw8jEK$0mz5Eb4Izx(xfJkf|z2UIlsHEoTP;$g^fak)#v1o zknvD}HM~(&`twzZKlAA)Z1vcEp-sW~qdULy4^I+D*FII&5L6iMLjG^Kiv;z50e1m- z&}NIp=lY*ga`^rgN{-sTpUU4TIRgI!O3ubVKGSSYZ_dg-UC%tPd+C~X#+`)>1Zhx^ zWNuLZn*lU;fmB@}kre*z0dJCxCo;deq8N-RhgeUdh@RV71m1XyHikTn4Iq+B?g25L}fZTR65Q2posnoyBf=KCTj>BBxu*BzYWfuK0PuDA>TYCRJIB+)>G&fSS@Mq2cD=f}^i1o%Hyr9kN8Ey+ zfjjS>xJz4#uR^HJU_St_c$LfdE?Nn~3(kRId^tX^uowfJ-roW_1m^4gdY4P<>_LA3 ziB4>WCdLqmW!lwRYu78&uOFA^v+QE2j8k1i2QD+nYFtcmOxNm%^S9V4zPq11rpy|Z zJdI{qV3*wFwowJgL+8ZNlV|2Wc6|OJGHPSV-T?yQYw|ckOD z1y~vbk{oMoZ>sX-I^44Kzwb5|7rDl7*Ip*Z5vS^LF&f4Wgv~t1zVBspy z>vC}7QA9@kOLmO7iUE;L;Q0D2ciQ_G;Di`w?aF2={4el1H_L*xZGBYVentx#Zdm*In<+oI)6pKu1IkbuF=mxj%F1->czXlQl53_tU_vv&l{`_G#Zg6G_;?%=mV{$m zlsH=AmKz};e9&l{QTIxKj zG)=cdSD5M@V~s$`H1I^+r+F2`_5Zs2mD>)^|K{#z`MbNHlA36D5Eqb@?0PxIW|^P{ zBv1z%qKs40yS7wd5_T|!4YDFwVe3y=G9|>>VI+~(vv{A65bS_-=n5G{K$P&6C@^!s zFDuO&(g=YOSMbDkDDNj?z!BpZUFN2)Yn7MODanRZ7~&EO;IiSjg$(M&@v9^#VFIXf z*Hp+hyRr^Tvd^Rqzk(4@SQ?m+JqA$|>`;3TcZ~{|O#!oG`H|HEyvp9P1PV%Q2sv*C zhubK)X9E#}qT^_g2u9zB$uF-Hsa0XtHf4B>$&Mw9e9li!elm`<8^))q^&kC~GUT8drQ4mk_j^gFZKZ-+D8F~eDO z-azN7c6kRO@6PCG&n$}y4}v}#WZbo_7P#hnjbKQSeIZ!|G6bFz#y^XleDw$LzTl=t z<1K9>Z|c7OO%lV4EGe>&l!YKeXtNP|kP2K_P~`+<1!KLsV!H&i#_5>!o?!q~#E0u* zG|uA=Z+&eDkxSM90E-wjLKw+WQ60%5vX_ z_fv{TX$!lU|EY>efW4o~S`lUSv4n`{@SdCjK>_3kzToU~f8+E5^^Pcwz=5ewzwxFv zk`u(}xpFN)^PYQi>h@Q8hR|fH>lWb$15RU?HR}N9oAGRZ@00j6Z||{+N<$j5G#y1v z9-Q#&%;kENh@MBrR{KkQ46WsvsCtWQKHYf8gtBQVx6z4lxfX4JridFQGs_Denda*T z5TATC4MW1|AmfkJ>|#>ZZmFOUD5ZONAM!FagUp}_J4()_z2QUYyB`nprKL+yNSTu9 zWsXDr{pI7O6Ki^A!{NC6T{YL58)W%@t>rQY)8KL*lBmFy=qG{H`8L-to#*T=>4e(} z$z~0HUnRve@p(3NXs)-nOjnwYv;aObw8T8eDvr*`up`U&>aJ^jwMi?Mx#N}Hh2ud} z^;&cGn%#_KsAiWRzw6giwPVkz^i{IS>OA(ZPCG^8P0CfG8{IW2@36DL!xQ;#C_E)} z8qz7kN97^T54Oi16}v3AP`dF#60~0UHaPtwz^ZI%y?`Ef_{|KZe+k$KZR73ThH{PH z^H;xK8&eOXqqj`GSh0*X4u)HoRf@F5A#5_!o~<`iZBC z;-QvgYoE!yI#y0v?8xM>y$kqb8FwJmNRHhoqP|293&Ea2#9q=OdISF0)vM9kJL0NwLE0B&^XeX#J zMf-od(}GzTs9y&{B1D{ND`J_niDiP@-bYC)MSxKSp4je(JlT22@ErHJ;Mvd=Y|G4e 
z=N-9|1Cgjs;QxULRrOK5^>0L|*uN8@=5CxRcyZ9Oh_N?A$`lV4rGcl6g@MWQLKucY z{YGG0RTw>8<1nsO#`a_+sRJ$tKGM1Sy*d&|h7DdjO>>ql=`SEu2pB8L_`O`?2}REd zKk@7f2g! zq9K~}VFZe&)iEUt4ynsHRNsQI&_w9!SCSz+D2A4|k%JanP2QJdB)XnHtR+&ysF?!* zO&E#_=;TzqRY)_Mv9Pys@@1aseem}}CD(4cJm#)_;VXOaRI>;DY zny;gMaFd7ec*UWD=1RXR<#O!R_6>;HD_9_c%OE^8tzZJKS-g$)Oqrk^hw+HZtdS%e z)Aqo_+^~<9kZCDVS3H;tWw(m(F$kYAA9u>RRoXWCWE^Dlq9lnL+fXt1uRst$3_qvy;T8`(F76dJsho8&yJOEb8#Z43rp_{Zm4U%;8IujAM$D zF@vJ7D@J$`NAQuAmuCz!L~U*gOS290qvojN88UmJI%ukuFG_6Fy40B@i)^ke?eL>z z%SyJoGe#^y1kxHpWEH`2bd2O^h=SX+_6Z74Dcr`lbz`-=BHkK*04$FAvKhSbCZnp) zlF_G+>|KeDFnjz+-%s3a^X?L_SXao(Mvy8Ei^1 z06VnvN)?LY)2dbuA()Zuf2|g(-X`9cPH&Bw5O{x!dZ(z|>yuHLaJx@C$p{L_9PMV; zJ!e@L2;l^i;yEhuL$Y_6SSKn@g9V)Ma=0eqwdM^aT#`m5L>a25t)Iw=^s93&YHmyA znhwoB|JavRo)DBb7;vO)R-M9LSA$;6163Dy5DtTzUo_a3ikvEb&(B>fKtWq49zP0P z3JVgUcZ5&*9L5Kj21*fzr^z?rWqDcSJ75DXI`cke0!yfDD?A6bZ;rzQGzhhX(6e@= zR^9oX>Msm-iJHLf-}zBuR<5P z=+_@~@M;N>qz#NbLqYytTj|Wk)(eweI&m*ArbjbCvHLT{;-bn_QnGEGBgIz0oI}Ob z474EggCp{GfUr>_;rY?lSil^^8#uojR|U|fD!5HxeYmAYtD-WaCANLvvs07wdjW^j z1?rZ?WKmp)4rh z=C&3--MIDcx^#p29JpeHZYg=$xNDvZGAaz|9I*Isf)wk&an&vK#xF(k#@_S+QlumR ztm0N}-CUbEIuLEc-T#%-AL_AbmqA3~$CPn+oS}l7~xeCM6bu8=-A&RF;g=3tyIJ|i_hpt}KmdbWHsLywf zU8+f5Ef`4v@PXN+4){P(9x4u5z z-UhUoF8=}0qkP@^S#uT8=>Or@7umHM|Gh1Rq80RegkySj&K%Vg?$@p&+~~&PB<7>v z{fj1zAko-t+4@-RY&#ZMVB95F#P6t4R3&)&M@mUu+E}Sz-=#_2kBe6ZBPFX^no)Ue zWSd=EyN|>TCm78>=d2x3-8!3Q287oatsa5L^=evde8?#j1|x35NBIan7f1t_x{q z8no5kRYbK4;Z8u;MiE3v(C^>wR(K2hWRc)P{a?BDk zTu#Rsw|d5FDJ$X%sUZ+7QD;=tY6ud{EQb#w`7HGT*(kMShLaSzS|7@v6%!p{FG~tl z`>AhG6rCaL94<|}WiL$B-=w7QG=MUY@}%Ndw9Axk-&~wg0WU(0;)I!QeLCJEgyb1h zSS*W!{L?cNlM$CCI4GS7#4w5lWh@{>Ss2pZF3cd~vLiy(v)hK)ogqyoLlnOKBt#-K0--sGB^ z`7LP&9;3N;a%~H#xMORn#o!@IUTc2O@!TmDh*%E-a*(s2;-=@v2ns)V2*udHILRLw zk+=rq*jlEGpckp>VL8M^hXj|5;Id8v;I-gtHQ1;5J0h+bbU40c4LZgvmd&yav0-6H zy+JO86*lPE<_zTLycftOY12*(8#QI}ZUie;-!mfthmcfgItb0V)gP zp*nTTEKwxhF`0H8zDqV4aKHHcqOm3($z?&-m5}xG1OCF5o)6U;fiX$M>-R-qc^>i# 
zo$qWm0B!J#)(EefZV#Rz@oU@P$4Z6a{~xNO6eGu10uZE%xMCE{H3fibLI zYDH0^N|uzJYHw*Sp`X#&qAuV!S2$aUky+Pgc6+JR5A({Z1lWJ%R88uhr6G`6v&*tJ+Ncp)FmKzrK z*4&~v9l$0YQ#uo$fhva2^!a6A_bMUBT`h$0MP3j*0Z<&Asq~(790cMmBW6}Ppo?D{ zx_wbe-Td)UU*Fk)G093RfvPRc{W<3;1At-_X~WdSu;OYSEaeIFh-yb6_wkN?(RVTE z<{k;9FfgbhCK2n`s&s!Yc@L|8Zq!Otb%PoFe;sPhUNmi z{W_zdNd{SINwfb~+5~@3C4=0lTS1Nn1EDpeL*-Z-=&>M=Yb*=q zB7onO`VSksgxPQQ2Z5S&u&5uAAQtA8a04 zT*j^noFC7wOy6(bt_@rZ!X)oSzOcSKm6AI#i(SNBDBKt|1eMo?0V`ivaG)$@aeDTQ zM4?+lUsaN(j_CI2(PSZNcftwHkXokEt3ao~_~}>p*XVDY=RPB4)8GuTVm+1lhxeZO zyOX1g0=Vqhjc2f`DElJKHEInA?X*@^m%b(eth}Q9fKWjEAz^hRzGOKe4P`JO{r1uL z#~njXY1-$r08Zf#*st>}siwi;Ij15%iUiUk9Fn3RBd>4aIWAUobVSh^ZeX#P~Ij_wjBIq=kAbEx@nF{5UjTorwF zhfj$KD8UWn6*lr~7K7-%ZOhmB;~mq7y~UnO5D;u1t!tX))X|hNHRk0^Kl&&XihSI@ zz{MeaYs6XOP@cAa*5Y~wNuOfJ$oI_t9U#G_w_0`kmRECMTfiqnnQL~$vpNKCY@+&1 ze8&}0&ZB%V3e}G#7@ZiVJ^1uFH}gJ||SD#`j@mNJ2#|(<5)r z+1~0Gyp*ngtl6z?c_a_0YNbFs)wVeg`w}XFa>0x-t{SlXWOcQ+oOZgF^A6&0$=`)A zz^3Iv4qLrKrLfpHWZmW+?=>)c0q}jCvi#-N_2usH;L*OP=0*P1m#%xG%}2}czaBqE zHW}UzcP7f!5}u(<%jX&iW-*!C?zo#0bfEJLC!XBPWwwFvC_u04$z4HxBjgP9kR7xW zTpS5ShK9)$TJJJ)R)(9Z%WisD(Ksr(L@8OzsXHAGzszu`kYYK{h05Tdn066BaN)AI&W zpQYMVvG{pxRk2EEOhLi1g1ej2S_qT84{u}Q)~Or{udXW(`aZNk)HJ@2vpT_G3{@sX zTrobY1`*)E8DnPQtMS3b(MjW@wW4`m!dG1LC+*z0m#6cCgsu*ZBnl$TOM-RTwaye? 
z^h-rjgi~dG;VRH2`di@WF*23M`6C^3noY%$#zC>_KqJzIMf0%;_g#=7i<4rma#E=U zz=>BK3BW58^b@m8po2I0=EXDyCn8HDaX`8ioG-XDWSA{1P#{rQyvm!I!y#P4poR!0(q*6 zB-^VpUhT0~-UZrZx%qg+jnHTTSVM|Hkr{wnAGKjsf$DR&=J`Idig*8CVQ zsYG{u@(U5NGX^RJI?y>IZA&Pl0>PF@xI21ZxV!HTQuZ{Bcj#%-?VNrq+E%PE*yWE{BTH5*1V!nPQQ~~WQih>s311H>3N2f#Vr{`)X^?O4 z?pFNU6bV}Uw5a;&Yr(JPGg3>%zsYijhChwbzraK}WH3=q2pEY0U6CM(m1a*4(TE96 z2pGj+6s@gVs1^Q@&{waeC}PJ@5CPypQw5l3a+oK~!q!ygrEjzCN@U8C(X%bF7*vA* z8h|SR0QOu_dxGWu+u5|FN`jT(|8{cWB_)A*O~MqmifT=|N(z87>_jv)60V{fx%DtD*;hS#+_V8EeU#fI)^tmWjW?W|5V-Y)OS*(ZJ$n47iPL~ z|1|OA=Uc>IpC1;uv8Q0h>!l5!F7F<#jOZRtm^{yD<;{5INoZHv*Mo02O309-HesF@ z&HgIpWC=R33d8$|Q7{iiTMVqe#_!Yu{IZDB@8zw>E{MCC`Wn8s0e!wcth+OM7WqoM z{=57D#x$D@fDU)*vSe?Jt#ruEKRJ;>n!f;B_Lf25gYLPh0`;*pCGkjRe=Q;FVBnxf z6w_8CB33Jl9j$EFZ-JezqsN?R7jDMp?Q5+w(7S$W2iIECPFHMdldlxo*ledR#cFjn z)8E%;ZYA;$d^pFUy+-}!P&i~AWt%nhA@bm_!u629%-4PBnXdxT5lpWSuZX{IO=tc; z?OkV3lUo`NRf)79N;7~Elz_m36s0JEU?3DFbm;^H0@9?3C;=gKP&xwA0upHwX`y!^ zN_P=L(F;N8D$*3ho1Ht(xOdk(dw2ir&hqEXoH^h3{dmv&J~QWg&+|OCORqfFg1*!Q zfj)XZY$ETqcN5?1?zfJ{-#}L`exQWNBtEv<}*hP4^ZGL1#)WAyb0!-I}hldJOiQV3->OWDA3dJTXbrhm4qH)p@36 za#J2ZUcMlH%wG8#!uQG(CM^C43YK*gR~?(i*X=k%CX2JjEsi!1d7Ap05^@Yhpv70X z@cocpfB=wT`Y5lKaNo@sM9V3-R`&VRBMOts7RGTJCDZ<{1#oV4criehHI$tn+g_rG z#nBW)4l!#}c_gTENUAEvbEGf49M*w>gieCw@Q3LInnxaqr|8rf#@{yO)P`_&Wu9hV zXLu^dxKYCI=i%e6NVuGssGq(|8yyy6J=?8?UNyMXO7}?MC zc|#Ta45HbK^l*KWAy^}S`$4aA+t?bN`#p0J@2x6#TX_q=qL*nFh@&z#VL{X41mOK} zs&=>@kmVkpJ|&M<{FBq%VNH5`9^h=EKSkbU~QbAX~l|M2R_I~b~F%x9oRBP*>H7tF9CNtptj6Q8Yt0V zBuuvQ^Y;@Ay}NMLo_Lv7oOqC z=aPYmZ2Y+a^U%{LQQ#f(Ujq3$V$J&D&tV4mD{?F8oV9t)HJoFB^Nj9xA9#Uwquwn? 
zB4ggHt-H8wOr}*1JiLN9she>2kkIUCrXwgEg70L9Dk*Q)YmZWwS^8TrRTI`vrYipx!T>QC&|^$3N6_wREhheEwJ`Ww1%}c`pe%>5-$Goc%du4KXe0-L);% zUHI9RoIJ8gQ2_Tf2Nd==+JtE>K>qbXyQgqpQs(R3i*r}Q*SP`?lIkF- z{_~_lPK9iK`pv)tXU%287(=Iyn0+gwp&+(bZ9qh(w;0_=r*sSP9yW=QYxm0ke#-pa z-?%@Xq8nWXV8j=>HCoF`v`bemZz2rEfNCSzI668G&Ir`ba!|v>sa(;KxQrW)oqoao ze#DofxMnujOA)y%q~56rrxWGjMaSo1q?A}|fA}so21$Q%kFkQ(^!Z}MJD-mdt&1Kl zU323B`$F4BFK(rpn69s-Kf5};zbw_d7v-;ZkHp>YQ&ud$rVdn+kj7g_OM{<>VNQqC zi(=Xn`%XwF*eHHgtfLOU7~GD0k?BT!YTB$&befXcT$O#1yVD|;)krY$8HIoH<#--k z3&#GY!~s{B7}p_c9qT*gMMJbsi&JeQt&FI-K}hdI`p zof$jwgFzGENB@+cE`Z7rd#1L#%ppBBFxBwX4t(f8bUc680R3lE+39L$;fZUPDFvHZ zNzo_dy);UaA_Nx=Gy&MiC=EieU$eD)^5y%Bm-NmP?HgKDlqWE*oi@1YLXE*KE2@VE zF~PJ073-5XMb@fl6uI4|B}T`&Pj6;56P(-ei4>S>yHRKB;Ju>q4ZyKWc&kQ!TzWG= zcdne)5%)`by$@Fr?6r!yMlvw<{cNodVU^J){_^ihsRMFkw zaZU-j@=K9R1GnF&F9VTXI|I92>j7CaZq&L>i~0>I-Jv|9m1LS5)1S1zvJwZb1f<;5 z(!^j|jQg@x!T?OdQ6VgF2|iyPV=kKV@9|l1!7{kr*mv0JV)gL_bIKq8~yy0c2t-10JSvz#dWizF+3>@qPQD2Y`BAbn|x5y<4`|_(f zNiH9<<E+u2*xvw{V5g^l)2I143-?E6=E1W+(}VeY=JG+zekz=E5U4*; MiTZb|km1|JuSL5wga7~l diff --git a/ui/images/screens/Events-Details.jpg b/ui/images/screens/Events-Details.jpg deleted file mode 100644 index 3adfa09f5d74a96ac5feb1af3758d1cc8ad3ea5b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35278 zcmeEv2OwQbx9}l|-h1!89i1rAJJCDQdnalTz4vkuM6V%;C{dylM6@78Cx|3!f++t% zNbdb^%DeBq@BiNS-nQ@TnOSSrnpv~T%w+F#PbW_20l4DsmSzBev@|^c7Vzyl-2kA8 zxf)x003ZNR;A(yV;B@{PgN2KW10M^EoinqciM^32v$4G`i@Tu%3y_(W1;8)l?qFza zZR$d5WNL0{CqVx3X$v{2rHKH!2B$o$yo0Ezg{73Ilc|cQf~v8nwK1;=xsV_!zdN71 zt%I$piy^7It&N>CpSuA0IdML4{;ZmXob(*U#ae(|_>3#5ru;2ZQF|v-Qch-8CSz7E zASo9IGmwLomxGs)l#Lb0$^v9#VdZ85vhs0q^RWU+zh2~EZcZjuzb?8eS)?_|ybXfaIv(vBR!*N zXk_o|B0vuA>BSIi9pvS&2>+$DY;DiRb&l=qqHOxD8-I!Itm@%l%A#!QZ13u1YzjuF zxNHoT-Om}FA%c6uCuVQ#YHMoeA}uCB4z6J~u{7ZmxQHIKY2m zV!Uj^!rYu(>=Ku4rR|(u4DF0fFWXvvYs>r7wtS*ariL!|POA3yHkY$^%fjBp-r2(5 zfmBq5lT_Z&*wXH-d?wnjJvDW*yklx2;bd=1dM+D2%fH~yDz(7GkK|{enL&Lzqfd4~)g@J)ZK!k@! 
zfQLs!MLfSy5s^_)QIHWaFfcJOFbD|<2nb28E{N;buOlHM;i97A;$xv<;a>rMH*?ws zK!b&BxQ2HP0u2C(25}7y;0Ji;W5D*aGyaE978WaRH47l!()c^=E02KfM1qlfW zaSamYYbyveNc3wMsBE{PFj>if4t}J<WdbGz4Y+e?9x2ml%+ z5S2_g4*j;$J@RYptcH$k7^K;its=@4H;1;2ocxV9&R4(7csc>iSMKlF zNPmUTgMJeLAa7##`xW9D9OX&ZzQY9~5#LW6T&tS6fWtG!{uB|ekM#m>ULWuGGfYUQ z{tGbP2a^XsLj>Hf1^~Rfw!9xtaP@7xy$OKvN~Qg20~*F>0OC9Oae4}rHi-bhpflA^ z(V><>7jW9@2R}tTqkDEiVECOse~@VC3G;$u<}WYEG}8Okq<+7|gP>ZIM^(^IWxn}K zRLJFKZ3};=-Cn0(q5^Q3wnxM;QVL|_7QeXbMal3zJ$eIf1%uJU5rF5;DDd)!B$#!U z3qomF!zzpSq?)-47cgBp+Dj!1aw#_52X!3ZPrt_a7Yf zSADvo@K+gV4q;wTqgOZo3igLO$O9MIGya)&mF+AxceG$tcWgTW0BG6!FXbu!fceLQ zRoWQS84r(!4!O)|kA}snkrBSS4NpP+2LJ#GPqjeK(jOcAp)|%>az1}Mu6}>0|I5N9 za`*2{_QS7}4SI#5NY7`cRTbT5W_Jn@x(NWNx>L6gGyNrti^^~F&3>oDx-zTWW&7n= z_iX@R{%wFI`0=9)!fEzg7Dn|y!uVE?FfR6Z2x@t4RU0KjH3)=D;MJos~?Z)KR`hstLbP?%l@;8Cl; z!{nK4=JyT!SGmh31xO};1^#0lM_)?*qj-d!$!3qSll=3)_!bTfGZPr#&2JJ z8!+@-dAw}(?Nj!<1|+uu0EYwRky(~M^7%vQYOY8jkZ2OEZv6rHkA<&JpfID%?;nAG zs(~qXIX6kLN#OTRWqA2}H+egHtx zrhG`A_Fr-MT6g81+|AyHjvEW^ zInl0g>emN1$TpFt{&6$FrOz|`4ZnI#tbyul@L;>sIWd}Bh}_2m_N!tD(HTEs@!eDj zM!O3T<}-P|i1w=H9^3i^09;2&ViMTi{BiS3p@YLB>ad4I{*N)wA(wv3<8{w3Fu`D} z6uC>s#pc!xcN1e^Xz(Ujf!Z37pnhWJ*Hm8>pU1+1S3D*k!}*L$)prV`yJm&XCTKI+ zyi61P{LzJ9Q1dl7J^B&KxB5%u|Iqru8NYKjLub(RK6d|``(P`Dk72Ew*8E2FA9CO9 z!T95gCh5NPa#zQpJDU>#0FQfqVcu^v1(o0Y2daOrfZQAU8eO~L`OH3eo;ynLNPdfU%=D`QH3YtEqp^s`mF|8|)vhogqjAf@6p`V}n;LrW~ zWBIB{fYu)Ye^o+aTTKQp1e@4&w<|xrr86bcr`##6ya|9%y5E?}`d2M3>aKM9Y^pN# zCpJ{T>x_?8hdyTujrkq%kUHHTHwIiy*Ug_GU4Skb?bO8n5*IMWocc|xqtX3n(sMKY zz+(w=!H0YB-}LPO#;`rvyfz{+Rcrr=uYH4R5Mnd*;}*{C-UC3IdKI zjL)u0KhCI#VI<%FG588X2i}?d#NMv(Fu}`I&{gTj8F=tU?Dw`LH`6mXbAqM-(rvR2v z(TXim69HV`Rr5?uujc-{zDWs`O$N?fa;Jc9mhJsOzEc3E0E#|+W>!V@Dd6ql0Ez5) zSj1>+xWOg3E#B9;gKYc+%Sf4u{GWYOkjsX;ZbQHC;7bW}|5v#~!z0>0Ef8sJE!OSp zPm7kE@Vk*9Ooh4{bfbQ9Vy60xhOsg(F+XCw@BK=& zwArUcM4!P*#u40#l%GOq+wlIkl~6LKZu+F<;^v7s8xVGYM34?3?PDrMFnlC+BM}Qw5_L}A8{N_} zW6Dp0{yB6W?Z|SeCLk;vl)$<9bCo!vi@=wX636NMDyd@Se3>MA)&IH&DB(hVV-g^o 
z7KLdbhrFIW_KB8@>jG6T_J|$}E~8eBNoR%} zQeoRywf03aS|F#1Fh1}5`Z`~9ZqYwEF}z?+UnT02%VKe05z>n@p-Pc4zN%zUP=j>klMOJ(6f?X6VpuWofcJ&A1D$*DAu@~0R5XKLod*eYz19xVhi z6ULzbA6Z1%3u#2O9$8;MbAl;KdJpP2Wf}8qLG@;O`V0wfFEe>ear$D?v_JNaoYh-hc4(g)ot!4fG_ga7}L#r?O`Y&yKR zswvf&igDML76s*?2Ed+IE64b=aRp6HFlu1D($&<+X(EeR?4dL?hsvm)I6L`Vz|7^W@Dqjw72xRQUWz8 zd!FaQ7x3)Q+J{_Qc#!!-JgZAGn(BZ=!r-7%vm(b>nv$R9JdUPl2)}e8y8h2iL~+iw zSD$;qRb8=|t_=wn;#mi&X|(rj_huo4*&X{J!lrYn5qNizMYbsHx03aCRW-+79BER0-Z}NCrolJI`}56`P={9B*+! zit@5}Jq#^Lk}pD%bIT=%S|mipmni!m&K3|?>m!X#`YI9ny5;vv_DLK#uY1B+j5*YK zm+A$BrJhTp+;v>+{pc&fsTLCWRgo7LjeH{l_a_4Pgz7S%P*Q}s-6aXZ3_sLN47C2b zYA_x9=j*5N%gzf=8+4yy(fzA1BBE##7^spNp14esnI^Ly!iN@B%CR5#bf}Crd4F}K z`gl0OBJReVIQWds;fq~GAV+dZLW+6KGZG~yoKj;Hs!_`S%8zcWm@~aU-vt~%UaokX zZ-Q5U2BhFYRK&8_q08Bb8WEsQ1b1+eRU(D{ixqq{dJRW^1^1+^d&LMm!RjK{iJBr8A8%MQLLPc^o7>3dt2gI-0>(b zcs}Os7n({i?2L+G@RyU`4b4i!qr2frxJ9$Rqcq!DZ=bDc&Zw zPWVPzuq2ys5CcvXY1VM}TX_46F`qTtlq0z~Gl2jXz#H|nd{0KBh?|g{B3b<(Q|P2)H|rON;SD z7t!N{$T;<+UFHj*(+pCtT`Nr7VK3_9n}c~Mck=Q5VH}NSVkD;&x`5hny1*EJqlZ#t zvk0#}U+ux-YCgKOB@tf*Q*|T@LK<4i!&89Mry#|LD(_Se#{_H6Gqv@EZ673ApT_kw zFF7OpU*T`b0DDaBPnU*C)+a#{pPsj0NIo5Zp~%|so%*~H?W>g&(o}}te7KSa28cMA_jV^R3WJ#DDI%uk!b<6M3>+y^aAVP9JSof;60ak;1 zU_)^VC=cSwX`RpuPdZt_qd{k6LEG#hYpwGS2(hAo7>N$^%4rRcxQWCqvF=HMfmWb~ zQfI9RjE2Co2FzPkHuHWtH&(s|Ehq|?)bT(jPGKBIYAN}xC zz)H9Kbj%L>jKddI&l@!cUq(%-@wGF>rD7~j0dY^;*7PV2foq^{@eZ}UmeD*H9v~~4 zw@Ynr!yd67x0i2l=h>{?;J2B#v)I@=qijeopfX09OtXE+#yB3vX=m6R9+RJ0n=L>g zd}r2eQ-o4dHC++C$ED}3kG({N|4?Q?Blk>$c`1;0u#mWT)ENH5OnwejzgWNB@C(Wu z`HqOLa~50+FAnuLlDHo^hj+7k*(*{H6TjQqR(%t_%$(gdH`PcBtKcw#HsbKYh5g0rOg*|ZR79kmQ8SS~a!k#i^YlCg+({#Lo*z0&vygBw@lI5VpW{OXGLLIF?4JUlJVF9IS0TtoVp^|Zl#2GY6ezA1Vo+>> z`qGcm>_c*b6N)(R(4I}ip|)Rh+s(#t({FlUn>}oY(TbDuq{u*<+Tlw|9sNEFy$7UE z$1&TY7uu&B`DdydTIaHZ7XTs~-l=Gt8n525v+lCk&NQ>P?av*#mhpD5xrAnM1T9ZW z@9yfosTg*T^hRdCSVlfnfJI_xrr-z>&6pf+|8Bq3wWBnvsqg?8BvH4iN(r!K#SGtR zxtjw2eUEHh@zx(6I+)zVnHm2ugS8M=;&Xve6L-OVk!?flNgi8Izx2*s#!>!qRNE2zqY0QQ| 
z&VC`^ppj1ru*j+G!KJyO(_!NSWH~7oR&0>sJzZHXM&IOfZYM*Jl8B4fV`VW8?&4uv zccj&_&WcM859++kjC=6Z{^>p$Iz@?mVVDV@iE|5gu12fHpzsXAM?Lm2(EYa~F{Sx^Tg(Ceyi4tu6ft`Fv-68 zfbMXj^GHJ`s^An*UvV=mDcKdJLg`A>QTtITORd54S)#pH{ntfALN4Y;jB^`t%IPzrz4b?p zxVv^=P}gQ3Shf46p(oz&OncusTF@_e3P30rj~k9r(B1J{?oO^zjs-en4BK$jbbA%8 zf98De^PPMyJdknG9;a8uP{7@ydjub?KieaKlxp;W+X}Z`a4bEk1q6*=i+&Vg9Jxa= z(t6B=equdpK8hbX#}_qnJ(;5_6TSHC_sVW-zI_T1 z=PeB1i4C36pbTt6*f2Z~L7tw(M(>&SDlDCg-#tYy_vX;OWqzPgNNa(P`c%*A-Qp42 z3Ng~V2T&mjLQseb4^gO3QiXEv%f)mVGuv-lHWP>AD?AvxaCU1}kYC^+cl464q7XPf zEFQCq#*|X@FBz_eaH?l$6po4EzM7wFP7tEaW%>lz%sF=t^TzqeC!P!3 zD1T~@lGD_J^|8^em1MNK4i3{Bfgh z=(9}{ch%S4K`J$U*lAd%!}HSr$VvnUPZWMOEL+s^9(LQlX<$=DlfL-g(+$x=pL?Z9 zCl)fw&e+zqILhg)Uyhj5^(AElc%5HdOCi=CleyX1uPFU!c%0}%Zt3)R&6o30UMS6l znFki_DXRKwX7#Q1RO?fY)dwHa3nqKc1nx#bPu-nug^eRpw&j;48==TJXu@wsknlv_ zob_DGEbDkHc;B;7DU#u-XL2Y85CiyBmM1=p8=ZyVLWLr3ta(+b&a}Q@ARKt##GfwZ zRngt6a_2^3LkK~D-R1)_-gY@j`F7sWStqLH5{2!~P+P%J@jWh@@4Te#3aRJD>|>@5P*`{7XjwVLJe$e;EaS84tJ{{CPbZ`NH zo<@V9!&#>TQ&lVi&Hk-v6?DeL&p;}49LoFqT;2jmEhwV4?Jp-cce$nokg`xjO+TIj z{O2$i>=VM86{rt*(wF`(doG8!t-0JtA@z?wa}lq!PfvKIqc|IWun`#eK@|p@+jAh7 z+LNtahY2pEfZP9&AkxI`DDG~d(N3-+Px%F{2NHs9Zv}d0r<#Q@?5GuS-pUwTo)kFfjeD8wq{uGhL z%<3rsk7SYJ$iz1?W-IQ5O{hz9AAAP&Kc_Bsf-;=3rCH#`+ZDo7fTY{v^ao={6_+>M z@+1G`q!+k3xa;O4@M6VfMt{GN{g`Mr#<@O&JEfTg@oUe_NXoGg5v>z5#_BL$tMa&~Qh?%VeG1T*_>Ox76L*-P4`C#9!u`O< z2gX_KSWj^7M2wo>3u{?Ewk~^eJI$jYm@&BD>UnNf7x^F>$%HcS9iRGWII;E3)~DPB z)Xe2BcCLpVdrnck1fvq5cec?Yvg8EUQJ(h9;spEVKHd7@#$K@fxpO{i^}~@@M*mu4 z)0VsW;f)rsn(rWAAOO>h2B;n4d87#`qtPYdC<5c!q~{@0Nb1fJ+JyI}3C*C@^ti|pjR|$wpq_&IzzDzCN~G2)Kx`v9{m}AIk4x~j{zF); z^7Pwu>Si?B`K~OwFiA=(+ZydxqMP-n=2CS-^0>CAAQ*LztBG*dW1Vh1Dhve?4N3w#ke~^kv$y4#n(x65zfqDKKm8)(z-#_WOZx-KU`0^>R^qLHNin_sew?OuEA>GW^rHpdx%!0Z zzPRK}k&NMTj+{YITQ#!$Qf&;Q1ZjNmO766-t|^|lfy2wm>59k& z2u4j4p4@>i!ah*+clR26Q>-m1ISK?>+{s7Mew1-}<#;IrTxy;$Hn&%uc^A1K`VYpg zag1@vS0^cV$L1YIyyZUDTnC+qt4=NClH33oU^`gH`#Sb_mziNXYsRlFRYY9Q-Af^s zKb@dm7UeRLu596Y8{5nlHQ}@OceM4&KYeoo|;~LN4izv_RVq 
zx7L7K`4RW&$0%1gmoQSSsBdq*?1gJ?S}bOCE)Z9n44ro3Z@SKuqtqonXe|xh)ioy8 z5`B`0MnByE9kkY&M5kDJV<9M@hoB2+jkvkQ#;W_&VZ7G><=PqUgD_0637k+Qu@WKF>RY#RQgxNXRBa|MMqn7YX zxe#V=tZeuho{4HDN8ar!*loqC4WW#z>Qr57BqCXlh9d~-?Zw@Nb)+`3hilMx((H@j z=pPOyt7=c{{7x4}e54ghvUs{%tNjNEj~-rmFFuXs^}jxEE1v4Ey_yssOb&W64F*2V z`{j2~bPJR1ktTZFc{x^aSL>rx-BzHZnGM<~tdJw{GbVzgGagr}p8{l79uDZJ>}V^*e8=gXw7LJ)ee$_2#8Ih~w7+9^&}4v-hXniY)MMUYhTcAu zd6^N`b|w6!#)Alcd|rQRHUU@o8H7Dj`VT3w!COXq^sm-ZM%z>^=`vnzKhy7wx5yqS z^EzPoX8a%fX0P<>kM5hd7b~a?-9G)L4l+e1LaIQ2J)Y$f-5dHv8ODQ12G*q+S@-P@ zBRE<+D}m0=E~P|O&)E@~;>6j0B)HoYM9f;2k2lwEKVf(ilIt$Fnz_K=@168br1qGB zZLE(gqtc;3kka@v^Qqn=rWy~-x{yQIGub^S7!QyW<+DvTGdj(Sl)igQJ>sBs2&Wf#6BqsLOR%bZ-ug~8&ozBmBr1&;&y0iYm12c z-ZEOzUInkH&eRuo8_6)tw&!K5tKm|`l}V1Y5Z|hG=0b|B!m&)V&2^<0&-UfcoJnJ9 zQ8$&0kIapom0U7Xw;xAiLL^sW5o`Qz(yuSi{q^g#5VDh3k)lzBqWclv_TDO;%WYd` z9vjXC$NbJTo3SvH8OVWAYRX}bZMWS81I&FWn;H;vtmQJ9(s8F&FB!+MHBCzV_bZb5 z#&A4IH_LnL^+_-2ytNaM?MY&NbR2vI`m+Q~Fqh;jeVFi7&ynJEjMfQi*#k+4|zFTc}VC zqDlrpEwskArUQedw$yJJJ`W97jYH?qOP8(vBafzU#%#~>UDRyr3XA3A}o#T{pE<9j0uV=~p`HA;E`r)1chqn^d+yyC?GrA_$g1QSk(c2vEd zMbM~zrPAMvRca4(!*$Z%pd`5HO^>X5rDu}0OW*qKqG7ctu;Y;Jj29t%-QDuN!rVM+ z`s0LMmDt?5uBP{6C!70J2w{U0tTDOKM4DAu*2Xd{ef+vw>rSEB|L~<;!I2{NGVtCl3Vpu-W%qT$ ztkU*fD_k=f)C%%^Od}D&1({@K1YM$`y!Xt~6-x*#gKtZ&BK$j955ACesNvAw&nKu-2ljUGlo zs6=i~;)R7p+UN8F_G|g7&+S)W0w1A8r!YQ#?e5B|gY}_(sFB)5(UI6p@Vi*Y9WG*s ztF75J;4cS38AavL-)vb``jXxFx@hVkUXu;BcZhUBah2`;l4SlMno$GZmi2Dc?rr*l zSuEUx3CR8G0NsQy(Pje1tfgrkDT?1s-n|3QA556ZrtMYinT-fcYfPY;SJ@u?ZtD`x zJCvcl2+KLU8V=!T`q5);KCTb+KA)NO#a1Uh|8gid^}$*cewlKD`&{KGqgJ0_C{>9k zM6wEDkN4|hH2N*pw3U7jI|6(Otpm|D~*rjjW1^Dg@GAOt(15eA1xXkkykq2DPWc0xqo zNP_$khHOEQ-UpEr6IOF(R@27K9SoyPq^zS5FFmM@oN$WmZ6kKq5EU#~P>!j|DkUePJG>6z zoZkASMg!r}_&ZdV%vhiK_E@K+fL=Zs$wOz>QlP`Pc3+|#Nx;z2T%al0KHkjr;3q0R z1(3~*3*Vs42|a2AMZPf{of%QGHg#pJNXA7^=xS74b@vEwBmfU%s2$X05afk4o-$+7 zoxS$*9UL%x+jofzH3bwG zS_U<|maX9x+meci*B6Or&(Gg6Q5awj?m&&EuGq(G@a?y48XJS%JV{7|y>zfx3@L}} zMgaBAv%49Dypg8+8js$1++C6LB+fwjI32tJD?^p)Z?Kz+6fW3thhp}nYzjYAzhkJ} 
z&0J9;C;4YwB6fG9q$%V1%UWc#;AsaTK`BWQ{oudd{GeQ;@WI$=&9^B_{)inBSf_yK zl(j6Tis4Muz#Fl$G)XBNF>uSKE8>TbloOi`E=;m*sL^Y$-tT=LMk=NHT$mfKiU1yGzpIkQmQhbK$ zQ-+z(k}~3ityNpRihjc1DYV#msA9ZHde1|k&4 z$f;i9#^zl` zQds)wFc~gL+5L)?aR!*_FG+<`J(>C?sdJ8EztgLq3idVnZ%ngoHXYZml|dW5`lqbU zDLl<)_zhAG=+!=~VEoySEnj{}6HH2|SRUN{=Go&P4+2c;hZL?_{cf+W8vG=a&4B)M zH8_JdpvT!G3sc6+`Tb-Dld|cE^ww5&ef!pacsGU0&iJ+Wv{G zHu~dVp-{#PgB7cqh3jXd1Iq|JK#>)&;m^MRxv;pVqJB*(@ppPfNASjHSemlu7uv9G z3d z&V!W!e{=!dWC;3C3h;dqtXG^>1K=9SOXA-{zvjV~qw{MAl7?RWg>B~-c;EqYFdKoj z<>Y6ob!IqqBflg?_bVBI$Apg_XG~oWRt7)xA7g`Mg$<$`mZk(j{KfDuB?Fe#PoQ5x z8NcggUN2tzf5*%GLZD|x_l44w z221}8@T~j&T(N!vy;Q9K4_*c=^C!Q!!Cr>@@W(cD(Rr}tV63NY|5{d@k2-%XtKaAq z-|sEfzOF`pI>_H#_x;mJCI4b#z=N_0`|55kRp>ucyn1^L^}GG;-+Kw|?+_5{TC0j5 zHAD<0wchO=9cPqqf}5C*-!__~)p)|2x2#GdrK)EtN6kSk6#UX7Y#xqhfoHUM#g?B# z(CE2X6KdKCqD8vRi`2Rm!S8?E&R9^-Eh@IlSrpQB^}Yx~5e&#-nIiBYTD!)CskH5J zMZJqUBy8$(iKnQ$ezlSGyUu{*{y>xT#?N1F5(t6brESi-o2jT6wKM54T<=Zonq21z zu7Lh~Ldm5d3ouik~w>6A&zj{O^**6qy!FYCz;4RF;YDYNgWIvN>Qr1^ zDBN{aIs8AsrAT}>%>%R!Po(MJWi&QDJNZ+_(xp!-Db7^Q<|e&#l40<0mptvFPvdC9 z3wypN=wo#IrQ!Ohi@p!rxG-NKaac0fykQO>$nRy%SNPnCJ9u)U{`Hbk!{-+tCpj``hm|S0+%dw!@^g76>*yDb9#C zPcm|jogsiXEBy>>M|9n3{2$M&e&?%c3t<3iaDdX?!mP|S3T!x zJ>KTN_T_-GwEWKY=kGErc}bMTr4rH~l6ZuKTMv;@UOM;$1nd%$JeWJaetq@dypp8v zS+birSSn!_N4R)sI^OXZD+q+J8F%Hn(6y2m`n$j<=hEuI`18-x#N9AvQ7lG^)f#Y} z(HW5|cRBLijI(Z;T9t-GWfjZxp|M-})wxKzRDIeH(9Q;8ZE zd1onF1Dvh|lgdkCV17dsd7_>7tifmk-}5@ikU;rb3D^;eLsSQwE*BISev?A#ccsj$L04@2;Wy6~=dUa7gF2xr}SEN|Xe6V-pewzMf;KRyiXRrFE z>Hm=xyjl?zOY!AYxm&_&Z-x8N><=kKn9Aa1o^(*Fs_rR%GLC&ko+(qMDz;7~Ncwyv zKrEdnnW^zXnpL4n?>EIn()QOT&`ODF5>+#|of}Sbw9eJVMiYy*eJDc}M%BxC8???a zTW-7@Ko%{nRn4G+Xv9PrUg)3kI_S_C*+p#ZBz(D*64Q`(#N25~=LA)=0kVS(1*;4> z!G?&Zi+x)I;mpm2fxj1Jlx=6!Pma)jeq#9Y+=WQzd>MShG?tL_7xOK5;=#ikkILeAK4~BbZE@AwJ zkCtYE*vv!YxmKf5v2?2yAN6w+U{}y8AK~_-aLf7MkYF-?@sXxq&=!K`>g0`|R7qYb z2F|Obu;$~J#B(VFrTRlNptJD{v&_b!XgAc@q0`^0&%J?}%tr|C%~RFLH5Bx5Rw_Q! 
z10{_;L7rU;qr*!bm>8|*SwOR1!i5g*=5S1%<9r&WLRwit<StA8PvWIjvZPD=Ym|jl6gF*sZaJA6xoc zxX<-L8I*9o;)RKG-x%Ojz?sJ)m*KnK)*K|-2>l6Nb>lOC(z#AAXtN2x?ozyr5&0K> zKzZx@Y3W6HPbEQB9rqn!ovYn&Wwgt!wR;jv{)5+(`flFUGuh00i9Gnro=TaaldI^dgCupx1QH% zT$x!!oDt)5TQ&AB6Uy8YC*H{%w1OTx>UfI0`^iZ3><&1s5^1^P&WOSm6;SCsF>(IH zC9OoY1HsxpdA(}gA|v5r+O*{yCVSF%UKzuSyZ{>6MrtcE#4l8cZEKvZrivF`+(a#R zxw8H@B&<1yh?c=)B2cr7!jDx{{r z6q`x+tTSOW3bQ7x;&h^IY+G-gV-&M~ZS{J-O`MYyXOmoy1_<2nw{C>CRLSCbw{~de zEoBA%K|){ly=dEm^z>pwO%3^i-2Ul@z3p6aO3&-*BaN>7{gD@Cc`kbAix);>VXKS+ z@Sk(19=|r2rsGezks=@-jKyxG{utFlqx_>{di$(Qq7g;(PTXf>^ zun&=K7I8<9rPZ71rB~lPbM^|CNFU-nv@$g*;M~Oy*a(9S(8}f6^_~`*5D!BA4!j$g7|p=QX_> z;)>zQ(`bAR4@DA}+$;?RCn6932|T`*w12-wyu^C&Afl5G8WU?%jpt#F?t9kWxo^v8@p1#R{EnX-&C5A%#HC`Y?V{j>jvNa(H6+{OEfx z9=579q|@o*vWbhWL?~vI@>uzR@PbBPAm@iop0q7Nxy-4js9gd}cqlj`!ylRpjHkCw=gdDw9R3Tr0P zeAtopP*5JJnecE-0|h_+n1=)kB_ophgHiN8cZ)@e70MTgSTU1j<7xDZvS^2SI{}(%h3u4cH3UHj)M!Sp z>X06a`Hhy`o$YN@2YAh%%Kh(f?{8KkZ&km)II?2Z7Tv|B7{f<R&IztXQu+9BZV>hg@WK={daW4i? zqqsyJIpT;YmmXOvrTaC*bBqx`1sNfTyHiefct@h{sl&c+wqYCDqn0V>!@jRxG9>Pp zOH(5Iq#9Ql+c5&e#%S0>HwDX+=3;v!mtep=^gr}S>W`;s{Z7?52Ir@&i&;=6K1pE( zC7nPee8+B1-Nx##`(z=^*X$JJ@jL2s^{I>EOKBx+`Bn?Gy8tJIo)l^d*r^K4NWvh- zS}faF^t3v?OA=yF)G4Kd;WOKW)fbXhP5~k_Jl53DIl9$30)=DNlc87KxpQ#nI}%f2 zwPBtEODu+OAr2z4j$E0~mUnu8hk{pB3BmK49-HD^tw%2qBVMK|Czq~vSDN%C2h^Ak z%mp_|odQ&v*hJ8o@T0K#<8de5>{Q&*Q(pt|s#oOGX&uz^3Is2fLjra-vWMA~V|UYu zG$#M=@t+X^$e4|7*#-O$t*%tk^wP>Q@v8B8i5W_{&fNH#J3I_?a}@#p&B*Zk|9i9i z@Hwq{T}~N3CuVaE{JmFSW}t%KB*%l5JS_fOueayn<5rOuK;3#~k*Ra>zQ9&aIPHJY z&Cq}L3n?k7IH?GIFv$?}_^%At#eIU71DxhX9jlKK`h!xAOu#Rm<6eFv@*n>Bf=q@@ zeSIH*E}AI0ZHRSh!hrzj1#U+0U#L2O2L4`|SBr%vl5VGx2o z;Bc|=iqR<9;x z<$r{ea+dh3$3FIdT+)}2G3bb$UMGP2Dp{*yP1mNA!*lU&@Ra)7{jR&=Y5d|R41Ni@ z?y7rLzWm_!bB#JF3{;>&rFZ4JDU30alZMV|u&r7g%gy$OoP+wZnMU|2*9|I?vix^2 zHVxACjJEf&wED>78^4CneCoQ)J|UkL_JS`df-~4frOPuzlWX%iT2!l2 zGHz*0(Sxa4I<_n)6i2alJ|oWuTYX9c+FOm}=c=}=Hx2x^cfiS(MMv-Hx4m?OQ#<_E}mzJiv)Nl_i1{gG^aH@APLh%clWDQI@7@ 
zCrMrXDw5^g6Sg&WzLDk=#km~@3w3aO+(C*uG&^U4LFlY^s?v(wBW_skb?Vu@n~cNz zAQCGQ~5$9b69&w7&8gSc-9ZmZ|N7upM45uIxn258Fk7!~9O2+|HvO z&)4b;(&$Lsy47L_I^~G;J*)UeU{St<{0Ek)GhVJ#&&smPf}Ed~Iq~A0OYK~ZBE;3< z7rJ9s55%^5tn2u%BM=fT8LLEjoUwElE)W8N2DIVa%WZqoo-%TvgZLsgvUW8i`d+kS z9eqNFVN2*n6x0f_UuX1bJkzcg>P`aZ))?-TD+cl9NCe;yTlJ`op|zJCJC(Mz-vT=a z5E-f35u$liyWSo9={H!tYrKOr zDN{Ad!QYGX#6Pa-&&f?y$7mR*IqzRY*Sxh*Y7()F2CzG!A?1VIX3Mm1j?ETVq(kbW zKsA9xJmvfJVlnW9zpwQLtL6<nkr2`bPnF+-a@K;*ewm|D!aH?#EaHciufWf0WLN zao?2z+Zu7TFmjLYk?njU1%ZDqBT&OvNx0Td*;H_ecp>c7@!3$1HLi#FccizPf=AH; zz5EuO%q1Cj_3_tx>PgDNnJX>Mr?d`YY`DpSOh&XA&avi(hAKY@DBguCEBB5_<=3kq z``%sLuj;mC#hM#^BR=>h0sP}rFbt(v)^Kif@oKHPcNVi4^7?O=>X!4A2+HcN@YH7q z1=bllc9S1)oh);*n4;Gvp0$d194gpp(oSHtO@YRDr+XAfn~Wf!9R(BD{E&t&STluG zZ7^L$E{;#dtsv&`pYN73Rqcn|IhRxBTKGleYn>UDQ!^~}>tMwu9_@I}t83ZN4eR(K z);`lh%QyuTmZM5OP7*4XI4U{9SxGhNcugyfxd%#$f?FRyGYgKt^G@7(V3jz+0mrec zzepMo&Qa3UmuzQ27)>U;v0ZjCCddaarO(#q85FAUf7MOw;c*am5PS4U{y76iJB~=q zt`H-7tW=^@i|7s+_)9lDA@J{f{rykqyh-Mc8XGSY-Ok*}D2zVk`lSQ(#Xf}A6O_(M zN*axmP&bd4GI!<+lW_u$NK#TRk{ya8JaJ5=|NJfLF4VdYWbbmAP*`9R#a2N9vePVs zc?75mGe2yCf7>E(hd}6QGajL5*MglP!c#5g6HTGk1Mml**H^BA(HxO2Q(rFCu8JnB zC}rR7^n|M$E;BB0d){}}f8XW4PHg8sG}?~ZByh*^{?=0IOm$Mg|JSJN3&ph4-F+eZ zLNJf8twT*Tt*cP?{+ak$0Va{`o^mi8_y+ijSDf7%=P^JH4e?b z5M~2!1zFBwY>IAARYFnRf!3L{I90Kvy0i7Q*9z-)+0)QFG|y#oXh83*J7D!Dka7fzF>B*)-kK} znl+#l7yd71Phh&uMxh?>N|2|>cBW>H|A#t@OOKyk1}26R)8^G?96xV;!*Q%L-saBbztBe)$7YXk+I{6>UATX zC9hQK!s7Tp)bWP01{uBxpSCOWoh{F^mW^UI_a2vBb8Roy+m!tG)20vg&3_hJ+X#7s zqKSv>LdpA4ee;Jr-X)KEoFeT%^fO-(__F~h(iC*GJ!tFo74Q9%dh^6?fO7JI>(TVqgUE zkadh1D3fu50wpQE=i!FlBT>(EmdPch+k>)zJ==<=2!rR_7sh;lzV2}P_wvJ9w)M){ zzGu{n-d*$F4=NbI#TtdB!-G50o<}8GHq}3rV_%^N_SZv}rQLi>v)2dSn|D;|j_#J* zCyo?Vf;BACR>vSPTE|Hg8Nd{^+Z26SLMkZF5I?r(~V4xPo;nR kPXdcKvB`?8W-3x<^X7p{+6tsIj5sTR6%GRaiv52R0JidclmGw# diff --git a/ui/images/screens/Events.jpg b/ui/images/screens/Events.jpg deleted file mode 100644 index df22149816da91fb206fbc155bd37e5503b7a0aa..0000000000000000000000000000000000000000 GIT 
binary patch literal 0 HcmV?d00001 literal 37224 zcmeHw2Rzm9_xSr73CWB?_N>fM_8ymPC1oTl$;e1#wWx$#GkYc!pKL0W%Fa$kA~Q0w zvi;xhi;R1zKHu-__g~%n+-IEUob#OLJm;L}dB5-bvixzm3)nAv)!Y;S;P6AhZeabj z{0$(MaWJxQ0$>0Z2o?c=II%S%>vLRUo@kj#a^It1oophKeATZl49LZi~1R#u~vwzf5)Ap%dWa~PRFC1*0yF`Jc5FPT->}|yu2JB28W%KmA&Cr4l6sxH4ZW+c1E`5HumP$ zR&)?Y!}Hb-_M!}+q$^Xfv{6?6$r!D+EG?mVA+hc3HB8pUK#OgsRrD0-c?Oty^Ya|zKPD+4aFmBfmRpJs ze9OoP@=8h`;}g|GBR1CHD9L{*pXI9+SbI--r82n+WOMk>Zx6@wzsys zU~NMut;tWPY-nU|1+^j7u1acRYktMVSkBhkk`AdGA@dF6a~~5Bl;h*&<>uw%li@ik zcl4;#ab7uI0ominWVw&?ax<*b7;l*GFKM{I!MUJ?+_11~AkbQ%>Q6HT8$S)*#0s=- zThNS`M*tS2aRSE0pu4~@fqht*yYL7AfXxm7jz0nbV+sK9H(hQ6ZUMWnu&}VPc41@h z!rcXa@OSUpwHtpA9v(g(-X5Yo$QRL`y@W)Bd-sr%l97>;Qd3b;QPci>!EkVJ2=)-{ zCnDNUxsP}s<?AP5d#>+Fic|D@*7|nfUUgN!vF>h6AOD61O+f* z>tFze0l|p?3=jbm6B___VPHbtFk%u6ZcHp5$$b)1q(pS|yku(U7>@E`+jtb$?ji=s zFm~<6#K8dPj{{)?7!o|hm#@dZ;-wp-m#iZ}VER8}AmUaxj3YhTg(YQM&!?ur82|nJ zjf`_1ufLEn?IZa)5;Pk0B05(Kqqp(kF+5KSfWly4V`BfD1&nyiu#Wz_1drrpfA!dB zM0DKeUa4L8uz672wcHBq0cV6kj7bd2F&`1(_E4f2*yXVYY0Zf2KDgq#e)W2r zcAL7!#rgVs0|ha$M{h}7og`>?Wr185_a2*MUO}bNY;@zBq`?<^QbbY{&lOdqcTn}A zS^uKDkWl+F5cI>vy>?+4$et8hDA+iHlS=;u8lgc$+)q~fa6c03-lLl=#;nKz0LRbWIf|5mW(>YCPXN%o z&v7GL$P9123e{Yz1~voGw_uaKUoIfK3}c`I=IH?yyC|U?*uSYu*2Agz2nfaWZ+u{* zDh2TuJP2p&bOb9nywywy(gF_v*lQ97m&a5o1^}#39VRmM7_hg;0Y{DSz=T6|NOC>X zw9<4Y2y}Cq?;O_|&yLRbaR5~To1GZFS!q`B!KG=idP3)iUAk@zAg0qk9eVZK%v_Da zg!!DlD2RyJ8(Cw1D>}S`X;B4K4Xjfz`b0^$g&4kI%Roq0k-HEmOMgwFgD~JSejid2 z=Id)7+>o*eaYumtU?hO`2uH#^FeL!MX>Y#-a>CRKqI4xlmC}&5I|_STRRr+?U=JU% zKvDxhi3HTb1UZ1O)h?mPViB{bav?GYA`qDH;@dclha*(wIdxT4k~KjBtiJUY@rpV4 z2CQLJM=|CnT?T7F0+azl>E{R!RMZ>{p$QrWn=tq(m*;F#l^syu+r}}SikBL5F<^a< zUS@BLe473kSckzUvgxLk20Xwzo&=Zym?_i1_`Y+H)@~F-1qME{u^U=l0AS$sfQsGTF;D^ap+j1wzq4ZSYa~ZD zbJ{!z17tA(uu4Mr&t7Z~sEri}UA|!U_=M0kUuK zrN{ALWNimwSGx>r|3nq5n%}Ii)kRrdhE?s|C`OC4vT|Fy##d&!i!%n28s?2u%NBjC zby|GHe0LE!ISmbDv8u1teqaTNO$YVCnyvv?t0kZ+o+7eX)7N$@!oFoGh#=ZsoVTC? 
z<{b+a!1)0Qs33nSv2`90_^)w7aX!c(b5IH6ZSsIhtjd*cN;RnU z^U83g@qa=N-a^<7-WP#kV|3>(Oqb=zK0Fev#Ed96$>8w6nkD5p4SUrDKDl<=kRZDzi0B3D|vb7~IeTpiBb9@y5| z6$WB|zoZHgd|UQ==;HMk~D<_QX5(h)F-Jsv&64Z!wJiG16yP^GUwU#S2W&rLQjo z6m=lArUgN6MMu9KcYDdhs;dj_Pn=$eI7W4RSO!?HJ(vv#v1nO?-?`rxA6gP}pOYsf zxSxBpz0}c63}?Y$jKgo5g5J`sM}Uz2AC3`{%DFsKktiCjzRpm);~%-`Gn@$GX-*yJ zpE&RG(c8`vf3{xL5TAxu?2>9YY2TaKXFPSCqm~?g;l{H$s=t{jM^?YkuFl|pW!?KR zC}bb*_scy`J4ncX5Ku>^QPAo7avq+`dq>qSLa_|Q^1 zi~*v~Zlpkxx|yPo^%-tpi$pr#|E+YY(r%Az6c>j`u1zWZ0W)9r@x1c)QD^gv{5wt@ zo3?Y-88PxCog9rC>1b{H+|u4qV@J&$os@TWp6ggqMrNH8(?2!T-$h$ShKCcQj#x6- zXM8H5zAsi~Fz57GpnGMay}hNR zyA=s%La3NF6N*kQ1EjOPPsY1N@}B$k_2G^lna-zv_~lWKF^Clq?L~XpX6eWkrsx{3 zYXmQYqB^Kfo4m{q*7a2u$S5C%_%lT;9H000`fl`$X~`~|)o0=PsMR>K17UaS!&wpP zE8MT)3loLnAr$w>`<%K$=({~Tru(PMAzFl;K%UcR(oEYt>4Itt5&~!WL1A(23Y_)p zN|Om;efdgmu5ol#z!XFQXX^Q8Sk_WPZ3D7hT!BPH-WQ}gZsfZkF8*+hj%8&3$nf{+ z%O6`1?o6wA;q7`)hg$>+Ts749``<;A9^83zUja?J&4Uix|1O>p2@fNhMD~v9yd0Xu zj+Xm<<)TcQ@cDTbbk?eE#= z>^k9;=*6zt*DBz6`BRB%;i>yhoZ|7O0zaO$JG)LzFI72PpPOju=r)OR-{sEvUeG8z zbs_(WHWh#H`vZo)(RN~DI<)1%r!Bh*a{1$Z9BMgH%T_-O@0 z=_AcQEYgZ}#g$!LF@`|EeD-4NPYFmDw_70~!V#?|6sX!W%}s6Ga(dOSox5B0x4Yl3YAx;LF(wc6srYl9yK{a*FS#%FjfO<3d1X`<} zTV7A(#qiMsf?z?@MoLgE1!=$$ZN;E^UI~~;cru6Ru<(5de zX>U1RfEw*iKSLAjg3v^^|0q<3y8gj#vJxh{5l>O$6+RQ{7K=6lN|h_4Qlm4wxQgbj zdv=|@(d-V9aFeU^oJ5URvFA}EWeVB|csd!C8m-x#JcF`lAG+^W^{Z3Py8jm$dfU?WW!^mtG;{esWe`b+Cx->!Nn=LGhG@_@pU=K>MZC zao^}BBuW{GGC@U{-)~nb%LKF@yYyLc!Ef#=SJ^ULBffrL4cZ;u+m>!GmO3sL4Q$^~rgeC4@hUFr%oUvpE!PIsl7?4i zjt#GHXhX;N+^QE?4UqJ5QSj=X1od_iUKXzcLaRb$P5K|KWTH&e7@X?IjO} z_qvs2+Rhv0PkY(r4;~>%ns;l>3e>{Nj^3J>@7q8QWrQoeSEkU}4Cdd0+2fU!Qz-hq>A1OdPD%Ps;#! 
z>k<=bSNbs*7%bdxeC|x9{^FDAh;U4pn=ZIOM8Ie%>=^r17#qVOg-Yeqql+qI8jd$E zQ!$HrbL-r2z!9^|)FKr3e4BAsU9@r;aD1%J zZ;T5{a9R@3!VIb@Y1_zZE}W%p`@WBU8EBce^tYg{AmG0#Yjly|;0HheRz=Zkrlxf) zub4H|S}*uYWX*I*{++fkUz&G&hw=+M-nE8OvkAK_1I6?16(U8&ObI)@5|;=Ar;6?c z6nuUQ|786oVI=pvhU3|pWZ|a^T9F@?4yg>e?_+iNi}1fm57U~;Fw5g?+WjZKX7fi zmcw_*@!FAT+r8aN%Rr?<(kokvEb)b`;U6-Q9?CcZa{86~L`!!Odi(5FlTR_=hlf13 zfVVCbmTG%?=si*lxg@Z!gLBdmcR2fv`b39`qM7xm@c3~J0v6vhW>OYjmuu`k4tp)> zs72t@ISnx=ehJzy&Y#p7VDVn;gHaV8TY{?-v0SC@gZB=18O-x z1cHkqi|}?;2Rdt-UyITkO#Cb=aO`D=;|0yk5T`M=W7+MZ@9-|z6?KYw=~aaA3EQ1< zV{l)$Kl#W~mVBekzhwxL8UOQq*TR^79Hf47==u_McrJU(&Hyho9N~{Zd zM}8b^HDql3JVVwueJyPy>)?wvKac6t&rM1ncsS{Quc`6 zTsR9(b~CrcM5*Az58uzZ(-%KW#g9;6^zStY9z8MqgL$)l((rTJ?dKhvt^a8reePPS zn(KOHexmJ4&COe{Bz-=9_(roZ{VB7vi(Ad;3jcGQj2h>Wwv6Y7%EP%=mVvu7^GAMs zD(p^bD}3(i`4aXFzWGJj7`W->-)xK6{1O_`*!()hzWH^SY!in-|8xn4>8#^?B*R%s zZ`C_ueP;-si<$b|nM=&@3vCViz91ES)#7k1reAT`@kd1kf`V^3fQd_EI-a8``iCE8 zB=`|tb!g8~79Qc_dBS&mLbD*+s~Yd12(g7DV4mJ_(2+}GuQguLqIcLhNtY`o^&GH_J)hpF9_;{gK09bux30Uuag?Hh6;vsfq8 zrF|Igz3hFk>m>|k?jbY+r3?&6@)V4QeqeCS!{Mp~r6>2U*!!5NB@!oL<`WP^=mi~t z^nOgWcd(|em_r^}v&A6fpgs|Z277D-lGW~$IOEc}_jwGMj(B81XmkXoz)%60Fv{@h z2%Is159faN1I;MikMUI_V9oYm!hS#w4#T95p$Af>*xr6$Y7!Vo= z0S5ps?k@%<-J4DT)51=A4?#fjK{QZO95BpyWECHDlmL?s%ovs~AwgNS&Yp4-3-)Al z_nA%7Z9hkF)_c=;gQ|dpa=3Z{FNQ<8P<@%I;vj)NOVuNF+?5;}lIEzzz_koPKk&=>%V&E4di zq}zUuzrQ}~wg4^TFN|SJH*z(8F6O^5%&m*I8~?Iyo_mF)Q@$BsPe4Q$T{57xA; zutEU^PxNiqwE?^5iiOmM=63x2Tb|NQNjcujbI0nbBg$FOF9;&X~4#0$o)AvU{ zyQ)v|_RIIHFZ8)6Zg*WZpzvO6iK`^d?J`ZMf9M?k(c18(=QPz?A^MEts9j2pv`gf; zWBF&+pT&Ttb5fP-o35pOO017>g?njq4(GI4{!G~owAzyfw24n`57(tmwOQ;v8CP9> z`oknvxeurNxzar>wl?gl-3cnZ5%Mt)b2AAAA5HT0DWxA@1_)B;2rtaKpCpuS5ASJL z)K6G+HaIO`Y1(s>rNkDSsWl*4k|#?=PouNF{F~SrzE~WkfrZ4WQPmWBwP8WOOV50> zF>P3$^s;m>5g|?#Y6Qk%e}JgaTvqp zNrPV8D^CER$rYnxu3YA0h;+rfuVqMl;sP^P)s$pR0YQ1=B3!DUsm+`qXBLsra5BYa z?A;p+g`PH-HtXWu6v;)KlMAUj%l*ZO!E*zQgT^<^d{;!pE&K3K)P;o!WHM~^ zYKyYm-LZvKT}4!#_2ISRLkmKKac{CN@-tox*J^##B#@k~oMNfDwQ-WIH7elDpr!h7 
za;p9NYhf>4r5mMsl9uj-$znxi*&VLvJET1~(5j(&D(%F#!G@zlJ(ld^CkK`RTn6qh ztN`^V3+?i}lZO>9Fjpp@aEBLGzp|q=P?yv3I+uuj7|u?eC{OPfXK8V|FQj^WP~=_Q z!B6RCOGj$&=-*qnPvo;3d{8KMcP;qL5TzSSKwm-*hxhr9zsibACp8L~ zZart-x5PjHWAt;8+v2vxGC;n4=L+F>ZL};JhP;^K=b6D@B5wc2ihNhpRT?XKeTTQl zqlO1dxO2KO#XG$<&CA@e7CF_UtrAp8qLNkaUK86p~OU87(Gx>Gh2D#pt|H^0+s(F%fb66!h1t4Wxzt~+E$ zo^$2yQf8?wLo{)*M7Sr@jn?g%aU&wI%;5Glf9lS9#+m#n)kqcvKcx>JC2oGA)era_ z#y&yC5bJk4qHFa9hen1PfK`(FXeO3 z@$36;&W`pI5#daaJG;Xh-PuWb&)ldxI|{p<%HM!7ILCI%s3EF9B}&R&zIcXEh2u3# z^obfyDaKS&>_{Cd;&jpAXdju zeet1CS}FRe#CTzT9@Mfr5o$;WW+ zO9=Vcp&1GOoU+$~kFpmc%?I>$%|FG{3u#Fho>ZKu3RGN>*y}o3D%%(2tzB2$K0C+k zCIPD{3>|n;|Lo$NVDvp|^4MP^z~cMnW2+vW$eT2npy6b8B3vWT6%N7`imf{@?T|< zM;lTnbsu*q`tv zkEtG#6E41uC0!z*(FSK=)r{7T9~$7-%EGaCQ|%frmB08k(<{`tBL?H0u|KWaOI;Hq z@SaY0-wD-YLiP*MCB0+R*|r8|8PXr%Pi7V666zi(%96aq_9`#6GdS+ENL*K?lAZ4B z>DgItX`ZB2#1QNLD23^rz}<`4G^qhnG7W0~G|IHytPmflWT2^P|0jT5x_R84(5p`F z)<;+bZgrVUsCzxJbJUrae6_W^?x(|NS@H#^z;U}Kn`A=Y)p4APZ?e6h5O$HA@=myo zllS$p8ZX#z^j?g(1!Stp{8)H= zue@9uW}6r_PL9iQJnu1N6012s(w4lL3pB1Ci#fWuV!5`}s_o_sG1TXd! zzLy`E;RvoY$!NKhb9SvF6cv7BV;kx)W0*3)_${Dmx{6=$PhM-4rmuD5TZTEyaW6nt z!8Pjb_hMTiex&Y2GrNkl3_PHuGkuMs6jnCSRD8{>1i6~(ZZIgBwR|(6#$k-thL#sv zO>FELD9J{faf>PBRo+LF-X^DYIfWd!jG7{)Q)Vd8b%Y#f6iUC2h63afl@#BL$_fb? zbQaS=PZX9iP*`J}hb^_RvOHbI?t~%8k8M-AYgD-n-feIxq2(z7SCzCOB8oP)yJ zK8j8#k^jE0NxH$n1o~PPA%c^)8%^T5XpyIH@=g~unW=$>h_1urox|SLPBauC53^)2 z-lHdSj`9mePh^@>z?J!LqMpov_tq7<|33WoR&Ap7o@ciDshvMPajk;ZdjyYzlR{y7 zk@G>}wwul$``R;;k7(`Mc%Tt2{P0tkjjr@_Yh!juzmCoaxAmNo(~FwDkto|E=v7Yq zd^^N5QAFoHd~j=m;veKz2FOpPHuDZ{*yHAhvLhKKf6YS)jJ8}S$9@lZaT3kiRO?65 z9@*-WACvjCyihhBS#ZZeZaTl~Yib4_U;_6wJIo$$bemM={WD=B6c5qv9^3ClJ8WRv z??pRoU@2+d|1RnYF|=&iXpD|N>e>P_LGL}fAt^H`8yZ^gd9NZQrh~%tjCTUppC9nH z^);Opf_XgJ``6{c-SA z+*kZRKj3Y