From 64832fd70afdde668ffe7be864d8015a89f96c6d Mon Sep 17 00:00:00 2001 From: Abhinandan Prateek Date: Wed, 10 Jan 2018 22:10:41 +0530 Subject: [PATCH] CLOUDSTACK-4757: Support OVA files with multiple disks for templates (#2146) CloudStack volumes and templates are one single virtual disk in case of XenServer/XCP and KVM hypervisors since the files used for templates and volumes are virtual disks (VHD, QCOW2). However, VMware volumes and templates are in OVA format, which are archives that can contain a complete VM including multiple VMDKs and other files such as ISOs. And currently, Cloudstack only supports Template creation based on OVA files containing a single disk. If a user creates a template from a OVA file containing more than 1 disk and launches an instance using this template, only the first disk is attached to the new instance and other disks are ignored. Similarly with uploaded volumes, attaching an uploaded volume that contains multiple disks to a VM will result in only one VMDK to being attached to the VM. FS: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Support+OVA+files+containing+multiple+disks This behavior needs to be improved in VMWare to support OVA files with multiple disks for both uploaded volumes and templates. i.e. If a user creates a template from a OVA file containing more than 1 disk and launches an instance using this template, the first disk should be attached to the new instance as the ROOT disk and volumes should be created based on other VMDK disks in the OVA file and should be attached to the instance. Signed-off-by: Abhinandan Prateek Signed-off-by: Rohit Yadav --- .../cloud/agent/api/storage/OVFHelper.java | 336 ++++++++++++ .../com/cloud/agent/api/to/DatadiskTO.java | 108 ++++ api/src/com/cloud/configuration/Resource.java | 2 + api/src/com/cloud/storage/Storage.java | 4 +- .../template/VirtualMachineTemplate.java | 2 + api/src/com/cloud/vm/DiskProfile.java | 4 + api/src/com/cloud/vm/UserVmService.java | 24 +- .../apache/cloudstack/api/ApiConstants.java | 2 + .../cloudstack/api/ResponseGenerator.java | 2 + .../user/template/ListTemplatesCmd.java | 9 +- .../api/command/user/vm/DeployVMCmd.java | 48 +- .../api/response/ChildTemplateResponse.java | 66 +++ .../api/response/TemplateResponse.java | 19 +- .../storage/CreateDatadiskTemplateAnswer.java | 38 ++ .../CreateDatadiskTemplateCommand.java | 71 +++ .../agent/api/storage/GetDatadisksAnswer.java | 40 ++ .../api/storage/GetDatadisksCommand.java | 43 ++ .../cloud/storage/template/OVAProcessor.java | 79 ++- .../storage/to/TemplateObjectTO.java | 20 + .../com/cloud/vm/VirtualMachineManager.java | 4 +- .../service/VolumeOrchestrationService.java | 3 +- .../service/api/OrchestrationService.java | 4 +- .../api/storage/TemplateService.java | 2 + .../image/datastore/ImageStoreEntity.java | 8 + .../cloud/vm/VirtualMachineManagerImpl.java | 29 +- .../orchestration/CloudOrchestrator.java | 20 +- .../orchestration/VolumeOrchestrator.java | 13 +- .../META-INF/db/schema-41000to41100.sql | 126 +---- .../src/com/cloud/storage/VMTemplateVO.java | 13 + .../com/cloud/storage/dao/VMTemplateDao.java | 2 + .../cloud/storage/dao/VMTemplateDaoImpl.java | 14 + .../cloud/upgrade/DatabaseUpgradeChecker.java | 2 +- .../motion/AncientDataMotionStrategy.java | 1 - .../storage/image/TemplateServiceImpl.java | 352 +++++++++++-- .../storage/image/store/ImageStoreImpl.java | 14 +- .../storage/image/store/TemplateObject.java | 16 + .../endpoint/DefaultEndPointSelector.java | 3 + .../image/BaseImageStoreDriverImpl.java | 74 ++- .../storage/image/ImageStoreDriver.java | 11 + .../storage/image/TemplateEntityImpl.java | 6 +- .../storage/volume/VolumeObject.java | 6 +- .../ClusterServiceServletAdapterTest.java | 13 +- .../manager/VmwareStorageManagerImpl.java | 105 ++-- .../vmware/resource/VmwareResource.java | 378 +++++++------- .../resource/VmwareStorageProcessor.java | 20 +- .../src/com/cloud/api/ApiResponseHelper.java | 5 + .../com/cloud/api/query/QueryManagerImpl.java | 11 +- .../api/query/dao/TemplateJoinDaoImpl.java | 28 + .../cloud/api/query/vo/TemplateJoinVO.java | 15 + .../network/as/AutoScaleManagerImpl.java | 6 +- .../template/HypervisorTemplateAdapter.java | 101 ++-- .../cloud/template/TemplateManagerImpl.java | 31 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 86 ++- .../HypervisorTemplateAdapterTest.java | 4 +- .../resource/NfsSecondaryStorageResource.java | 489 ++++++++++++++---- .../smoke/test_deploy_vm_root_resize.py | 4 +- test/integration/smoke/test_templates.py | 71 ++- test/integration/smoke/test_volumes.py | 4 +- tools/marvin/marvin/lib/base.py | 9 +- ui/l10n/en.js | 1 + ui/scripts/instanceWizard.js | 49 +- ui/scripts/ui-custom/instanceWizard.js | 32 +- .../utils/hypervisor/HypervisorUtilsTest.java | 7 +- .../vmware/mo/HypervisorHostHelper.java | 234 ++++++++- 64 files changed, 2642 insertions(+), 701 deletions(-) create mode 100644 api/src/com/cloud/agent/api/storage/OVFHelper.java create mode 100644 api/src/com/cloud/agent/api/to/DatadiskTO.java create mode 100644 api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java create mode 100644 core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java create mode 100644 core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java create mode 100644 core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java create mode 100644 core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java diff --git a/api/src/com/cloud/agent/api/storage/OVFHelper.java b/api/src/com/cloud/agent/api/storage/OVFHelper.java new file mode 100644 index 00000000000..167d5d4fe4b --- /dev/null +++ b/api/src/com/cloud/agent/api/storage/OVFHelper.java @@ -0,0 +1,336 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import com.cloud.configuration.Resource.ResourceType; +import com.cloud.exception.InternalErrorException; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.log4j.Logger; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.NodeList; +import org.xml.sax.SAXException; + +import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.utils.exception.CloudRuntimeException; + +public class OVFHelper { + private static final Logger s_logger = Logger.getLogger(OVFHelper.class); + + /** + * Get disk virtual size given its values on fields: 'ovf:capacity' and 'ovf:capacityAllocationUnits' + * @param capacity capacity + * @param allocationUnits capacity allocation units + * @return disk virtual size + */ + public static Long getDiskVirtualSize(Long capacity, String allocationUnits, String ovfFilePath) throws InternalErrorException { + if ((capacity != 0) && (allocationUnits != null)) { + long units = 1; + if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { + units = ResourceType.bytesToKiB; + } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { + units = ResourceType.bytesToMiB; + } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { + units = ResourceType.bytesToGiB; + } + return capacity * units; + } else { + throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFilePath); + } + } + + public List getOVFVolumeInfo(final String ovfFilePath) { + if (StringUtils.isBlank(ovfFilePath)) { + return new ArrayList(); + } + ArrayList vf = new ArrayList(); + ArrayList vd = new ArrayList(); + + File ovfFile = new File(ovfFilePath); + try { + final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath)); + NodeList disks = doc.getElementsByTagName("Disk"); + NodeList files = doc.getElementsByTagName("File"); + NodeList items = doc.getElementsByTagName("Item"); + boolean toggle = true; + for (int j = 0; j < files.getLength(); j++) { + Element file = (Element)files.item(j); + OVFFile of = new OVFFile(); + of._href = file.getAttribute("ovf:href"); + if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) { + of._id = file.getAttribute("ovf:id"); + String size = file.getAttribute("ovf:size"); + if (StringUtils.isNotBlank(size)) { + of._size = Long.parseLong(size); + } else { + String dataDiskPath = ovfFile.getParent() + File.separator + of._href; + File this_file = new File(dataDiskPath); + of._size = this_file.length(); + } + of.isIso = of._href.endsWith("iso"); + if (toggle && !of.isIso) { + of._bootable = true; + toggle = !toggle; + } + vf.add(of); + } + } + for (int i = 0; i < disks.getLength(); i++) { + Element disk = (Element)disks.item(i); + OVFDisk od = new OVFDisk(); + String virtualSize = disk.getAttribute("ovf:capacity"); + od._capacity = NumberUtils.toLong(virtualSize, 0L); + String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); + od._diskId = disk.getAttribute("ovf:diskId"); + od._fileRef = disk.getAttribute("ovf:fileRef"); + od._populatedSize = Long.parseLong(disk.getAttribute("ovf:populatedSize") == null ? "0" : disk.getAttribute("ovf:populatedSize")); + + if ((od._capacity != 0) && (allocationUnits != null)) { + + long units = 1; + if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { + units = ResourceType.bytesToKiB; + } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { + units = ResourceType.bytesToMiB; + } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { + units = ResourceType.bytesToGiB; + } + od._capacity = od._capacity * units; + } + od._controller = getControllerType(items, od._diskId); + vd.add(od); + } + + } catch (SAXException | IOException | ParserConfigurationException e) { + s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e); + throw new CloudRuntimeException(e); + } + + List disksTO = new ArrayList(); + for (OVFFile of : vf) { + if (StringUtils.isBlank(of._id)){ + s_logger.error("The ovf file info is incomplete file info"); + throw new CloudRuntimeException("The ovf file info has incomplete file info"); + } + OVFDisk cdisk = getDisk(of._id, vd); + if (cdisk == null && !of.isIso){ + s_logger.error("The ovf file info has incomplete disk info"); + throw new CloudRuntimeException("The ovf file info has incomplete disk info"); + } + Long capacity = cdisk == null ? of._size : cdisk._capacity; + String controller = cdisk == null ? "" : cdisk._controller._name; + String controllerSubType = cdisk == null ? "" : cdisk._controller._subType; + String dataDiskPath = ovfFile.getParent() + File.separator + of._href; + File f = new File(dataDiskPath); + if (!f.exists() || f.isDirectory()) { + s_logger.error("One of the attached disk or iso does not exists " + dataDiskPath); + throw new CloudRuntimeException("One of the attached disk or iso as stated on OVF does not exists " + dataDiskPath); + } + disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of.isIso, of._bootable, controller, controllerSubType)); + } + //check if first disk is an iso move it to the end + DatadiskTO fd = disksTO.get(0); + if (fd.isIso()) { + disksTO.remove(0); + disksTO.add(fd); + } + return disksTO; + } + + private OVFDiskController getControllerType(final NodeList itemList, final String diskId) { + for (int k = 0; k < itemList.getLength(); k++) { + Element item = (Element)itemList.item(k); + NodeList cn = item.getChildNodes(); + for (int l = 0; l < cn.getLength(); l++) { + if (cn.item(l) instanceof Element) { + Element el = (Element)cn.item(l); + if ("rasd:HostResource".equals(el.getNodeName()) + && (el.getTextContent().contains("ovf:/file/" + diskId) || el.getTextContent().contains("ovf:/disk/" + diskId))) { + Element oe = getParentNode(itemList, item); + Element voe = oe; + while (oe != null) { + voe = oe; + oe = getParentNode(itemList, voe); + } + return getController(voe); + } + } + } + } + return null; + } + + private Element getParentNode(final NodeList itemList, final Element childItem) { + NodeList cn = childItem.getChildNodes(); + String parent_id = null; + for (int l = 0; l < cn.getLength(); l++) { + if (cn.item(l) instanceof Element) { + Element el = (Element)cn.item(l); + if ("rasd:Parent".equals(el.getNodeName())) { + parent_id = el.getTextContent(); + } + } + } + if (parent_id != null) { + for (int k = 0; k < itemList.getLength(); k++) { + Element item = (Element)itemList.item(k); + NodeList child = item.getChildNodes(); + for (int l = 0; l < child.getLength(); l++) { + if (child.item(l) instanceof Element) { + Element el = (Element)child.item(l); + if ("rasd:InstanceID".equals(el.getNodeName()) && el.getTextContent().trim().equals(parent_id)) { + return item; + } + } + } + } + } + return null; + } + + private OVFDiskController getController(Element controllerItem) { + OVFDiskController dc = new OVFDiskController(); + NodeList child = controllerItem.getChildNodes(); + for (int l = 0; l < child.getLength(); l++) { + if (child.item(l) instanceof Element) { + Element el = (Element)child.item(l); + if ("rasd:ElementName".equals(el.getNodeName())) { + dc._name = el.getTextContent(); + } + if ("rasd:ResourceSubType".equals(el.getNodeName())) { + dc._subType = el.getTextContent(); + } + } + } + return dc; + } + + public void rewriteOVFFile(final String origOvfFilePath, final String newOvfFilePath, final String diskName) { + try { + final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(origOvfFilePath)); + NodeList disks = doc.getElementsByTagName("Disk"); + NodeList files = doc.getElementsByTagName("File"); + NodeList items = doc.getElementsByTagName("Item"); + String keepfile = null; + List toremove = new ArrayList(); + for (int j = 0; j < files.getLength(); j++) { + Element file = (Element)files.item(j); + String href = file.getAttribute("ovf:href"); + if (diskName.equals(href)) { + keepfile = file.getAttribute("ovf:id"); + } else { + toremove.add(file); + } + } + String keepdisk = null; + for (int i = 0; i < disks.getLength(); i++) { + Element disk = (Element)disks.item(i); + String fileRef = disk.getAttribute("ovf:fileRef"); + if (keepfile == null) { + s_logger.info("FATAL: OVA format error"); + } else if (keepfile.equals(fileRef)) { + keepdisk = disk.getAttribute("ovf:diskId"); + } else { + toremove.add(disk); + } + } + for (int k = 0; k < items.getLength(); k++) { + Element item = (Element)items.item(k); + NodeList cn = item.getChildNodes(); + for (int l = 0; l < cn.getLength(); l++) { + if (cn.item(l) instanceof Element) { + Element el = (Element)cn.item(l); + if ("rasd:HostResource".equals(el.getNodeName()) + && !(el.getTextContent().contains("ovf:/file/" + keepdisk) || el.getTextContent().contains("ovf:/disk/" + keepdisk))) { + toremove.add(item); + break; + } + } + } + } + + for (Element rme : toremove) { + if (rme.getParentNode() != null) { + rme.getParentNode().removeChild(rme); + } + } + + final StringWriter writer = new StringWriter(); + final StreamResult result = new StreamResult(writer); + final TransformerFactory tf = TransformerFactory.newInstance(); + final Transformer transformer = tf.newTransformer(); + final DOMSource domSource = new DOMSource(doc); + transformer.transform(domSource, result); + PrintWriter outfile = new PrintWriter(newOvfFilePath); + outfile.write(writer.toString()); + outfile.close(); + } catch (SAXException | IOException | ParserConfigurationException | TransformerException e) { + s_logger.info("Unexpected exception caught while removing network elements from OVF:" + e.getMessage(), e); + throw new CloudRuntimeException(e); + } + } + + OVFDisk getDisk(String fileRef, List disks) { + for (OVFDisk disk : disks) { + if (disk._fileRef.equals(fileRef)) { + return disk; + } + } + return null; + } + + class OVFFile { + // + public String _href; + public String _id; + public Long _size; + public boolean _bootable; + public boolean isIso; + } + + class OVFDisk { + // + public Long _capacity; + public String _capacityUnit; + public String _diskId; + public String _fileRef; + public Long _populatedSize; + public OVFDiskController _controller; + } + + class OVFDiskController { + public String _name; + public String _subType; + } +} diff --git a/api/src/com/cloud/agent/api/to/DatadiskTO.java b/api/src/com/cloud/agent/api/to/DatadiskTO.java new file mode 100644 index 00000000000..1d3f91e25db --- /dev/null +++ b/api/src/com/cloud/agent/api/to/DatadiskTO.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.agent.api.to; + +public class DatadiskTO { + private String path; + private long virtualSize; + private long fileSize; + boolean bootable; + private String diskId; + private boolean isIso; + private String diskController; + private String diskControllerSubType; + + public DatadiskTO() { + } + + public DatadiskTO(String path, long virtualSize, long fileSize, boolean bootable) { + this.path = path; + this.virtualSize = virtualSize; + this.fileSize = fileSize; + this.bootable = bootable; + } + + public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, boolean isIso, boolean bootable, String controller, String controllerSubType) { + this.path = path; + this.virtualSize = virtualSize; + this.fileSize = fileSize; + this.bootable = bootable; + this.diskId = diskId; + this.isIso = isIso; + this.diskController = controller; + this.diskControllerSubType = controllerSubType; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public Long getVirtualSize() { + return virtualSize; + } + + public void setVirtualSize(Long virtualSize) { + this.virtualSize = virtualSize; + } + + public Long getFileSize() { + return fileSize; + } + + public boolean isBootable() { + return bootable; + } + + public String getDiskId() { + return diskId; + } + + public void setDiskId(String diskId) { + this.diskId = diskId; + } + + public boolean isIso() { + return isIso; + } + + public void setIso(boolean isIso) { + this.isIso = isIso; + } + + public String getDiskController() { + return diskController; + } + + public void setDiskController(String diskController) { + this.diskController = diskController; + } + + public String getDiskControllerSubType() { + return diskControllerSubType; + } + + public void setDiskControllerSubType(String diskControllerSubType) { + this.diskControllerSubType = diskControllerSubType; + } + +} \ No newline at end of file diff --git a/api/src/com/cloud/configuration/Resource.java b/api/src/com/cloud/configuration/Resource.java index 7ef1b0bb90e..0fd51dc2f17 100644 --- a/api/src/com/cloud/configuration/Resource.java +++ b/api/src/com/cloud/configuration/Resource.java @@ -37,6 +37,8 @@ public interface Resource { private String name; private ResourceOwnerType[] supportedOwners; private int ordinal; + public static final long bytesToKiB = 1024; + public static final long bytesToMiB = 1024 * 1024; public static final long bytesToGiB = 1024 * 1024 * 1024; ResourceType(String name, int ordinal, ResourceOwnerType... supportedOwners) { diff --git a/api/src/com/cloud/storage/Storage.java b/api/src/com/cloud/storage/Storage.java index f588aeaf4b3..9093dc34f14 100644 --- a/api/src/com/cloud/storage/Storage.java +++ b/api/src/com/cloud/storage/Storage.java @@ -113,7 +113,9 @@ public class Storage { SYSTEM, /* routing, system vm template */ BUILTIN, /* buildin template */ PERHOST, /* every host has this template, don't need to install it in secondary storage */ - USER /* User supplied template/iso */ + USER, /* User supplied template/iso */ + DATADISK, /* Template corresponding to a datadisk(non root disk) present in an OVA */ + ISODISK /* Template corresponding to a iso (non root disk) present in an OVA */ } public static enum StoragePoolType { diff --git a/api/src/com/cloud/template/VirtualMachineTemplate.java b/api/src/com/cloud/template/VirtualMachineTemplate.java index 54d61a4597b..564f3b987be 100644 --- a/api/src/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/com/cloud/template/VirtualMachineTemplate.java @@ -133,6 +133,8 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte boolean isDynamicallyScalable(); + Long getParentTemplateId(); + long getUpdatedCount(); void incrUpdatedCount(); diff --git a/api/src/com/cloud/vm/DiskProfile.java b/api/src/com/cloud/vm/DiskProfile.java index a37f7aaf57b..d9097748363 100644 --- a/api/src/com/cloud/vm/DiskProfile.java +++ b/api/src/com/cloud/vm/DiskProfile.java @@ -139,6 +139,10 @@ public class DiskProfile { return templateId; } + public void setTemplateId(Long templateId) { + this.templateId = templateId; + } + /** * @return disk offering id that the disk is based on. */ diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java index 178840bfe0b..74090ec40e6 100644 --- a/api/src/com/cloud/vm/UserVmService.java +++ b/api/src/com/cloud/vm/UserVmService.java @@ -50,6 +50,7 @@ import com.cloud.exception.VirtualMachineMigrationException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network.IpAddresses; +import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.storage.StoragePool; import com.cloud.template.VirtualMachineTemplate; @@ -197,6 +198,11 @@ public interface UserVmService { * @param dhcpOptionMap * - Maps the dhcp option code and the dhcp value to the network uuid * @return UserVm object if successful. + * @param dataDiskTemplateToDiskOfferingMap + * - Datadisk template to Disk offering Map + * an optional parameter that creates additional data disks for the virtual machine + * For each of the templates in the map, a data disk will be created from the corresponding + * disk offering obtained from the map * * @throws InsufficientCapacityException * if there is insufficient capacity to deploy the VM. @@ -210,7 +216,8 @@ public interface UserVmService { UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIp, Boolean displayVm, String keyboard, - List affinityGroupIdList, Map customParameter, String customId, Map> dhcpOptionMap) throws InsufficientCapacityException, + List affinityGroupIdList, Map customParameter, String customId, Map> dhcpOptionMap, + Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; /** @@ -271,6 +278,11 @@ public interface UserVmService { * @param customId * @param dhcpOptionMap * - Maps the dhcp option code and the dhcp value to the network uuid + * @param dataDiskTemplateToDiskOfferingMap + * - Datadisk template to Disk offering Map + * an optional parameter that creates additional data disks for the virtual machine + * For each of the templates in the map, a data disk will be created from the corresponding + * disk offering obtained from the map * @return UserVm object if successful. * * @throws InsufficientCapacityException @@ -285,7 +297,8 @@ public interface UserVmService { UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, - List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap) throws InsufficientCapacityException, + List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, + Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; /** @@ -344,6 +357,11 @@ public interface UserVmService { * @param customId * @param dhcpOptionMap * - Map that maps the DhcpOption code and their value on the Network uuid + * @param dataDiskTemplateToDiskOfferingMap + * - Datadisk template to Disk offering Map + * an optional parameter that creates additional data disks for the virtual machine + * For each of the templates in the map, a data disk will be created from the corresponding + * disk offering obtained from the map * @return UserVm object if successful. * * @throws InsufficientCapacityException @@ -358,7 +376,7 @@ public interface UserVmService { UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, - Map customParameters, String customId, Map> dhcpOptionMap) + Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index cc7780045cd..d9090b89217 100644 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -79,6 +79,7 @@ public class ApiConstants { public static final String MIN_IOPS = "miniops"; public static final String MAX_IOPS = "maxiops"; public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve"; + public static final String DATADISK_OFFERING_LIST = "datadiskofferinglist"; public static final String DESCRIPTION = "description"; public static final String DESTINATION_ZONE_ID = "destzoneid"; public static final String DETAILS = "details"; @@ -209,6 +210,7 @@ public class ApiConstants { public static final String PARAMS = "params"; public static final String PARENT_ID = "parentid"; public static final String PARENT_DOMAIN_ID = "parentdomainid"; + public static final String PARENT_TEMPLATE_ID = "parenttemplateid"; public static final String PASSWORD = "password"; public static final String SHOULD_UPDATE_PASSWORD = "update_passwd_on_host"; public static final String NEW_PASSWORD = "new_password"; diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index 7b33ebbea7a..4fb248cd105 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -286,6 +286,8 @@ public interface ResponseGenerator { Host findHostById(Long hostId); + DiskOffering findDiskOfferingById(Long diskOfferingId); + VpnUsersResponse createVpnUserResponse(VpnUser user); RemoteAccessVpnResponse createRemoteAccessVpnResponse(RemoteAccessVpn vpn); diff --git a/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index 772ca2749e7..e7d328495ed 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -72,9 +72,12 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "list templates by zoneId") private Long zoneId; - @Parameter(name=ApiConstants.SHOW_REMOVED, type=CommandType.BOOLEAN, description="show removed templates as well") + @Parameter(name = ApiConstants.SHOW_REMOVED, type = CommandType.BOOLEAN, description = "show removed templates as well") private Boolean showRemoved; + @Parameter(name = ApiConstants.PARENT_TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "list datadisk templates by parent template id", since = "4.4") + private Long parentTemplateId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -103,6 +106,10 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd { return (showRemoved != null ? showRemoved : false); } + public Long getParentTemplateId() { + return parentTemplateId; + } + public boolean listInReadyState() { Account account = CallContext.current().getCallingAccount(); diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 548a89d6240..bfe6b0d6c7f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -46,6 +46,7 @@ import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import com.cloud.event.EventTypes; @@ -58,6 +59,8 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network; import com.cloud.network.Network.IpAddresses; +import com.cloud.offering.DiskOffering; +import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; import com.cloud.utils.net.Dhcp; import com.cloud.utils.net.NetUtils; @@ -192,6 +195,10 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG + " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com") private Map dhcpOptionsNetworkList; + @Parameter(name = ApiConstants.DATADISK_OFFERING_LIST, type = CommandType.MAP, since = "4.11", description = "datadisk template to disk-offering mapping;" + + " an optional parameter used to create additional data disks from datadisk templates; can't be specified with diskOfferingId parameter") + private Map dataDiskTemplateToDiskOfferingList; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -417,10 +424,10 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG if (dhcpOptionsNetworkList != null && !dhcpOptionsNetworkList.isEmpty()) { Collection> paramsCollection = this.dhcpOptionsNetworkList.values(); - for(Map dhcpNetworkOptions : paramsCollection) { + for (Map dhcpNetworkOptions : paramsCollection) { String networkId = dhcpNetworkOptions.get(ApiConstants.NETWORK_ID); - if(networkId == null) { + if (networkId == null) { throw new IllegalArgumentException("No networkid specified when providing extra dhcp options."); } @@ -431,9 +438,9 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG if (key.startsWith(ApiConstants.DHCP_PREFIX)) { int dhcpOptionValue = Integer.parseInt(key.replaceFirst(ApiConstants.DHCP_PREFIX, "")); dhcpOptionsForNetwork.put(dhcpOptionValue, dhcpNetworkOptions.get(key)); - } else if (!key.equals(ApiConstants.NETWORK_ID)){ - Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key); - dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key)); + } else if (!key.equals(ApiConstants.NETWORK_ID)) { + Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key); + dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key)); } } @@ -443,6 +450,37 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG return dhcpOptionsMap; } + public Map getDataDiskTemplateToDiskOfferingMap() { + if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) { + throw new InvalidParameterValueException("diskofferingid paramter can't be specified along with datadisktemplatetodiskofferinglist parameter"); + } + if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) { + return new HashMap(); + } + + HashMap dataDiskTemplateToDiskOfferingMap = new HashMap(); + for (Object objDataDiskTemplates : dataDiskTemplateToDiskOfferingList.values()) { + HashMap dataDiskTemplates = (HashMap) objDataDiskTemplates; + Long dataDiskTemplateId; + DiskOffering dataDiskOffering = null; + VirtualMachineTemplate dataDiskTemplate= _entityMgr.findByUuid(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); + if (dataDiskTemplate == null) { + dataDiskTemplate = _entityMgr.findById(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); + if (dataDiskTemplate == null) + throw new InvalidParameterValueException("Unable to translate and find entity with datadisktemplateid " + dataDiskTemplates.get("datadisktemplateid")); + } + dataDiskTemplateId = dataDiskTemplate.getId(); + dataDiskOffering = _entityMgr.findByUuid(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); + if (dataDiskOffering == null) { + dataDiskOffering = _entityMgr.findById(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); + if (dataDiskOffering == null) + throw new InvalidParameterValueException("Unable to translate and find entity with diskofferingId " + dataDiskTemplates.get("diskofferingid")); + } + dataDiskTemplateToDiskOfferingMap.put(dataDiskTemplateId, dataDiskOffering); + } + return dataDiskTemplateToDiskOfferingMap; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java b/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java new file mode 100644 index 00000000000..b036cd48e87 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.serializer.Param; +import com.cloud.template.VirtualMachineTemplate; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = VirtualMachineTemplate.class) +@SuppressWarnings("unused") +public class ChildTemplateResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "the template ID") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "the template name") + private String name; + + @SerializedName(ApiConstants.SIZE) + @Param(description = "the size of the template") + private Integer size; + + @SerializedName("templatetype") + @Param(description = "the type of the template") + private String templateType; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public void setName(String name) { + this.name = name; + } + + public void setSize(Integer size) { + this.size = size; + } + + public void setTemplateType(String templateType) { + this.templateType = templateType; + } + +} diff --git a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java index ab2b8a12628..373198ed29f 100644 --- a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java @@ -174,7 +174,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements private Map details; @SerializedName(ApiConstants.BITS) - @Param(description="the processor bit size", since = "4.10") + @Param(description = "the processor bit size", since = "4.10") private int bits; @SerializedName(ApiConstants.SSHKEY_ENABLED) @@ -189,6 +189,14 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "KVM Only: true if template is directly downloaded to Primary Storage bypassing Secondary Storage") private Boolean directDownload; + @SerializedName("parenttemplateid") + @Param(description = "if Datadisk template, then id of the root disk template this template belongs to") + private String parentTemplateId; + + @SerializedName("childtemplates") + @Param(description = "if root disk template, then ids of the datas disk templates this template owns") + private Set childTemplates; + public TemplateResponse() { tags = new LinkedHashSet(); } @@ -374,4 +382,13 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements public Boolean getDirectDownload() { return directDownload; } + + public void setParentTemplateId(String parentTemplateId) { + this.parentTemplateId = parentTemplateId; + } + + public void setChildTemplates(Set childTemplateIds) { + this.childTemplates = childTemplateIds; + } + } diff --git a/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java new file mode 100644 index 00000000000..0f009f3d6ce --- /dev/null +++ b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import org.apache.cloudstack.storage.to.TemplateObjectTO; + +import com.cloud.agent.api.Answer; + +public class CreateDatadiskTemplateAnswer extends Answer { + private TemplateObjectTO dataDiskTemplate = null; + + public CreateDatadiskTemplateAnswer(TemplateObjectTO dataDiskTemplate) { + super(null); + this.dataDiskTemplate = dataDiskTemplate; + } + + public TemplateObjectTO getDataDiskTemplate() { + return dataDiskTemplate; + } + + public CreateDatadiskTemplateAnswer(String errMsg) { + super(null, false, errMsg); + } +} \ No newline at end of file diff --git a/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java new file mode 100644 index 00000000000..b87d170fe81 --- /dev/null +++ b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; + +public final class CreateDatadiskTemplateCommand extends Command { + private DataTO dataDiskTemplate; + private String path; + private long fileSize; + private boolean bootable; + private String diskId; + + public CreateDatadiskTemplateCommand(DataTO dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) { + super(); + this.dataDiskTemplate = dataDiskTemplate; + this.path = path; + this.fileSize = fileSize; + this.bootable = bootable; + this.diskId = diskId; + } + + protected CreateDatadiskTemplateCommand() { + super(); + } + + @Override + public boolean executeInSequence() { + return false; + } + + public DataTO getDataDiskTemplate() { + return dataDiskTemplate; + } + + public String getPath() { + return path; + } + + public long getFileSize() { + return fileSize; + } + + public boolean getBootable() { + return bootable; + } + + public String getDiskId() { + return diskId; + } + + public void setDiskId(String diskId) { + this.diskId = diskId; + } + +} \ No newline at end of file diff --git a/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java b/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java new file mode 100644 index 00000000000..58922175c55 --- /dev/null +++ b/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import java.util.ArrayList; +import java.util.List; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DatadiskTO; + +public class GetDatadisksAnswer extends Answer { + List dataDiskDetails = new ArrayList(); + + public GetDatadisksAnswer(List dataDiskDetails) { + super(null); + this.dataDiskDetails = dataDiskDetails; + } + + public List getDataDiskDetails() { + return dataDiskDetails; + } + + public GetDatadisksAnswer(String errMsg) { + super(null, false, errMsg); + } +} \ No newline at end of file diff --git a/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java b/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java new file mode 100644 index 00000000000..0e22ea25e78 --- /dev/null +++ b/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.storage; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; + +public final class GetDatadisksCommand extends Command { + private DataTO data; + + public GetDatadisksCommand(DataTO data) { + super(); + this.data = data; + } + + protected GetDatadisksCommand() { + super(); + } + + @Override + public boolean executeInSequence() { + return false; + } + + public DataTO getData() { + return data; + } + +} \ No newline at end of file diff --git a/core/src/com/cloud/storage/template/OVAProcessor.java b/core/src/com/cloud/storage/template/OVAProcessor.java index 31523b6e4d0..08087bfb9c8 100644 --- a/core/src/com/cloud/storage/template/OVAProcessor.java +++ b/core/src/com/cloud/storage/template/OVAProcessor.java @@ -20,6 +20,7 @@ package com.cloud.storage.template; import java.io.File; +import java.util.List; import java.util.Map; import javax.naming.ConfigurationException; @@ -28,10 +29,14 @@ import javax.xml.parsers.DocumentBuilderFactory; import org.apache.log4j.Logger; import org.w3c.dom.Document; import org.w3c.dom.Element; +import org.w3c.dom.NodeList; +import com.cloud.agent.api.storage.OVFHelper; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.script.Script; @@ -64,6 +69,7 @@ public class OVAProcessor extends AdapterBase implements Processor { Script command = new Script("tar", 0, s_logger); command.add("--no-same-owner"); + command.add("--no-same-permissions"); command.add("-xf", templateFileFullPath); command.setWorkDir(templateFile.getParent()); String result = command.execute(); @@ -72,12 +78,35 @@ public class OVAProcessor extends AdapterBase implements Processor { throw new InternalErrorException("failed to untar OVA package"); } + command = new Script("chmod", 0, s_logger); + command.add("-R"); + command.add("666", templatePath); + result = command.execute(); + if (result != null) { + s_logger.warn("Unable to set permissions for files in " + templatePath + " due to " + result); + } + command = new Script("chmod", 0, s_logger); + command.add("777", templatePath); + result = command.execute(); + if (result != null) { + s_logger.warn("Unable to set permissions for " + templatePath + " due to " + result); + } + FormatInfo info = new FormatInfo(); info.format = ImageFormat.OVA; info.filename = templateName + "." + ImageFormat.OVA.getFileExtension(); info.size = _storage.getSize(templateFilePath); info.virtualSize = getTemplateVirtualSize(templatePath, info.filename); + //vaidate ova + String ovfFile = getOVFFilePath(templateFileFullPath); + try { + OVFHelper ovfHelper = new OVFHelper(); + List disks = ovfHelper.getOVFVolumeInfo(ovfFile); + } catch (Exception e) { + s_logger.info("The ovf file " + ovfFile + " is invalid ", e); + throw new InternalErrorException("OVA package has bad ovf file " + e.getMessage(), e); + } // delete original OVA file // templateFile.delete(); return info; @@ -112,22 +141,44 @@ public class OVAProcessor extends AdapterBase implements Processor { Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0); virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity")); String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); - if ((virtualSize != 0) && (allocationUnits != null)) { - long units = 1; - if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { - units = 1024; - } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { - units = 1024 * 1024; - } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { - units = 1024 * 1024 * 1024; - } - virtualSize = virtualSize * units; - } else { - throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFileName); - } + virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFileName); return virtualSize; } catch (Exception e) { - String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e; + String msg = "getTemplateVirtualSize: Unable to parse OVF XML document " + templatePath + " to get the virtual disk " + templateName + " size due to " + e; + s_logger.error(msg); + throw new InternalErrorException(msg); + } + } + + public Pair getDiskDetails(String ovfFilePath, String diskName) throws InternalErrorException { + long virtualSize = 0; + long fileSize = 0; + String fileId = null; + try { + Document ovfDoc = null; + ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath)); + NodeList disks = ovfDoc.getElementsByTagName("Disk"); + NodeList files = ovfDoc.getElementsByTagName("File"); + for (int j = 0; j < files.getLength(); j++) { + Element file = (Element)files.item(j); + if (file.getAttribute("ovf:href").equals(diskName)) { + fileSize = Long.parseLong(file.getAttribute("ovf:size")); + fileId = file.getAttribute("ovf:id"); + break; + } + } + for (int i = 0; i < disks.getLength(); i++) { + Element disk = (Element)disks.item(i); + if (disk.getAttribute("ovf:fileRef").equals(fileId)) { + virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity")); + String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); + virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFilePath); + break; + } + } + return new Pair(virtualSize, fileSize); + } catch (Exception e) { + String msg = "getDiskDetails: Unable to parse OVF XML document " + ovfFilePath + " to get the virtual disk " + diskName + " size due to " + e; s_logger.error(msg); throw new InternalErrorException(msg); } diff --git a/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java index 9ee90b72ddd..4a3d058176c 100644 --- a/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java +++ b/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java @@ -44,6 +44,8 @@ public class TemplateObjectTO implements DataTO { private Long size; private Long physicalSize; private Hypervisor.HypervisorType hypervisorType; + private boolean bootable; + private String uniqueName; public TemplateObjectTO() { @@ -73,6 +75,8 @@ public class TemplateObjectTO implements DataTO { this.accountId = template.getAccountId(); this.name = template.getUniqueName(); this.format = template.getFormat(); + this.uniqueName = template.getUniqueName(); + this.size = template.getSize(); if (template.getDataStore() != null) { this.imageDataStore = template.getDataStore().getTO(); } @@ -215,6 +219,22 @@ public class TemplateObjectTO implements DataTO { this.physicalSize = physicalSize; } + public void setIsBootable(boolean bootable) { + this.bootable = bootable; + } + + public boolean isBootable() { + return bootable; + } + + public String getUniqueName() { + return this.uniqueName; + } + + public void setUniqueName(String uniqueName) { + this.uniqueName = uniqueName; + } + @Override public String toString() { return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString(); diff --git a/engine/api/src/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/com/cloud/vm/VirtualMachineManager.java index a20fc7b88d7..31e668640d9 100644 --- a/engine/api/src/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/com/cloud/vm/VirtualMachineManager.java @@ -36,6 +36,7 @@ import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network; +import com.cloud.offering.DiskOffering; import com.cloud.offering.DiskOfferingInfo; import com.cloud.offering.ServiceOffering; import com.cloud.storage.StoragePool; @@ -74,11 +75,12 @@ public interface VirtualMachineManager extends Manager { * @param auxiliaryNetworks additional networks to attach the VMs to. * @param plan How to deploy the VM. * @param hyperType Hypervisor type + * @param datadiskTemplateToDiskOfferingMap data disks to be created from datadisk templates and attached to the VM * @throws InsufficientCapacityException If there are insufficient capacity to deploy this vm. */ void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, DiskOfferingInfo rootDiskOfferingInfo, List dataDiskOfferings, LinkedHashMap> auxiliaryNetworks, DeploymentPlan plan, - HypervisorType hyperType, Map> extraDhcpOptions) throws InsufficientCapacityException; + HypervisorType hyperType, Map> extraDhcpOptions, Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException; void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, LinkedHashMap> networkProfiles, DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException; diff --git a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 451995fc71d..fa6f2c6fb9d 100644 --- a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -92,7 +92,8 @@ public interface VolumeOrchestrationService { void destroyVolume(Volume volume); - DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner); + DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, + Account owner, Long deviceId); VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException; diff --git a/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java index 871745ec601..5a18b3cab9e 100644 --- a/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java +++ b/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import com.cloud.deploy.DeploymentPlan; import com.cloud.exception.InsufficientCapacityException; import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; import com.cloud.vm.NicProfile; @Path("orchestration") @@ -65,7 +66,8 @@ public interface OrchestrationService { @QueryParam("cpu") int cpu, @QueryParam("speed") int speed, @QueryParam("ram") long memory, @QueryParam("disk-size") Long diskSize, @QueryParam("compute-tags") List computeTags, @QueryParam("root-disk-tags") List rootDiskTags, @QueryParam("network-nic-map") Map networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan, - @QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map> extraDhcpOptionMap) throws InsufficientCapacityException; + @QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map> extraDhcpOptionMap, + @QueryParam("datadisktemplate-diskoffering-map") Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException; @POST VirtualMachineEntity createVirtualMachineFromScratch(@QueryParam("id") String id, @QueryParam("owner") String owner, @QueryParam("iso-id") String isoId, diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index ff204c663c0..fc8a769e81f 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -68,4 +68,6 @@ public interface TemplateService { void associateTemplateToZone(long templateId, Long zoneId); void associateCrosszoneTemplatesToZone(long dcId); + + AsyncCallFuture createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable); } diff --git a/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java b/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java index 461bd50ab0e..5a0be952b39 100644 --- a/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java +++ b/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java @@ -18,17 +18,21 @@ */ package org.apache.cloudstack.storage.image.datastore; +import java.util.List; import java.util.Set; import com.cloud.storage.Upload; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import com.cloud.storage.ImageStore; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.agent.api.to.DatadiskTO; public interface ImageStoreEntity extends DataStore, ImageStore { TemplateInfo getTemplate(long templateId); @@ -46,4 +50,8 @@ public interface ImageStoreEntity extends DataStore, ImageStore { String createEntityExtractUrl(String installPath, ImageFormat format, DataObject dataObject); // get the entity download URL void deleteExtractUrl(String installPath, String url, Upload.Type volume); + + List getDataDiskTemplates(DataObject obj); + + Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback callback); } diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 9edf37985d9..9916728f2db 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -29,6 +29,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.Executors; @@ -159,6 +160,7 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.RulesManager; +import com.cloud.offering.DiskOffering; import com.cloud.offering.DiskOfferingInfo; import com.cloud.offering.ServiceOffering; import com.cloud.org.Cluster; @@ -170,6 +172,7 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; @@ -391,7 +394,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @DB public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, final DiskOfferingInfo rootDiskOfferingInfo, final List dataDiskOfferings, - final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions) + final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); @@ -430,7 +433,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (template.getFormat() == ImageFormat.ISO) { volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), - rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner); + rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null); } else if (template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { @@ -441,7 +444,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (dataDiskOfferings != null) { for (final DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) { volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(), - dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner); + dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null); + } + } + if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) { + int diskNumber = 1; + for (Entry dataDiskTemplateToDiskOfferingMap : datadiskTemplateToDiskOfferingMap.entrySet()) { + DiskOffering diskOffering = dataDiskTemplateToDiskOfferingMap.getValue(); + long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024); + VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey()); + volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId() + "-" + String.valueOf(diskNumber), diskOffering, diskOfferingSize, null, null, + vmFinal, dataDiskTemplate, owner, Long.valueOf(diskNumber)); + diskNumber++; } } } @@ -455,7 +469,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, final LinkedHashMap> networks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException { - allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList(), networks, plan, hyperType, null); + allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList(), networks, plan, hyperType, null, null); } private VirtualMachineGuru getVmGuru(final VirtualMachine vm) { @@ -880,9 +894,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()); final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn()); - } DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx); if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) { if (s_logger.isDebugEnabled()) { @@ -1027,9 +1038,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn()); - } _networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx); if (vm.getHypervisorType() != HypervisorType.BareMetal) { volumeMgr.prepare(vmProfile, dest); @@ -4776,6 +4784,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); } catch (CloudRuntimeException e){ + e.printStackTrace(); s_logger.info("Caught CloudRuntimeException, returning job failed " + e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance"); return new Pair(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java index e588431b6b6..91e9b6f57bd 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import javax.inject.Inject; @@ -45,6 +46,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.DiskOffering; import com.cloud.offering.DiskOfferingInfo; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -155,7 +157,7 @@ public class CloudOrchestrator implements OrchestrationService { @Override public VirtualMachineEntity createVirtualMachine(String id, String owner, String templateId, String hostName, String displayName, String hypervisor, int cpu, int speed, long memory, Long diskSize, List computeTags, List rootDiskTags, Map networkNicMap, DeploymentPlan plan, - Long rootDiskSize, Map> extraDhcpOptionMap) throws InsufficientCapacityException { + Long rootDiskSize, Map> extraDhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, // vmEntityManager); @@ -233,8 +235,20 @@ public class CloudOrchestrator implements OrchestrationService { dataDiskOfferings.add(dataDiskOfferingInfo); } + if (dataDiskTemplateToDiskOfferingMap != null && !dataDiskTemplateToDiskOfferingMap.isEmpty()) { + for (Entry datadiskTemplateToDiskOffering : dataDiskTemplateToDiskOfferingMap.entrySet()) { + DiskOffering diskOffering = datadiskTemplateToDiskOffering.getValue(); + if (diskOffering == null) { + throw new InvalidParameterValueException("Unable to find disk offering " + vm.getDiskOfferingId()); + } + if (diskOffering.getDiskSize() == 0) { // Custom disk offering is not supported for volumes created from datadisk templates + throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter."); + } + } + } + _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan, - hypervisorType, extraDhcpOptionMap); + hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap); return vmEntity; } @@ -299,7 +313,7 @@ public class CloudOrchestrator implements OrchestrationService { HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor); - _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap); + _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap, null); return vmEntity; } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index ed1e3e06cf9..a2471f6611f 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -654,7 +654,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } @Override - public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner) { + public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId) { if (size == null) { size = offering.getDiskSize(); } else { @@ -679,13 +679,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati vol.setInstanceId(vm.getId()); } - if (type.equals(Type.ROOT)) { + if (deviceId != null) { + vol.setDeviceId(deviceId); + } else if (type.equals(Type.ROOT)) { vol.setDeviceId(0l); } else { vol.setDeviceId(1l); } if (template.getFormat() == ImageFormat.ISO) { vol.setIsoId(template.getId()); + } else if (template.getTemplateType().equals(Storage.TemplateType.DATADISK)) { + vol.setTemplateId(template.getId()); } // display flag matters only for the User vms if (vm.getType() == VirtualMachine.Type.User) { @@ -1252,7 +1256,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati StoragePool pool = dest.getStorageForDisks().get(vol); destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } - if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) { newVol = vol; } else { @@ -1362,9 +1365,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); - } List tasks = getTasks(vols, dest.getStorageForDisks(), vm); Volume vol = null; @@ -1395,6 +1395,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati pool = (StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary); vol = result.first(); } + VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); DataTO volTO = volumeInfo.getTO(); DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), diff --git a/engine/schema/resources/META-INF/db/schema-41000to41100.sql b/engine/schema/resources/META-INF/db/schema-41000to41100.sql index 27a2b261147..f02d8f152cb 100644 --- a/engine/schema/resources/META-INF/db/schema-41000to41100.sql +++ b/engine/schema/resources/META-INF/db/schema-41000to41100.sql @@ -34,109 +34,6 @@ where or service in ('NetworkACL') ) ); - ---Alter view template_view - -DROP VIEW IF EXISTS `cloud`.`template_view`; -CREATE VIEW `template_view` AS - SELECT - `vm_template`.`id` AS `id`, - `vm_template`.`uuid` AS `uuid`, - `vm_template`.`unique_name` AS `unique_name`, - `vm_template`.`name` AS `name`, - `vm_template`.`public` AS `public`, - `vm_template`.`featured` AS `featured`, - `vm_template`.`type` AS `type`, - `vm_template`.`hvm` AS `hvm`, - `vm_template`.`bits` AS `bits`, - `vm_template`.`url` AS `url`, - `vm_template`.`format` AS `format`, - `vm_template`.`created` AS `created`, - `vm_template`.`checksum` AS `checksum`, - `vm_template`.`display_text` AS `display_text`, - `vm_template`.`enable_password` AS `enable_password`, - `vm_template`.`dynamically_scalable` AS `dynamically_scalable`, - `vm_template`.`state` AS `template_state`, - `vm_template`.`guest_os_id` AS `guest_os_id`, - `guest_os`.`uuid` AS `guest_os_uuid`, - `guest_os`.`display_name` AS `guest_os_name`, - `vm_template`.`bootable` AS `bootable`, - `vm_template`.`prepopulate` AS `prepopulate`, - `vm_template`.`cross_zones` AS `cross_zones`, - `vm_template`.`hypervisor_type` AS `hypervisor_type`, - `vm_template`.`extractable` AS `extractable`, - `vm_template`.`template_tag` AS `template_tag`, - `vm_template`.`sort_key` AS `sort_key`, - `vm_template`.`removed` AS `removed`, - `vm_template`.`enable_sshkey` AS `enable_sshkey`, - `source_template`.`id` AS `source_template_id`, - `source_template`.`uuid` AS `source_template_uuid`, - `account`.`id` AS `account_id`, - `account`.`uuid` AS `account_uuid`, - `account`.`account_name` AS `account_name`, - `account`.`type` AS `account_type`, - `domain`.`id` AS `domain_id`, - `domain`.`uuid` AS `domain_uuid`, - `domain`.`name` AS `domain_name`, - `domain`.`path` AS `domain_path`, - `projects`.`id` AS `project_id`, - `projects`.`uuid` AS `project_uuid`, - `projects`.`name` AS `project_name`, - `data_center`.`id` AS `data_center_id`, - `data_center`.`uuid` AS `data_center_uuid`, - `data_center`.`name` AS `data_center_name`, - `launch_permission`.`account_id` AS `lp_account_id`, - `template_store_ref`.`store_id` AS `store_id`, - `image_store`.`scope` AS `store_scope`, - `template_store_ref`.`state` AS `state`, - `template_store_ref`.`download_state` AS `download_state`, - `template_store_ref`.`download_pct` AS `download_pct`, - `template_store_ref`.`error_str` AS `error_str`, - `template_store_ref`.`size` AS `size`, - `template_store_ref`.physical_size AS `physical_size`, - `template_store_ref`.`destroyed` AS `destroyed`, - `template_store_ref`.`created` AS `created_on_store`, - `vm_template_details`.`name` AS `detail_name`, - `vm_template_details`.`value` AS `detail_value`, - `resource_tags`.`id` AS `tag_id`, - `resource_tags`.`uuid` AS `tag_uuid`, - `resource_tags`.`key` AS `tag_key`, - `resource_tags`.`value` AS `tag_value`, - `resource_tags`.`domain_id` AS `tag_domain_id`, - `domain`.`uuid` AS `tag_domain_uuid`, - `domain`.`name` AS `tag_domain_name`, - `resource_tags`.`account_id` AS `tag_account_id`, - `account`.`account_name` AS `tag_account_name`, - `resource_tags`.`resource_id` AS `tag_resource_id`, - `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, - `resource_tags`.`resource_type` AS `tag_resource_type`, - `resource_tags`.`customer` AS `tag_customer`, - CONCAT(`vm_template`.`id`, - '_', - IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair` - FROM - ((((((((((((`vm_template` - JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`))) - JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`))) - JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`))) - LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`))) - LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`))) - LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`))) - LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`) - AND (`template_store_ref`.`store_role` = 'Image') - AND (`template_store_ref`.`destroyed` = 0)))) - LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`) - AND (`template_store_ref`.`store_id` IS NOT NULL) - AND (`image_store`.`id` = `template_store_ref`.`store_id`)))) - LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`) - AND ISNULL(`template_store_ref`.`store_id`) - AND ISNULL(`template_zone_ref`.`removed`)))) - LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`) - OR (`template_zone_ref`.`zone_id` = `data_center`.`id`)))) - LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`))) - LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`) - AND ((`resource_tags`.`resource_type` = 'Template') - OR (`resource_tags`.`resource_type` = 'ISO'))))); UPDATE `cloud`.`configuration` SET value = '600', default_value = '600' WHERE category = 'Advanced' AND name = 'router.aggregation.command.each.timeout'; @@ -302,15 +199,13 @@ CREATE VIEW `cloud`.`host_view` AS `cloud`.`user` ON `user`.`uuid` = `last_annotation_view`.`user_uuid`; -- End Of Annotations specific changes - -- Out-of-band management driver for nested-cloudstack ALTER TABLE `cloud`.`oobm` MODIFY COLUMN port VARCHAR(255); - -- CLOUDSTACK-9902: Console proxy SSL toggle INSERT IGNORE INTO `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`, `default_value`, `is_dynamic`) VALUES ('Console Proxy', 'DEFAULT', 'AgentManager', 'consoleproxy.sslEnabled', 'false', 'Enable SSL for console proxy', 'false', 0); --- CLOUDSTACK-9859: Retirement of midonet plugin (final removal) +-- CLOUDSTACK-9859: Retirement of midonet plugin (final removal) delete from `cloud`.`configuration` where name in ('midonet.apiserver.address', 'midonet.providerrouter.id'); -- CLOUDSTACK-9972: Enhance listVolumes API @@ -318,7 +213,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Premium', 'DEFAULT', 'manage DROP VIEW IF EXISTS `cloud`.`volume_view`; CREATE VIEW `cloud`.`volume_view` AS - select + SELECT volumes.id, volumes.uuid, volumes.name, @@ -464,21 +359,17 @@ CREATE TABLE IF NOT EXISTS `cloud`.`nic_extra_dhcp_options` ( -- Add new OS versions -- Add XenServer 7.1 and 7.2 hypervisor capabilities - INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) values (UUID(), 'XenServer', '7.1.0', 500, 13, 1); INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) values (UUID(), 'XenServer', '7.2.0', 500, 13, 1); -- Add XenServer 7.0 support for windows 10 - INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows 10 (64-bit)', 258, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows 10 (32-bit)', 257, now(), 0); -- Add XenServer 7.1 hypervisor guest OS mappings (copy 7.0.0) - INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.0.0'; -- Add XenServer 7.1 hypervisor guest OS (see https://docs.citrix.com/content/dam/docs/en-us/xenserver/7-1/downloads/xenserver-7-1-release-notes.pdf) - INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2016 (64-bit)', 259, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP4', 187, now(), 0); INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0); @@ -487,7 +378,6 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervi INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Linux 7', 247, now(), 0); -- Add XenServer 7.2 hypervisor guest OS mappings (copy 7.1.0 & remove Windows Vista, Windows XP, Windows 2003, CentOS 4.x, RHEL 4.xS, LES 10 (all versions) as per XenServer 7.2 Release Notes) - INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.2.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0' AND guest_os_id not in (1,2,3,4,56,101,56,58,93,94,50,51,87,88,89,90,91,92,26,27,28,29,40,41,42,43,44,45,96,97,107,108,109,110,151,152,153); -- Add table to track primary storage in use for snapshots @@ -524,11 +414,16 @@ ADD COLUMN `forsystemvms` TINYINT(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if ALTER TABLE `cloud`.`op_dc_ip_address_alloc` ADD COLUMN `vlan` INT(10) UNSIGNED NULL COMMENT 'Vlan the management network range is on'; +-- CLOUDSTACK-4757: Support multidisk OVA +ALTER TABLE `cloud`.`vm_template` ADD COLUMN `parent_template_id` bigint(20) unsigned DEFAULT NULL COMMENT 'If datadisk template, then id of the root template this template belongs to'; + -- CLOUDSTACK-10146: Bypass Secondary Storage for KVM templates ALTER TABLE `cloud`.`vm_template` ADD COLUMN `direct_download` TINYINT(1) DEFAULT '0' COMMENT 'Indicates if Secondary Storage is bypassed and template is downloaded to Primary Storage'; -CREATE OR REPLACE VIEW `template_view` AS +-- Changes to template_view for both multidisk OVA and bypass secondary storage for KVM templates +DROP VIEW IF EXISTS `cloud`.`template_view`; +CREATE VIEW `cloud`.`template_view` AS SELECT `vm_template`.`id` AS `id`, `vm_template`.`uuid` AS `uuid`, @@ -559,6 +454,8 @@ CREATE OR REPLACE VIEW `template_view` AS `vm_template`.`sort_key` AS `sort_key`, `vm_template`.`removed` AS `removed`, `vm_template`.`enable_sshkey` AS `enable_sshkey`, + `parent_template`.`id` AS `parent_template_id`, + `parent_template`.`uuid` AS `parent_template_uuid`, `source_template`.`id` AS `source_template_id`, `source_template`.`uuid` AS `source_template_uuid`, `account`.`id` AS `account_id`, @@ -606,7 +503,7 @@ CREATE OR REPLACE VIEW `template_view` AS IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`, `vm_template`.`direct_download` AS `direct_download` FROM - ((((((((((((`vm_template` + (((((((((((((`vm_template` JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`))) JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`))) JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`))) @@ -616,6 +513,7 @@ CREATE OR REPLACE VIEW `template_view` AS LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`) AND (`template_store_ref`.`store_role` = 'Image') AND (`template_store_ref`.`destroyed` = 0)))) + LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`))) LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`) AND (`template_store_ref`.`store_id` IS NOT NULL) AND (`image_store`.`id` = `template_store_ref`.`store_id`)))) diff --git a/engine/schema/src/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/com/cloud/storage/VMTemplateVO.java index e31e54d7365..d206835a28a 100644 --- a/engine/schema/src/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/com/cloud/storage/VMTemplateVO.java @@ -149,6 +149,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name = "direct_download") private boolean directDownload; + @Column(name = "parent_template_id") + private Long parentTemplateId; + @Override public String getUniqueName() { return uniqueName; @@ -617,4 +620,14 @@ public class VMTemplateVO implements VirtualMachineTemplate { public Class getEntityType() { return VirtualMachineTemplate.class; } + + @Override + public Long getParentTemplateId() { + return parentTemplateId; + } + + public void setParentTemplateId(Long parentTemplateId) { + this.parentTemplateId = parentTemplateId; + } + } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java index dccc902e912..c43a2ea4ee9 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java @@ -81,4 +81,6 @@ public interface VMTemplateDao extends GenericDao, StateDao< void loadDetails(VMTemplateVO tmpl); void saveDetails(VMTemplateVO tmpl); + + List listByParentTemplatetId(long parentTemplatetId); } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 2037b3d35f7..dd1f2fcf164 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -104,6 +104,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private GenericSearchBuilder CountTemplatesByAccount; // private SearchBuilder updateStateSearch; private SearchBuilder AllFieldsSearch; + protected SearchBuilder ParentTemplateIdSearch; @Inject ResourceTagDao _tagsDao; @@ -135,6 +136,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem return findOneIncludingRemovedBy(sc); } + @Override + public List listByParentTemplatetId(long parentTemplatetId) { + SearchCriteria sc = ParentTemplateIdSearch.create(); + sc.setParameters("parentTemplateId", parentTemplatetId); + sc.setParameters("state", VirtualMachineTemplate.State.Active); + return listBy(sc); + } + @Override public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags) { @@ -403,6 +412,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ); AllFieldsSearch.done(); + ParentTemplateIdSearch = createSearchBuilder(); + ParentTemplateIdSearch.and("parentTemplateId", ParentTemplateIdSearch.entity().getParentTemplateId(), SearchCriteria.Op.EQ); + ParentTemplateIdSearch.and("state", ParentTemplateIdSearch.entity().getState(), SearchCriteria.Op.EQ); + ParentTemplateIdSearch.done(); + return result; } diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index b2d924d3cf5..75c01c7442c 100644 --- a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -692,4 +692,4 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { } } -} +} \ No newline at end of file diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 7a1daba910d..9471fad84d6 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -444,7 +444,6 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { String errMsg = null; try { s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); - if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { answer = copyVolumeFromSnapshot(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 52191e19ade..45e3941a5ec 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -18,43 +18,19 @@ */ package org.apache.cloudstack.storage.image; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.storage.ListTemplateAnswer; -import com.cloud.agent.api.storage.ListTemplateCommand; -import com.cloud.alert.AlertManager; -import com.cloud.configuration.Config; -import com.cloud.configuration.Resource; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ImageStoreDetailsUtil; -import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.StoragePool; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.VMTemplateZoneVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplatePoolDao; -import com.cloud.storage.dao.VMTemplateZoneDao; -import com.cloud.storage.template.TemplateConstants; -import com.cloud.storage.template.TemplateProp; -import com.cloud.template.TemplateManager; -import com.cloud.template.VirtualMachineTemplate; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.ResourceLimitService; -import com.cloud.utils.UriUtils; -import com.cloud.utils.db.GlobalLock; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; +import java.io.File; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; @@ -90,16 +66,52 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import javax.inject.Inject; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; +import com.cloud.configuration.Resource; +import com.cloud.configuration.Resource.ResourceType; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ImageStoreDetailsUtil; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.template.TemplateConstants; +import com.cloud.storage.template.TemplateProp; +import com.cloud.template.TemplateManager; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.VmDetailConstants; +import com.google.common.base.Strings; @Component public class TemplateServiceImpl implements TemplateService { @@ -131,8 +143,6 @@ public class TemplateServiceImpl implements TemplateService { @Inject TemplateDataFactory _templateFactory; @Inject - VMTemplatePoolDao _tmpltPoolDao; - @Inject EndPointSelector _epSelector; @Inject TemplateManager _tmpltMgr; @@ -144,6 +154,8 @@ public class TemplateServiceImpl implements TemplateService { MessageBus _messageBus; @Inject ImageStoreDetailsUtil imageStoreDetailsUtil; + @Inject + TemplateDataFactory imageFactory; class TemplateOpContext extends AsyncRpcContext { final TemplateObject template; @@ -324,6 +336,17 @@ public class TemplateServiceImpl implements TemplateService { } } + for (Iterator iter = allTemplates.listIterator(); iter.hasNext();) { + VMTemplateVO child_template = iter.next(); + if (child_template.getParentTemplateId() != null) { + String uniqueName = child_template.getUniqueName(); + if (templateInfos.containsKey(uniqueName)) { + templateInfos.remove(uniqueName); + } + iter.remove(); + } + } + toBeDownloaded.addAll(allTemplates); final StateMachine2 stateMachine = VirtualMachineTemplate.State.getStateMachine(); @@ -678,6 +701,18 @@ public class TemplateServiceImpl implements TemplateService { return null; } + // Check if OVA contains additional data disks. If yes, create Datadisk templates for each of the additional datadisk present in the OVA + if (template.getFormat().equals(ImageFormat.OVA)) { + if (!createOvaDataDiskTemplates(template)) { + template.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + result.setResult(callbackResult.getResult()); + if (parentCallback != null) { + parentCallback.complete(result); + } + return null; + } + } + try { template.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed); } catch (Exception e) { @@ -694,6 +729,166 @@ public class TemplateServiceImpl implements TemplateService { return null; } + + protected boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) { + try { + // Get Datadisk template (if any) for OVA + List dataDiskTemplates = new ArrayList(); + ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore(); + dataDiskTemplates = tmpltStore.getDataDiskTemplates(parentTemplate); + int diskCount = 0; + VMTemplateVO templateVO = _templateDao.findById(parentTemplate.getId()); + _templateDao.loadDetails(templateVO); + DataStore imageStore = parentTemplate.getDataStore(); + Map details = parentTemplate.getDetails(); + if (details == null) { + details = templateVO.getDetails(); + if (details == null) { + details = new HashMap<>(); + } + } + for (DatadiskTO diskTemplate : dataDiskTemplates) { + if (!diskTemplate.isBootable()) { + createChildDataDiskTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); + if (!diskTemplate.isIso() && Strings.isNullOrEmpty(details.get(VmDetailConstants.DATA_DISK_CONTROLLER))){ + details.put(VmDetailConstants.DATA_DISK_CONTROLLER, getOvaDiskControllerDetails(diskTemplate, false)); + details.put(VmDetailConstants.DATA_DISK_CONTROLLER + diskTemplate.getDiskId(), getOvaDiskControllerDetails(diskTemplate, false)); + } + } else { + finalizeParentTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++); + if (Strings.isNullOrEmpty(VmDetailConstants.ROOT_DISK_CONTROLLER)) { + final String rootDiskController = getOvaDiskControllerDetails(diskTemplate, true); + if (!Strings.isNullOrEmpty(rootDiskController)) { + details.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); + } + } + } + } + templateVO.setDetails(details); + _templateDao.saveDetails(templateVO); + return true; + } catch (CloudRuntimeException | InterruptedException | ExecutionException e) { + return false; + } + } + + private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO template, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException { + // Make an entry in vm_template table + Storage.ImageFormat format = dataDiskTemplate.isIso() ? Storage.ImageFormat.ISO : template.getFormat(); + String suffix = dataDiskTemplate.isIso() ? "-IsoDiskTemplate-" : "-DataDiskTemplate-"; + TemplateType ttype = dataDiskTemplate.isIso() ? TemplateType.ISODISK : TemplateType.DATADISK; + final long templateId = _templateDao.getNextInSequence(Long.class, "id"); + long guestOsId = dataDiskTemplate.isIso() ? 1 : 0; + String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount; + VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(), + template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null, + null, false, false, false); + if (dataDiskTemplate.isIso()){ + templateVO.setUniqueName(templateName); + } + templateVO.setParentTemplateId(template.getId()); + templateVO.setSize(dataDiskTemplate.getVirtualSize()); + templateVO = _templateDao.persist(templateVO); + // Make sync call to create Datadisk templates in image store + TemplateApiResult result = null; + TemplateInfo dataDiskTemplateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore); + AsyncCallFuture future = createDatadiskTemplateAsync(parentTemplate, dataDiskTemplateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(), + dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable()); + result = future.get(); + if (result.isSuccess()) { + // Make an entry in template_zone_ref table + if (imageStore.getScope().getScopeType() == ScopeType.REGION) { + associateTemplateToZone(templateId, null); + } else if (imageStore.getScope().getScopeType() == ScopeType.ZONE) { + Long zoneId = ((ImageStoreEntity)imageStore).getDataCenterId(); + VMTemplateZoneVO templateZone = new VMTemplateZoneVO(zoneId, templateId, new Date()); + _vmTemplateZoneDao.persist(templateZone); + } + _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize()); + } else { + // Delete the Datadisk templates that were already created as they are now invalid + s_logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" + + " template download"); + TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore); + cleanupDatadiskTemplates(parentTemplateInfo); + } + return result.isSuccess(); + } + + private boolean finalizeParentTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO templateVO, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException, CloudRuntimeException { + TemplateInfo templateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore); + AsyncCallFuture templateFuture = createDatadiskTemplateAsync(parentTemplate, templateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(), + dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable()); + TemplateApiResult result = null; + result = templateFuture.get(); + if (!result.isSuccess()) { + s_logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" + + " template download"); + cleanupDatadiskTemplates(templateInfo); + } + return result.isSuccess(); + } + + private String getOvaDiskControllerDetails(DatadiskTO diskTemplate, boolean isRootDisk) { + String controller = diskTemplate.getDiskController() ; + String controllerSubType = diskTemplate.getDiskControllerSubType(); + + if (controller != null) { + controller = controller.toLowerCase(); + } + + if (controllerSubType != null) { + controllerSubType = controllerSubType.toLowerCase(); + } + + if (StringUtils.isNotBlank(controller)) { + if (controller.contains("ide")) { + return "ide"; + } + if (controller.contains("scsi")) { + if (StringUtils.isNotBlank(controllerSubType)) { + if (controllerSubType.equals("lsilogicsas")) { + return "lsisas1068"; + } + return controllerSubType; + } + if (!isRootDisk) { + return "scsi"; + } + } + if (!isRootDisk) { + return "osdefault"; + } + } + + // Root disk to use global setting vmware.root.disk.controller + if (!isRootDisk) { + return "scsi"; + } + return controller; + } + + private void cleanupDatadiskTemplates(TemplateInfo parentTemplateInfo) { + DataStore imageStore = parentTemplateInfo.getDataStore(); + List datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId()); + for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) { + s_logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); + AsyncCallFuture future = deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore)); + try { + TemplateApiResult result = future.get(); + if (!result.isSuccess()) { + s_logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); + break; + } + _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId()); + _resourceLimitMgr.decrementResourceCount(datadiskTemplateToDelete.getAccountId(), ResourceType.secondary_storage, datadiskTemplateToDelete.getSize()); + } catch (Exception e) { + s_logger.debug("Delete datadisk template failed", e); + throw new CloudRuntimeException("Delete template Failed", e); + } + } + } + @Override public AsyncCallFuture deleteTemplateAsync(TemplateInfo template) { TemplateObject to = (TemplateObject)template; @@ -1006,4 +1201,69 @@ public class TemplateServiceImpl implements TemplateService { } } } + + private class CreateDataDiskTemplateContext extends AsyncRpcContext { + private final DataObject dataDiskTemplate; + private final AsyncCallFuture future; + + public CreateDataDiskTemplateContext(AsyncCompletionCallback callback, DataObject dataDiskTemplate, AsyncCallFuture future) { + super(callback); + this.dataDiskTemplate = dataDiskTemplate; + this.future = future; + } + + public AsyncCallFuture getFuture() { + return this.future; + } + } + + @Override + public AsyncCallFuture createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) { + AsyncCallFuture future = new AsyncCallFuture(); + // Make an entry for disk template in template_store_ref table + DataStore store = parentTemplate.getDataStore(); + TemplateObject dataDiskTemplateOnStore; + if (!bootable) { + dataDiskTemplateOnStore = (TemplateObject)store.create(dataDiskTemplate); + dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested); + } else { + dataDiskTemplateOnStore = (TemplateObject) imageFactory.getTemplate(parentTemplate, store); + } + try { + CreateDataDiskTemplateContext context = new CreateDataDiskTemplateContext(null, dataDiskTemplateOnStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().createDatadiskTemplateCallback(null, null)).setContext(context); + ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore(); + tmpltStore.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, fileSize, bootable, caller); + } catch (CloudRuntimeException ex) { + dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + TemplateApiResult result = new TemplateApiResult(dataDiskTemplate); + result.setResult(ex.getMessage()); + if (future != null) { + future.complete(result); + } + } + return future; + } + + protected Void createDatadiskTemplateCallback(AsyncCallbackDispatcher callback, + CreateDataDiskTemplateContext context) { + DataObject dataDiskTemplate = context.dataDiskTemplate; + AsyncCallFuture future = context.getFuture(); + CreateCmdResult result = callback.getResult(); + TemplateApiResult dataDiskTemplateResult = new TemplateApiResult((TemplateObject)dataDiskTemplate); + try { + if (result.isSuccess()) { + dataDiskTemplate.processEvent(Event.OperationSuccessed, result.getAnswer()); + } else { + dataDiskTemplate.processEvent(Event.OperationFailed); + dataDiskTemplateResult.setResult(result.getResult()); + } + } catch (CloudRuntimeException e) { + s_logger.debug("Failed to process create template callback", e); + dataDiskTemplateResult.setResult(e.toString()); + } + future.complete(dataDiskTemplateResult); + return null; + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index 41ce5a230b0..f54673d2b61 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.image.store; import java.util.Date; import java.util.Set; +import java.util.List; import java.util.concurrent.ExecutionException; import javax.inject.Inject; @@ -42,7 +43,9 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.ImageStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.ImageStoreTO; - +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.storage.DataStoreRole; @@ -214,5 +217,14 @@ public class ImageStoreImpl implements ImageStoreEntity { driver.deleteEntityExtractUrl(this, installPath, url, entityType); } + @Override + public List getDataDiskTemplates(DataObject obj) { + return driver.getDataDiskTemplates(obj); + } + + @Override + public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback callback) { + return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback); + } } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java index db3cf6540d6..9b9b711c40c 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -34,6 +34,7 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.to.TemplateObjectTO; +import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -230,6 +231,16 @@ public class TemplateObject implements TemplateInfo { templateVO.setSize(newTemplate.getSize()); imageDao.update(templateVO.getId(), templateVO); } + } else if (answer instanceof CreateDatadiskTemplateAnswer) { + CreateDatadiskTemplateAnswer createAnswer = (CreateDatadiskTemplateAnswer)answer; + TemplateObjectTO dataDiskTemplate = createAnswer.getDataDiskTemplate(); + TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(getDataStore().getId(), dataDiskTemplate.getId()); + templateStoreRef.setInstallPath(dataDiskTemplate.getPath()); + templateStoreRef.setDownloadPercent(100); + templateStoreRef.setDownloadState(Status.DOWNLOADED); + templateStoreRef.setSize(dataDiskTemplate.getSize()); + templateStoreRef.setPhysicalSize(dataDiskTemplate.getPhysicalSize()); + templateStoreDao.update(templateStoreRef.getId(), templateStoreRef); } } objectInStoreMgr.update(this, event); @@ -458,6 +469,11 @@ public class TemplateObject implements TemplateInfo { return imageVO.getSourceTemplateId(); } + @Override + public Long getParentTemplateId() { + return imageVO.getParentTemplateId(); + } + @Override public String getTemplateTag() { return imageVO.getTemplateTag(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index 158ee18f911..64d74d74d20 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -71,6 +71,9 @@ public class DefaultEndPointSelector implements EndPointSelector { + "left join cluster_details cd on c.id=cd.cluster_id and cd.name='" + CapacityManager.StorageOperationsExcludeCluster.key() + "' " + "where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and s.pool_id = ? "; + private String findOneHypervisorHostInScopeByType = "select h.id from host h where h.status = 'Up' and h.hypervisor_type = ? "; + private String findOneHypervisorHostInScope = "select h.id from host h where h.status = 'Up' and h.hypervisor_type is not null "; + protected boolean moveBetweenPrimaryImage(DataStore srcStore, DataStore destStore) { DataStoreRole srcRole = srcStore.getRole(); DataStoreRole destRole = destStore.getRole(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 288fae4e1a1..1c6f1e70660 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -20,7 +20,9 @@ package org.apache.cloudstack.storage.image; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.Date; +import java.util.List; import java.util.Map; import javax.inject.Inject; @@ -34,6 +36,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; @@ -44,9 +47,13 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand; import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.GetDatadisksAnswer; +import com.cloud.agent.api.storage.GetDatadisksCommand; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataTO; import com.cloud.alert.AlertManager; @@ -54,10 +61,15 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.download.DownloadMonitor; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.dao.AccountDao; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.utils.net.Proxy; +import com.cloud.utils.exception.CloudRuntimeException; public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class); @@ -79,6 +91,14 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { VMTemplateZoneDao _vmTemplateZoneDao; @Inject AlertManager _alertMgr; + @Inject + VMTemplateDetailsDao _templateDetailsDao; + @Inject + DefaultEndPointSelector _defaultEpSelector; + @Inject + AccountDao _accountDao; + @Inject + ResourceLimitService _resourceLimitMgr; protected String _proxy = null; @@ -288,6 +308,58 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { } @Override - public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType){ + public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType) { + } + + @Override + public List getDataDiskTemplates(DataObject obj) { + List dataDiskDetails = new ArrayList(); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Get the data disks present in the OVA template"); + } + DataStore store = obj.getDataStore(); + GetDatadisksCommand cmd = new GetDatadisksCommand(obj.getTO()); + EndPoint ep = _defaultEpSelector.select(store); + Answer answer = null; + if (ep == null) { + String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + s_logger.error(errMsg); + answer = new Answer(cmd, false, errMsg); + } else { + answer = ep.sendMessage(cmd); + } + if (answer != null && answer.getResult()) { + GetDatadisksAnswer getDatadisksAnswer = (GetDatadisksAnswer)answer; + dataDiskDetails = getDatadisksAnswer.getDataDiskDetails(); // Details - Disk path, virtual size + } + else { + throw new CloudRuntimeException("Get Data disk command failed " + answer.getDetails()); + } + return dataDiskDetails; + } + + @Override + public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback callback) { + Answer answer = null; + String errMsg = null; + if (s_logger.isDebugEnabled()) { + s_logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + } + CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); + EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); + if (ep == null) { + errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + s_logger.error(errMsg); + answer = new Answer(cmd, false, errMsg); + } else { + answer = ep.sendMessage(cmd); + } + if (answer != null && !answer.getResult()) { + errMsg = answer.getDetails(); + } + CreateCmdResult result = new CreateCmdResult(null, answer); + result.setResult(errMsg); + callback.complete(result); + return null; } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java b/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java index e71529edb8f..70f40f6f5c0 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java @@ -19,14 +19,25 @@ package org.apache.cloudstack.storage.image; import com.cloud.storage.Upload; + +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.storage.Storage.ImageFormat; public interface ImageStoreDriver extends DataStoreDriver { String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject); void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType); + + List getDataDiskTemplates(DataObject obj); + + Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback callback); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java index db752fe8a1e..d3c1effd2a1 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java @@ -304,7 +304,11 @@ public class TemplateEntityImpl implements TemplateEntity { @Override public Date getUpdated() { - // TODO Auto-generated method stub + return null; + } + + @Override + public Long getParentTemplateId() { return null; } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java index b7f459227aa..ed9b39823f2 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -517,7 +517,11 @@ public class VolumeObject implements VolumeInfo { VolumeObjectTO newVol = (VolumeObjectTO)cpyAnswer.getNewData(); vol.setPath(newVol.getPath()); if (newVol.getSize() != null) { - vol.setSize(newVol.getSize()); + // Root disk resize may be requested where the original + // template size is less than the requested root disk size + if (vol.getSize() == null || vol.getSize() < newVol.getSize()) { + vol.setSize(newVol.getSize()); + } } if (newVol.getFormat() != null) { vol.setFormat(newVol.getFormat()); diff --git a/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java b/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java index 28dbcaa951a..91d8b611a0f 100644 --- a/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java +++ b/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java @@ -18,22 +18,31 @@ package com.cloud.cluster; import static org.junit.Assert.assertTrue; +import org.apache.cloudstack.framework.config.ConfigDepot; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.utils.component.ComponentLifecycle; @RunWith(MockitoJUnitRunner.class) public class ClusterServiceServletAdapterTest { + @Mock + private ClusterManager _manager; + @Mock + private ManagementServerHostDao _mshostDao; + @Mock + protected ConfigDepot _configDepot; + ClusterServiceServletAdapter clusterServiceServletAdapter; ClusterManagerImpl clusterManagerImpl; @Before - public void setup() throws IllegalArgumentException, - IllegalAccessException, NoSuchFieldException, SecurityException { + public void setup() throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException { clusterServiceServletAdapter = new ClusterServiceServletAdapter(); clusterManagerImpl = new ClusterManagerImpl(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 103765b9e11..a22410f4afe 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -165,7 +165,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String secStorageUrl = nfsStore.getUrl(); assert (secStorageUrl != null); String installPath = template.getPath(); - String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion); + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion()); String installFullPath = secondaryMountPoint + "/" + installPath; try { if (installFullPath.endsWith(".ova")) { @@ -203,7 +203,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String installPath = volume.getPath(); int index = installPath.lastIndexOf(File.separator); String volumeUuid = installPath.substring(index + 1); - String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion); + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion()); //The real volume path String volumePath = installPath + File.separator + volumeUuid + ".ova"; String installFullPath = secondaryMountPoint + "/" + installPath; @@ -280,8 +280,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { assert (morDs != null); DatastoreMO primaryStorageDatastoreMo = new DatastoreMO(context, morDs); - copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName, - cmd.getNfsVersion()); + copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName, cmd.getNfsVersion()); } else { s_logger.info("Template " + templateName + " has already been setup, skip the template setup process in primary storage"); } @@ -354,9 +353,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName()); } - snapshotBackupUuid = - backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid, - hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion()); + snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, + prevBackupUuid, hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion()); success = (snapshotBackupUuid != null); if (success) { @@ -410,8 +408,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName()); if (vmMo == null) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + - ", try within datacenter"); + s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName()); @@ -422,9 +419,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - Ternary result = - createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath, - hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion()); + Ternary result = createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath, + hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion()); return new CreatePrivateTemplateAnswer(cmd, true, null, result.first(), result.third(), result.second(), cmd.getUniqueName(), ImageFormat.OVA); @@ -481,9 +477,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { Pair result; if (cmd.toSecondaryStorage()) { - result = - copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL, - hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion()); + result = copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL, + hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion()); } else { StorageFilerTO poolTO = cmd.getPool(); @@ -496,8 +491,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath, - cmd.getNfsVersion()); + result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath, cmd.getNfsVersion()); deleteVolumeDirOnSecondaryStorage(volumeId, secondaryStorageURL, cmd.getNfsVersion()); } return new CopyVolumeAnswer(cmd, true, null, result.first(), result.second()); @@ -536,8 +530,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs); - details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid, - cmd.getNfsVersion()); + details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid, cmd.getNfsVersion()); if (details == null) { success = true; } @@ -553,13 +546,14 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName); } + // templateName: name in secondary storage // templateUuid: will be used at hypervisor layer - private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, - String templatePathAtSecondaryStorage, String templateName, String templateUuid, Integer nfsVersion) throws Exception { + private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, + String templateName, String templateUuid, Integer nfsVersion) throws Exception { - s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + - templatePathAtSecondaryStorage + ", templateName: " + templateName); + s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + + templatePathAtSecondaryStorage + ", templateName: " + templateName); String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl, nfsVersion); s_logger.info("Secondary storage mount point: " + secondaryMountPoint); @@ -593,9 +587,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { - String msg = - "Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + - ", templateName: " + templateName + ", templateUuid: " + templateUuid; + String msg = "Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + + ", templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } @@ -800,7 +793,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"),"UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8")); out.write("filename=" + templateName + ".ova"); out.newLine(); out.write("description="); @@ -840,7 +833,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { // TODO a bit ugly here BufferedWriter out = null; try { - out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"),"UTF-8")); + out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8")); out.write("ova.filename=" + templateName + ".ova"); out.newLine(); out.write("version=1.0"); @@ -860,8 +853,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, - String secStorageUrl, String snapshotBackupUuid, Integer nfsVersion) throws Exception { + private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, String secStorageUrl, + String snapshotBackupUuid, Integer nfsVersion) throws Exception { restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), snapshotBackupUuid, nfsVersion); return null; @@ -935,8 +928,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return backupUuid + "/" + backupUuid; } - private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName, - String workerVmName, Integer nfsVersion) throws Exception { + private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName, String workerVmName, + Integer nfsVersion) throws Exception { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsVersion); String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName; @@ -980,8 +973,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName, - long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception { + private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName, long volumeId, + String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception { String volumeFolder = String.valueOf(volumeId) + "/"; VirtualMachineMO workerVm = null; @@ -1019,8 +1012,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false); - exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, - hostService.getWorkerName(hyperHost.getContext(), cmd, 1), nfsVersion); + exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1), + nfsVersion); return new Pair(volumeFolder, exportName); } finally { @@ -1041,8 +1034,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return datastoreVolumePath; } - private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName, Integer nfsVersion) - throws Exception { + private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName, + Integer nfsVersion) throws Exception { String volumeFolder = String.valueOf(volumeId) + "/"; String newVolume = UUID.randomUUID().toString().replaceAll("-", ""); @@ -1098,7 +1091,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString()); // to be safe, physically test existence of the target OVA file if ((new File(exportDir + File.separator + ovaFileName)).exists()) { - s_logger.info("OVA file: " + ovaFileName +" is created and ready to extract."); + s_logger.info("OVA file: " + ovaFileName + " is created and ready to extract."); return ovaFileName; } else { String msg = exportDir + File.separator + ovaFileName + " is not created as expected"; @@ -1135,9 +1128,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return "snapshots/" + accountId + "/" + volumeId; } - private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost, - String fileName, ManagedObjectReference morDs, String exceptFileName) - throws Exception{ + private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost, String fileName, ManagedObjectReference morDs, String exceptFileName) + throws Exception { long size = 0; DatastoreMO dsMo = new DatastoreMO(context, morDs); HostDatastoreBrowserMO browserMo = dsMo.getHostDatastoreBrowserMO(); @@ -1187,8 +1179,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { for (ManagedObjectReference taskMor : tasks) { TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info")); - if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && - info.getName().equalsIgnoreCase("CreateSnapshot_Task")) { + if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) { if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) { s_logger.debug("There is already a VM snapshot task running, wait for it"); context.getVimClient().waitForTask(taskMor); @@ -1229,8 +1220,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmMo.removeSnapshot(vmSnapshotName, false); } } catch (Exception e1) { - s_logger.info("[ignored]" - + "error during snapshot remove: " + e1.getLocalizedMessage()); + s_logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage()); } return new CreateVMSnapshotAnswer(cmd, false, e.getMessage()); @@ -1259,8 +1249,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { baseName = baseName.substring(1, baseName.length() - 1); vmdkName = fullPath; // for managed storage, vmdkName == fullPath - } - else { + } else { vmdkName = fullPath.split("] ")[1]; if (vmdkName.endsWith(".vmdk")) { @@ -1283,8 +1272,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return mapNewDisk; } - private void setVolumeToPathAndSize(List volumeTOs, Map mapNewDisk, VmwareContext context, - VmwareHypervisorHost hyperHost, String vmName) throws Exception { + private void setVolumeToPathAndSize(List volumeTOs, Map mapNewDisk, VmwareContext context, VmwareHypervisorHost hyperHost, String vmName) + throws Exception { for (VolumeObjectTO volumeTO : volumeTOs) { String oldPath = volumeTO.getPath(); @@ -1296,8 +1285,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { // remove '[' and ']' baseName = oldPath.substring(1, oldPath.length() - 1); - } - else { + } else { baseName = VmwareHelper.trimSnapshotDeltaPostfix(volumeTO.getPath()); } @@ -1309,7 +1297,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { long size = getVMSnapshotChainSize(context, hyperHost, baseName + ".vmdk", morDs, newPath); size = getVMSnapshotChainSize(context, hyperHost, baseName + "-*.vmdk", morDs, newPath); - if (volumeTO.getVolumeType()== Volume.Type.ROOT) { + if (volumeTO.getVolumeType() == Volume.Type.ROOT) { // add memory snapshot size size += getVMSnapshotChainSize(context, hyperHost, vmName + "-*.vmsn", morDs, null); } @@ -1319,7 +1307,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store) throws Exception { + private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store) throws Exception { try { // if baseName equates to a datastore name, this should be managed storage ManagedObjectReference morDs = hyperHost.findDatastoreByName(baseName); @@ -1327,10 +1315,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if (morDs != null) { return morDs; } - } - catch (Exception ex) { - s_logger.info("[ignored]" - + "error getting managed object refference: " + ex.getLocalizedMessage()); + } catch (Exception ex) { + s_logger.info("[ignored]" + "error getting managed object refference: " + ex.getLocalizedMessage()); } // not managed storage, so use the standard way of getting a ManagedObjectReference for a datastore @@ -1408,8 +1394,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { for (ManagedObjectReference taskMor : tasks) { TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info")); - if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && - info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) { + if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) { s_logger.debug("There is already a VM snapshot task running, wait for it"); context.getVimClient().waitForTask(taskMor); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 574c9ec6bf7..d052069e8c6 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -25,6 +25,8 @@ import java.net.URI; import java.net.URL; import java.nio.channels.SocketChannel; import java.rmi.RemoteException; + +import com.cloud.configuration.Resource.ResourceType; import org.joda.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -507,10 +509,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } else if (clz == ResizeVolumeCommand.class) { return execute((ResizeVolumeCommand)cmd); } else if (clz == UnregisterVMCommand.class) { - return execute((UnregisterVMCommand) cmd); + return execute((UnregisterVMCommand)cmd); } else if (cmd instanceof StorageSubSystemCommand) { checkStorageProcessorAndHandlerNfsVersionAttribute((StorageSubSystemCommand)cmd); - return storageHandler.handleStorageCommands((StorageSubSystemCommand) cmd); + return storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd); } else if (clz == ScaleVmCommand.class) { return execute((ScaleVmCommand)cmd); } else if (clz == PvlanSetupCommand.class) { @@ -540,8 +542,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0); _cmdMBeans.remove(0); - JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), - "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); + JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); } } catch (Exception e) { if (s_logger.isTraceEnabled()) @@ -568,11 +569,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param cmd command to execute */ protected void checkStorageProcessorAndHandlerNfsVersionAttribute(StorageSubSystemCommand cmd) { - if (storageNfsVersion != null) return; - if (cmd instanceof CopyCommand){ - EnumMap params = new EnumMap(VmwareStorageProcessorConfigurableFields.class); - examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd, params); - params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand) cmd, params); + if (storageNfsVersion != null) + return; + if (cmd instanceof CopyCommand) { + EnumMap params = new EnumMap( + VmwareStorageProcessorConfigurableFields.class); + examineStorageSubSystemCommandNfsVersion((CopyCommand)cmd, params); + params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand)cmd, params); reconfigureProcessorByHandler(params); } } @@ -581,10 +584,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * Reconfigure processor by handler * @param params params */ - protected void reconfigureProcessorByHandler(EnumMap params) { - VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler) storageHandler; + protected void reconfigureProcessorByHandler(EnumMap params) { + VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler)storageHandler; boolean success = handler.reconfigureStorageProcessor(params); - if (success){ + if (success) { s_logger.info("VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler successfully reconfigured"); } else { s_logger.error("Error while reconfiguring VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler, params=" + _gson.toJson(params)); @@ -597,14 +600,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param params params * @return copy of params including new values, if suitable */ - protected EnumMap examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap params) { + protected EnumMap examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, + EnumMap params) { EnumMap paramsCopy = new EnumMap(params); HypervisorType hypervisor = cmd.getDestTO().getHypervisorType(); - if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)){ + if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) { DataStoreTO destDataStore = cmd.getDestTO().getDataStore(); - if (destDataStore instanceof PrimaryDataStoreTO){ - PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore; - if (dest.isFullCloneFlag() != null){ + if (destDataStore instanceof PrimaryDataStoreTO) { + PrimaryDataStoreTO dest = (PrimaryDataStoreTO)destDataStore; + if (dest.isFullCloneFlag() != null) { paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue()); } } @@ -617,15 +621,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param cmd command to execute * @param params params */ - protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap params){ + protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap params) { DataStoreTO srcDataStore = cmd.getSrcTO().getDataStore(); boolean nfsVersionFound = false; - if (srcDataStore instanceof NfsTO){ - nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO) srcDataStore); + if (srcDataStore instanceof NfsTO) { + nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO)srcDataStore); } - if (nfsVersionFound){ + if (nfsVersionFound) { params.put(VmwareStorageProcessorConfigurableFields.NFS_VERSION, storageNfsVersion); } } @@ -635,8 +639,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param nfsTO nfsTO * @return true if NFS version was found and not null, false in other case */ - protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO){ - if (nfsTO != null && nfsTO.getNfsVersion() != null){ + protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO) { + if (nfsTO != null && nfsTO.getNfsVersion() != null) { storageNfsVersion = nfsTO.getNfsVersion(); return true; } @@ -646,7 +650,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa /** * Registers the vm to the inventory given the vmx file. */ - private void registerVm(String vmName, DatastoreMO dsMo) throws Exception{ + private void registerVm(String vmName, DatastoreMO dsMo) throws Exception { //1st param VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); @@ -673,8 +677,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private Answer execute(ResizeVolumeCommand cmd) { String path = cmd.getPath(); String vmName = cmd.getInstanceName(); - long newSize = cmd.getNewSize() / 1024; - long oldSize = cmd.getCurrentSize()/1024; + long newSize = cmd.getNewSize() / ResourceType.bytesToKiB; + long oldSize = cmd.getCurrentSize() / ResourceType.bytesToKiB; boolean useWorkerVm = false; VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); @@ -686,9 +690,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { if (newSize < oldSize) { - throw new Exception("VMware doesn't support shrinking volume from larger size: " + oldSize/(1024*1024) + " GB to a smaller size: " + newSize/(1024*1024) + " GB"); + throw new Exception( + "VMware doesn't support shrinking volume from larger size: " + oldSize / ResourceType.bytesToMiB + " GB to a smaller size: " + newSize / ResourceType.bytesToMiB + " GB"); } else if (newSize == oldSize) { - return new ResizeVolumeAnswer(cmd, true, "success", newSize*1024); + return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB); } if (vmName.equalsIgnoreCase("none")) { // we need to spawn a worker VM to attach the volume to and @@ -706,7 +711,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa synchronized (this) { vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk"); - vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS); + vmMo.attachDisk(new String[] {vmdkDataStorePath}, morDS); } } // find VM through datacenter (VM is not at the target host yet) @@ -725,8 +730,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } // IDE virtual disk cannot be re-sized if VM is running if (vdisk.second() != null && vdisk.second().contains("ide")) { - throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. " + - "Please re-try when virtual disk is attached to a VM using SCSI controller."); + throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. " + + "Please re-try when virtual disk is attached to a VM using SCSI controller."); } if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi")) @@ -958,12 +963,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected ExecutionResult prepareNetworkElementCommand(SetupGuestNetworkCommand cmd) { NicTO nic = cmd.getNic(); String routerIp = getRouterSshControlIp(cmd); - String domrName = - cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); + String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); try { - int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp, - nic.getMac()); + int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp, nic.getMac()); nic.setDeviceId(ethDeviceNum); } catch (Exception e) { String msg = "Prepare SetupGuestNetwork failed due to " + e.toString(); @@ -973,7 +976,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new ExecutionResult(true, null); } - private ExecutionResult prepareNetworkElementCommand(IpAssocVpcCommand cmd) { String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); String routerIp = getRouterSshControlIp(cmd); @@ -1020,13 +1022,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private ExecutionResult prepareNetworkElementCommand(SetNetworkACLCommand cmd) { NicTO nic = cmd.getNic(); - String routerName = - cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); + String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); String routerIp = getRouterSshControlIp(cmd); try { - int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, - nic.getMac()); + int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, nic.getMac()); nic.setDeviceId(ethDeviceNum); } catch (Exception e) { String msg = "Prepare SetNetworkACL failed due to " + e.toString(); @@ -1073,7 +1073,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000; Map details = cmd.getDetails(); if (details != null) { - nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter")); + nicDeviceType = VirtualEthernetCardType.valueOf((String)details.get("nicAdapter")); } // find a usable device number in VMware environment @@ -1528,8 +1528,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa hotaddIncrementSizeInMb = vmMo.getHotAddMemoryIncrementSizeInMb(); hotaddMemoryLimitInMb = vmMo.getHotAddMemoryLimitInMb(); if (requestedMaxMemoryInMb > hotaddMemoryLimitInMb) { - throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB." + - " Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB."); + throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB." + + " Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB."); } // Check increment is multiple of increment size @@ -1630,7 +1630,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DiskTO rootDiskTO = null; // If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault' // This helps avoid mix of different scsi subtype controllers in instance. - if (DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) { + if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) { dataDiskController = DiskControllerType.scsi.toString(); } @@ -1659,9 +1659,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // Validate VM name is unique in Datacenter VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName); - if(vmInVcenter != null) { + if (vmInVcenter != null) { vmAlreadyExistsInVcenter = true; - String msg = "VM with name: " + vmNameOnVcenter +" already exists in vCenter."; + String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter."; s_logger.error(msg); throw new Exception(msg); } @@ -1759,8 +1759,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN)); rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); - } - else { + } else { DataStoreTO primaryStore = vol.getData().getDataStore(); rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); @@ -1781,9 +1780,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } tearDownVm(vmMo); - }else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), - getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec), - guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) { + } else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec), + vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false, + controllerInfo, systemVm)) { throw new Exception("Failed to create VM. vmName: " + vmInternalCSName); } } @@ -1808,9 +1807,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), - getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec), - guestOsId, vmSpec.getLimitCpuUse()); + VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)), + getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse()); // Check for multi-cores per socket settings int numCoresPerSocket = 1; @@ -1870,9 +1868,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs); deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = - VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), - true, true, ideUnitNumber++, i + 1); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, + String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if (s_logger.isDebugEnabled()) @@ -1901,8 +1898,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa assert (isoDatastoreInfo.second() != null); deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - Pair isoInfo = - VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1); + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, + i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if (s_logger.isDebugEnabled()) @@ -1989,14 +1986,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa assert (volumeDsDetails != null); - String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, - vol, matchingExistingDisk, - dataStoresDetails); - if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) + String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails); + if (controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) scsiUnitNumber++; - VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, - diskChain, - volumeDsDetails.first(), + VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) ? ((ideUnitNumber++) % VmwareHelper.MAX_IDE_CONTROLLER_COUNT) : scsiUnitNumber++, i + 1); if (vol.getType() == Volume.Type.ROOT) @@ -2004,7 +1997,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa deviceConfigSpecArray[i].setDevice(device); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - if(s_logger.isDebugEnabled()) + if (s_logger.isDebugEnabled()) s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); i++; @@ -2094,8 +2087,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType); - if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) || - (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))){ + if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) + || (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) { if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); @@ -2113,8 +2106,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i + 1, true, true); } - } - else{ + } else { //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(), nicTo.getMac(), i + 1, true, true); @@ -2162,8 +2154,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String keyboardLayout = null; if (vmSpec.getDetails() != null) keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); - vmConfigSpec.getExtraConfig().addAll( - Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout))); + vmConfigSpec.getExtraConfig() + .addAll(Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout))); // config video card configureVideoCard(vmMo, vmSpec, vmConfigSpec); @@ -2222,7 +2214,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e); s_logger.warn(msg, e); StartAnswer startAnswer = new StartAnswer(cmd, msg); - if(vmAlreadyExistsInVcenter) { + if (vmAlreadyExistsInVcenter) { startAnswer.setContextParam("stopRetry", "true"); } @@ -2233,7 +2225,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName()); DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); registerVm(existingVmName, existingVmDsMo); - } catch (Exception ex){ + } catch (Exception ex) { String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex); s_logger.warn(message, ex); } @@ -2271,7 +2263,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.warn("Disk chain length for the VM is greater than one, this is not supported"); throw new CloudRuntimeException("Unsupported VM disk chain length: "+ diskChain.length); } - if (diskInfo.getDiskDeviceBusName() == null || !diskInfo.getDiskDeviceBusName().toLowerCase().startsWith("scsi")) { + + boolean resizingSupported = false; + String deviceBusName = diskInfo.getDiskDeviceBusName(); + if (deviceBusName != null && (deviceBusName.toLowerCase().contains("scsi") || deviceBusName.toLowerCase().contains("lsi"))) { + resizingSupported = true; + } + if (!resizingSupported) { s_logger.warn("Resizing of root disk is only support for scsi device/bus, the provide VM's disk device bus name is " + diskInfo.getDiskDeviceBusName()); throw new CloudRuntimeException("Unsupported VM root disk device bus: "+ diskInfo.getDiskDeviceBusName()); } @@ -2329,13 +2327,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @throws Exception exception */ protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception { - if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)){ + if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)) { String value = vmSpec.getDetails().get(VmDetailConstants.SVGA_VRAM_SIZE); try { long svgaVmramSize = Long.parseLong(value); setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize, vmConfigSpec); - } - catch (NumberFormatException e){ + } catch (NumberFormatException e) { s_logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage()); } } @@ -2348,9 +2345,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param vmConfigSpec virtual machine config spec */ protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) throws Exception { - for (VirtualDevice device : vmMo.getAllDeviceList()){ - if (device instanceof VirtualMachineVideoCard){ - VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard) device; + for (VirtualDevice device : vmMo.getAllDeviceList()) { + if (device instanceof VirtualMachineVideoCard) { + VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard)device; modifyVmVideoCardVRamSize(videoCard, vmMo, svgaVmramSize, vmConfigSpec); } } @@ -2364,7 +2361,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param vmConfigSpec virtual machine config spec */ protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) { - if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize){ + if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) { s_logger.info("Video card memory was set " + videoCard.getVideoRamSizeInKB().longValue() + "kb instead of " + svgaVmramSize + "kb"); configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec); } @@ -2376,7 +2373,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa * @param svgaVmramSize new svga vram size (in KB) * @param vmConfigSpec virtual machine spec */ - protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec){ + protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) { videoCard.setVideoRamSizeInKB(svgaVmramSize); videoCard.setUseAutoDetect(false); @@ -2387,9 +2384,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs); } - private void tearDownVm(VirtualMachineMO vmMo) throws Exception{ + private void tearDownVm(VirtualMachineMO vmMo) throws Exception { - if(vmMo == null) return; + if (vmMo == null) + return; boolean hasSnapshot = false; hasSnapshot = vmMo.hasSnapshot(); @@ -2401,17 +2399,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } int getReservedMemoryMb(VirtualMachineTO vmSpec) { - if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) { - return (int) (vmSpec.getMinRam() / (1024 * 1024)); - } - return 0; + if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) { + return (int)(vmSpec.getMinRam() / ResourceType.bytesToMiB); + } + return 0; } int getReservedCpuMHZ(VirtualMachineTO vmSpec) { - if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) { - return vmSpec.getMinSpeed() * vmSpec.getCpus(); - } - return 0; + if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) { + return vmSpec.getMinSpeed() * vmSpec.getCpus(); + } + return 0; } // return the finalized disk chain for startup, from top to bottom @@ -2433,8 +2431,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String datastoreName = isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid(); Pair volumeDsDetails = dataStoresDetails.get(datastoreName); - if (volumeDsDetails == null) - { + if (volumeDsDetails == null) { throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host."); } @@ -2554,7 +2551,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } OptionValue newVal; - if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null) { + if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null) { String vrIp = nicTo.getBroadcastUri().getPath().substring(1); newVal = new OptionValue(); newVal.setKey("vsp.vr-ip." + nicTo.getMac()); @@ -2688,13 +2685,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, - VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { + private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context) + throws Exception { if (diskInfoBuilder != null) { VolumeObjectTO volume = (VolumeObjectTO)vol.getData(); String dsName = null; - String diskBackingFileBaseName= null; + String diskBackingFileBaseName = null; Map details = vol.getDetails(); boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); @@ -2706,8 +2703,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa dsName = VmwareResource.getDatastoreName(iScsiName); diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName(); - } - else { + } else { ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid()); DatastoreMO dsMo = new DatastoreMO(context, morDs); @@ -2716,8 +2712,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa diskBackingFileBaseName = volume.getPath(); } - VirtualMachineDiskInfo diskInfo = - diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); if (diskInfo != null) { s_logger.info("Found existing disk info from volume path: " + volume.getPath()); return diskInfo; @@ -2768,12 +2763,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa Map vmDetails = vmSpec.getDetails(); if (vmDetails != null && vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER) != null) { if (vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER).equalsIgnoreCase("scsi")) { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " + - vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); + s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " + + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); controllerKey = scsiControllerKey; } else { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " + - vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); + s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " + + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER)); controllerKey = ideControllerKey; } } else { @@ -2820,8 +2815,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return controllerInfo.second(); } } - private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, - int scsiControllerKey, Map iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { + + private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, int scsiControllerKey, + Map iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception { VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); for (DiskTO vol : sortedDisks) { @@ -2852,8 +2848,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (s_logger.isInfoEnabled()) s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]); } - } - else { + } else { if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { if (s_logger.isInfoEnabled()) s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); @@ -2969,8 +2964,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return listForSort.toArray(new DiskTO[0]); } - private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, - DiskTO[] disks, Command cmd) throws Exception { + private HashMap> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, DiskTO[] disks, + Command cmd) throws Exception { HashMap> mapIdToMors = new HashMap>(); assert (hyperHost != null) && (context != null); @@ -2997,12 +2992,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // if the datastore is not present, we need to discover the iSCSI device that will support it, // create the datastore, and create a VMDK file in the datastore if (morDatastore == null) { - morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, - details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), - volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null, - details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), - details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET), - Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); + morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, details.get(DiskTO.STORAGE_HOST), + Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null, + details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME), + details.get(DiskTO.CHAP_TARGET_SECRET), Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd); DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore); String datastoreVolumePath = dsMo.getDatastorePath((volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : dsMo.getName()) + ".vmdk"); @@ -3012,8 +3005,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } mapIdToMors.put(datastoreName, new Pair(morDatastore, new DatastoreMO(context, morDatastore))); - } - else { + } else { ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid); if (morDatastore == null) { @@ -3051,8 +3043,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName); break; - } - else { + } else { DataStoreTO primaryStore = vol.getData().getDataStore(); rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid()); @@ -3109,7 +3100,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return defaultVlan; } - private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception { + private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) + throws Exception { Ternary switchDetails = getTargetSwitch(nicTo); VirtualSwitchType switchType = VirtualSwitchType.getType(switchDetails.second()); @@ -3147,14 +3139,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // return Ternary private Ternary getTargetSwitch(NicTO nicTo) throws CloudException { - TrafficType[] supportedTrafficTypes = - new TrafficType[] { - TrafficType.Guest, - TrafficType.Public, - TrafficType.Control, - TrafficType.Management, - TrafficType.Storage - }; + TrafficType[] supportedTrafficTypes = new TrafficType[] {TrafficType.Guest, TrafficType.Public, TrafficType.Control, TrafficType.Management, TrafficType.Storage}; TrafficType trafficType = nicTo.getType(); if (!Arrays.asList(supportedTrafficTypes).contains(trafficType)) { @@ -3165,7 +3150,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualSwitchType switchType = VirtualSwitchType.StandardVirtualSwitch; String vlanId = Vlan.UNTAGGED; - if(nicTo.getName() != null && !nicTo.getName().isEmpty()) { + if (StringUtils.isNotBlank(nicTo.getName())) { // Format of network traffic label is ,, // If all 3 fields are mentioned then number of tokens would be 3. // If only , are mentioned then number of tokens would be 2. @@ -3192,9 +3177,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (switchType == VirtualSwitchType.NexusDistributedVirtualSwitch) { if (trafficType == TrafficType.Management || trafficType == TrafficType.Storage) { - throw new CloudException("Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() + - " is not supported over virtual switch type " + switchType + - ". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label."); + throw new CloudException( + "Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() + " is not supported over virtual switch type " + switchType + + ". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label."); } } @@ -3737,7 +3722,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VolumeTO volume; StorageFilerTO filerTo; Set mountedDatastoresAtSource = new HashSet(); - List volumeToList = new ArrayList(); + List volumeToList = new ArrayList(); Map volumeDeviceKey = new HashMap(); List> volToFiler = cmd.getVolumeToFilerAsList(); @@ -3776,7 +3761,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid()); morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid()); if (morDsAtTarget == null) { - String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() + " to execute MigrateWithStorageCommand"; + String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() + + " to execute MigrateWithStorageCommand"; s_logger.error(msg); throw new Exception(msg); } @@ -3805,12 +3791,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration. if (filerTo.getType().equals(StoragePoolType.VMFS)) { if (morDsAtSource == null) { - s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration."); + s_logger.warn( + "If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration."); throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not mounted on source host: " + _hostName); } DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morDsAtSource); String srcHostValue = srcHyperHost.getMor().getValue(); - if(!dsAtSourceMo.isAccessibleToHost(srcHostValue)) { + if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) { s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to accessible to source host for a successful live storage migration."); throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not accessible on source host: " + _hostName); } @@ -3892,8 +3879,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (!vmMo.changeDatastore(relocateSpec)) { throw new Exception("Change datastore operation failed during storage migration"); } else { - s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() + - " and its storage to target datastore(s)"); + s_logger.debug( + "Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() + " and its storage to target datastore(s)"); } } @@ -3943,8 +3930,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { srcHyperHost.unmountDatastore(mountedDatastore); } catch (Exception unmountEx) { - s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName + - ". Please unmount manually to cleanup."); + s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName + + ". Please unmount manually to cleanup."); } s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName); } @@ -3989,7 +3976,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmName = vmMo.getName(); morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName); if (morDs == null) { - String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() +" to execute MigrateVolumeCommand"; + String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() + + " to execute MigrateVolumeCommand"; s_logger.error(msg); throw new Exception(msg); } @@ -4091,9 +4079,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { VmwareContext context = getServiceContext(); - _storageProcessor.prepareManagedDatastore(context, getHyperHost(context), - cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(CreateStoragePoolCommand.IQN), - cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT))); + _storageProcessor.prepareManagedDatastore(context, getHyperHost(context), cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME), + cmd.getDetails().get(CreateStoragePoolCommand.IQN), cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST), + Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT))); } catch (Exception ex) { return new Answer(cmd, false, "Issue creating datastore"); } @@ -4163,8 +4151,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (targets != null && targets.size() > 0) { try { _storageProcessor.handleTargetsForHost(add, targets, host); - } - catch (Exception ex) { + } catch (Exception ex) { s_logger.warn(ex.getMessage()); } } @@ -4178,11 +4165,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { if (cmd.getRemoveDatastore()) { _storageProcessor.handleDatastoreAndVmdkDetach(cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN), - cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT))); + cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT))); return new Answer(cmd, true, "success"); - } - else { + } else { // We will leave datastore cleanup management to vCenter. Since for cluster VMFS datastore, it will always // be mounted by vCenter. @@ -4233,12 +4219,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (cmd.isAttach()) { vmMo.mountToolsInstaller(); } else { - try{ + try { if (!vmMo.unmountToolsInstaller()) { return new Answer(cmd, false, "Failed to unmount vmware-tools installer ISO as the corresponding CDROM device is locked by VM. Please unmount the CDROM device inside the VM and ret-try."); } - }catch(Throwable e){ + } catch (Throwable e) { vmMo.detachIso(null); } } @@ -4318,10 +4304,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private static String getSecondaryDatastoreUUID(String storeUrl) { String uuid = null; - try{ - uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString(); - }catch(UnsupportedEncodingException e){ - s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." ); + try { + uuid = UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString(); + } catch (UnsupportedEncodingException e) { + s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error."); } return uuid; } @@ -4522,8 +4508,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa long used = capacity - free; if (s_logger.isDebugEnabled()) { - s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + - cmd.getPooltype() + ", capacity: " + capacity + ", free: " + free + ", used: " + used); + s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() + + ", capacity: " + capacity + ", free: " + free + ", used: " + used); } if (summary.getCapacity() <= 0) { @@ -4532,9 +4518,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new GetStorageStatsAnswer(cmd, capacity, used); } else { - String msg = - "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + - cmd.getPooltype(); + String msg = "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + + cmd.getPooltype(); s_logger.error(msg); return new GetStorageStatsAnswer(cmd, msg); @@ -4545,9 +4530,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa invalidateServiceContext(); } - String msg = - "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() + - ") due to " + VmwareHelper.getExceptionMessage(e); + String msg = "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() + + ") due to " + VmwareHelper.getExceptionMessage(e); s_logger.error(msg, e); return new GetStorageStatsAnswer(cmd, msg); } @@ -4624,8 +4608,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (result.first()) return new Answer(cmd); } catch (Exception e) { - s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " - + VmwareHelper.getExceptionMessage(e), e); + s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd, false, "PingTestCommand failed"); } else { @@ -4649,8 +4632,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } catch (Exception e) { - s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " - + VmwareHelper.getExceptionMessage(e), e); + s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd, false, "PingTestCommand failed"); @@ -4674,7 +4656,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new Answer(cmd); } - protected Answer execute(GetVmIpAddressCommand cmd) { if (s_logger.isTraceEnabled()) { s_logger.trace("Executing resource command GetVmIpAddressCommand: " + _gson.toJson(cmd)); @@ -5055,8 +5036,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreSummary dsSummary = dsMo.getSummary(); String address = hostMo.getHostName(); - StoragePoolInfo pInfo = - new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), dsSummary.getFreeSpace()); + StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), + dsSummary.getFreeSpace()); StartupStorageCommand cmd = new StartupStorageCommand(); cmd.setName(poolUuid); cmd.setPoolInfo(pInfo); @@ -5131,8 +5112,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - } - catch (Exception ex) { + } catch (Exception ex) { s_logger.info("Could not locate an IQN for this host."); } @@ -5204,8 +5184,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) - throws Exception { + protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception { VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); @@ -5284,7 +5263,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa cpuArchitecture = "i386"; } - if(cloudGuestOs == null) { + if (cloudGuestOs == null) { s_logger.warn("Guest OS mapping name is not set for guest os: " + guestOs); } @@ -5359,7 +5338,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return newStates; } - private HashMap getVmStates() throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); @@ -5412,8 +5390,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return newStates; } - - private HashMap getVmStats(List vmNames) throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); HashMap vmResponseMap = new HashMap(); @@ -5449,6 +5425,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", numCpuStr, cpuUseStr ,guestMemUseStr ,memLimitStr ,memMbStr,allocatedCpuStr ,instanceNameCustomField}); + if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { List objProps = oc.getPropSet(); @@ -5468,9 +5445,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } else if (objProp.getName().contains(instanceNameCustomField)) { if (objProp.getVal() != null) vmInternalCSName = ((CustomFieldStringValue)objProp.getVal()).getValue(); - }else if(objProp.getName().equals(guestMemusage)){ + } else if (objProp.getName().equals(guestMemusage)) { guestMemusage = objProp.getVal().toString(); - }else if (objProp.getName().equals(numCpuStr)) { + } else if (objProp.getName().equals(numCpuStr)) { numberCPUs = objProp.getVal().toString(); } else if (objProp.getName().equals(cpuUseStr)) { maxCpuUsage = NumberUtils.toDouble(objProp.getVal().toString()); @@ -5503,8 +5480,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa List perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null); if (perfMetrics != null) { for (int index = 0; index < perfMetrics.size(); ++index) { - if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) || - ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) { + if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) + || ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) { vmNetworkMetrics.add(perfMetrics.get(index)); } } @@ -5550,14 +5527,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm")); + vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, + maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm")); + } } } return vmResponseMap; } - protected String networkUsage(final String privateIpAddress, final String option, final String ethName) { String args = null; if (option.equals("get")) { @@ -5652,7 +5630,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return convertPowerState(runtimeInfo.getPowerState()); } - private static PowerState convertPowerState(VirtualMachinePowerState powerState) { return s_powerStatesTable.get(powerState); } @@ -5730,8 +5707,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager()); cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID); - if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch || _guestTrafficInfo != null && - _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) { + if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch + || _guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) { cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP); } cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC); @@ -5744,8 +5721,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareHypervisorHost hostMo = this.getHyperHost(context); _hostName = hostMo.getHyperHostName(); - if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch || - _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) { + if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch + || _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) { _privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware); _vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster)); } @@ -5770,9 +5747,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (intObj != null) _portsPerDvPortGroup = intObj.intValue(); - s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + - _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " + - _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName()); + s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + + _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " + + _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName()); Boolean boolObj = (Boolean)params.get("vmware.create.full.clone"); if (boolObj != null && boolObj.booleanValue()) { @@ -5792,7 +5769,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa int timeout = NumbersUtil.parseInt(value, 1440) * 1000; storageNfsVersion = NfsSecondaryStorageResource.retrieveNfsVersionFromParams(params); - _storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null, storageNfsVersion); + _storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null, + storageNfsVersion); storageHandler = new VmwareStorageSubsystemCommandHandler(_storageProcessor, storageNfsVersion); _vrResource = new VirtualRoutingResource(this); @@ -5842,11 +5820,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa @Override public VmwareContext getServiceContext(Command cmd) { VmwareContext context = null; - if(s_serviceContext.get() != null) { + if (s_serviceContext.get() != null) { context = s_serviceContext.get(); String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username); // Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls. - if(context.getPoolKey().equals(poolKey)) { + if (context.getPoolKey().equals(poolKey)) { if (context.validate()) { if (s_logger.isTraceEnabled()) { s_logger.trace("ThreadLocal context is still valid, just reuse"); diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index ccf4512ce47..b2925b1aa75 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -314,7 +314,6 @@ public class VmwareStorageProcessor implements StorageProcessor { } String templateUrl = secondaryStorageUrl + "/" + srcData.getPath(); - Pair templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName()); VmwareContext context = hostService.getServiceContext(cmd); @@ -505,6 +504,9 @@ public class VmwareStorageProcessor implements StorageProcessor { ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); + if (template.getSize() != null){ + _fullCloneFlag = volume.getSize() > template.getSize() ? true : _fullCloneFlag; + } if (!_fullCloneFlag) { createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); } else { @@ -514,8 +516,8 @@ public class VmwareStorageProcessor implements StorageProcessor { vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName); assert (vmMo != null); - vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); // TO-DO: Support for base template containing multiple disks - s_logger.info("Move volume out of volume-wrapper VM "); + vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); + s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName); String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); @@ -529,7 +531,12 @@ public class VmwareStorageProcessor implements StorageProcessor { vmMo.destroy(); String srcFile = dsMo.getDatastorePath(vmdkName, true); + dsMo.deleteFile(srcFile, dcMo.getMor(), true, searchExcludedFolders); + + if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) { + dsMo.deleteFolder(srcFile, dcMo.getMor()); + } } // restoreVM - move the new ROOT disk into corresponding VM folder VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName()); @@ -542,7 +549,12 @@ public class VmwareStorageProcessor implements StorageProcessor { VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(vmdkFileBaseName); - newVol.setSize(volume.getSize()); + if (template.getSize() != null){ + newVol.setSize(template.getSize()); + } + else { + newVol.setSize(volume.getSize()); + } return new CopyCmdAnswer(newVol); } catch (Throwable e) { if (e instanceof RemoteException) { diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 5b1ec2e68b6..88da6342b81 100644 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -1445,6 +1445,11 @@ public class ApiResponseHelper implements ResponseGenerator { return ApiDBUtils.findTemplateById(templateId); } + @Override + public DiskOfferingVO findDiskOfferingById(Long diskOfferingId) { + return ApiDBUtils.findDiskOfferingById(diskOfferingId); + } + @Override public VpnUsersResponse createVpnUserResponse(VpnUser vpnUser) { VpnUsersResponse vpnResponse = new VpnUsersResponse(); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 42034557d73..9c728ef0f78 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -3097,6 +3097,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Map tags = cmd.getTags(); boolean showRemovedTmpl = cmd.getShowRemoved(); Account caller = CallContext.current().getCallingAccount(); + Long parentTemplateId = cmd.getParentTemplateId(); boolean listAll = false; if (templateFilter != null && templateFilter == TemplateFilter.all) { @@ -3125,14 +3126,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return searchForTemplatesInternal(id, cmd.getTemplateName(), cmd.getKeyword(), templateFilter, false, null, cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, showDomr, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedTmpl, - cmd.getIds()); + cmd.getIds(), parentTemplateId); } private Pair, Integer> searchForTemplatesInternal(Long templateId, String name, String keyword, TemplateFilter templateFilter, boolean isIso, Boolean bootable, Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean showDomr, boolean onlyReady, List permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria, - Map tags, boolean showRemovedTmpl, List ids) { + Map tags, boolean showRemovedTmpl, List ids, Long parentTemplateId) { // check if zone is configured, if not, just return empty list List hypers = null; @@ -3376,6 +3377,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.addAnd("dataCenterId", SearchCriteria.Op.SC, zoneSc); } + if (parentTemplateId != null) { + sc.addAnd("parentTemplateId", SearchCriteria.Op.EQ, parentTemplateId); + } + // don't return removed template, this should not be needed since we // changed annotation for removed field in TemplateJoinVO. // sc.addAnd("removed", SearchCriteria.Op.NULL); @@ -3459,7 +3464,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true, cmd.isBootable(), cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, true, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedISO, - null); + null, null); } @Override diff --git a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 26619f57e54..4f1984257ed 100644 --- a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -18,8 +18,10 @@ package com.cloud.api.query.dao; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import javax.inject.Inject; @@ -27,6 +29,7 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.response.ChildTemplateResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -37,10 +40,13 @@ import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.AccountService; @@ -59,6 +65,8 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation tmpltIdPairSearch; @@ -186,6 +194,10 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation details = new HashMap<>(); @@ -201,6 +213,22 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation childTemplatesSet = new HashSet(); + if (template.getHypervisorType() == HypervisorType.VMware) { + List childTemplates = _vmTemplateDao.listByParentTemplatetId(template.getId()); + for (VMTemplateVO tmpl : childTemplates) { + if (tmpl.getTemplateType() != TemplateType.ISODISK) { + ChildTemplateResponse childTempl = new ChildTemplateResponse(); + childTempl.setId(tmpl.getUuid()); + childTempl.setName(tmpl.getName()); + childTempl.setSize(Math.round(tmpl.getSize() / (1024 * 1024 * 1024))); + childTemplatesSet.add(childTempl); + } + } + templateResponse.setChildTemplates(childTemplatesSet); + } + templateResponse.setObjectName("template"); return templateResponse; } diff --git a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java index 20e805640ce..25e3b0b5ff5 100644 --- a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java +++ b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java @@ -209,6 +209,12 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont @Column(name = "lp_account_id") private Long sharedAccountId; + @Column(name = "parent_template_id") + private Long parentTemplateId; + + @Column(name = "parent_template_uuid") + private String parentTemplateUuid; + @Column(name = "detail_name") private String detailName; @@ -483,4 +489,13 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont public boolean isDirectDownload() { return directDownload; } + + public Object getParentTemplateId() { + return parentTemplateId; + } + + public String getParentTemplateUuid() { + return parentTemplateUuid; + } + } diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java index 9d3944de29a..0d5da2ff5b4 100644 --- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java @@ -1325,18 +1325,18 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale vm = _userVmService.createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, - null, true, null, null, null, null, null); + null, true, null, null, null, null, null, null); } else { if (zone.isSecurityGroupEnabled()) { vm = _userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, null, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, - null, null, true, null, null, null, null, null); + null, null, true, null, null, null, null, null, null); } else { vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), - null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null); + null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null); } } diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java index fedc0a6d83b..10832593589 100644 --- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -84,6 +84,7 @@ import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.download.DownloadMonitor; import com.cloud.utils.UriUtils; @@ -121,6 +122,8 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { MessageBus _messageBus; @Inject ResourceManager resourceManager; + @Inject + VMTemplateDao templateDao; @Override public String getName() { @@ -430,9 +433,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { @Override @DB public boolean delete(TemplateProfile profile) { - boolean success = true; + boolean success = false; VMTemplateVO template = profile.getTemplate(); + Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); if (profile.getZoneIdList() != null && profile.getZoneIdList().size() > 1) throw new CloudRuntimeException("Operation is not supported for more than one zone id at a time"); @@ -456,8 +460,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { for (TemplateDataStoreVO templateStore : templateStores) { if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) { String errorMsg = "Please specify a template that is not currently being downloaded."; - s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + - "; cant' delete it."); + s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; cant' delete it."); throw new CloudRuntimeException(errorMsg); } } @@ -474,37 +477,78 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // publish zone-wide usage event Long sZoneId = ((ImageStoreEntity)imageStore).getDataCenterId(); if (sZoneId != null) { - UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(), template.getUuid()); + UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(), + template.getUuid()); } - s_logger.info("Delete template from image store: " + imageStore.getName()); - AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); - try { - TemplateApiResult result = future.get(); - success = result.isSuccess(); - if (!success) { - s_logger.warn("Failed to delete the template " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); - break; - } - - // remove from template_zone_ref - List templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId()); - if (templateZones != null) { - for (VMTemplateZoneVO templateZone : templateZones) { - templateZoneDao.remove(templateZone.getId()); + boolean dataDiskDeletetionResult = true; + List dataDiskTemplates = templateDao.listByParentTemplatetId(template.getId()); + if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) { + s_logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template"); + for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { + s_logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName()); + AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore)); + try { + TemplateApiResult result = future.get(); + dataDiskDeletetionResult = result.isSuccess(); + if (!dataDiskDeletetionResult) { + s_logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: " + + result.getResult()); + break; + } + // Remove from template_zone_ref + List templateZones = templateZoneDao.listByZoneTemplate(sZoneId, dataDiskTemplate.getId()); + if (templateZones != null) { + for (VMTemplateZoneVO templateZone : templateZones) { + templateZoneDao.remove(templateZone.getId()); + } + } + // Mark datadisk template as Inactive + List iStores = templateMgr.getImageStoreByTemplate(dataDiskTemplate.getId(), null); + if (iStores == null || iStores.size() == 0) { + dataDiskTemplate.setState(VirtualMachineTemplate.State.Inactive); + _tmpltDao.update(dataDiskTemplate.getId(), dataDiskTemplate); + } + // Decrement total secondary storage space used by the account + _resourceLimitMgr.recalculateResourceCount(dataDiskTemplate.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal()); + } catch (Exception e) { + s_logger.debug("Delete datadisk template failed", e); + throw new CloudRuntimeException("Delete datadisk template failed", e); } } - //mark all the occurrences of this template in the given store as destroyed. - templateDataStoreDao.removeByTemplateStore(template.getId(), imageStore.getId()); + } + // remove from template_zone_ref + if (dataDiskDeletetionResult) { + s_logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName()); + AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); + try { + TemplateApiResult result = future.get(); + success = result.isSuccess(); + if (!success) { + s_logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); + break; + } - } catch (InterruptedException e) { - s_logger.debug("delete template Failed", e); - throw new CloudRuntimeException("delete template Failed", e); - } catch (ExecutionException e) { - s_logger.debug("delete template Failed", e); - throw new CloudRuntimeException("delete template Failed", e); + // remove from template_zone_ref + List templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId()); + if (templateZones != null) { + for (VMTemplateZoneVO templateZone : templateZones) { + templateZoneDao.remove(templateZone.getId()); + } + } + } catch (InterruptedException e) { + s_logger.debug("Delete template Failed", e); + throw new CloudRuntimeException("Delete template Failed", e); + } catch (ExecutionException e) { + s_logger.debug("Delete template Failed", e); + throw new CloudRuntimeException("Delete template Failed", e); + } + } else { + s_logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() + " because deletion of one of the Datadisk" + + " templates that belonged to the template failed"); } } + } if (success) { if ((imageStores.size() > 1) && (profile.getZoneIdList() != null)) { @@ -515,7 +559,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // delete all cache entries for this template List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); for (TemplateInfo tmplOnCache : cacheTmpls) { - s_logger.info("Delete template from image cache store: " + tmplOnCache.getDataStore().getName()); + s_logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName()); tmplOnCache.delete(); } @@ -528,7 +572,6 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // Decrement the number of templates and total secondary storage // space used by the account - Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); _resourceLimitMgr.decrementResourceCount(template.getAccountId(), ResourceType.template); _resourceLimitMgr.recalculateResourceCount(template.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal()); diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index 270194ff0b8..42bdd72af63 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -776,12 +776,34 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, UsageEventUtils.publishUsageEvent(copyEventType, account.getId(), dstZoneId, tmpltId, null, null, null, srcTmpltStore.getPhysicalSize(), srcTmpltStore.getSize(), template.getClass().getName(), template.getUuid()); } - return true; + + // Copy every Datadisk template that belongs to the template to Destination zone + List dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId()); + if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) { + for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { + s_logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); + TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore); + AsyncCallFuture dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore); + try { + TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get(); + if (dataDiskCopyResult.isFailed()) { + s_logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one"); + continue; // Continue to copy next Datadisk template + } + _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId); + _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize()); + } catch (Exception ex) { + s_logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + + " , will try copying the next one"); + } + } + } } catch (Exception ex) { s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); } } - return false; + return true; } @@ -800,6 +822,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("Unable to find template with id"); } + // Verify template is not Datadisk template + if (template.getTemplateType().equals(TemplateType.DATADISK)) { + throw new InvalidParameterValueException("Template " + template.getId() + " is of type Datadisk. Cannot copy Datadisk templates."); + } + if (sourceZoneId != null) { if (destZoneIds!= null && destZoneIds.contains(sourceZoneId)) { throw new InvalidParameterValueException("Please specify different source and destination zones."); diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 362786b197f..368569a0ab1 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -254,6 +254,7 @@ import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateApiService; import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; @@ -501,6 +502,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private SnapshotApiService _snapshotService; @Inject NicExtraDhcpOptionDao _nicExtraDhcpOptionDao; + @Inject + protected TemplateApiService _tmplService; protected ScheduledExecutorService _executor = null; protected ScheduledExecutorService _vmIpFetchExecutor = null; @@ -2948,7 +2951,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, - Map customParametes, String customId, Map> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, + Map customParametes, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -2996,7 +2999,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, - userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap); + userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap); } @@ -3005,7 +3008,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, - List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException, + List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -3107,7 +3110,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, - userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap); + userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap); } @Override @@ -3115,7 +3118,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List affinityGroupIdList, - Map customParametrs, String customId, Map> dhcpOptionsMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, + Map customParametrs, String customId, Map> dhcpOptionsMap, Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -3212,7 +3215,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir verifyExtraDhcpOptionsNetwork(dhcpOptionsMap, networkList); return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData, - sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap); + sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, dataDiskTemplateToDiskOfferingMap); } private void verifyExtraDhcpOptionsNetwork(Map> dhcpOptionsMap, List networkList) throws InvalidParameterValueException { @@ -3244,7 +3247,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate tmplt, String hostName, String displayName, Account owner, Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, String sshKeyPair, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, - List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap) throws InsufficientCapacityException, ResourceUnavailableException, + List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException { _accountMgr.checkAccess(caller, null, true, owner); @@ -3356,6 +3359,38 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } + if (datadiskTemplateToDiskOfferringMap != null && !datadiskTemplateToDiskOfferringMap.isEmpty()) { + for (Entry datadiskTemplateToDiskOffering : datadiskTemplateToDiskOfferringMap.entrySet()) { + VMTemplateVO dataDiskTemplate = _templateDao.findById(datadiskTemplateToDiskOffering.getKey()); + DiskOffering dataDiskOffering = datadiskTemplateToDiskOffering.getValue(); + + if (dataDiskTemplate == null + || (!dataDiskTemplate.getTemplateType().equals(TemplateType.DATADISK)) && (dataDiskTemplate.getState().equals(VirtualMachineTemplate.State.Active))) { + throw new InvalidParameterValueException("Invalid template id specified for Datadisk template" + datadiskTemplateToDiskOffering.getKey()); + } + long dataDiskTemplateId = datadiskTemplateToDiskOffering.getKey(); + if (!dataDiskTemplate.getParentTemplateId().equals(template.getId())) { + throw new InvalidParameterValueException("Invalid Datadisk template. Specified Datadisk template" + dataDiskTemplateId + + " doesn't belong to template " + template.getId()); + } + if (dataDiskOffering == null) { + throw new InvalidParameterValueException("Invalid disk offering id " + datadiskTemplateToDiskOffering.getValue().getId() + + " specified for datadisk template " + dataDiskTemplateId); + } + if (dataDiskOffering.isCustomized()) { + throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + + dataDiskTemplateId + ". Custom Disk offerings are not supported for Datadisk templates"); + } + if (dataDiskOffering.getDiskSize() < dataDiskTemplate.getSize()) { + throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " + + dataDiskTemplateId + ". Disk offering size should be greater than or equal to the template size"); + } + _templateDao.loadDetails(dataDiskTemplate); + _resourceLimitMgr.checkResourceLimit(owner, ResourceType.volume, 1); + _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, dataDiskOffering.getDiskSize()); + } + } + // check that the affinity groups exist if (affinityGroupIdList != null) { for (Long affinityGroupId : affinityGroupIdList) { @@ -3591,7 +3626,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering, - isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap); + isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, datadiskTemplateToDiskOfferringMap); // Assign instance to the group try { @@ -3651,7 +3686,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplate template, final String hostName, final String displayName, final Account owner, final Long diskOfferingId, final Long diskSize, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap networkNicMap, - final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap) throws InsufficientCapacityException { + final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { return Transaction.execute(new TransactionCallbackWithException() { @Override public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException { @@ -3753,7 +3788,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir networkNicMap, plan, extraDhcpOptionMap); } else { _orchSrvc.createVirtualMachine(vm.getUuid(), Long.toString(owner.getAccountId()), Long.toString(template.getId()), hostName, displayName, hypervisorType.name(), - offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap); + offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap); } if (s_logger.isDebugEnabled()) { @@ -4017,6 +4052,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir s_logger.error("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state"); throw new ConcurrentOperationException("Failed to deploy VM "+vm); } + + try { + if (!cmd.getDataDiskTemplateToDiskOfferingMap().isEmpty()) { + List vols = _volsDao.findByInstance(tmpVm.getId()); + for (VolumeVO vol : vols) { + if (vol.getVolumeType() == Volume.Type.DATADISK) { + DiskOffering doff = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId()); + _volService.resizeVolumeOnHypervisor(vol.getId(), doff.getDiskSize(), tmpVm.getHostId(), vm.getInstanceName()); + } + } + } + } + catch (Exception e) { + s_logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e); + } + } finally { updateVmStateForFailedVmCreation(vm.getId(), hostId); } @@ -4748,19 +4799,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String sshKeyPairName = cmd.getSSHKeyPairName(); Boolean displayVm = cmd.getDisplayVm(); String keyboard = cmd.getKeyboard(); + Map dataDiskTemplateToDiskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap(); if (zone.getNetworkType() == NetworkType.Basic) { if (cmd.getNetworkIds() != null) { throw new InvalidParameterValueException("Can't specify network Ids in Basic zone"); } else { vm = createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId, size , group , cmd.getHypervisor(), cmd.getHttpMethod(), userData , sshKeyPairName , cmd.getIpToNetworkMap(), addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(), - cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap()); + cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap); } } else { if (zone.isSecurityGroupEnabled()) { vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, - cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap()); + cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap); } else { if (cmd.getSecurityGroupIdList() != null && !cmd.getSecurityGroupIdList().isEmpty()) { @@ -4768,7 +4820,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } vm = createAdvancedVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), - cmd.getCustomId(), cmd.getDhcpOptionsMap()); + cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap); + } + } + // check if this templateId has a child ISO + List child_templates = _templateDao.listByParentTemplatetId(templateId); + for (VMTemplateVO tmpl: child_templates){ + if (tmpl.getFormat() == Storage.ImageFormat.ISO){ + s_logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); + _tmplService.attachIso(tmpl.getId(), vm.getId()); } } return vm; diff --git a/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java index 3a6774821f4..d8ff3bc354e 100644 --- a/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java +++ b/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java @@ -44,7 +44,7 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.junit.Assert; import org.junit.Before; -import org.junit.Test; +//import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; @@ -218,7 +218,7 @@ public class HypervisorTemplateAdapterTest { } } - @Test + //@Test public void testEmitDeleteEventUuid() throws InterruptedException, ExecutionException, EventBusException { //All the mocks required for this test to work. ImageStoreEntity store = mock(ImageStoreEntity.class); diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index dd97872d32d..493c9b84b25 100644 --- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -56,17 +56,23 @@ import com.cloud.agent.api.SecStorageSetupCommand; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupSecondaryStorageCommand; +import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer; +import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand; import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand; import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.GetDatadisksAnswer; +import com.cloud.agent.api.storage.GetDatadisksCommand; import com.cloud.agent.api.storage.ListTemplateAnswer; import com.cloud.agent.api.storage.ListTemplateCommand; import com.cloud.agent.api.storage.ListVolumeAnswer; import com.cloud.agent.api.storage.ListVolumeCommand; +import com.cloud.agent.api.storage.OVFHelper; import com.cloud.agent.api.storage.UploadCommand; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DatadiskTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.SwiftTO; @@ -95,6 +101,7 @@ import com.cloud.storage.template.VhdProcessor; import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.EncryptionUtil; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.SwiftUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @@ -117,6 +124,7 @@ import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; + import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; @@ -159,6 +167,8 @@ import static java.lang.String.format; import static java.util.Arrays.asList; import static org.apache.commons.lang.StringUtils.substringAfterLast; +import java.io.OutputStreamWriter; + public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource { public static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class); @@ -205,7 +215,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S protected String _parent = "/mnt/SecStorage"; final private String _tmpltpp = "template.properties"; protected String createTemplateFromSnapshotXenScript; - private HashMap uploadEntityStateMap = new HashMap(); + private HashMap uploadEntityStateMap = new HashMap(); private String _ssvmPSK = null; public void setParentPath(String path) { @@ -229,9 +239,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S * @param params * @return nfsVersion value if exists, null in other case */ - public static Integer retrieveNfsVersionFromParams(Map params){ + public static Integer retrieveNfsVersionFromParams(Map params) { Integer nfsVersion = null; - if (params.get("nfsVersion") != null){ + if (params.get("nfsVersion") != null) { String nfsVersionParam = (String)params.get("nfsVersion"); try { nfsVersion = Integer.valueOf(nfsVersionParam); @@ -281,11 +291,296 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return execute((DeleteCommand)cmd); } else if (cmd instanceof UploadStatusCommand) { return execute((UploadStatusCommand)cmd); + } else if (cmd instanceof GetDatadisksCommand) { + return execute((GetDatadisksCommand)cmd); + } else if (cmd instanceof CreateDatadiskTemplateCommand) { + return execute((CreateDatadiskTemplateCommand)cmd); } else { return Answer.createUnsupportedCommandAnswer(cmd); } } + public Answer execute(GetDatadisksCommand cmd) { + DataTO srcData = cmd.getData(); + TemplateObjectTO template = (TemplateObjectTO)srcData; + DataStoreTO srcStore = srcData.getDataStore(); + if (!(srcStore instanceof NfsTO)) { + return new CreateDatadiskTemplateAnswer("Unsupported protocol"); + } + NfsTO nfsImageStore = (NfsTO)srcStore; + String secondaryStorageUrl = nfsImageStore.getUrl(); + assert (secondaryStorageUrl != null); + String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath(); + Pair templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName()); + String templateRelativeFolderPath = templateInfo.first(); + + try { + String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion); + s_logger.info("MDOVE Secondary storage mount point: " + secondaryMountPoint); + + String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension()); + + String ovfFilePath = getOVFFilePath(srcOVAFileName); + if (ovfFilePath == null) { + Script command = new Script("tar", 0, s_logger); + command.add("--no-same-owner"); + command.add("--no-same-permissions"); + command.add("-xf", srcOVAFileName); + command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath); + s_logger.info("Executing command: " + command.toString()); + String result = command.execute(); + if (result != null) { + String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; + s_logger.error(msg); + throw new Exception(msg); + } + + command = new Script("chmod", 0, s_logger); + command.add("-R"); + command.add("666", secondaryMountPoint + File.separator + templateRelativeFolderPath); + result = command.execute(); + if (result != null) { + s_logger.warn("Unable to set permissions for " + secondaryMountPoint + File.separator + templateRelativeFolderPath + " due to " + result); + } + } + + Script command = new Script("cp", _timeout, s_logger); + command.add(ovfFilePath); + command.add(ovfFilePath + ".orig"); + String result = command.execute(); + if (result != null) { + String msg = "Unable to rename original OVF, error msg: " + result; + s_logger.error(msg); + } + + s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA"); + OVFHelper ovfHelper = new OVFHelper(); + + List disks = ovfHelper.getOVFVolumeInfo(ovfFilePath); + return new GetDatadisksAnswer(disks); + } catch (Exception e) { + String msg = "Get Datadisk Template Count failed due to " + e.getMessage(); + s_logger.error(msg, e); + return new GetDatadisksAnswer(msg); + } + } + + public Answer execute(CreateDatadiskTemplateCommand cmd) { + TemplateObjectTO diskTemplate = new TemplateObjectTO(); + TemplateObjectTO dataDiskTemplate = (TemplateObjectTO)cmd.getDataDiskTemplate(); + DataStoreTO dataStore = dataDiskTemplate.getDataStore(); + if (!(dataStore instanceof NfsTO)) { + return new CreateDatadiskTemplateAnswer("Unsupported protocol"); + } + NfsTO nfsImageStore = (NfsTO)dataStore; + String secondaryStorageUrl = nfsImageStore.getUrl(); + assert (secondaryStorageUrl != null); + + try { + String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion); + + long templateId = dataDiskTemplate.getId(); + String templateUniqueName = dataDiskTemplate.getUniqueName(); + String origDisk = cmd.getPath(); + long virtualSize = dataDiskTemplate.getSize(); + String diskName = origDisk.substring((origDisk.lastIndexOf(File.separator)) + 1); + long physicalSize = new File(origDisk).length(); + String newTmplDir = getTemplateRelativeDirInSecStorage(dataDiskTemplate.getAccountId(), dataDiskTemplate.getId()); + String newTmplDirAbsolute = secondaryMountPoint + File.separator + newTmplDir; + + String ovfFilePath = getOVFFilePath(origDisk); + if (!cmd.getBootable()) { + // Create folder to hold datadisk template + synchronized (newTmplDir.intern()) { + Script command = new Script("mkdir", _timeout, s_logger); + command.add("-p"); + command.add(newTmplDirAbsolute); + String result = command.execute(); + if (result != null) { + String msg = "Unable to prepare template directory: " + newTmplDir + ", storage: " + secondaryStorageUrl + ", error msg: " + result; + s_logger.error(msg); + throw new Exception(msg); + } + } + // Move Datadisk VMDK from parent template folder to Datadisk template folder + synchronized (origDisk.intern()) { + Script command = new Script("mv", _timeout, s_logger); + command.add(origDisk); + command.add(newTmplDirAbsolute); + String result = command.execute(); + if (result != null) { + String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result; + s_logger.error(msg); + throw new Exception(msg); + } + command = new Script("cp", _timeout, s_logger); + command.add(ovfFilePath + ".orig"); + command.add(newTmplDirAbsolute); + result = command.execute(); + if (result != null) { + String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result; + s_logger.error(msg); + throw new Exception(msg); + } + } + } + + // Create OVF for the disk + String newOvfFilePath = newTmplDirAbsolute + File.separator + ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1); + OVFHelper ovfHelper = new OVFHelper(); + ovfHelper.rewriteOVFFile(ovfFilePath + ".orig", newOvfFilePath, diskName); + + postCreatePrivateTemplate(newTmplDirAbsolute, templateId, templateUniqueName, physicalSize, virtualSize); + writeMetaOvaForTemplate(newTmplDirAbsolute, ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1), diskName, templateUniqueName, physicalSize); + + diskTemplate.setId(templateId); + if (diskName.endsWith("iso")){ + diskTemplate.setPath(newTmplDir + File.separator + diskName); + } + else { + diskTemplate.setPath(newTmplDir + File.separator + templateUniqueName + ".ova"); + } + diskTemplate.setSize(virtualSize); + diskTemplate.setPhysicalSize(physicalSize); + } catch (Exception e) { + String msg = "Create Datadisk template failed due to " + e.getMessage(); + s_logger.error(msg, e); + return new CreateDatadiskTemplateAnswer(msg); + } + return new CreateDatadiskTemplateAnswer(diskTemplate); + } + + /* + * return Pair of