mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-4757: Support OVA files with multiple disks for templates (#2146)
CloudStack volumes and templates are a single virtual disk in the case of the XenServer/XCP and KVM hypervisors, since the files used for templates and volumes are virtual disks (VHD, QCOW2). However, VMware volumes and templates are in OVA format, which are archives that can contain a complete VM, including multiple VMDKs and other files such as ISOs. Currently, CloudStack only supports template creation based on OVA files containing a single disk. If a user creates a template from an OVA file containing more than one disk and launches an instance using this template, only the first disk is attached to the new instance and the other disks are ignored. Similarly with uploaded volumes: attaching an uploaded volume that contains multiple disks to a VM results in only one VMDK being attached to the VM. FS: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Support+OVA+files+containing+multiple+disks This behavior needs to be improved in VMware to support OVA files with multiple disks for both uploaded volumes and templates, i.e. if a user creates a template from an OVA file containing more than one disk and launches an instance using this template, the first disk should be attached to the new instance as the ROOT disk, and volumes should be created from the other VMDK disks in the OVA file and attached to the instance. Signed-off-by: Abhinandan Prateek <abhinandan.prateek@shapeblue.com> Signed-off-by: Rohit Yadav <rohit.yadav@shapeblue.com>
This commit is contained in:
parent
b0d7844cf0
commit
64832fd70a
|
|
@ -0,0 +1,336 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.transform.Transformer;
|
||||
import javax.xml.transform.TransformerException;
|
||||
import javax.xml.transform.TransformerFactory;
|
||||
import javax.xml.transform.dom.DOMSource;
|
||||
import javax.xml.transform.stream.StreamResult;
|
||||
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import com.cloud.exception.InternalErrorException;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.commons.lang.math.NumberUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.w3c.dom.Document;
|
||||
import org.w3c.dom.Element;
|
||||
import org.w3c.dom.NodeList;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public class OVFHelper {
|
||||
private static final Logger s_logger = Logger.getLogger(OVFHelper.class);
|
||||
|
||||
/**
|
||||
* Get disk virtual size given its values on fields: 'ovf:capacity' and 'ovf:capacityAllocationUnits'
|
||||
* @param capacity capacity
|
||||
* @param allocationUnits capacity allocation units
|
||||
* @return disk virtual size
|
||||
*/
|
||||
public static Long getDiskVirtualSize(Long capacity, String allocationUnits, String ovfFilePath) throws InternalErrorException {
|
||||
if ((capacity != 0) && (allocationUnits != null)) {
|
||||
long units = 1;
|
||||
if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
|
||||
units = ResourceType.bytesToKiB;
|
||||
} else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
|
||||
units = ResourceType.bytesToMiB;
|
||||
} else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
|
||||
units = ResourceType.bytesToGiB;
|
||||
}
|
||||
return capacity * units;
|
||||
} else {
|
||||
throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFilePath);
|
||||
}
|
||||
}
|
||||
|
||||
public List<DatadiskTO> getOVFVolumeInfo(final String ovfFilePath) {
|
||||
if (StringUtils.isBlank(ovfFilePath)) {
|
||||
return new ArrayList<DatadiskTO>();
|
||||
}
|
||||
ArrayList<OVFFile> vf = new ArrayList<OVFFile>();
|
||||
ArrayList<OVFDisk> vd = new ArrayList<OVFDisk>();
|
||||
|
||||
File ovfFile = new File(ovfFilePath);
|
||||
try {
|
||||
final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
|
||||
NodeList disks = doc.getElementsByTagName("Disk");
|
||||
NodeList files = doc.getElementsByTagName("File");
|
||||
NodeList items = doc.getElementsByTagName("Item");
|
||||
boolean toggle = true;
|
||||
for (int j = 0; j < files.getLength(); j++) {
|
||||
Element file = (Element)files.item(j);
|
||||
OVFFile of = new OVFFile();
|
||||
of._href = file.getAttribute("ovf:href");
|
||||
if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) {
|
||||
of._id = file.getAttribute("ovf:id");
|
||||
String size = file.getAttribute("ovf:size");
|
||||
if (StringUtils.isNotBlank(size)) {
|
||||
of._size = Long.parseLong(size);
|
||||
} else {
|
||||
String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
|
||||
File this_file = new File(dataDiskPath);
|
||||
of._size = this_file.length();
|
||||
}
|
||||
of.isIso = of._href.endsWith("iso");
|
||||
if (toggle && !of.isIso) {
|
||||
of._bootable = true;
|
||||
toggle = !toggle;
|
||||
}
|
||||
vf.add(of);
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < disks.getLength(); i++) {
|
||||
Element disk = (Element)disks.item(i);
|
||||
OVFDisk od = new OVFDisk();
|
||||
String virtualSize = disk.getAttribute("ovf:capacity");
|
||||
od._capacity = NumberUtils.toLong(virtualSize, 0L);
|
||||
String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
|
||||
od._diskId = disk.getAttribute("ovf:diskId");
|
||||
od._fileRef = disk.getAttribute("ovf:fileRef");
|
||||
od._populatedSize = Long.parseLong(disk.getAttribute("ovf:populatedSize") == null ? "0" : disk.getAttribute("ovf:populatedSize"));
|
||||
|
||||
if ((od._capacity != 0) && (allocationUnits != null)) {
|
||||
|
||||
long units = 1;
|
||||
if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
|
||||
units = ResourceType.bytesToKiB;
|
||||
} else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
|
||||
units = ResourceType.bytesToMiB;
|
||||
} else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
|
||||
units = ResourceType.bytesToGiB;
|
||||
}
|
||||
od._capacity = od._capacity * units;
|
||||
}
|
||||
od._controller = getControllerType(items, od._diskId);
|
||||
vd.add(od);
|
||||
}
|
||||
|
||||
} catch (SAXException | IOException | ParserConfigurationException e) {
|
||||
s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
List<DatadiskTO> disksTO = new ArrayList<DatadiskTO>();
|
||||
for (OVFFile of : vf) {
|
||||
if (StringUtils.isBlank(of._id)){
|
||||
s_logger.error("The ovf file info is incomplete file info");
|
||||
throw new CloudRuntimeException("The ovf file info has incomplete file info");
|
||||
}
|
||||
OVFDisk cdisk = getDisk(of._id, vd);
|
||||
if (cdisk == null && !of.isIso){
|
||||
s_logger.error("The ovf file info has incomplete disk info");
|
||||
throw new CloudRuntimeException("The ovf file info has incomplete disk info");
|
||||
}
|
||||
Long capacity = cdisk == null ? of._size : cdisk._capacity;
|
||||
String controller = cdisk == null ? "" : cdisk._controller._name;
|
||||
String controllerSubType = cdisk == null ? "" : cdisk._controller._subType;
|
||||
String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
|
||||
File f = new File(dataDiskPath);
|
||||
if (!f.exists() || f.isDirectory()) {
|
||||
s_logger.error("One of the attached disk or iso does not exists " + dataDiskPath);
|
||||
throw new CloudRuntimeException("One of the attached disk or iso as stated on OVF does not exists " + dataDiskPath);
|
||||
}
|
||||
disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of.isIso, of._bootable, controller, controllerSubType));
|
||||
}
|
||||
//check if first disk is an iso move it to the end
|
||||
DatadiskTO fd = disksTO.get(0);
|
||||
if (fd.isIso()) {
|
||||
disksTO.remove(0);
|
||||
disksTO.add(fd);
|
||||
}
|
||||
return disksTO;
|
||||
}
|
||||
|
||||
private OVFDiskController getControllerType(final NodeList itemList, final String diskId) {
|
||||
for (int k = 0; k < itemList.getLength(); k++) {
|
||||
Element item = (Element)itemList.item(k);
|
||||
NodeList cn = item.getChildNodes();
|
||||
for (int l = 0; l < cn.getLength(); l++) {
|
||||
if (cn.item(l) instanceof Element) {
|
||||
Element el = (Element)cn.item(l);
|
||||
if ("rasd:HostResource".equals(el.getNodeName())
|
||||
&& (el.getTextContent().contains("ovf:/file/" + diskId) || el.getTextContent().contains("ovf:/disk/" + diskId))) {
|
||||
Element oe = getParentNode(itemList, item);
|
||||
Element voe = oe;
|
||||
while (oe != null) {
|
||||
voe = oe;
|
||||
oe = getParentNode(itemList, voe);
|
||||
}
|
||||
return getController(voe);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private Element getParentNode(final NodeList itemList, final Element childItem) {
|
||||
NodeList cn = childItem.getChildNodes();
|
||||
String parent_id = null;
|
||||
for (int l = 0; l < cn.getLength(); l++) {
|
||||
if (cn.item(l) instanceof Element) {
|
||||
Element el = (Element)cn.item(l);
|
||||
if ("rasd:Parent".equals(el.getNodeName())) {
|
||||
parent_id = el.getTextContent();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (parent_id != null) {
|
||||
for (int k = 0; k < itemList.getLength(); k++) {
|
||||
Element item = (Element)itemList.item(k);
|
||||
NodeList child = item.getChildNodes();
|
||||
for (int l = 0; l < child.getLength(); l++) {
|
||||
if (child.item(l) instanceof Element) {
|
||||
Element el = (Element)child.item(l);
|
||||
if ("rasd:InstanceID".equals(el.getNodeName()) && el.getTextContent().trim().equals(parent_id)) {
|
||||
return item;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private OVFDiskController getController(Element controllerItem) {
|
||||
OVFDiskController dc = new OVFDiskController();
|
||||
NodeList child = controllerItem.getChildNodes();
|
||||
for (int l = 0; l < child.getLength(); l++) {
|
||||
if (child.item(l) instanceof Element) {
|
||||
Element el = (Element)child.item(l);
|
||||
if ("rasd:ElementName".equals(el.getNodeName())) {
|
||||
dc._name = el.getTextContent();
|
||||
}
|
||||
if ("rasd:ResourceSubType".equals(el.getNodeName())) {
|
||||
dc._subType = el.getTextContent();
|
||||
}
|
||||
}
|
||||
}
|
||||
return dc;
|
||||
}
|
||||
|
||||
public void rewriteOVFFile(final String origOvfFilePath, final String newOvfFilePath, final String diskName) {
|
||||
try {
|
||||
final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(origOvfFilePath));
|
||||
NodeList disks = doc.getElementsByTagName("Disk");
|
||||
NodeList files = doc.getElementsByTagName("File");
|
||||
NodeList items = doc.getElementsByTagName("Item");
|
||||
String keepfile = null;
|
||||
List<Element> toremove = new ArrayList<Element>();
|
||||
for (int j = 0; j < files.getLength(); j++) {
|
||||
Element file = (Element)files.item(j);
|
||||
String href = file.getAttribute("ovf:href");
|
||||
if (diskName.equals(href)) {
|
||||
keepfile = file.getAttribute("ovf:id");
|
||||
} else {
|
||||
toremove.add(file);
|
||||
}
|
||||
}
|
||||
String keepdisk = null;
|
||||
for (int i = 0; i < disks.getLength(); i++) {
|
||||
Element disk = (Element)disks.item(i);
|
||||
String fileRef = disk.getAttribute("ovf:fileRef");
|
||||
if (keepfile == null) {
|
||||
s_logger.info("FATAL: OVA format error");
|
||||
} else if (keepfile.equals(fileRef)) {
|
||||
keepdisk = disk.getAttribute("ovf:diskId");
|
||||
} else {
|
||||
toremove.add(disk);
|
||||
}
|
||||
}
|
||||
for (int k = 0; k < items.getLength(); k++) {
|
||||
Element item = (Element)items.item(k);
|
||||
NodeList cn = item.getChildNodes();
|
||||
for (int l = 0; l < cn.getLength(); l++) {
|
||||
if (cn.item(l) instanceof Element) {
|
||||
Element el = (Element)cn.item(l);
|
||||
if ("rasd:HostResource".equals(el.getNodeName())
|
||||
&& !(el.getTextContent().contains("ovf:/file/" + keepdisk) || el.getTextContent().contains("ovf:/disk/" + keepdisk))) {
|
||||
toremove.add(item);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (Element rme : toremove) {
|
||||
if (rme.getParentNode() != null) {
|
||||
rme.getParentNode().removeChild(rme);
|
||||
}
|
||||
}
|
||||
|
||||
final StringWriter writer = new StringWriter();
|
||||
final StreamResult result = new StreamResult(writer);
|
||||
final TransformerFactory tf = TransformerFactory.newInstance();
|
||||
final Transformer transformer = tf.newTransformer();
|
||||
final DOMSource domSource = new DOMSource(doc);
|
||||
transformer.transform(domSource, result);
|
||||
PrintWriter outfile = new PrintWriter(newOvfFilePath);
|
||||
outfile.write(writer.toString());
|
||||
outfile.close();
|
||||
} catch (SAXException | IOException | ParserConfigurationException | TransformerException e) {
|
||||
s_logger.info("Unexpected exception caught while removing network elements from OVF:" + e.getMessage(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
OVFDisk getDisk(String fileRef, List<OVFDisk> disks) {
|
||||
for (OVFDisk disk : disks) {
|
||||
if (disk._fileRef.equals(fileRef)) {
|
||||
return disk;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Holder for one <File> entry of the OVF References section, e.g.:
// <File ovf:href="i-2-8-VM-disk2.vmdk" ovf:id="file1" ovf:size="69120" />
class OVFFile {
    // ovf:href attribute: vmdk/iso file name, relative to the descriptor's directory.
    public String _href;
    // ovf:id attribute: referenced by <Disk> elements via ovf:fileRef.
    public String _id;
    // ovf:size attribute, or the on-disk file length when the attribute is blank.
    public Long _size;
    // true for the first non-iso file found in the descriptor (the ROOT disk).
    public boolean _bootable;
    // true when the href ends with "iso".
    public boolean isIso;
}
|
||||
|
||||
// Holder for one <Disk> entry of the OVF DiskSection, e.g.:
//<Disk ovf:capacity="50" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk2" ovf:fileRef="file2"
//ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="43319296" />
class OVFDisk {
    // Virtual size: ovf:capacity scaled into bytes by ovf:capacityAllocationUnits.
    public Long _capacity;
    // NOTE(review): never assigned or read within this file — confirm before relying on it.
    public String _capacityUnit;
    // ovf:diskId attribute; matched against rasd:HostResource references.
    public String _diskId;
    // ovf:fileRef attribute; links this disk to an OVFFile._id.
    public String _fileRef;
    // ovf:populatedSize attribute (bytes actually written in the stream-optimized vmdk).
    public Long _populatedSize;
    // Controller details resolved from the VirtualHardwareSection Item tree.
    public OVFDiskController _controller;
}
|
||||
|
||||
// Disk controller details parsed from a controller hardware <Item>.
class OVFDiskController {
    // rasd:ElementName text of the controller item (e.g. "SCSI Controller 0").
    public String _name;
    // rasd:ResourceSubType text of the controller item (e.g. "lsilogic").
    public String _subType;
}
|
||||
}
|
||||
|
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package com.cloud.agent.api.to;
|
||||
|
||||
/**
 * Transfer object describing a single data disk (vmdk) or iso extracted from a
 * multi-disk OVA template.
 *
 * Field names are part of the serialized wire format and must not be renamed.
 */
public class DatadiskTO {
    private String path;
    private long virtualSize;
    private long fileSize;
    boolean bootable;
    private String diskId;
    private boolean isIso;
    private String diskController;
    private String diskControllerSubType;

    /** No-arg constructor for serialization frameworks. */
    public DatadiskTO() {
    }

    /**
     * Builds a minimal disk descriptor without id/controller information.
     *
     * @param path        location of the disk file
     * @param virtualSize virtual (provisioned) size in bytes
     * @param fileSize    physical file size in bytes
     * @param bootable    whether this disk is the bootable ROOT disk
     */
    public DatadiskTO(String path, long virtualSize, long fileSize, boolean bootable) {
        this.bootable = bootable;
        this.fileSize = fileSize;
        this.virtualSize = virtualSize;
        this.path = path;
    }

    /**
     * Builds a full disk descriptor including OVF id, iso flag and controller details.
     *
     * @param path              location of the disk file
     * @param virtualSize       virtual (provisioned) size in bytes
     * @param fileSize          physical file size in bytes
     * @param diskId            OVF file id of the disk
     * @param isIso             whether this entry is an iso rather than a disk
     * @param bootable          whether this disk is the bootable ROOT disk
     * @param controller        disk controller name
     * @param controllerSubType disk controller subtype
     */
    public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, boolean isIso, boolean bootable, String controller, String controllerSubType) {
        this(path, virtualSize, fileSize, bootable);
        this.diskControllerSubType = controllerSubType;
        this.diskController = controller;
        this.isIso = isIso;
        this.diskId = diskId;
    }

    public String getPath() {
        return path;
    }

    public void setPath(String path) {
        this.path = path;
    }

    public Long getVirtualSize() {
        return virtualSize;
    }

    public void setVirtualSize(Long virtualSize) {
        this.virtualSize = virtualSize;
    }

    public Long getFileSize() {
        return fileSize;
    }

    public boolean isBootable() {
        return bootable;
    }

    public String getDiskId() {
        return diskId;
    }

    public void setDiskId(String diskId) {
        this.diskId = diskId;
    }

    public boolean isIso() {
        return isIso;
    }

    public void setIso(boolean isIso) {
        this.isIso = isIso;
    }

    public String getDiskController() {
        return diskController;
    }

    public void setDiskController(String diskController) {
        this.diskController = diskController;
    }

    public String getDiskControllerSubType() {
        return diskControllerSubType;
    }

    public void setDiskControllerSubType(String diskControllerSubType) {
        this.diskControllerSubType = diskControllerSubType;
    }

}
|
||||
|
|
@ -37,6 +37,8 @@ public interface Resource {
|
|||
private String name;
|
||||
private ResourceOwnerType[] supportedOwners;
|
||||
private int ordinal;
|
||||
public static final long bytesToKiB = 1024;
|
||||
public static final long bytesToMiB = 1024 * 1024;
|
||||
public static final long bytesToGiB = 1024 * 1024 * 1024;
|
||||
|
||||
ResourceType(String name, int ordinal, ResourceOwnerType... supportedOwners) {
|
||||
|
|
|
|||
|
|
@ -113,7 +113,9 @@ public class Storage {
|
|||
SYSTEM, /* routing, system vm template */
|
||||
BUILTIN, /* buildin template */
|
||||
PERHOST, /* every host has this template, don't need to install it in secondary storage */
|
||||
USER /* User supplied template/iso */
|
||||
USER, /* User supplied template/iso */
|
||||
DATADISK, /* Template corresponding to a datadisk(non root disk) present in an OVA */
|
||||
ISODISK /* Template corresponding to a iso (non root disk) present in an OVA */
|
||||
}
|
||||
|
||||
public static enum StoragePoolType {
|
||||
|
|
|
|||
|
|
@ -133,6 +133,8 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte
|
|||
|
||||
boolean isDynamicallyScalable();
|
||||
|
||||
Long getParentTemplateId();
|
||||
|
||||
long getUpdatedCount();
|
||||
|
||||
void incrUpdatedCount();
|
||||
|
|
|
|||
|
|
@ -139,6 +139,10 @@ public class DiskProfile {
|
|||
return templateId;
|
||||
}
|
||||
|
||||
public void setTemplateId(Long templateId) {
|
||||
this.templateId = templateId;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return disk offering id that the disk is based on.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ import com.cloud.exception.VirtualMachineMigrationException;
|
|||
import com.cloud.host.Host;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.network.Network.IpAddresses;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
|
|
@ -197,6 +198,11 @@ public interface UserVmService {
|
|||
* @param dhcpOptionMap
|
||||
* - Maps the dhcp option code and the dhcp value to the network uuid
|
||||
* @return UserVm object if successful.
|
||||
* @param dataDiskTemplateToDiskOfferingMap
|
||||
* - Datadisk template to Disk offering Map
|
||||
* an optional parameter that creates additional data disks for the virtual machine
|
||||
* For each of the templates in the map, a data disk will be created from the corresponding
|
||||
* disk offering obtained from the map
|
||||
*
|
||||
* @throws InsufficientCapacityException
|
||||
* if there is insufficient capacity to deploy the VM.
|
||||
|
|
@ -210,7 +216,8 @@ public interface UserVmService {
|
|||
UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> securityGroupIdList,
|
||||
Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod,
|
||||
String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIp, Boolean displayVm, String keyboard,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameter, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameter, String customId, Map<String, Map<Integer, String>> dhcpOptionMap,
|
||||
Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException,
|
||||
ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
|
||||
|
||||
/**
|
||||
|
|
@ -271,6 +278,11 @@ public interface UserVmService {
|
|||
* @param customId
|
||||
* @param dhcpOptionMap
|
||||
* - Maps the dhcp option code and the dhcp value to the network uuid
|
||||
* @param dataDiskTemplateToDiskOfferingMap
|
||||
* - Datadisk template to Disk offering Map
|
||||
* an optional parameter that creates additional data disks for the virtual machine
|
||||
* For each of the templates in the map, a data disk will be created from the corresponding
|
||||
* disk offering obtained from the map
|
||||
* @return UserVm object if successful.
|
||||
*
|
||||
* @throws InsufficientCapacityException
|
||||
|
|
@ -285,7 +297,8 @@ public interface UserVmService {
|
|||
UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList,
|
||||
List<Long> securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor,
|
||||
HTTPMethod httpmethod, String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap,
|
||||
Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException,
|
||||
ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
|
||||
|
||||
/**
|
||||
|
|
@ -344,6 +357,11 @@ public interface UserVmService {
|
|||
* @param customId
|
||||
* @param dhcpOptionMap
|
||||
* - Map that maps the DhcpOption code and their value on the Network uuid
|
||||
* @param dataDiskTemplateToDiskOfferingMap
|
||||
* - Datadisk template to Disk offering Map
|
||||
* an optional parameter that creates additional data disks for the virtual machine
|
||||
* For each of the templates in the map, a data disk will be created from the corresponding
|
||||
* disk offering obtained from the map
|
||||
* @return UserVm object if successful.
|
||||
*
|
||||
* @throws InsufficientCapacityException
|
||||
|
|
@ -358,7 +376,7 @@ public interface UserVmService {
|
|||
UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList, Account owner,
|
||||
String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
|
||||
String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List<Long> affinityGroupIdList,
|
||||
Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap)
|
||||
Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap)
|
||||
|
||||
throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
|
||||
|
||||
|
|
|
|||
|
|
@ -79,6 +79,7 @@ public class ApiConstants {
|
|||
public static final String MIN_IOPS = "miniops";
|
||||
public static final String MAX_IOPS = "maxiops";
|
||||
public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve";
|
||||
public static final String DATADISK_OFFERING_LIST = "datadiskofferinglist";
|
||||
public static final String DESCRIPTION = "description";
|
||||
public static final String DESTINATION_ZONE_ID = "destzoneid";
|
||||
public static final String DETAILS = "details";
|
||||
|
|
@ -209,6 +210,7 @@ public class ApiConstants {
|
|||
public static final String PARAMS = "params";
|
||||
public static final String PARENT_ID = "parentid";
|
||||
public static final String PARENT_DOMAIN_ID = "parentdomainid";
|
||||
public static final String PARENT_TEMPLATE_ID = "parenttemplateid";
|
||||
public static final String PASSWORD = "password";
|
||||
public static final String SHOULD_UPDATE_PASSWORD = "update_passwd_on_host";
|
||||
public static final String NEW_PASSWORD = "new_password";
|
||||
|
|
|
|||
|
|
@ -286,6 +286,8 @@ public interface ResponseGenerator {
|
|||
|
||||
Host findHostById(Long hostId);
|
||||
|
||||
DiskOffering findDiskOfferingById(Long diskOfferingId);
|
||||
|
||||
VpnUsersResponse createVpnUserResponse(VpnUser user);
|
||||
|
||||
RemoteAccessVpnResponse createRemoteAccessVpnResponse(RemoteAccessVpn vpn);
|
||||
|
|
|
|||
|
|
@ -72,9 +72,12 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd {
|
|||
@Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "list templates by zoneId")
|
||||
private Long zoneId;
|
||||
|
||||
@Parameter(name=ApiConstants.SHOW_REMOVED, type=CommandType.BOOLEAN, description="show removed templates as well")
|
||||
@Parameter(name = ApiConstants.SHOW_REMOVED, type = CommandType.BOOLEAN, description = "show removed templates as well")
|
||||
private Boolean showRemoved;
|
||||
|
||||
@Parameter(name = ApiConstants.PARENT_TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "list datadisk templates by parent template id", since = "4.4")
|
||||
private Long parentTemplateId;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -103,6 +106,10 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd {
|
|||
return (showRemoved != null ? showRemoved : false);
|
||||
}
|
||||
|
||||
public Long getParentTemplateId() {
|
||||
return parentTemplateId;
|
||||
}
|
||||
|
||||
public boolean listInReadyState() {
|
||||
|
||||
Account account = CallContext.current().getCallingAccount();
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ import org.apache.cloudstack.api.response.TemplateResponse;
|
|||
import org.apache.cloudstack.api.response.UserVmResponse;
|
||||
import org.apache.cloudstack.api.response.ZoneResponse;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.event.EventTypes;
|
||||
|
|
@ -58,6 +59,8 @@ import com.cloud.exception.ResourceUnavailableException;
|
|||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.network.Network;
|
||||
import com.cloud.network.Network.IpAddresses;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.uservm.UserVm;
|
||||
import com.cloud.utils.net.Dhcp;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
|
|
@ -192,6 +195,10 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG
|
|||
+ " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com")
|
||||
private Map dhcpOptionsNetworkList;
|
||||
|
||||
@Parameter(name = ApiConstants.DATADISK_OFFERING_LIST, type = CommandType.MAP, since = "4.11", description = "datadisk template to disk-offering mapping;" +
|
||||
" an optional parameter used to create additional data disks from datadisk templates; can't be specified with diskOfferingId parameter")
|
||||
private Map dataDiskTemplateToDiskOfferingList;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -417,10 +424,10 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG
|
|||
if (dhcpOptionsNetworkList != null && !dhcpOptionsNetworkList.isEmpty()) {
|
||||
|
||||
Collection<Map<String, String>> paramsCollection = this.dhcpOptionsNetworkList.values();
|
||||
for(Map<String, String> dhcpNetworkOptions : paramsCollection) {
|
||||
for (Map<String, String> dhcpNetworkOptions : paramsCollection) {
|
||||
String networkId = dhcpNetworkOptions.get(ApiConstants.NETWORK_ID);
|
||||
|
||||
if(networkId == null) {
|
||||
if (networkId == null) {
|
||||
throw new IllegalArgumentException("No networkid specified when providing extra dhcp options.");
|
||||
}
|
||||
|
||||
|
|
@ -431,9 +438,9 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG
|
|||
if (key.startsWith(ApiConstants.DHCP_PREFIX)) {
|
||||
int dhcpOptionValue = Integer.parseInt(key.replaceFirst(ApiConstants.DHCP_PREFIX, ""));
|
||||
dhcpOptionsForNetwork.put(dhcpOptionValue, dhcpNetworkOptions.get(key));
|
||||
} else if (!key.equals(ApiConstants.NETWORK_ID)){
|
||||
Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key);
|
||||
dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key));
|
||||
} else if (!key.equals(ApiConstants.NETWORK_ID)) {
|
||||
Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key);
|
||||
dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -443,6 +450,37 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG
|
|||
return dhcpOptionsMap;
|
||||
}
|
||||
|
||||
public Map<Long, DiskOffering> getDataDiskTemplateToDiskOfferingMap() {
|
||||
if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) {
|
||||
throw new InvalidParameterValueException("diskofferingid paramter can't be specified along with datadisktemplatetodiskofferinglist parameter");
|
||||
}
|
||||
if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) {
|
||||
return new HashMap<Long, DiskOffering>();
|
||||
}
|
||||
|
||||
HashMap<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap = new HashMap<Long, DiskOffering>();
|
||||
for (Object objDataDiskTemplates : dataDiskTemplateToDiskOfferingList.values()) {
|
||||
HashMap<String, String> dataDiskTemplates = (HashMap<String, String>) objDataDiskTemplates;
|
||||
Long dataDiskTemplateId;
|
||||
DiskOffering dataDiskOffering = null;
|
||||
VirtualMachineTemplate dataDiskTemplate= _entityMgr.findByUuid(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid"));
|
||||
if (dataDiskTemplate == null) {
|
||||
dataDiskTemplate = _entityMgr.findById(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid"));
|
||||
if (dataDiskTemplate == null)
|
||||
throw new InvalidParameterValueException("Unable to translate and find entity with datadisktemplateid " + dataDiskTemplates.get("datadisktemplateid"));
|
||||
}
|
||||
dataDiskTemplateId = dataDiskTemplate.getId();
|
||||
dataDiskOffering = _entityMgr.findByUuid(DiskOffering.class, dataDiskTemplates.get("diskofferingid"));
|
||||
if (dataDiskOffering == null) {
|
||||
dataDiskOffering = _entityMgr.findById(DiskOffering.class, dataDiskTemplates.get("diskofferingid"));
|
||||
if (dataDiskOffering == null)
|
||||
throw new InvalidParameterValueException("Unable to translate and find entity with diskofferingId " + dataDiskTemplates.get("diskofferingid"));
|
||||
}
|
||||
dataDiskTemplateToDiskOfferingMap.put(dataDiskTemplateId, dataDiskOffering);
|
||||
}
|
||||
return dataDiskTemplateToDiskOfferingMap;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -0,0 +1,66 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.response;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseResponse;
|
||||
import org.apache.cloudstack.api.EntityReference;
|
||||
|
||||
import com.cloud.serializer.Param;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
@EntityReference(value = VirtualMachineTemplate.class)
|
||||
@SuppressWarnings("unused")
|
||||
public class ChildTemplateResponse extends BaseResponse {
|
||||
@SerializedName(ApiConstants.ID)
|
||||
@Param(description = "the template ID")
|
||||
private String id;
|
||||
|
||||
@SerializedName(ApiConstants.NAME)
|
||||
@Param(description = "the template name")
|
||||
private String name;
|
||||
|
||||
@SerializedName(ApiConstants.SIZE)
|
||||
@Param(description = "the size of the template")
|
||||
private Integer size;
|
||||
|
||||
@SerializedName("templatetype")
|
||||
@Param(description = "the type of the template")
|
||||
private String templateType;
|
||||
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public void setSize(Integer size) {
|
||||
this.size = size;
|
||||
}
|
||||
|
||||
public void setTemplateType(String templateType) {
|
||||
this.templateType = templateType;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -174,7 +174,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
|
|||
private Map details;
|
||||
|
||||
@SerializedName(ApiConstants.BITS)
|
||||
@Param(description="the processor bit size", since = "4.10")
|
||||
@Param(description = "the processor bit size", since = "4.10")
|
||||
private int bits;
|
||||
|
||||
@SerializedName(ApiConstants.SSHKEY_ENABLED)
|
||||
|
|
@ -189,6 +189,14 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
|
|||
@Param(description = "KVM Only: true if template is directly downloaded to Primary Storage bypassing Secondary Storage")
|
||||
private Boolean directDownload;
|
||||
|
||||
@SerializedName("parenttemplateid")
|
||||
@Param(description = "if Datadisk template, then id of the root disk template this template belongs to")
|
||||
private String parentTemplateId;
|
||||
|
||||
@SerializedName("childtemplates")
|
||||
@Param(description = "if root disk template, then ids of the datas disk templates this template owns")
|
||||
private Set<ChildTemplateResponse> childTemplates;
|
||||
|
||||
public TemplateResponse() {
|
||||
tags = new LinkedHashSet<ResourceTagResponse>();
|
||||
}
|
||||
|
|
@ -374,4 +382,13 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
|
|||
public Boolean getDirectDownload() {
|
||||
return directDownload;
|
||||
}
|
||||
|
||||
public void setParentTemplateId(String parentTemplateId) {
|
||||
this.parentTemplateId = parentTemplateId;
|
||||
}
|
||||
|
||||
public void setChildTemplates(Set<ChildTemplateResponse> childTemplateIds) {
|
||||
this.childTemplates = childTemplateIds;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,38 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
|
||||
public class CreateDatadiskTemplateAnswer extends Answer {
|
||||
private TemplateObjectTO dataDiskTemplate = null;
|
||||
|
||||
public CreateDatadiskTemplateAnswer(TemplateObjectTO dataDiskTemplate) {
|
||||
super(null);
|
||||
this.dataDiskTemplate = dataDiskTemplate;
|
||||
}
|
||||
|
||||
public TemplateObjectTO getDataDiskTemplate() {
|
||||
return dataDiskTemplate;
|
||||
}
|
||||
|
||||
public CreateDatadiskTemplateAnswer(String errMsg) {
|
||||
super(null, false, errMsg);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
|
||||
public final class CreateDatadiskTemplateCommand extends Command {
|
||||
private DataTO dataDiskTemplate;
|
||||
private String path;
|
||||
private long fileSize;
|
||||
private boolean bootable;
|
||||
private String diskId;
|
||||
|
||||
public CreateDatadiskTemplateCommand(DataTO dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) {
|
||||
super();
|
||||
this.dataDiskTemplate = dataDiskTemplate;
|
||||
this.path = path;
|
||||
this.fileSize = fileSize;
|
||||
this.bootable = bootable;
|
||||
this.diskId = diskId;
|
||||
}
|
||||
|
||||
protected CreateDatadiskTemplateCommand() {
|
||||
super();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public DataTO getDataDiskTemplate() {
|
||||
return dataDiskTemplate;
|
||||
}
|
||||
|
||||
public String getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public long getFileSize() {
|
||||
return fileSize;
|
||||
}
|
||||
|
||||
public boolean getBootable() {
|
||||
return bootable;
|
||||
}
|
||||
|
||||
public String getDiskId() {
|
||||
return diskId;
|
||||
}
|
||||
|
||||
public void setDiskId(String diskId) {
|
||||
this.diskId = diskId;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
|
||||
public class GetDatadisksAnswer extends Answer {
|
||||
List<DatadiskTO> dataDiskDetails = new ArrayList<DatadiskTO>();
|
||||
|
||||
public GetDatadisksAnswer(List<DatadiskTO> dataDiskDetails) {
|
||||
super(null);
|
||||
this.dataDiskDetails = dataDiskDetails;
|
||||
}
|
||||
|
||||
public List<DatadiskTO> getDataDiskDetails() {
|
||||
return dataDiskDetails;
|
||||
}
|
||||
|
||||
public GetDatadisksAnswer(String errMsg) {
|
||||
super(null, false, errMsg);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
|
||||
public final class GetDatadisksCommand extends Command {
|
||||
private DataTO data;
|
||||
|
||||
public GetDatadisksCommand(DataTO data) {
|
||||
super();
|
||||
this.data = data;
|
||||
}
|
||||
|
||||
protected GetDatadisksCommand() {
|
||||
super();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public DataTO getData() {
|
||||
return data;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -20,6 +20,7 @@
|
|||
package com.cloud.storage.template;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.naming.ConfigurationException;
|
||||
|
|
@ -28,10 +29,14 @@ import javax.xml.parsers.DocumentBuilderFactory;
|
|||
import org.apache.log4j.Logger;
|
||||
import org.w3c.dom.Document;
|
||||
import org.w3c.dom.Element;
|
||||
import org.w3c.dom.NodeList;
|
||||
|
||||
import com.cloud.agent.api.storage.OVFHelper;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.exception.InternalErrorException;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.StorageLayer;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
|
|
@ -64,6 +69,7 @@ public class OVAProcessor extends AdapterBase implements Processor {
|
|||
|
||||
Script command = new Script("tar", 0, s_logger);
|
||||
command.add("--no-same-owner");
|
||||
command.add("--no-same-permissions");
|
||||
command.add("-xf", templateFileFullPath);
|
||||
command.setWorkDir(templateFile.getParent());
|
||||
String result = command.execute();
|
||||
|
|
@ -72,12 +78,35 @@ public class OVAProcessor extends AdapterBase implements Processor {
|
|||
throw new InternalErrorException("failed to untar OVA package");
|
||||
}
|
||||
|
||||
command = new Script("chmod", 0, s_logger);
|
||||
command.add("-R");
|
||||
command.add("666", templatePath);
|
||||
result = command.execute();
|
||||
if (result != null) {
|
||||
s_logger.warn("Unable to set permissions for files in " + templatePath + " due to " + result);
|
||||
}
|
||||
command = new Script("chmod", 0, s_logger);
|
||||
command.add("777", templatePath);
|
||||
result = command.execute();
|
||||
if (result != null) {
|
||||
s_logger.warn("Unable to set permissions for " + templatePath + " due to " + result);
|
||||
}
|
||||
|
||||
FormatInfo info = new FormatInfo();
|
||||
info.format = ImageFormat.OVA;
|
||||
info.filename = templateName + "." + ImageFormat.OVA.getFileExtension();
|
||||
info.size = _storage.getSize(templateFilePath);
|
||||
info.virtualSize = getTemplateVirtualSize(templatePath, info.filename);
|
||||
|
||||
//vaidate ova
|
||||
String ovfFile = getOVFFilePath(templateFileFullPath);
|
||||
try {
|
||||
OVFHelper ovfHelper = new OVFHelper();
|
||||
List<DatadiskTO> disks = ovfHelper.getOVFVolumeInfo(ovfFile);
|
||||
} catch (Exception e) {
|
||||
s_logger.info("The ovf file " + ovfFile + " is invalid ", e);
|
||||
throw new InternalErrorException("OVA package has bad ovf file " + e.getMessage(), e);
|
||||
}
|
||||
// delete original OVA file
|
||||
// templateFile.delete();
|
||||
return info;
|
||||
|
|
@ -112,22 +141,44 @@ public class OVAProcessor extends AdapterBase implements Processor {
|
|||
Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0);
|
||||
virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity"));
|
||||
String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
|
||||
if ((virtualSize != 0) && (allocationUnits != null)) {
|
||||
long units = 1;
|
||||
if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
|
||||
units = 1024;
|
||||
} else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
|
||||
units = 1024 * 1024;
|
||||
} else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
|
||||
units = 1024 * 1024 * 1024;
|
||||
}
|
||||
virtualSize = virtualSize * units;
|
||||
} else {
|
||||
throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFileName);
|
||||
}
|
||||
virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFileName);
|
||||
return virtualSize;
|
||||
} catch (Exception e) {
|
||||
String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e;
|
||||
String msg = "getTemplateVirtualSize: Unable to parse OVF XML document " + templatePath + " to get the virtual disk " + templateName + " size due to " + e;
|
||||
s_logger.error(msg);
|
||||
throw new InternalErrorException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
public Pair<Long, Long> getDiskDetails(String ovfFilePath, String diskName) throws InternalErrorException {
|
||||
long virtualSize = 0;
|
||||
long fileSize = 0;
|
||||
String fileId = null;
|
||||
try {
|
||||
Document ovfDoc = null;
|
||||
ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
|
||||
NodeList disks = ovfDoc.getElementsByTagName("Disk");
|
||||
NodeList files = ovfDoc.getElementsByTagName("File");
|
||||
for (int j = 0; j < files.getLength(); j++) {
|
||||
Element file = (Element)files.item(j);
|
||||
if (file.getAttribute("ovf:href").equals(diskName)) {
|
||||
fileSize = Long.parseLong(file.getAttribute("ovf:size"));
|
||||
fileId = file.getAttribute("ovf:id");
|
||||
break;
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < disks.getLength(); i++) {
|
||||
Element disk = (Element)disks.item(i);
|
||||
if (disk.getAttribute("ovf:fileRef").equals(fileId)) {
|
||||
virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity"));
|
||||
String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
|
||||
virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFilePath);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return new Pair<Long, Long>(virtualSize, fileSize);
|
||||
} catch (Exception e) {
|
||||
String msg = "getDiskDetails: Unable to parse OVF XML document " + ovfFilePath + " to get the virtual disk " + diskName + " size due to " + e;
|
||||
s_logger.error(msg);
|
||||
throw new InternalErrorException(msg);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -44,6 +44,8 @@ public class TemplateObjectTO implements DataTO {
|
|||
private Long size;
|
||||
private Long physicalSize;
|
||||
private Hypervisor.HypervisorType hypervisorType;
|
||||
private boolean bootable;
|
||||
private String uniqueName;
|
||||
|
||||
public TemplateObjectTO() {
|
||||
|
||||
|
|
@ -73,6 +75,8 @@ public class TemplateObjectTO implements DataTO {
|
|||
this.accountId = template.getAccountId();
|
||||
this.name = template.getUniqueName();
|
||||
this.format = template.getFormat();
|
||||
this.uniqueName = template.getUniqueName();
|
||||
this.size = template.getSize();
|
||||
if (template.getDataStore() != null) {
|
||||
this.imageDataStore = template.getDataStore().getTO();
|
||||
}
|
||||
|
|
@ -215,6 +219,22 @@ public class TemplateObjectTO implements DataTO {
|
|||
this.physicalSize = physicalSize;
|
||||
}
|
||||
|
||||
public void setIsBootable(boolean bootable) {
|
||||
this.bootable = bootable;
|
||||
}
|
||||
|
||||
public boolean isBootable() {
|
||||
return bootable;
|
||||
}
|
||||
|
||||
public String getUniqueName() {
|
||||
return this.uniqueName;
|
||||
}
|
||||
|
||||
public void setUniqueName(String uniqueName) {
|
||||
this.uniqueName = uniqueName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString();
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ import com.cloud.exception.OperationTimedoutException;
|
|||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.network.Network;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.offering.DiskOfferingInfo;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.storage.StoragePool;
|
||||
|
|
@ -74,11 +75,12 @@ public interface VirtualMachineManager extends Manager {
|
|||
* @param auxiliaryNetworks additional networks to attach the VMs to.
|
||||
* @param plan How to deploy the VM.
|
||||
* @param hyperType Hypervisor type
|
||||
* @param datadiskTemplateToDiskOfferingMap data disks to be created from datadisk templates and attached to the VM
|
||||
* @throws InsufficientCapacityException If there are insufficient capacity to deploy this vm.
|
||||
*/
|
||||
void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, DiskOfferingInfo rootDiskOfferingInfo,
|
||||
List<DiskOfferingInfo> dataDiskOfferings, LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, DeploymentPlan plan,
|
||||
HypervisorType hyperType, Map<String, Map<Integer, String>> extraDhcpOptions) throws InsufficientCapacityException;
|
||||
HypervisorType hyperType, Map<String, Map<Integer, String>> extraDhcpOptions, Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException;
|
||||
|
||||
void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering,
|
||||
LinkedHashMap<? extends Network, List<? extends NicProfile>> networkProfiles, DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException;
|
||||
|
|
|
|||
|
|
@ -92,7 +92,8 @@ public interface VolumeOrchestrationService {
|
|||
|
||||
void destroyVolume(Volume volume);
|
||||
|
||||
DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner);
|
||||
DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
|
||||
Account owner, Long deviceId);
|
||||
|
||||
VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException;
|
||||
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
|
|||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.exception.InsufficientCapacityException;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.vm.NicProfile;
|
||||
|
||||
@Path("orchestration")
|
||||
|
|
@ -65,7 +66,8 @@ public interface OrchestrationService {
|
|||
@QueryParam("cpu") int cpu, @QueryParam("speed") int speed, @QueryParam("ram") long memory, @QueryParam("disk-size") Long diskSize,
|
||||
@QueryParam("compute-tags") List<String> computeTags, @QueryParam("root-disk-tags") List<String> rootDiskTags,
|
||||
@QueryParam("network-nic-map") Map<String, NicProfile> networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan,
|
||||
@QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException;
|
||||
@QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map<String, Map<Integer, String>> extraDhcpOptionMap,
|
||||
@QueryParam("datadisktemplate-diskoffering-map") Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException;
|
||||
|
||||
@POST
|
||||
VirtualMachineEntity createVirtualMachineFromScratch(@QueryParam("id") String id, @QueryParam("owner") String owner, @QueryParam("iso-id") String isoId,
|
||||
|
|
|
|||
|
|
@ -68,4 +68,6 @@ public interface TemplateService {
|
|||
void associateTemplateToZone(long templateId, Long zoneId);
|
||||
|
||||
void associateCrosszoneTemplatesToZone(long dcId);
|
||||
|
||||
AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,17 +18,21 @@
|
|||
*/
|
||||
package org.apache.cloudstack.storage.image.datastore;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import com.cloud.storage.Upload;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
|
||||
import com.cloud.storage.ImageStore;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
|
||||
public interface ImageStoreEntity extends DataStore, ImageStore {
|
||||
TemplateInfo getTemplate(long templateId);
|
||||
|
|
@ -46,4 +50,8 @@ public interface ImageStoreEntity extends DataStore, ImageStore {
|
|||
String createEntityExtractUrl(String installPath, ImageFormat format, DataObject dataObject); // get the entity download URL
|
||||
|
||||
void deleteExtractUrl(String installPath, String url, Upload.Type volume);
|
||||
|
||||
List<DatadiskTO> getDataDiskTemplates(DataObject obj);
|
||||
|
||||
Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback<CreateCmdResult> callback);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ import java.util.Iterator;
|
|||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.TimeZone;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.Executors;
|
||||
|
|
@ -159,6 +160,7 @@ import com.cloud.network.dao.NetworkDao;
|
|||
import com.cloud.network.dao.NetworkVO;
|
||||
import com.cloud.network.router.VirtualRouter;
|
||||
import com.cloud.network.rules.RulesManager;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.offering.DiskOfferingInfo;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.org.Cluster;
|
||||
|
|
@ -170,6 +172,7 @@ import com.cloud.storage.DiskOfferingVO;
|
|||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
|
|
@ -391,7 +394,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
@DB
|
||||
public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
|
||||
final DiskOfferingInfo rootDiskOfferingInfo, final List<DiskOfferingInfo> dataDiskOfferings,
|
||||
final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions)
|
||||
final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions, final Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap)
|
||||
throws InsufficientCapacityException {
|
||||
|
||||
final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
|
||||
|
|
@ -430,7 +433,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
|
||||
if (template.getFormat() == ImageFormat.ISO) {
|
||||
volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
|
||||
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
|
||||
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
|
||||
} else if (template.getFormat() == ImageFormat.BAREMETAL) {
|
||||
// Do nothing
|
||||
} else {
|
||||
|
|
@ -441,7 +444,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
if (dataDiskOfferings != null) {
|
||||
for (final DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) {
|
||||
volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
|
||||
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
|
||||
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
|
||||
}
|
||||
}
|
||||
if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) {
|
||||
int diskNumber = 1;
|
||||
for (Entry<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap : datadiskTemplateToDiskOfferingMap.entrySet()) {
|
||||
DiskOffering diskOffering = dataDiskTemplateToDiskOfferingMap.getValue();
|
||||
long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024);
|
||||
VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey());
|
||||
volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId() + "-" + String.valueOf(diskNumber), diskOffering, diskOfferingSize, null, null,
|
||||
vmFinal, dataDiskTemplate, owner, Long.valueOf(diskNumber));
|
||||
diskNumber++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -455,7 +469,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
@Override
|
||||
public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
|
||||
final LinkedHashMap<? extends Network, List<? extends NicProfile>> networks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException {
|
||||
allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType, null);
|
||||
allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType, null, null);
|
||||
}
|
||||
|
||||
private VirtualMachineGuru getVmGuru(final VirtualMachine vm) {
|
||||
|
|
@ -880,9 +894,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
final ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
|
||||
final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
|
||||
}
|
||||
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
|
||||
if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -1027,9 +1038,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
}
|
||||
|
||||
try {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
|
||||
}
|
||||
_networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
|
||||
if (vm.getHypervisorType() != HypervisorType.BareMetal) {
|
||||
volumeMgr.prepare(vmProfile, dest);
|
||||
|
|
@ -4776,6 +4784,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner()));
|
||||
}
|
||||
catch (CloudRuntimeException e){
|
||||
e.printStackTrace();
|
||||
s_logger.info("Caught CloudRuntimeException, returning job failed " + e);
|
||||
CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance");
|
||||
return new Pair<JobInfo.Status, String>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex));
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import java.util.Arrays;
|
|||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
|
|
@ -45,6 +46,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
|||
import com.cloud.network.Network;
|
||||
import com.cloud.network.dao.NetworkDao;
|
||||
import com.cloud.network.dao.NetworkVO;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.offering.DiskOfferingInfo;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
|
|
@ -155,7 +157,7 @@ public class CloudOrchestrator implements OrchestrationService {
|
|||
@Override
|
||||
public VirtualMachineEntity createVirtualMachine(String id, String owner, String templateId, String hostName, String displayName, String hypervisor, int cpu,
|
||||
int speed, long memory, Long diskSize, List<String> computeTags, List<String> rootDiskTags, Map<String, NicProfile> networkNicMap, DeploymentPlan plan,
|
||||
Long rootDiskSize, Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException {
|
||||
Long rootDiskSize, Map<String, Map<Integer, String>> extraDhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException {
|
||||
|
||||
// VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks,
|
||||
// vmEntityManager);
|
||||
|
|
@ -233,8 +235,20 @@ public class CloudOrchestrator implements OrchestrationService {
|
|||
dataDiskOfferings.add(dataDiskOfferingInfo);
|
||||
}
|
||||
|
||||
if (dataDiskTemplateToDiskOfferingMap != null && !dataDiskTemplateToDiskOfferingMap.isEmpty()) {
|
||||
for (Entry<Long, DiskOffering> datadiskTemplateToDiskOffering : dataDiskTemplateToDiskOfferingMap.entrySet()) {
|
||||
DiskOffering diskOffering = datadiskTemplateToDiskOffering.getValue();
|
||||
if (diskOffering == null) {
|
||||
throw new InvalidParameterValueException("Unable to find disk offering " + vm.getDiskOfferingId());
|
||||
}
|
||||
if (diskOffering.getDiskSize() == 0) { // Custom disk offering is not supported for volumes created from datadisk templates
|
||||
throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan,
|
||||
hypervisorType, extraDhcpOptionMap);
|
||||
hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
|
||||
|
||||
return vmEntity;
|
||||
}
|
||||
|
|
@ -299,7 +313,7 @@ public class CloudOrchestrator implements OrchestrationService {
|
|||
|
||||
HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);
|
||||
|
||||
_itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap);
|
||||
_itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap, null);
|
||||
|
||||
return vmEntity;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -654,7 +654,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
}
|
||||
|
||||
@Override
|
||||
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner) {
|
||||
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId) {
|
||||
if (size == null) {
|
||||
size = offering.getDiskSize();
|
||||
} else {
|
||||
|
|
@ -679,13 +679,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
vol.setInstanceId(vm.getId());
|
||||
}
|
||||
|
||||
if (type.equals(Type.ROOT)) {
|
||||
if (deviceId != null) {
|
||||
vol.setDeviceId(deviceId);
|
||||
} else if (type.equals(Type.ROOT)) {
|
||||
vol.setDeviceId(0l);
|
||||
} else {
|
||||
vol.setDeviceId(1l);
|
||||
}
|
||||
if (template.getFormat() == ImageFormat.ISO) {
|
||||
vol.setIsoId(template.getId());
|
||||
} else if (template.getTemplateType().equals(Storage.TemplateType.DATADISK)) {
|
||||
vol.setTemplateId(template.getId());
|
||||
}
|
||||
// display flag matters only for the User vms
|
||||
if (vm.getType() == VirtualMachine.Type.User) {
|
||||
|
|
@ -1252,7 +1256,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
StoragePool pool = dest.getStorageForDisks().get(vol);
|
||||
destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) {
|
||||
newVol = vol;
|
||||
} else {
|
||||
|
|
@ -1362,9 +1365,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
}
|
||||
|
||||
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
|
||||
}
|
||||
|
||||
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
|
||||
Volume vol = null;
|
||||
|
|
@ -1395,6 +1395,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
pool = (StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary);
|
||||
vol = result.first();
|
||||
}
|
||||
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
|
||||
DataTO volTO = volumeInfo.getTO();
|
||||
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(),
|
||||
|
|
|
|||
|
|
@ -34,109 +34,6 @@ where
|
|||
or service in ('NetworkACL')
|
||||
)
|
||||
);
|
||||
|
||||
--Alter view template_view
|
||||
|
||||
DROP VIEW IF EXISTS `cloud`.`template_view`;
|
||||
CREATE VIEW `template_view` AS
|
||||
SELECT
|
||||
`vm_template`.`id` AS `id`,
|
||||
`vm_template`.`uuid` AS `uuid`,
|
||||
`vm_template`.`unique_name` AS `unique_name`,
|
||||
`vm_template`.`name` AS `name`,
|
||||
`vm_template`.`public` AS `public`,
|
||||
`vm_template`.`featured` AS `featured`,
|
||||
`vm_template`.`type` AS `type`,
|
||||
`vm_template`.`hvm` AS `hvm`,
|
||||
`vm_template`.`bits` AS `bits`,
|
||||
`vm_template`.`url` AS `url`,
|
||||
`vm_template`.`format` AS `format`,
|
||||
`vm_template`.`created` AS `created`,
|
||||
`vm_template`.`checksum` AS `checksum`,
|
||||
`vm_template`.`display_text` AS `display_text`,
|
||||
`vm_template`.`enable_password` AS `enable_password`,
|
||||
`vm_template`.`dynamically_scalable` AS `dynamically_scalable`,
|
||||
`vm_template`.`state` AS `template_state`,
|
||||
`vm_template`.`guest_os_id` AS `guest_os_id`,
|
||||
`guest_os`.`uuid` AS `guest_os_uuid`,
|
||||
`guest_os`.`display_name` AS `guest_os_name`,
|
||||
`vm_template`.`bootable` AS `bootable`,
|
||||
`vm_template`.`prepopulate` AS `prepopulate`,
|
||||
`vm_template`.`cross_zones` AS `cross_zones`,
|
||||
`vm_template`.`hypervisor_type` AS `hypervisor_type`,
|
||||
`vm_template`.`extractable` AS `extractable`,
|
||||
`vm_template`.`template_tag` AS `template_tag`,
|
||||
`vm_template`.`sort_key` AS `sort_key`,
|
||||
`vm_template`.`removed` AS `removed`,
|
||||
`vm_template`.`enable_sshkey` AS `enable_sshkey`,
|
||||
`source_template`.`id` AS `source_template_id`,
|
||||
`source_template`.`uuid` AS `source_template_uuid`,
|
||||
`account`.`id` AS `account_id`,
|
||||
`account`.`uuid` AS `account_uuid`,
|
||||
`account`.`account_name` AS `account_name`,
|
||||
`account`.`type` AS `account_type`,
|
||||
`domain`.`id` AS `domain_id`,
|
||||
`domain`.`uuid` AS `domain_uuid`,
|
||||
`domain`.`name` AS `domain_name`,
|
||||
`domain`.`path` AS `domain_path`,
|
||||
`projects`.`id` AS `project_id`,
|
||||
`projects`.`uuid` AS `project_uuid`,
|
||||
`projects`.`name` AS `project_name`,
|
||||
`data_center`.`id` AS `data_center_id`,
|
||||
`data_center`.`uuid` AS `data_center_uuid`,
|
||||
`data_center`.`name` AS `data_center_name`,
|
||||
`launch_permission`.`account_id` AS `lp_account_id`,
|
||||
`template_store_ref`.`store_id` AS `store_id`,
|
||||
`image_store`.`scope` AS `store_scope`,
|
||||
`template_store_ref`.`state` AS `state`,
|
||||
`template_store_ref`.`download_state` AS `download_state`,
|
||||
`template_store_ref`.`download_pct` AS `download_pct`,
|
||||
`template_store_ref`.`error_str` AS `error_str`,
|
||||
`template_store_ref`.`size` AS `size`,
|
||||
`template_store_ref`.physical_size AS `physical_size`,
|
||||
`template_store_ref`.`destroyed` AS `destroyed`,
|
||||
`template_store_ref`.`created` AS `created_on_store`,
|
||||
`vm_template_details`.`name` AS `detail_name`,
|
||||
`vm_template_details`.`value` AS `detail_value`,
|
||||
`resource_tags`.`id` AS `tag_id`,
|
||||
`resource_tags`.`uuid` AS `tag_uuid`,
|
||||
`resource_tags`.`key` AS `tag_key`,
|
||||
`resource_tags`.`value` AS `tag_value`,
|
||||
`resource_tags`.`domain_id` AS `tag_domain_id`,
|
||||
`domain`.`uuid` AS `tag_domain_uuid`,
|
||||
`domain`.`name` AS `tag_domain_name`,
|
||||
`resource_tags`.`account_id` AS `tag_account_id`,
|
||||
`account`.`account_name` AS `tag_account_name`,
|
||||
`resource_tags`.`resource_id` AS `tag_resource_id`,
|
||||
`resource_tags`.`resource_uuid` AS `tag_resource_uuid`,
|
||||
`resource_tags`.`resource_type` AS `tag_resource_type`,
|
||||
`resource_tags`.`customer` AS `tag_customer`,
|
||||
CONCAT(`vm_template`.`id`,
|
||||
'_',
|
||||
IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`
|
||||
FROM
|
||||
((((((((((((`vm_template`
|
||||
JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`)))
|
||||
JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`)))
|
||||
JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`)))
|
||||
LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`)))
|
||||
LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`)))
|
||||
LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`)))
|
||||
LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`)
|
||||
AND (`template_store_ref`.`store_role` = 'Image')
|
||||
AND (`template_store_ref`.`destroyed` = 0))))
|
||||
LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`)
|
||||
AND (`template_store_ref`.`store_id` IS NOT NULL)
|
||||
AND (`image_store`.`id` = `template_store_ref`.`store_id`))))
|
||||
LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`)
|
||||
AND ISNULL(`template_store_ref`.`store_id`)
|
||||
AND ISNULL(`template_zone_ref`.`removed`))))
|
||||
LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`)
|
||||
OR (`template_zone_ref`.`zone_id` = `data_center`.`id`))))
|
||||
LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`)))
|
||||
LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`)
|
||||
AND ((`resource_tags`.`resource_type` = 'Template')
|
||||
OR (`resource_tags`.`resource_type` = 'ISO')))));
|
||||
|
||||
UPDATE `cloud`.`configuration` SET value = '600', default_value = '600' WHERE category = 'Advanced' AND name = 'router.aggregation.command.each.timeout';
|
||||
|
||||
|
|
@ -302,15 +199,13 @@ CREATE VIEW `cloud`.`host_view` AS
|
|||
`cloud`.`user` ON `user`.`uuid` = `last_annotation_view`.`user_uuid`;
|
||||
-- End Of Annotations specific changes
|
||||
|
||||
|
||||
-- Out-of-band management driver for nested-cloudstack
|
||||
ALTER TABLE `cloud`.`oobm` MODIFY COLUMN port VARCHAR(255);
|
||||
|
||||
|
||||
-- CLOUDSTACK-9902: Console proxy SSL toggle
|
||||
INSERT IGNORE INTO `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`, `default_value`, `is_dynamic`) VALUES ('Console Proxy', 'DEFAULT', 'AgentManager', 'consoleproxy.sslEnabled', 'false', 'Enable SSL for console proxy', 'false', 0);
|
||||
|
||||
-- CLOUDSTACK-9859: Retirement of midonet plugin (final removal)
|
||||
-- CLOUDSTACK-9859: Retirement of midonet plugin (final removal)
|
||||
delete from `cloud`.`configuration` where name in ('midonet.apiserver.address', 'midonet.providerrouter.id');
|
||||
|
||||
-- CLOUDSTACK-9972: Enhance listVolumes API
|
||||
|
|
@ -318,7 +213,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Premium', 'DEFAULT', 'manage
|
|||
|
||||
DROP VIEW IF EXISTS `cloud`.`volume_view`;
|
||||
CREATE VIEW `cloud`.`volume_view` AS
|
||||
select
|
||||
SELECT
|
||||
volumes.id,
|
||||
volumes.uuid,
|
||||
volumes.name,
|
||||
|
|
@ -464,21 +359,17 @@ CREATE TABLE IF NOT EXISTS `cloud`.`nic_extra_dhcp_options` (
|
|||
-- Add new OS versions
|
||||
|
||||
-- Add XenServer 7.1 and 7.2 hypervisor capabilities
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) values (UUID(), 'XenServer', '7.1.0', 500, 13, 1);
|
||||
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, max_data_volumes_limit, storage_motion_supported) values (UUID(), 'XenServer', '7.2.0', 500, 13, 1);
|
||||
|
||||
-- Add XenServer 7.0 support for windows 10
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows 10 (64-bit)', 258, now(), 0);
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.0.0', 'Windows 10 (32-bit)', 257, now(), 0);
|
||||
|
||||
-- Add XenServer 7.1 hypervisor guest OS mappings (copy 7.0.0)
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.1.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.0.0';
|
||||
|
||||
-- Add XenServer 7.1 hypervisor guest OS (see https://docs.citrix.com/content/dam/docs/en-us/xenserver/7-1/downloads/xenserver-7-1-release-notes.pdf)
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Windows Server 2016 (64-bit)', 259, now(), 0);
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'SUSE Linux Enterprise Server 11 SP4', 187, now(), 0);
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Red Hat Enterprise Linux 6 (64-bit)', 240, now(), 0);
|
||||
|
|
@ -487,7 +378,6 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervi
|
|||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(), 'Xenserver', '7.1.0', 'Oracle Linux 7', 247, now(), 0);
|
||||
|
||||
-- Add XenServer 7.2 hypervisor guest OS mappings (copy 7.1.0 & remove Windows Vista, Windows XP, Windows 2003, CentOS 4.x, RHEL 4.xS, LES 10 (all versions) as per XenServer 7.2 Release Notes)
|
||||
|
||||
INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '7.2.0', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='7.1.0' AND guest_os_id not in (1,2,3,4,56,101,56,58,93,94,50,51,87,88,89,90,91,92,26,27,28,29,40,41,42,43,44,45,96,97,107,108,109,110,151,152,153);
|
||||
|
||||
-- Add table to track primary storage in use for snapshots
|
||||
|
|
@ -524,11 +414,16 @@ ADD COLUMN `forsystemvms` TINYINT(1) NOT NULL DEFAULT '0' COMMENT 'Indicates if
|
|||
ALTER TABLE `cloud`.`op_dc_ip_address_alloc`
|
||||
ADD COLUMN `vlan` INT(10) UNSIGNED NULL COMMENT 'Vlan the management network range is on';
|
||||
|
||||
-- CLOUDSTACK-4757: Support multidisk OVA
|
||||
ALTER TABLE `cloud`.`vm_template` ADD COLUMN `parent_template_id` bigint(20) unsigned DEFAULT NULL COMMENT 'If datadisk template, then id of the root template this template belongs to';
|
||||
|
||||
-- CLOUDSTACK-10146: Bypass Secondary Storage for KVM templates
|
||||
ALTER TABLE `cloud`.`vm_template`
|
||||
ADD COLUMN `direct_download` TINYINT(1) DEFAULT '0' COMMENT 'Indicates if Secondary Storage is bypassed and template is downloaded to Primary Storage';
|
||||
|
||||
CREATE OR REPLACE VIEW `template_view` AS
|
||||
-- Changes to template_view for both multidisk OVA and bypass secondary storage for KVM templates
|
||||
DROP VIEW IF EXISTS `cloud`.`template_view`;
|
||||
CREATE VIEW `cloud`.`template_view` AS
|
||||
SELECT
|
||||
`vm_template`.`id` AS `id`,
|
||||
`vm_template`.`uuid` AS `uuid`,
|
||||
|
|
@ -559,6 +454,8 @@ CREATE OR REPLACE VIEW `template_view` AS
|
|||
`vm_template`.`sort_key` AS `sort_key`,
|
||||
`vm_template`.`removed` AS `removed`,
|
||||
`vm_template`.`enable_sshkey` AS `enable_sshkey`,
|
||||
`parent_template`.`id` AS `parent_template_id`,
|
||||
`parent_template`.`uuid` AS `parent_template_uuid`,
|
||||
`source_template`.`id` AS `source_template_id`,
|
||||
`source_template`.`uuid` AS `source_template_uuid`,
|
||||
`account`.`id` AS `account_id`,
|
||||
|
|
@ -606,7 +503,7 @@ CREATE OR REPLACE VIEW `template_view` AS
|
|||
IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`,
|
||||
`vm_template`.`direct_download` AS `direct_download`
|
||||
FROM
|
||||
((((((((((((`vm_template`
|
||||
(((((((((((((`vm_template`
|
||||
JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`)))
|
||||
JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`)))
|
||||
JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`)))
|
||||
|
|
@ -616,6 +513,7 @@ CREATE OR REPLACE VIEW `template_view` AS
|
|||
LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`)
|
||||
AND (`template_store_ref`.`store_role` = 'Image')
|
||||
AND (`template_store_ref`.`destroyed` = 0))))
|
||||
LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`)))
|
||||
LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`)
|
||||
AND (`template_store_ref`.`store_id` IS NOT NULL)
|
||||
AND (`image_store`.`id` = `template_store_ref`.`store_id`))))
|
||||
|
|
|
|||
|
|
@ -149,6 +149,9 @@ public class VMTemplateVO implements VirtualMachineTemplate {
|
|||
@Column(name = "direct_download")
|
||||
private boolean directDownload;
|
||||
|
||||
@Column(name = "parent_template_id")
|
||||
private Long parentTemplateId;
|
||||
|
||||
@Override
|
||||
public String getUniqueName() {
|
||||
return uniqueName;
|
||||
|
|
@ -617,4 +620,14 @@ public class VMTemplateVO implements VirtualMachineTemplate {
|
|||
public Class<?> getEntityType() {
|
||||
return VirtualMachineTemplate.class;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getParentTemplateId() {
|
||||
return parentTemplateId;
|
||||
}
|
||||
|
||||
public void setParentTemplateId(Long parentTemplateId) {
|
||||
this.parentTemplateId = parentTemplateId;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -81,4 +81,6 @@ public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long>, StateDao<
|
|||
void loadDetails(VMTemplateVO tmpl);
|
||||
|
||||
void saveDetails(VMTemplateVO tmpl);
|
||||
|
||||
List<VMTemplateVO> listByParentTemplatetId(long parentTemplatetId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -104,6 +104,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
|||
private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
|
||||
// private SearchBuilder<VMTemplateVO> updateStateSearch;
|
||||
private SearchBuilder<VMTemplateVO> AllFieldsSearch;
|
||||
protected SearchBuilder<VMTemplateVO> ParentTemplateIdSearch;
|
||||
|
||||
@Inject
|
||||
ResourceTagDao _tagsDao;
|
||||
|
|
@ -135,6 +136,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
|||
return findOneIncludingRemovedBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMTemplateVO> listByParentTemplatetId(long parentTemplatetId) {
|
||||
SearchCriteria<VMTemplateVO> sc = ParentTemplateIdSearch.create();
|
||||
sc.setParameters("parentTemplateId", parentTemplatetId);
|
||||
sc.setParameters("state", VirtualMachineTemplate.State.Active);
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMTemplateVO> publicIsoSearch(Boolean bootable, boolean listRemoved, Map<String, String> tags) {
|
||||
|
||||
|
|
@ -403,6 +412,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
|||
AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ);
|
||||
AllFieldsSearch.done();
|
||||
|
||||
ParentTemplateIdSearch = createSearchBuilder();
|
||||
ParentTemplateIdSearch.and("parentTemplateId", ParentTemplateIdSearch.entity().getParentTemplateId(), SearchCriteria.Op.EQ);
|
||||
ParentTemplateIdSearch.and("state", ParentTemplateIdSearch.entity().getState(), SearchCriteria.Op.EQ);
|
||||
ParentTemplateIdSearch.done();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -692,4 +692,4 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
|
|||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -444,7 +444,6 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
|
|||
String errMsg = null;
|
||||
try {
|
||||
s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
|
||||
|
||||
if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
|
||||
answer = copyVolumeFromSnapshot(srcData, destData);
|
||||
} else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) {
|
||||
|
|
|
|||
|
|
@ -18,43 +18,19 @@
|
|||
*/
|
||||
package org.apache.cloudstack.storage.image;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.ListTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.ListTemplateCommand;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.event.UsageEventUtils;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ImageStoreDetailsUtil;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Storage.TemplateType;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.VMTemplateZoneVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VMTemplatePoolDao;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.template.TemplateConstants;
|
||||
import com.cloud.storage.template.TemplateProp;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.UriUtils;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.utils.fsm.StateMachine2;
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
|
||||
|
|
@ -90,16 +66,52 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
|||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.apache.cloudstack.storage.image.store.TemplateObject;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.ListTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.ListTemplateCommand;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.event.UsageEventUtils;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ImageStoreDetailsUtil;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Storage.TemplateType;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.VMTemplateZoneVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.template.TemplateConstants;
|
||||
import com.cloud.storage.template.TemplateProp;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountManager;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.utils.UriUtils;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.utils.fsm.StateMachine2;
|
||||
import com.cloud.vm.VmDetailConstants;
|
||||
import com.google.common.base.Strings;
|
||||
|
||||
@Component
|
||||
public class TemplateServiceImpl implements TemplateService {
|
||||
|
|
@ -131,8 +143,6 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
@Inject
|
||||
TemplateDataFactory _templateFactory;
|
||||
@Inject
|
||||
VMTemplatePoolDao _tmpltPoolDao;
|
||||
@Inject
|
||||
EndPointSelector _epSelector;
|
||||
@Inject
|
||||
TemplateManager _tmpltMgr;
|
||||
|
|
@ -144,6 +154,8 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
MessageBus _messageBus;
|
||||
@Inject
|
||||
ImageStoreDetailsUtil imageStoreDetailsUtil;
|
||||
@Inject
|
||||
TemplateDataFactory imageFactory;
|
||||
|
||||
class TemplateOpContext<T> extends AsyncRpcContext<T> {
|
||||
final TemplateObject template;
|
||||
|
|
@ -324,6 +336,17 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
}
|
||||
}
|
||||
|
||||
for (Iterator<VMTemplateVO> iter = allTemplates.listIterator(); iter.hasNext();) {
|
||||
VMTemplateVO child_template = iter.next();
|
||||
if (child_template.getParentTemplateId() != null) {
|
||||
String uniqueName = child_template.getUniqueName();
|
||||
if (templateInfos.containsKey(uniqueName)) {
|
||||
templateInfos.remove(uniqueName);
|
||||
}
|
||||
iter.remove();
|
||||
}
|
||||
}
|
||||
|
||||
toBeDownloaded.addAll(allTemplates);
|
||||
|
||||
final StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> stateMachine = VirtualMachineTemplate.State.getStateMachine();
|
||||
|
|
@ -678,6 +701,18 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
return null;
|
||||
}
|
||||
|
||||
// Check if OVA contains additional data disks. If yes, create Datadisk templates for each of the additional datadisk present in the OVA
|
||||
if (template.getFormat().equals(ImageFormat.OVA)) {
|
||||
if (!createOvaDataDiskTemplates(template)) {
|
||||
template.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
|
||||
result.setResult(callbackResult.getResult());
|
||||
if (parentCallback != null) {
|
||||
parentCallback.complete(result);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
template.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed);
|
||||
} catch (Exception e) {
|
||||
|
|
@ -694,6 +729,166 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
return null;
|
||||
}
|
||||
|
||||
|
||||
protected boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) {
|
||||
try {
|
||||
// Get Datadisk template (if any) for OVA
|
||||
List<DatadiskTO> dataDiskTemplates = new ArrayList<DatadiskTO>();
|
||||
ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore();
|
||||
dataDiskTemplates = tmpltStore.getDataDiskTemplates(parentTemplate);
|
||||
int diskCount = 0;
|
||||
VMTemplateVO templateVO = _templateDao.findById(parentTemplate.getId());
|
||||
_templateDao.loadDetails(templateVO);
|
||||
DataStore imageStore = parentTemplate.getDataStore();
|
||||
Map<String, String> details = parentTemplate.getDetails();
|
||||
if (details == null) {
|
||||
details = templateVO.getDetails();
|
||||
if (details == null) {
|
||||
details = new HashMap<>();
|
||||
}
|
||||
}
|
||||
for (DatadiskTO diskTemplate : dataDiskTemplates) {
|
||||
if (!diskTemplate.isBootable()) {
|
||||
createChildDataDiskTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++);
|
||||
if (!diskTemplate.isIso() && Strings.isNullOrEmpty(details.get(VmDetailConstants.DATA_DISK_CONTROLLER))){
|
||||
details.put(VmDetailConstants.DATA_DISK_CONTROLLER, getOvaDiskControllerDetails(diskTemplate, false));
|
||||
details.put(VmDetailConstants.DATA_DISK_CONTROLLER + diskTemplate.getDiskId(), getOvaDiskControllerDetails(diskTemplate, false));
|
||||
}
|
||||
} else {
|
||||
finalizeParentTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++);
|
||||
if (Strings.isNullOrEmpty(VmDetailConstants.ROOT_DISK_CONTROLLER)) {
|
||||
final String rootDiskController = getOvaDiskControllerDetails(diskTemplate, true);
|
||||
if (!Strings.isNullOrEmpty(rootDiskController)) {
|
||||
details.put(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
templateVO.setDetails(details);
|
||||
_templateDao.saveDetails(templateVO);
|
||||
return true;
|
||||
} catch (CloudRuntimeException | InterruptedException | ExecutionException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Persists a child template record for one non-bootable disk of a multi-disk
 * OVA, then synchronously creates the corresponding datadisk template in the
 * image store and accounts for it (zone association, resource count).
 *
 * @param dataDiskTemplate the disk extracted from the parent OVA
 * @param template         the parent template VO the child is linked to
 * @param parentTemplate   the parent template on its image store
 * @param imageStore       the image store holding the parent OVA
 * @param diskCount        index used to derive a unique child template name
 * @return true if the image-store creation succeeded; on failure all child
 *         templates of the parent are cleaned up and false is returned
 */
private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO template, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException {
    // Make an entry in vm_template table. ISO disks get ISO format/ISODISK type;
    // other disks inherit the parent's format and become DATADISK templates.
    Storage.ImageFormat format = dataDiskTemplate.isIso() ? Storage.ImageFormat.ISO : template.getFormat();
    String suffix = dataDiskTemplate.isIso() ? "-IsoDiskTemplate-" : "-DataDiskTemplate-";
    TemplateType ttype = dataDiskTemplate.isIso() ? TemplateType.ISODISK : TemplateType.DATADISK;
    final long templateId = _templateDao.getNextInSequence(Long.class, "id");
    // NOTE(review): guest OS id is hard-coded (1 for ISO, 0 otherwise) — presumably
    // placeholder ids since a data disk has no guest OS of its own; confirm mapping.
    long guestOsId = dataDiskTemplate.isIso() ? 1 : 0;
    // ISO children are named after the ISO file itself; data disks get the parent
    // name plus a suffix and running disk index.
    String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount;
    VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(),
            template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null,
            null, false, false, false);
    if (dataDiskTemplate.isIso()){
        templateVO.setUniqueName(templateName);
    }
    // Link the child to its parent so it can be found (and cleaned up) later.
    templateVO.setParentTemplateId(template.getId());
    templateVO.setSize(dataDiskTemplate.getVirtualSize());
    templateVO = _templateDao.persist(templateVO);
    // Make sync call to create Datadisk templates in image store (block on the future).
    TemplateApiResult result = null;
    TemplateInfo dataDiskTemplateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore);
    AsyncCallFuture<TemplateApiResult> future = createDatadiskTemplateAsync(parentTemplate, dataDiskTemplateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(),
            dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable());
    result = future.get();
    if (result.isSuccess()) {
        // Make an entry in template_zone_ref table: region-scoped stores associate
        // with all zones; zone-scoped stores with their own zone only.
        if (imageStore.getScope().getScopeType() == ScopeType.REGION) {
            associateTemplateToZone(templateId, null);
        } else if (imageStore.getScope().getScopeType() == ScopeType.ZONE) {
            Long zoneId = ((ImageStoreEntity)imageStore).getDataCenterId();
            VMTemplateZoneVO templateZone = new VMTemplateZoneVO(zoneId, templateId, new Date());
            _vmTemplateZoneDao.persist(templateZone);
        }
        // Charge the child template's size against the owner's secondary-storage limit.
        _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize());
    } else {
        // Delete the Datadisk templates that were already created as they are now invalid
        s_logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent"
                + " template download");
        TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore);
        cleanupDatadiskTemplates(parentTemplateInfo);
    }
    return result.isSuccess();
}
|
||||
|
||||
private boolean finalizeParentTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO templateVO, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException, CloudRuntimeException {
|
||||
TemplateInfo templateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore);
|
||||
AsyncCallFuture<TemplateApiResult> templateFuture = createDatadiskTemplateAsync(parentTemplate, templateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(),
|
||||
dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable());
|
||||
TemplateApiResult result = null;
|
||||
result = templateFuture.get();
|
||||
if (!result.isSuccess()) {
|
||||
s_logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent"
|
||||
+ " template download");
|
||||
cleanupDatadiskTemplates(templateInfo);
|
||||
}
|
||||
return result.isSuccess();
|
||||
}
|
||||
|
||||
private String getOvaDiskControllerDetails(DatadiskTO diskTemplate, boolean isRootDisk) {
|
||||
String controller = diskTemplate.getDiskController() ;
|
||||
String controllerSubType = diskTemplate.getDiskControllerSubType();
|
||||
|
||||
if (controller != null) {
|
||||
controller = controller.toLowerCase();
|
||||
}
|
||||
|
||||
if (controllerSubType != null) {
|
||||
controllerSubType = controllerSubType.toLowerCase();
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(controller)) {
|
||||
if (controller.contains("ide")) {
|
||||
return "ide";
|
||||
}
|
||||
if (controller.contains("scsi")) {
|
||||
if (StringUtils.isNotBlank(controllerSubType)) {
|
||||
if (controllerSubType.equals("lsilogicsas")) {
|
||||
return "lsisas1068";
|
||||
}
|
||||
return controllerSubType;
|
||||
}
|
||||
if (!isRootDisk) {
|
||||
return "scsi";
|
||||
}
|
||||
}
|
||||
if (!isRootDisk) {
|
||||
return "osdefault";
|
||||
}
|
||||
}
|
||||
|
||||
// Root disk to use global setting vmware.root.disk.controller
|
||||
if (!isRootDisk) {
|
||||
return "scsi";
|
||||
}
|
||||
return controller;
|
||||
}
|
||||
|
||||
/**
 * Deletes every child datadisk template of the given parent from its image
 * store, removing the zone associations and decrementing the owner's
 * secondary-storage resource count for each successfully deleted child.
 *
 * Stops at the first child whose store-side deletion reports failure (logged,
 * remaining children are left in place); an exception during deletion is
 * rethrown as a CloudRuntimeException.
 *
 * @param parentTemplateInfo the parent template whose children are removed
 */
private void cleanupDatadiskTemplates(TemplateInfo parentTemplateInfo) {
    DataStore imageStore = parentTemplateInfo.getDataStore();
    List<VMTemplateVO> datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId());
    for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) {
        s_logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName());
        // Blocking delete of the child template's store entry.
        AsyncCallFuture<TemplateApiResult> future = deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore));
        try {
            TemplateApiResult result = future.get();
            if (!result.isSuccess()) {
                // Give up on the remaining children after the first failed deletion.
                s_logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult());
                break;
            }
            // Store entry gone: drop zone refs and release the accounted storage.
            _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId());
            _resourceLimitMgr.decrementResourceCount(datadiskTemplateToDelete.getAccountId(), ResourceType.secondary_storage, datadiskTemplateToDelete.getSize());
        } catch (Exception e) {
            s_logger.debug("Delete datadisk template failed", e);
            throw new CloudRuntimeException("Delete template Failed", e);
        }
    }
}
|
||||
|
||||
@Override
|
||||
public AsyncCallFuture<TemplateApiResult> deleteTemplateAsync(TemplateInfo template) {
|
||||
TemplateObject to = (TemplateObject)template;
|
||||
|
|
@ -1006,4 +1201,69 @@ public class TemplateServiceImpl implements TemplateService {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Async-RPC context passed to the datadisk-template creation callback; carries
 * the template object being created and the future the callback completes.
 *
 * @param <T> result type of the wrapped async callback
 */
private class CreateDataDiskTemplateContext<T> extends AsyncRpcContext<T> {
    // Template (or parent template, for the bootable disk) being created on the store.
    private final DataObject dataDiskTemplate;
    // Future completed by createDatadiskTemplateCallback with the final result.
    private final AsyncCallFuture<TemplateApiResult> future;

    public CreateDataDiskTemplateContext(AsyncCompletionCallback<T> callback, DataObject dataDiskTemplate, AsyncCallFuture<TemplateApiResult> future) {
        super(callback);
        this.dataDiskTemplate = dataDiskTemplate;
        this.future = future;
    }

    public AsyncCallFuture<TemplateApiResult> getFuture() {
        return this.future;
    }
}
|
||||
|
||||
/**
 * Kicks off creation of a single OVA disk template on the parent's image
 * store and returns a future for the result.
 *
 * For a non-bootable disk a new template_store_ref entry is created and moved
 * to CreateOnlyRequested; the bootable disk reuses the parent template's own
 * store object. The actual work is dispatched to the image store and the
 * result is delivered through createDatadiskTemplateCallback.
 *
 * @return future completed by the callback (or immediately on dispatch failure)
 */
@Override
public AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) {
    AsyncCallFuture<TemplateApiResult> future = new AsyncCallFuture<TemplateApiResult>();
    // Make an entry for disk template in template_store_ref table
    DataStore store = parentTemplate.getDataStore();
    TemplateObject dataDiskTemplateOnStore;
    if (!bootable) {
        // New child template: register it on the store and mark creation requested.
        dataDiskTemplateOnStore = (TemplateObject)store.create(dataDiskTemplate);
        dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested);
    } else {
        // Bootable disk: operate on the parent template's existing store object.
        dataDiskTemplateOnStore = (TemplateObject) imageFactory.getTemplate(parentTemplate, store);
    }
    try {
        CreateDataDiskTemplateContext<TemplateApiResult> context = new CreateDataDiskTemplateContext<TemplateApiResult>(null, dataDiskTemplateOnStore, future);
        AsyncCallbackDispatcher<TemplateServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
        caller.setCallback(caller.getTarget().createDatadiskTemplateCallback(null, null)).setContext(context);
        ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore();
        tmpltStore.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, fileSize, bootable, caller);
    } catch (CloudRuntimeException ex) {
        // Dispatch failed: record the failure on the store object and complete
        // the future right away so callers blocking on it don't hang.
        dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
        TemplateApiResult result = new TemplateApiResult(dataDiskTemplate);
        result.setResult(ex.getMessage());
        if (future != null) {
            future.complete(result);
        }
    }
    return future;
}
|
||||
|
||||
/**
 * Completion callback for datadisk-template creation on the image store:
 * transitions the template's state-machine according to the command result and
 * completes the caller's future with a TemplateApiResult.
 *
 * Always completes the future — even when processing the state event throws —
 * so synchronous callers blocked on future.get() are released.
 */
protected Void createDatadiskTemplateCallback(AsyncCallbackDispatcher<TemplateServiceImpl, CreateCmdResult> callback,
        CreateDataDiskTemplateContext<TemplateApiResult> context) {
    DataObject dataDiskTemplate = context.dataDiskTemplate;
    AsyncCallFuture<TemplateApiResult> future = context.getFuture();
    CreateCmdResult result = callback.getResult();
    TemplateApiResult dataDiskTemplateResult = new TemplateApiResult((TemplateObject)dataDiskTemplate);
    try {
        if (result.isSuccess()) {
            // Pass the answer along so the store ref (path, size, state) is updated.
            dataDiskTemplate.processEvent(Event.OperationSuccessed, result.getAnswer());
        } else {
            dataDiskTemplate.processEvent(Event.OperationFailed);
            dataDiskTemplateResult.setResult(result.getResult());
        }
    } catch (CloudRuntimeException e) {
        s_logger.debug("Failed to process create template callback", e);
        dataDiskTemplateResult.setResult(e.toString());
    }
    future.complete(dataDiskTemplateResult);
    return null;
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.image.store;
|
|||
|
||||
import java.util.Date;
|
||||
import java.util.Set;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
|
@ -42,7 +43,9 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
|
|||
import org.apache.cloudstack.storage.image.ImageStoreDriver;
|
||||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.apache.cloudstack.storage.to.ImageStoreTO;
|
||||
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
|
|
@ -214,5 +217,14 @@ public class ImageStoreImpl implements ImageStoreEntity {
|
|||
driver.deleteEntityExtractUrl(this, installPath, url, entityType);
|
||||
}
|
||||
|
||||
/**
 * Returns the disks contained in the given OVA object by delegating to the
 * configured image store driver.
 */
@Override
public List<DatadiskTO> getDataDiskTemplates(DataObject obj) {
    return driver.getDataDiskTemplates(obj);
}
|
||||
|
||||
/**
 * Delegates datadisk-template creation to the image store driver.
 *
 * NOTE(review): this entity-level signature orders the trailing parameters as
 * (fileSize, bootable) while the driver interface orders them (bootable,
 * fileSize); the delegation below intentionally swaps them to match the
 * driver — easy to misread, verify when changing either signature.
 */
@Override
public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback<CreateCmdResult> callback) {
    return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback);
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
|
|||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
||||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
|
|
@ -230,6 +231,16 @@ public class TemplateObject implements TemplateInfo {
|
|||
templateVO.setSize(newTemplate.getSize());
|
||||
imageDao.update(templateVO.getId(), templateVO);
|
||||
}
|
||||
} else if (answer instanceof CreateDatadiskTemplateAnswer) {
|
||||
CreateDatadiskTemplateAnswer createAnswer = (CreateDatadiskTemplateAnswer)answer;
|
||||
TemplateObjectTO dataDiskTemplate = createAnswer.getDataDiskTemplate();
|
||||
TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(getDataStore().getId(), dataDiskTemplate.getId());
|
||||
templateStoreRef.setInstallPath(dataDiskTemplate.getPath());
|
||||
templateStoreRef.setDownloadPercent(100);
|
||||
templateStoreRef.setDownloadState(Status.DOWNLOADED);
|
||||
templateStoreRef.setSize(dataDiskTemplate.getSize());
|
||||
templateStoreRef.setPhysicalSize(dataDiskTemplate.getPhysicalSize());
|
||||
templateStoreDao.update(templateStoreRef.getId(), templateStoreRef);
|
||||
}
|
||||
}
|
||||
objectInStoreMgr.update(this, event);
|
||||
|
|
@ -458,6 +469,11 @@ public class TemplateObject implements TemplateInfo {
|
|||
return imageVO.getSourceTemplateId();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getParentTemplateId() {
|
||||
return imageVO.getParentTemplateId();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getTemplateTag() {
|
||||
return imageVO.getTemplateTag();
|
||||
|
|
|
|||
|
|
@ -71,6 +71,9 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
+ "left join cluster_details cd on c.id=cd.cluster_id and cd.name='" + CapacityManager.StorageOperationsExcludeCluster.key() + "' "
|
||||
+ "where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and s.pool_id = ? ";
|
||||
|
||||
private String findOneHypervisorHostInScopeByType = "select h.id from host h where h.status = 'Up' and h.hypervisor_type = ? ";
|
||||
private String findOneHypervisorHostInScope = "select h.id from host h where h.status = 'Up' and h.hypervisor_type is not null ";
|
||||
|
||||
protected boolean moveBetweenPrimaryImage(DataStore srcStore, DataStore destStore) {
|
||||
DataStoreRole srcRole = srcStore.getRole();
|
||||
DataStoreRole destRole = destStore.getRole();
|
||||
|
|
|
|||
|
|
@ -20,7 +20,9 @@ package org.apache.cloudstack.storage.image;
|
|||
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
|
@ -34,6 +36,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.framework.async.AsyncRpcContext;
|
||||
|
|
@ -44,9 +47,13 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
|
|||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
|
||||
import org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand;
|
||||
import com.cloud.agent.api.storage.DownloadAnswer;
|
||||
import com.cloud.agent.api.storage.GetDatadisksAnswer;
|
||||
import com.cloud.agent.api.storage.GetDatadisksCommand;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.alert.AlertManager;
|
||||
|
|
@ -54,10 +61,15 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc;
|
|||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VMTemplateDetailsDao;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.download.DownloadMonitor;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.utils.net.Proxy;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
||||
private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class);
|
||||
|
|
@ -79,6 +91,14 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
|||
VMTemplateZoneDao _vmTemplateZoneDao;
|
||||
@Inject
|
||||
AlertManager _alertMgr;
|
||||
@Inject
|
||||
VMTemplateDetailsDao _templateDetailsDao;
|
||||
@Inject
|
||||
DefaultEndPointSelector _defaultEpSelector;
|
||||
@Inject
|
||||
AccountDao _accountDao;
|
||||
@Inject
|
||||
ResourceLimitService _resourceLimitMgr;
|
||||
|
||||
protected String _proxy = null;
|
||||
|
||||
|
|
@ -288,6 +308,58 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType){
|
||||
public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<DatadiskTO> getDataDiskTemplates(DataObject obj) {
|
||||
List<DatadiskTO> dataDiskDetails = new ArrayList<DatadiskTO>();
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Get the data disks present in the OVA template");
|
||||
}
|
||||
DataStore store = obj.getDataStore();
|
||||
GetDatadisksCommand cmd = new GetDatadisksCommand(obj.getTO());
|
||||
EndPoint ep = _defaultEpSelector.select(store);
|
||||
Answer answer = null;
|
||||
if (ep == null) {
|
||||
String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
|
||||
s_logger.error(errMsg);
|
||||
answer = new Answer(cmd, false, errMsg);
|
||||
} else {
|
||||
answer = ep.sendMessage(cmd);
|
||||
}
|
||||
if (answer != null && answer.getResult()) {
|
||||
GetDatadisksAnswer getDatadisksAnswer = (GetDatadisksAnswer)answer;
|
||||
dataDiskDetails = getDatadisksAnswer.getDataDiskDetails(); // Details - Disk path, virtual size
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Get Data disk command failed " + answer.getDetails());
|
||||
}
|
||||
return dataDiskDetails;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
Answer answer = null;
|
||||
String errMsg = null;
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Create Datadisk template: " + dataDiskTemplate.getId());
|
||||
}
|
||||
CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable);
|
||||
EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore());
|
||||
if (ep == null) {
|
||||
errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
|
||||
s_logger.error(errMsg);
|
||||
answer = new Answer(cmd, false, errMsg);
|
||||
} else {
|
||||
answer = ep.sendMessage(cmd);
|
||||
}
|
||||
if (answer != null && !answer.getResult()) {
|
||||
errMsg = answer.getDetails();
|
||||
}
|
||||
CreateCmdResult result = new CreateCmdResult(null, answer);
|
||||
result.setResult(errMsg);
|
||||
callback.complete(result);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,14 +19,25 @@
|
|||
package org.apache.cloudstack.storage.image;
|
||||
|
||||
import com.cloud.storage.Upload;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
|
||||
/**
 * Driver-side operations specific to image (secondary storage) stores,
 * including multi-disk OVA support.
 */
public interface ImageStoreDriver extends DataStoreDriver {
    /** Creates a download/extract URL for the given object's install path. */
    String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject);

    /** Removes a previously created extract URL. */
    void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType);

    /** Returns the disks contained in a multi-disk OVA object on the store. */
    List<DatadiskTO> getDataDiskTemplates(DataObject obj);

    /**
     * Creates the store-side template for one OVA disk and completes the
     * callback with the result. Note the (bootable, fileSize) parameter order —
     * it differs from the entity-level wrapper's (fileSize, bootable).
     */
    Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback<CreateCmdResult> callback);
}
|
||||
|
|
|
|||
|
|
@ -304,7 +304,11 @@ public class TemplateEntityImpl implements TemplateEntity {
|
|||
|
||||
/**
 * Not implemented for this entity — always returns null (no update timestamp
 * is tracked here).
 */
@Override
public Date getUpdated() {
    // TODO Auto-generated method stub
    return null;
}
|
||||
|
||||
/**
 * This entity does not model the multi-disk parent/child template
 * relationship — always returns null.
 */
@Override
public Long getParentTemplateId() {
    return null;
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -517,7 +517,11 @@ public class VolumeObject implements VolumeInfo {
|
|||
VolumeObjectTO newVol = (VolumeObjectTO)cpyAnswer.getNewData();
|
||||
vol.setPath(newVol.getPath());
|
||||
if (newVol.getSize() != null) {
|
||||
vol.setSize(newVol.getSize());
|
||||
// Root disk resize may be requested where the original
|
||||
// template size is less than the requested root disk size
|
||||
if (vol.getSize() == null || vol.getSize() < newVol.getSize()) {
|
||||
vol.setSize(newVol.getSize());
|
||||
}
|
||||
}
|
||||
if (newVol.getFormat() != null) {
|
||||
vol.setFormat(newVol.getFormat());
|
||||
|
|
|
|||
|
|
@ -18,22 +18,31 @@ package com.cloud.cluster;
|
|||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import org.apache.cloudstack.framework.config.ConfigDepot;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.runners.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.cluster.dao.ManagementServerHostDao;
|
||||
import com.cloud.utils.component.ComponentLifecycle;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class ClusterServiceServletAdapterTest {
|
||||
|
||||
@Mock
|
||||
private ClusterManager _manager;
|
||||
@Mock
|
||||
private ManagementServerHostDao _mshostDao;
|
||||
@Mock
|
||||
protected ConfigDepot _configDepot;
|
||||
|
||||
ClusterServiceServletAdapter clusterServiceServletAdapter;
|
||||
ClusterManagerImpl clusterManagerImpl;
|
||||
|
||||
@Before
|
||||
public void setup() throws IllegalArgumentException,
|
||||
IllegalAccessException, NoSuchFieldException, SecurityException {
|
||||
public void setup() throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
|
||||
clusterServiceServletAdapter = new ClusterServiceServletAdapter();
|
||||
clusterManagerImpl = new ClusterManagerImpl();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -165,7 +165,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
String secStorageUrl = nfsStore.getUrl();
|
||||
assert (secStorageUrl != null);
|
||||
String installPath = template.getPath();
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion);
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion());
|
||||
String installFullPath = secondaryMountPoint + "/" + installPath;
|
||||
try {
|
||||
if (installFullPath.endsWith(".ova")) {
|
||||
|
|
@ -203,7 +203,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
String installPath = volume.getPath();
|
||||
int index = installPath.lastIndexOf(File.separator);
|
||||
String volumeUuid = installPath.substring(index + 1);
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion);
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion());
|
||||
//The real volume path
|
||||
String volumePath = installPath + File.separator + volumeUuid + ".ova";
|
||||
String installFullPath = secondaryMountPoint + "/" + installPath;
|
||||
|
|
@ -280,8 +280,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
assert (morDs != null);
|
||||
DatastoreMO primaryStorageDatastoreMo = new DatastoreMO(context, morDs);
|
||||
|
||||
copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName,
|
||||
cmd.getNfsVersion());
|
||||
copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName, cmd.getNfsVersion());
|
||||
} else {
|
||||
s_logger.info("Template " + templateName + " has already been setup, skip the template setup process in primary storage");
|
||||
}
|
||||
|
|
@ -354,9 +353,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName());
|
||||
}
|
||||
|
||||
snapshotBackupUuid =
|
||||
backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid,
|
||||
hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion());
|
||||
snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid,
|
||||
prevBackupUuid, hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion());
|
||||
|
||||
success = (snapshotBackupUuid != null);
|
||||
if (success) {
|
||||
|
|
@ -410,8 +408,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName());
|
||||
if (vmMo == null) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() +
|
||||
", try within datacenter");
|
||||
s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter");
|
||||
}
|
||||
vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName());
|
||||
|
||||
|
|
@ -422,9 +419,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
}
|
||||
|
||||
Ternary<String, Long, Long> result =
|
||||
createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath,
|
||||
hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
|
||||
Ternary<String, Long, Long> result = createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath,
|
||||
hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
|
||||
|
||||
return new CreatePrivateTemplateAnswer(cmd, true, null, result.first(), result.third(), result.second(), cmd.getUniqueName(), ImageFormat.OVA);
|
||||
|
||||
|
|
@ -481,9 +477,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
|
||||
Pair<String, String> result;
|
||||
if (cmd.toSecondaryStorage()) {
|
||||
result =
|
||||
copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL,
|
||||
hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
|
||||
result = copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL,
|
||||
hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
|
||||
} else {
|
||||
StorageFilerTO poolTO = cmd.getPool();
|
||||
|
||||
|
|
@ -496,8 +491,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
}
|
||||
|
||||
result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath,
|
||||
cmd.getNfsVersion());
|
||||
result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath, cmd.getNfsVersion());
|
||||
deleteVolumeDirOnSecondaryStorage(volumeId, secondaryStorageURL, cmd.getNfsVersion());
|
||||
}
|
||||
return new CopyVolumeAnswer(cmd, true, null, result.first(), result.second());
|
||||
|
|
@ -536,8 +530,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
|
||||
DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs);
|
||||
details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid,
|
||||
cmd.getNfsVersion());
|
||||
details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid, cmd.getNfsVersion());
|
||||
if (details == null) {
|
||||
success = true;
|
||||
}
|
||||
|
|
@ -553,13 +546,14 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return new CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName);
|
||||
}
|
||||
|
||||
|
||||
// templateName: name in secondary storage
|
||||
// templateUuid: will be used at hypervisor layer
|
||||
private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl,
|
||||
String templatePathAtSecondaryStorage, String templateName, String templateUuid, Integer nfsVersion) throws Exception {
|
||||
private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage,
|
||||
String templateName, String templateUuid, Integer nfsVersion) throws Exception {
|
||||
|
||||
s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " +
|
||||
templatePathAtSecondaryStorage + ", templateName: " + templateName);
|
||||
s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: "
|
||||
+ templatePathAtSecondaryStorage + ", templateName: " + templateName);
|
||||
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl, nfsVersion);
|
||||
s_logger.info("Secondary storage mount point: " + secondaryMountPoint);
|
||||
|
|
@ -593,9 +587,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
|
||||
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
|
||||
if (vmMo == null) {
|
||||
String msg =
|
||||
"Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage +
|
||||
", templateName: " + templateName + ", templateUuid: " + templateUuid;
|
||||
String msg = "Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage
|
||||
+ ", templateName: " + templateName + ", templateUuid: " + templateUuid;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
|
@ -800,7 +793,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
// TODO a bit ugly here
|
||||
BufferedWriter out = null;
|
||||
try {
|
||||
out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"),"UTF-8"));
|
||||
out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8"));
|
||||
out.write("filename=" + templateName + ".ova");
|
||||
out.newLine();
|
||||
out.write("description=");
|
||||
|
|
@ -840,7 +833,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
// TODO a bit ugly here
|
||||
BufferedWriter out = null;
|
||||
try {
|
||||
out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"),"UTF-8"));
|
||||
out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8"));
|
||||
out.write("ova.filename=" + templateName + ".ova");
|
||||
out.newLine();
|
||||
out.write("version=1.0");
|
||||
|
|
@ -860,8 +853,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
}
|
||||
|
||||
private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId,
|
||||
String secStorageUrl, String snapshotBackupUuid, Integer nfsVersion) throws Exception {
|
||||
private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, String secStorageUrl,
|
||||
String snapshotBackupUuid, Integer nfsVersion) throws Exception {
|
||||
|
||||
restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), snapshotBackupUuid, nfsVersion);
|
||||
return null;
|
||||
|
|
@ -935,8 +928,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return backupUuid + "/" + backupUuid;
|
||||
}
|
||||
|
||||
private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName,
|
||||
String workerVmName, Integer nfsVersion) throws Exception {
|
||||
private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName, String workerVmName,
|
||||
Integer nfsVersion) throws Exception {
|
||||
|
||||
String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsVersion);
|
||||
String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName;
|
||||
|
|
@ -980,8 +973,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
}
|
||||
|
||||
private Pair<String, String> copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName,
|
||||
long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception {
|
||||
private Pair<String, String> copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName, long volumeId,
|
||||
String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception {
|
||||
|
||||
String volumeFolder = String.valueOf(volumeId) + "/";
|
||||
VirtualMachineMO workerVm = null;
|
||||
|
|
@ -1019,8 +1012,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
|
||||
vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false);
|
||||
|
||||
exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName,
|
||||
hostService.getWorkerName(hyperHost.getContext(), cmd, 1), nfsVersion);
|
||||
exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1),
|
||||
nfsVersion);
|
||||
return new Pair<String, String>(volumeFolder, exportName);
|
||||
|
||||
} finally {
|
||||
|
|
@ -1041,8 +1034,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return datastoreVolumePath;
|
||||
}
|
||||
|
||||
private Pair<String, String> copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName, Integer nfsVersion)
|
||||
throws Exception {
|
||||
private Pair<String, String> copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName,
|
||||
Integer nfsVersion) throws Exception {
|
||||
|
||||
String volumeFolder = String.valueOf(volumeId) + "/";
|
||||
String newVolume = UUID.randomUUID().toString().replaceAll("-", "");
|
||||
|
|
@ -1098,7 +1091,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
s_logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString());
|
||||
// to be safe, physically test existence of the target OVA file
|
||||
if ((new File(exportDir + File.separator + ovaFileName)).exists()) {
|
||||
s_logger.info("OVA file: " + ovaFileName +" is created and ready to extract.");
|
||||
s_logger.info("OVA file: " + ovaFileName + " is created and ready to extract.");
|
||||
return ovaFileName;
|
||||
} else {
|
||||
String msg = exportDir + File.separator + ovaFileName + " is not created as expected";
|
||||
|
|
@ -1135,9 +1128,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return "snapshots/" + accountId + "/" + volumeId;
|
||||
}
|
||||
|
||||
private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost,
|
||||
String fileName, ManagedObjectReference morDs, String exceptFileName)
|
||||
throws Exception{
|
||||
private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost, String fileName, ManagedObjectReference morDs, String exceptFileName)
|
||||
throws Exception {
|
||||
long size = 0;
|
||||
DatastoreMO dsMo = new DatastoreMO(context, morDs);
|
||||
HostDatastoreBrowserMO browserMo = dsMo.getHostDatastoreBrowserMO();
|
||||
|
|
@ -1187,8 +1179,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
for (ManagedObjectReference taskMor : tasks) {
|
||||
TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info"));
|
||||
|
||||
if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) &&
|
||||
info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
|
||||
if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
|
||||
if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) {
|
||||
s_logger.debug("There is already a VM snapshot task running, wait for it");
|
||||
context.getVimClient().waitForTask(taskMor);
|
||||
|
|
@ -1229,8 +1220,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
vmMo.removeSnapshot(vmSnapshotName, false);
|
||||
}
|
||||
} catch (Exception e1) {
|
||||
s_logger.info("[ignored]"
|
||||
+ "error during snapshot remove: " + e1.getLocalizedMessage());
|
||||
s_logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage());
|
||||
}
|
||||
|
||||
return new CreateVMSnapshotAnswer(cmd, false, e.getMessage());
|
||||
|
|
@ -1259,8 +1249,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
baseName = baseName.substring(1, baseName.length() - 1);
|
||||
|
||||
vmdkName = fullPath; // for managed storage, vmdkName == fullPath
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
vmdkName = fullPath.split("] ")[1];
|
||||
|
||||
if (vmdkName.endsWith(".vmdk")) {
|
||||
|
|
@ -1283,8 +1272,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
return mapNewDisk;
|
||||
}
|
||||
|
||||
private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String, String> mapNewDisk, VmwareContext context,
|
||||
VmwareHypervisorHost hyperHost, String vmName) throws Exception {
|
||||
private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String, String> mapNewDisk, VmwareContext context, VmwareHypervisorHost hyperHost, String vmName)
|
||||
throws Exception {
|
||||
for (VolumeObjectTO volumeTO : volumeTOs) {
|
||||
String oldPath = volumeTO.getPath();
|
||||
|
||||
|
|
@ -1296,8 +1285,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
|
||||
// remove '[' and ']'
|
||||
baseName = oldPath.substring(1, oldPath.length() - 1);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
baseName = VmwareHelper.trimSnapshotDeltaPostfix(volumeTO.getPath());
|
||||
}
|
||||
|
||||
|
|
@ -1309,7 +1297,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
long size = getVMSnapshotChainSize(context, hyperHost, baseName + ".vmdk", morDs, newPath);
|
||||
size = getVMSnapshotChainSize(context, hyperHost, baseName + "-*.vmdk", morDs, newPath);
|
||||
|
||||
if (volumeTO.getVolumeType()== Volume.Type.ROOT) {
|
||||
if (volumeTO.getVolumeType() == Volume.Type.ROOT) {
|
||||
// add memory snapshot size
|
||||
size += getVMSnapshotChainSize(context, hyperHost, vmName + "-*.vmsn", morDs, null);
|
||||
}
|
||||
|
|
@ -1319,7 +1307,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
}
|
||||
}
|
||||
|
||||
private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store) throws Exception {
|
||||
private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store) throws Exception {
|
||||
try {
|
||||
// if baseName equates to a datastore name, this should be managed storage
|
||||
ManagedObjectReference morDs = hyperHost.findDatastoreByName(baseName);
|
||||
|
|
@ -1327,10 +1315,8 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
if (morDs != null) {
|
||||
return morDs;
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
s_logger.info("[ignored]"
|
||||
+ "error getting managed object refference: " + ex.getLocalizedMessage());
|
||||
} catch (Exception ex) {
|
||||
s_logger.info("[ignored]" + "error getting managed object refference: " + ex.getLocalizedMessage());
|
||||
}
|
||||
|
||||
// not managed storage, so use the standard way of getting a ManagedObjectReference for a datastore
|
||||
|
|
@ -1408,8 +1394,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager {
|
|||
for (ManagedObjectReference taskMor : tasks) {
|
||||
TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info"));
|
||||
|
||||
if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) &&
|
||||
info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) {
|
||||
if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) {
|
||||
s_logger.debug("There is already a VM snapshot task running, wait for it");
|
||||
context.getVimClient().waitForTask(taskMor);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,6 +25,8 @@ import java.net.URI;
|
|||
import java.net.URL;
|
||||
import java.nio.channels.SocketChannel;
|
||||
import java.rmi.RemoteException;
|
||||
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import org.joda.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
|
@ -507,10 +509,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
} else if (clz == ResizeVolumeCommand.class) {
|
||||
return execute((ResizeVolumeCommand)cmd);
|
||||
} else if (clz == UnregisterVMCommand.class) {
|
||||
return execute((UnregisterVMCommand) cmd);
|
||||
return execute((UnregisterVMCommand)cmd);
|
||||
} else if (cmd instanceof StorageSubSystemCommand) {
|
||||
checkStorageProcessorAndHandlerNfsVersionAttribute((StorageSubSystemCommand)cmd);
|
||||
return storageHandler.handleStorageCommands((StorageSubSystemCommand) cmd);
|
||||
return storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd);
|
||||
} else if (clz == ScaleVmCommand.class) {
|
||||
return execute((ScaleVmCommand)cmd);
|
||||
} else if (clz == PvlanSetupCommand.class) {
|
||||
|
|
@ -540,8 +542,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0);
|
||||
_cmdMBeans.remove(0);
|
||||
|
||||
JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(),
|
||||
"Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name"));
|
||||
JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name"));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
if (s_logger.isTraceEnabled())
|
||||
|
|
@ -568,11 +569,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param cmd command to execute
|
||||
*/
|
||||
protected void checkStorageProcessorAndHandlerNfsVersionAttribute(StorageSubSystemCommand cmd) {
|
||||
if (storageNfsVersion != null) return;
|
||||
if (cmd instanceof CopyCommand){
|
||||
EnumMap<VmwareStorageProcessorConfigurableFields,Object> params = new EnumMap<VmwareStorageProcessorConfigurableFields,Object>(VmwareStorageProcessorConfigurableFields.class);
|
||||
examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd, params);
|
||||
params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand) cmd, params);
|
||||
if (storageNfsVersion != null)
|
||||
return;
|
||||
if (cmd instanceof CopyCommand) {
|
||||
EnumMap<VmwareStorageProcessorConfigurableFields, Object> params = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(
|
||||
VmwareStorageProcessorConfigurableFields.class);
|
||||
examineStorageSubSystemCommandNfsVersion((CopyCommand)cmd, params);
|
||||
params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand)cmd, params);
|
||||
reconfigureProcessorByHandler(params);
|
||||
}
|
||||
}
|
||||
|
|
@ -581,10 +584,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* Reconfigure processor by handler
|
||||
* @param params params
|
||||
*/
|
||||
protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfigurableFields,Object> params) {
|
||||
VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler) storageHandler;
|
||||
protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
|
||||
VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler)storageHandler;
|
||||
boolean success = handler.reconfigureStorageProcessor(params);
|
||||
if (success){
|
||||
if (success) {
|
||||
s_logger.info("VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler successfully reconfigured");
|
||||
} else {
|
||||
s_logger.error("Error while reconfiguring VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler, params=" + _gson.toJson(params));
|
||||
|
|
@ -597,14 +600,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param params params
|
||||
* @return copy of params including new values, if suitable
|
||||
*/
|
||||
protected EnumMap<VmwareStorageProcessorConfigurableFields,Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields,Object> params) {
|
||||
protected EnumMap<VmwareStorageProcessorConfigurableFields, Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd,
|
||||
EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
|
||||
EnumMap<VmwareStorageProcessorConfigurableFields, Object> paramsCopy = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(params);
|
||||
HypervisorType hypervisor = cmd.getDestTO().getHypervisorType();
|
||||
if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)){
|
||||
if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) {
|
||||
DataStoreTO destDataStore = cmd.getDestTO().getDataStore();
|
||||
if (destDataStore instanceof PrimaryDataStoreTO){
|
||||
PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore;
|
||||
if (dest.isFullCloneFlag() != null){
|
||||
if (destDataStore instanceof PrimaryDataStoreTO) {
|
||||
PrimaryDataStoreTO dest = (PrimaryDataStoreTO)destDataStore;
|
||||
if (dest.isFullCloneFlag() != null) {
|
||||
paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue());
|
||||
}
|
||||
}
|
||||
|
|
@ -617,15 +621,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param cmd command to execute
|
||||
* @param params params
|
||||
*/
|
||||
protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields,Object> params){
|
||||
protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
|
||||
DataStoreTO srcDataStore = cmd.getSrcTO().getDataStore();
|
||||
boolean nfsVersionFound = false;
|
||||
|
||||
if (srcDataStore instanceof NfsTO){
|
||||
nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO) srcDataStore);
|
||||
if (srcDataStore instanceof NfsTO) {
|
||||
nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO)srcDataStore);
|
||||
}
|
||||
|
||||
if (nfsVersionFound){
|
||||
if (nfsVersionFound) {
|
||||
params.put(VmwareStorageProcessorConfigurableFields.NFS_VERSION, storageNfsVersion);
|
||||
}
|
||||
}
|
||||
|
|
@ -635,8 +639,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param nfsTO nfsTO
|
||||
* @return true if NFS version was found and not null, false in other case
|
||||
*/
|
||||
protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO){
|
||||
if (nfsTO != null && nfsTO.getNfsVersion() != null){
|
||||
protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO) {
|
||||
if (nfsTO != null && nfsTO.getNfsVersion() != null) {
|
||||
storageNfsVersion = nfsTO.getNfsVersion();
|
||||
return true;
|
||||
}
|
||||
|
|
@ -646,7 +650,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
/**
|
||||
* Registers the vm to the inventory given the vmx file.
|
||||
*/
|
||||
private void registerVm(String vmName, DatastoreMO dsMo) throws Exception{
|
||||
private void registerVm(String vmName, DatastoreMO dsMo) throws Exception {
|
||||
|
||||
//1st param
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
|
|
@ -673,8 +677,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
private Answer execute(ResizeVolumeCommand cmd) {
|
||||
String path = cmd.getPath();
|
||||
String vmName = cmd.getInstanceName();
|
||||
long newSize = cmd.getNewSize() / 1024;
|
||||
long oldSize = cmd.getCurrentSize()/1024;
|
||||
long newSize = cmd.getNewSize() / ResourceType.bytesToKiB;
|
||||
long oldSize = cmd.getCurrentSize() / ResourceType.bytesToKiB;
|
||||
boolean useWorkerVm = false;
|
||||
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
|
|
@ -686,9 +690,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
try {
|
||||
if (newSize < oldSize) {
|
||||
throw new Exception("VMware doesn't support shrinking volume from larger size: " + oldSize/(1024*1024) + " GB to a smaller size: " + newSize/(1024*1024) + " GB");
|
||||
throw new Exception(
|
||||
"VMware doesn't support shrinking volume from larger size: " + oldSize / ResourceType.bytesToMiB + " GB to a smaller size: " + newSize / ResourceType.bytesToMiB + " GB");
|
||||
} else if (newSize == oldSize) {
|
||||
return new ResizeVolumeAnswer(cmd, true, "success", newSize*1024);
|
||||
return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB);
|
||||
}
|
||||
if (vmName.equalsIgnoreCase("none")) {
|
||||
// we need to spawn a worker VM to attach the volume to and
|
||||
|
|
@ -706,7 +711,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
synchronized (this) {
|
||||
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk");
|
||||
vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
|
||||
vmMo.attachDisk(new String[] {vmdkDataStorePath}, morDS);
|
||||
}
|
||||
}
|
||||
// find VM through datacenter (VM is not at the target host yet)
|
||||
|
|
@ -725,8 +730,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
// IDE virtual disk cannot be re-sized if VM is running
|
||||
if (vdisk.second() != null && vdisk.second().contains("ide")) {
|
||||
throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. " +
|
||||
"Please re-try when virtual disk is attached to a VM using SCSI controller.");
|
||||
throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. "
|
||||
+ "Please re-try when virtual disk is attached to a VM using SCSI controller.");
|
||||
}
|
||||
|
||||
if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi"))
|
||||
|
|
@ -958,12 +963,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
protected ExecutionResult prepareNetworkElementCommand(SetupGuestNetworkCommand cmd) {
|
||||
NicTO nic = cmd.getNic();
|
||||
String routerIp = getRouterSshControlIp(cmd);
|
||||
String domrName =
|
||||
cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
|
||||
try {
|
||||
int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp,
|
||||
nic.getMac());
|
||||
int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp, nic.getMac());
|
||||
nic.setDeviceId(ethDeviceNum);
|
||||
} catch (Exception e) {
|
||||
String msg = "Prepare SetupGuestNetwork failed due to " + e.toString();
|
||||
|
|
@ -973,7 +976,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return new ExecutionResult(true, null);
|
||||
}
|
||||
|
||||
|
||||
private ExecutionResult prepareNetworkElementCommand(IpAssocVpcCommand cmd) {
|
||||
String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
String routerIp = getRouterSshControlIp(cmd);
|
||||
|
|
@ -1020,13 +1022,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
private ExecutionResult prepareNetworkElementCommand(SetNetworkACLCommand cmd) {
|
||||
NicTO nic = cmd.getNic();
|
||||
String routerName =
|
||||
cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
|
||||
String routerIp = getRouterSshControlIp(cmd);
|
||||
|
||||
try {
|
||||
int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp,
|
||||
nic.getMac());
|
||||
int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, nic.getMac());
|
||||
nic.setDeviceId(ethDeviceNum);
|
||||
} catch (Exception e) {
|
||||
String msg = "Prepare SetNetworkACL failed due to " + e.toString();
|
||||
|
|
@ -1073,7 +1073,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000;
|
||||
Map<String, String> details = cmd.getDetails();
|
||||
if (details != null) {
|
||||
nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter"));
|
||||
nicDeviceType = VirtualEthernetCardType.valueOf((String)details.get("nicAdapter"));
|
||||
}
|
||||
|
||||
// find a usable device number in VMware environment
|
||||
|
|
@ -1528,8 +1528,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
hotaddIncrementSizeInMb = vmMo.getHotAddMemoryIncrementSizeInMb();
|
||||
hotaddMemoryLimitInMb = vmMo.getHotAddMemoryLimitInMb();
|
||||
if (requestedMaxMemoryInMb > hotaddMemoryLimitInMb) {
|
||||
throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB." +
|
||||
" Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB.");
|
||||
throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB."
|
||||
+ " Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB.");
|
||||
}
|
||||
|
||||
// Check increment is multiple of increment size
|
||||
|
|
@ -1630,7 +1630,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
DiskTO rootDiskTO = null;
|
||||
// If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault'
|
||||
// This helps avoid mix of different scsi subtype controllers in instance.
|
||||
if (DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
|
||||
if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
|
||||
dataDiskController = DiskControllerType.scsi.toString();
|
||||
}
|
||||
|
||||
|
|
@ -1659,9 +1659,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
// Validate VM name is unique in Datacenter
|
||||
VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
|
||||
if(vmInVcenter != null) {
|
||||
if (vmInVcenter != null) {
|
||||
vmAlreadyExistsInVcenter = true;
|
||||
String msg = "VM with name: " + vmNameOnVcenter +" already exists in vCenter.";
|
||||
String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter.";
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
|
@ -1759,8 +1759,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN));
|
||||
|
||||
rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
DataStoreTO primaryStore = vol.getData().getDataStore();
|
||||
|
||||
rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid());
|
||||
|
|
@ -1781,9 +1780,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
tearDownVm(vmMo);
|
||||
}else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(),
|
||||
getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec),
|
||||
guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) {
|
||||
} else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec),
|
||||
vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false,
|
||||
controllerInfo, systemVm)) {
|
||||
throw new Exception("Failed to create VM. vmName: " + vmInternalCSName);
|
||||
}
|
||||
}
|
||||
|
|
@ -1808,9 +1807,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
|
||||
|
||||
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(),
|
||||
getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec),
|
||||
guestOsId, vmSpec.getLimitCpuUse());
|
||||
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)),
|
||||
getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse());
|
||||
|
||||
// Check for multi-cores per socket settings
|
||||
int numCoresPerSocket = 1;
|
||||
|
|
@ -1870,9 +1868,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs);
|
||||
|
||||
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
|
||||
Pair<VirtualDevice, Boolean> isoInfo =
|
||||
VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(),
|
||||
true, true, ideUnitNumber++, i + 1);
|
||||
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo,
|
||||
String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1);
|
||||
deviceConfigSpecArray[i].setDevice(isoInfo.first());
|
||||
if (isoInfo.second()) {
|
||||
if (s_logger.isDebugEnabled())
|
||||
|
|
@ -1901,8 +1898,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
assert (isoDatastoreInfo.second() != null);
|
||||
|
||||
deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
|
||||
Pair<VirtualDevice, Boolean> isoInfo =
|
||||
VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1);
|
||||
Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++,
|
||||
i + 1);
|
||||
deviceConfigSpecArray[i].setDevice(isoInfo.first());
|
||||
if (isoInfo.second()) {
|
||||
if (s_logger.isDebugEnabled())
|
||||
|
|
@ -1989,14 +1986,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
assert (volumeDsDetails != null);
|
||||
|
||||
String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec,
|
||||
vol, matchingExistingDisk,
|
||||
dataStoresDetails);
|
||||
if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
|
||||
String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails);
|
||||
if (controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
|
||||
scsiUnitNumber++;
|
||||
VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey,
|
||||
diskChain,
|
||||
volumeDsDetails.first(),
|
||||
VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(),
|
||||
(controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) ? ((ideUnitNumber++) % VmwareHelper.MAX_IDE_CONTROLLER_COUNT) : scsiUnitNumber++, i + 1);
|
||||
|
||||
if (vol.getType() == Volume.Type.ROOT)
|
||||
|
|
@ -2004,7 +1997,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
deviceConfigSpecArray[i].setDevice(device);
|
||||
deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
|
||||
|
||||
if(s_logger.isDebugEnabled())
|
||||
if (s_logger.isDebugEnabled())
|
||||
s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
|
||||
|
||||
i++;
|
||||
|
|
@ -2094,8 +2087,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus"));
|
||||
VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
|
||||
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType);
|
||||
if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) ||
|
||||
(nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))){
|
||||
if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch)
|
||||
|| (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) {
|
||||
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
|
||||
String dvSwitchUuid;
|
||||
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
|
||||
|
|
@ -2113,8 +2106,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(),
|
||||
nicTo.getMac(), i + 1, true, true);
|
||||
}
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
//if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour
|
||||
nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(),
|
||||
nicTo.getMac(), i + 1, true, true);
|
||||
|
|
@ -2162,8 +2154,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
String keyboardLayout = null;
|
||||
if (vmSpec.getDetails() != null)
|
||||
keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD);
|
||||
vmConfigSpec.getExtraConfig().addAll(
|
||||
Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout)));
|
||||
vmConfigSpec.getExtraConfig()
|
||||
.addAll(Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout)));
|
||||
|
||||
// config video card
|
||||
configureVideoCard(vmMo, vmSpec, vmConfigSpec);
|
||||
|
|
@ -2222,7 +2214,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e);
|
||||
s_logger.warn(msg, e);
|
||||
StartAnswer startAnswer = new StartAnswer(cmd, msg);
|
||||
if(vmAlreadyExistsInVcenter) {
|
||||
if (vmAlreadyExistsInVcenter) {
|
||||
startAnswer.setContextParam("stopRetry", "true");
|
||||
}
|
||||
|
||||
|
|
@ -2233,7 +2225,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
|
||||
DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
|
||||
registerVm(existingVmName, existingVmDsMo);
|
||||
} catch (Exception ex){
|
||||
} catch (Exception ex) {
|
||||
String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex);
|
||||
s_logger.warn(message, ex);
|
||||
}
|
||||
|
|
@ -2271,7 +2263,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
s_logger.warn("Disk chain length for the VM is greater than one, this is not supported");
|
||||
throw new CloudRuntimeException("Unsupported VM disk chain length: "+ diskChain.length);
|
||||
}
|
||||
if (diskInfo.getDiskDeviceBusName() == null || !diskInfo.getDiskDeviceBusName().toLowerCase().startsWith("scsi")) {
|
||||
|
||||
boolean resizingSupported = false;
|
||||
String deviceBusName = diskInfo.getDiskDeviceBusName();
|
||||
if (deviceBusName != null && (deviceBusName.toLowerCase().contains("scsi") || deviceBusName.toLowerCase().contains("lsi"))) {
|
||||
resizingSupported = true;
|
||||
}
|
||||
if (!resizingSupported) {
|
||||
s_logger.warn("Resizing of root disk is only support for scsi device/bus, the provide VM's disk device bus name is " + diskInfo.getDiskDeviceBusName());
|
||||
throw new CloudRuntimeException("Unsupported VM root disk device bus: "+ diskInfo.getDiskDeviceBusName());
|
||||
}
|
||||
|
|
@ -2329,13 +2327,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @throws Exception exception
|
||||
*/
|
||||
protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception {
|
||||
if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)){
|
||||
if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)) {
|
||||
String value = vmSpec.getDetails().get(VmDetailConstants.SVGA_VRAM_SIZE);
|
||||
try {
|
||||
long svgaVmramSize = Long.parseLong(value);
|
||||
setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize, vmConfigSpec);
|
||||
}
|
||||
catch (NumberFormatException e){
|
||||
} catch (NumberFormatException e) {
|
||||
s_logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
@ -2348,9 +2345,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param vmConfigSpec virtual machine config spec
|
||||
*/
|
||||
protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) throws Exception {
|
||||
for (VirtualDevice device : vmMo.getAllDeviceList()){
|
||||
if (device instanceof VirtualMachineVideoCard){
|
||||
VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard) device;
|
||||
for (VirtualDevice device : vmMo.getAllDeviceList()) {
|
||||
if (device instanceof VirtualMachineVideoCard) {
|
||||
VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard)device;
|
||||
modifyVmVideoCardVRamSize(videoCard, vmMo, svgaVmramSize, vmConfigSpec);
|
||||
}
|
||||
}
|
||||
|
|
@ -2364,7 +2361,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param vmConfigSpec virtual machine config spec
|
||||
*/
|
||||
protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) {
|
||||
if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize){
|
||||
if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) {
|
||||
s_logger.info("Video card memory was set " + videoCard.getVideoRamSizeInKB().longValue() + "kb instead of " + svgaVmramSize + "kb");
|
||||
configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec);
|
||||
}
|
||||
|
|
@ -2376,7 +2373,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
* @param svgaVmramSize new svga vram size (in KB)
|
||||
* @param vmConfigSpec virtual machine spec
|
||||
*/
|
||||
protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec){
|
||||
protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) {
|
||||
videoCard.setVideoRamSizeInKB(svgaVmramSize);
|
||||
videoCard.setUseAutoDetect(false);
|
||||
|
||||
|
|
@ -2387,9 +2384,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs);
|
||||
}
|
||||
|
||||
private void tearDownVm(VirtualMachineMO vmMo) throws Exception{
|
||||
private void tearDownVm(VirtualMachineMO vmMo) throws Exception {
|
||||
|
||||
if(vmMo == null) return;
|
||||
if (vmMo == null)
|
||||
return;
|
||||
|
||||
boolean hasSnapshot = false;
|
||||
hasSnapshot = vmMo.hasSnapshot();
|
||||
|
|
@ -2401,17 +2399,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
int getReservedMemoryMb(VirtualMachineTO vmSpec) {
|
||||
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) {
|
||||
return (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
}
|
||||
return 0;
|
||||
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) {
|
||||
return (int)(vmSpec.getMinRam() / ResourceType.bytesToMiB);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int getReservedCpuMHZ(VirtualMachineTO vmSpec) {
|
||||
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
|
||||
return vmSpec.getMinSpeed() * vmSpec.getCpus();
|
||||
}
|
||||
return 0;
|
||||
if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
|
||||
return vmSpec.getMinSpeed() * vmSpec.getCpus();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// return the finalized disk chain for startup, from top to bottom
|
||||
|
|
@ -2433,8 +2431,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
String datastoreName = isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid();
|
||||
Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName);
|
||||
|
||||
if (volumeDsDetails == null)
|
||||
{
|
||||
if (volumeDsDetails == null) {
|
||||
throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host.");
|
||||
}
|
||||
|
||||
|
|
@ -2554,7 +2551,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
OptionValue newVal;
|
||||
if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null) {
|
||||
if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null) {
|
||||
String vrIp = nicTo.getBroadcastUri().getPath().substring(1);
|
||||
newVal = new OptionValue();
|
||||
newVal.setKey("vsp.vr-ip." + nicTo.getMac());
|
||||
|
|
@ -2688,13 +2685,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol,
|
||||
VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
|
||||
private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context)
|
||||
throws Exception {
|
||||
if (diskInfoBuilder != null) {
|
||||
VolumeObjectTO volume = (VolumeObjectTO)vol.getData();
|
||||
|
||||
String dsName = null;
|
||||
String diskBackingFileBaseName= null;
|
||||
String diskBackingFileBaseName = null;
|
||||
|
||||
Map<String, String> details = vol.getDetails();
|
||||
boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
|
||||
|
|
@ -2706,8 +2703,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
dsName = VmwareResource.getDatastoreName(iScsiName);
|
||||
|
||||
diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName();
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid());
|
||||
DatastoreMO dsMo = new DatastoreMO(context, morDs);
|
||||
|
||||
|
|
@ -2716,8 +2712,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
diskBackingFileBaseName = volume.getPath();
|
||||
}
|
||||
|
||||
VirtualMachineDiskInfo diskInfo =
|
||||
diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName);
|
||||
VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName);
|
||||
if (diskInfo != null) {
|
||||
s_logger.info("Found existing disk info from volume path: " + volume.getPath());
|
||||
return diskInfo;
|
||||
|
|
@ -2768,12 +2763,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
Map<String, String> vmDetails = vmSpec.getDetails();
|
||||
if (vmDetails != null && vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER) != null) {
|
||||
if (vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER).equalsIgnoreCase("scsi")) {
|
||||
s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " +
|
||||
vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
|
||||
s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: "
|
||||
+ vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
|
||||
controllerKey = scsiControllerKey;
|
||||
} else {
|
||||
s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " +
|
||||
vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
|
||||
s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: "
|
||||
+ vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
|
||||
controllerKey = ideControllerKey;
|
||||
}
|
||||
} else {
|
||||
|
|
@ -2820,8 +2815,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return controllerInfo.second();
|
||||
}
|
||||
}
|
||||
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,
|
||||
int scsiControllerKey, Map<String, String> iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
|
||||
|
||||
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, int scsiControllerKey,
|
||||
Map<String, String> iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
|
||||
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
|
||||
|
||||
for (DiskTO vol : sortedDisks) {
|
||||
|
|
@ -2852,8 +2848,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (s_logger.isInfoEnabled())
|
||||
s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) {
|
||||
if (s_logger.isInfoEnabled())
|
||||
s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName());
|
||||
|
|
@ -2969,8 +2964,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return listForSort.toArray(new DiskTO[0]);
|
||||
}
|
||||
|
||||
private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context,
|
||||
DiskTO[] disks, Command cmd) throws Exception {
|
||||
private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, DiskTO[] disks,
|
||||
Command cmd) throws Exception {
|
||||
HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> mapIdToMors = new HashMap<String, Pair<ManagedObjectReference, DatastoreMO>>();
|
||||
|
||||
assert (hyperHost != null) && (context != null);
|
||||
|
|
@ -2997,12 +2992,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
// if the datastore is not present, we need to discover the iSCSI device that will support it,
|
||||
// create the datastore, and create a VMDK file in the datastore
|
||||
if (morDatastore == null) {
|
||||
morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName,
|
||||
details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)),
|
||||
volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null,
|
||||
details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET),
|
||||
details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET),
|
||||
Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
|
||||
morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, details.get(DiskTO.STORAGE_HOST),
|
||||
Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null,
|
||||
details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME),
|
||||
details.get(DiskTO.CHAP_TARGET_SECRET), Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
|
||||
|
||||
DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore);
|
||||
String datastoreVolumePath = dsMo.getDatastorePath((volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : dsMo.getName()) + ".vmdk");
|
||||
|
|
@ -3012,8 +3005,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
mapIdToMors.put(datastoreName, new Pair<ManagedObjectReference, DatastoreMO>(morDatastore, new DatastoreMO(context, morDatastore)));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid);
|
||||
|
||||
if (morDatastore == null) {
|
||||
|
|
@ -3051,8 +3043,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName);
|
||||
|
||||
break;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
DataStoreTO primaryStore = vol.getData().getDataStore();
|
||||
|
||||
rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid());
|
||||
|
|
@ -3109,7 +3100,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return defaultVlan;
|
||||
}
|
||||
|
||||
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception {
|
||||
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType)
|
||||
throws Exception {
|
||||
|
||||
Ternary<String, String, String> switchDetails = getTargetSwitch(nicTo);
|
||||
VirtualSwitchType switchType = VirtualSwitchType.getType(switchDetails.second());
|
||||
|
|
@ -3147,14 +3139,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
// return Ternary <switch name, switch tyep, vlan tagging>
|
||||
private Ternary<String, String, String> getTargetSwitch(NicTO nicTo) throws CloudException {
|
||||
TrafficType[] supportedTrafficTypes =
|
||||
new TrafficType[] {
|
||||
TrafficType.Guest,
|
||||
TrafficType.Public,
|
||||
TrafficType.Control,
|
||||
TrafficType.Management,
|
||||
TrafficType.Storage
|
||||
};
|
||||
TrafficType[] supportedTrafficTypes = new TrafficType[] {TrafficType.Guest, TrafficType.Public, TrafficType.Control, TrafficType.Management, TrafficType.Storage};
|
||||
|
||||
TrafficType trafficType = nicTo.getType();
|
||||
if (!Arrays.asList(supportedTrafficTypes).contains(trafficType)) {
|
||||
|
|
@ -3165,7 +3150,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
VirtualSwitchType switchType = VirtualSwitchType.StandardVirtualSwitch;
|
||||
String vlanId = Vlan.UNTAGGED;
|
||||
|
||||
if(nicTo.getName() != null && !nicTo.getName().isEmpty()) {
|
||||
if (StringUtils.isNotBlank(nicTo.getName())) {
|
||||
// Format of network traffic label is <VSWITCH>,<VLANID>,<VSWITCHTYPE>
|
||||
// If all 3 fields are mentioned then number of tokens would be 3.
|
||||
// If only <VSWITCH>,<VLANID> are mentioned then number of tokens would be 2.
|
||||
|
|
@ -3192,9 +3177,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
if (switchType == VirtualSwitchType.NexusDistributedVirtualSwitch) {
|
||||
if (trafficType == TrafficType.Management || trafficType == TrafficType.Storage) {
|
||||
throw new CloudException("Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() +
|
||||
" is not supported over virtual switch type " + switchType +
|
||||
". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label.");
|
||||
throw new CloudException(
|
||||
"Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() + " is not supported over virtual switch type " + switchType
|
||||
+ ". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3737,7 +3722,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
VolumeTO volume;
|
||||
StorageFilerTO filerTo;
|
||||
Set<String> mountedDatastoresAtSource = new HashSet<String>();
|
||||
List<VolumeObjectTO> volumeToList = new ArrayList<VolumeObjectTO>();
|
||||
List<VolumeObjectTO> volumeToList = new ArrayList<VolumeObjectTO>();
|
||||
Map<Long, Integer> volumeDeviceKey = new HashMap<Long, Integer>();
|
||||
|
||||
List<Pair<VolumeTO, StorageFilerTO>> volToFiler = cmd.getVolumeToFilerAsList();
|
||||
|
|
@ -3776,7 +3761,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid());
|
||||
morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid());
|
||||
if (morDsAtTarget == null) {
|
||||
String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() + " to execute MigrateWithStorageCommand";
|
||||
String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName()
|
||||
+ " to execute MigrateWithStorageCommand";
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
|
@ -3805,12 +3791,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
// If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration.
|
||||
if (filerTo.getType().equals(StoragePoolType.VMFS)) {
|
||||
if (morDsAtSource == null) {
|
||||
s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration.");
|
||||
s_logger.warn(
|
||||
"If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration.");
|
||||
throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not mounted on source host: " + _hostName);
|
||||
}
|
||||
DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morDsAtSource);
|
||||
String srcHostValue = srcHyperHost.getMor().getValue();
|
||||
if(!dsAtSourceMo.isAccessibleToHost(srcHostValue)) {
|
||||
if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) {
|
||||
s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to accessible to source host for a successful live storage migration.");
|
||||
throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not accessible on source host: " + _hostName);
|
||||
}
|
||||
|
|
@ -3892,8 +3879,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (!vmMo.changeDatastore(relocateSpec)) {
|
||||
throw new Exception("Change datastore operation failed during storage migration");
|
||||
} else {
|
||||
s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() +
|
||||
" and its storage to target datastore(s)");
|
||||
s_logger.debug(
|
||||
"Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() + " and its storage to target datastore(s)");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3943,8 +3930,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
try {
|
||||
srcHyperHost.unmountDatastore(mountedDatastore);
|
||||
} catch (Exception unmountEx) {
|
||||
s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName +
|
||||
". Please unmount manually to cleanup.");
|
||||
s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName
|
||||
+ ". Please unmount manually to cleanup.");
|
||||
}
|
||||
s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName);
|
||||
}
|
||||
|
|
@ -3989,7 +3976,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmName = vmMo.getName();
|
||||
morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
|
||||
if (morDs == null) {
|
||||
String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() +" to execute MigrateVolumeCommand";
|
||||
String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName()
|
||||
+ " to execute MigrateVolumeCommand";
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
|
@ -4091,9 +4079,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
try {
|
||||
VmwareContext context = getServiceContext();
|
||||
|
||||
_storageProcessor.prepareManagedDatastore(context, getHyperHost(context),
|
||||
cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(CreateStoragePoolCommand.IQN),
|
||||
cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT)));
|
||||
_storageProcessor.prepareManagedDatastore(context, getHyperHost(context), cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME),
|
||||
cmd.getDetails().get(CreateStoragePoolCommand.IQN), cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST),
|
||||
Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT)));
|
||||
} catch (Exception ex) {
|
||||
return new Answer(cmd, false, "Issue creating datastore");
|
||||
}
|
||||
|
|
@ -4163,8 +4151,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (targets != null && targets.size() > 0) {
|
||||
try {
|
||||
_storageProcessor.handleTargetsForHost(add, targets, host);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
} catch (Exception ex) {
|
||||
s_logger.warn(ex.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
@ -4178,11 +4165,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
try {
|
||||
if (cmd.getRemoveDatastore()) {
|
||||
_storageProcessor.handleDatastoreAndVmdkDetach(cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN),
|
||||
cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
|
||||
cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
|
||||
|
||||
return new Answer(cmd, true, "success");
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// We will leave datastore cleanup management to vCenter. Since for cluster VMFS datastore, it will always
|
||||
// be mounted by vCenter.
|
||||
|
||||
|
|
@ -4233,12 +4219,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (cmd.isAttach()) {
|
||||
vmMo.mountToolsInstaller();
|
||||
} else {
|
||||
try{
|
||||
try {
|
||||
if (!vmMo.unmountToolsInstaller()) {
|
||||
return new Answer(cmd, false,
|
||||
"Failed to unmount vmware-tools installer ISO as the corresponding CDROM device is locked by VM. Please unmount the CDROM device inside the VM and ret-try.");
|
||||
}
|
||||
}catch(Throwable e){
|
||||
} catch (Throwable e) {
|
||||
vmMo.detachIso(null);
|
||||
}
|
||||
}
|
||||
|
|
@ -4318,10 +4304,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
private static String getSecondaryDatastoreUUID(String storeUrl) {
|
||||
String uuid = null;
|
||||
try{
|
||||
uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString();
|
||||
}catch(UnsupportedEncodingException e){
|
||||
s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." );
|
||||
try {
|
||||
uuid = UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString();
|
||||
} catch (UnsupportedEncodingException e) {
|
||||
s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error.");
|
||||
}
|
||||
return uuid;
|
||||
}
|
||||
|
|
@ -4522,8 +4508,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
long used = capacity - free;
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " +
|
||||
cmd.getPooltype() + ", capacity: " + capacity + ", free: " + free + ", used: " + used);
|
||||
s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype()
|
||||
+ ", capacity: " + capacity + ", free: " + free + ", used: " + used);
|
||||
}
|
||||
|
||||
if (summary.getCapacity() <= 0) {
|
||||
|
|
@ -4532,9 +4518,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
return new GetStorageStatsAnswer(cmd, capacity, used);
|
||||
} else {
|
||||
String msg =
|
||||
"Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " +
|
||||
cmd.getPooltype();
|
||||
String msg = "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: "
|
||||
+ cmd.getPooltype();
|
||||
|
||||
s_logger.error(msg);
|
||||
return new GetStorageStatsAnswer(cmd, msg);
|
||||
|
|
@ -4545,9 +4530,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
invalidateServiceContext();
|
||||
}
|
||||
|
||||
String msg =
|
||||
"Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() +
|
||||
") due to " + VmwareHelper.getExceptionMessage(e);
|
||||
String msg = "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype()
|
||||
+ ") due to " + VmwareHelper.getExceptionMessage(e);
|
||||
s_logger.error(msg, e);
|
||||
return new GetStorageStatsAnswer(cmd, msg);
|
||||
}
|
||||
|
|
@ -4624,8 +4608,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (result.first())
|
||||
return new Answer(cmd);
|
||||
} catch (Exception e) {
|
||||
s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to "
|
||||
+ VmwareHelper.getExceptionMessage(e), e);
|
||||
s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e);
|
||||
}
|
||||
return new Answer(cmd, false, "PingTestCommand failed");
|
||||
} else {
|
||||
|
|
@ -4649,8 +4632,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to "
|
||||
+ VmwareHelper.getExceptionMessage(e), e);
|
||||
s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " + VmwareHelper.getExceptionMessage(e), e);
|
||||
}
|
||||
|
||||
return new Answer(cmd, false, "PingTestCommand failed");
|
||||
|
|
@ -4674,7 +4656,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return new Answer(cmd);
|
||||
}
|
||||
|
||||
|
||||
protected Answer execute(GetVmIpAddressCommand cmd) {
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Executing resource command GetVmIpAddressCommand: " + _gson.toJson(cmd));
|
||||
|
|
@ -5055,8 +5036,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
DatastoreSummary dsSummary = dsMo.getSummary();
|
||||
String address = hostMo.getHostName();
|
||||
StoragePoolInfo pInfo =
|
||||
new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), dsSummary.getFreeSpace());
|
||||
StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(),
|
||||
dsSummary.getFreeSpace());
|
||||
StartupStorageCommand cmd = new StartupStorageCommand();
|
||||
cmd.setName(poolUuid);
|
||||
cmd.setPoolInfo(pInfo);
|
||||
|
|
@ -5131,8 +5112,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
} catch (Exception ex) {
|
||||
s_logger.info("Could not locate an IQN for this host.");
|
||||
}
|
||||
|
||||
|
|
@ -5204,8 +5184,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout)
|
||||
throws Exception {
|
||||
protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception {
|
||||
|
||||
VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
|
||||
|
||||
|
|
@ -5284,7 +5263,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
cpuArchitecture = "i386";
|
||||
}
|
||||
|
||||
if(cloudGuestOs == null) {
|
||||
if (cloudGuestOs == null) {
|
||||
s_logger.warn("Guest OS mapping name is not set for guest os: " + guestOs);
|
||||
}
|
||||
|
||||
|
|
@ -5359,7 +5338,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return newStates;
|
||||
}
|
||||
|
||||
|
||||
private HashMap<String, PowerState> getVmStates() throws Exception {
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
|
||||
|
|
@ -5412,8 +5390,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return newStates;
|
||||
}
|
||||
|
||||
|
||||
|
||||
private HashMap<String, VmStatsEntry> getVmStats(List<String> vmNames) throws Exception {
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>();
|
||||
|
|
@ -5449,6 +5425,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
ObjectContent[] ocs =
|
||||
hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", numCpuStr, cpuUseStr ,guestMemUseStr ,memLimitStr ,memMbStr,allocatedCpuStr ,instanceNameCustomField});
|
||||
|
||||
if (ocs != null && ocs.length > 0) {
|
||||
for (ObjectContent oc : ocs) {
|
||||
List<DynamicProperty> objProps = oc.getPropSet();
|
||||
|
|
@ -5468,9 +5445,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
} else if (objProp.getName().contains(instanceNameCustomField)) {
|
||||
if (objProp.getVal() != null)
|
||||
vmInternalCSName = ((CustomFieldStringValue)objProp.getVal()).getValue();
|
||||
}else if(objProp.getName().equals(guestMemusage)){
|
||||
} else if (objProp.getName().equals(guestMemusage)) {
|
||||
guestMemusage = objProp.getVal().toString();
|
||||
}else if (objProp.getName().equals(numCpuStr)) {
|
||||
} else if (objProp.getName().equals(numCpuStr)) {
|
||||
numberCPUs = objProp.getVal().toString();
|
||||
} else if (objProp.getName().equals(cpuUseStr)) {
|
||||
maxCpuUsage = NumberUtils.toDouble(objProp.getVal().toString());
|
||||
|
|
@ -5503,8 +5480,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
List<PerfMetricId> perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null);
|
||||
if (perfMetrics != null) {
|
||||
for (int index = 0; index < perfMetrics.size(); ++index) {
|
||||
if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) ||
|
||||
((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) {
|
||||
if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey()))
|
||||
|| ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) {
|
||||
vmNetworkMetrics.add(perfMetrics.get(index));
|
||||
}
|
||||
}
|
||||
|
|
@ -5550,14 +5527,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
}
|
||||
vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm"));
|
||||
vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024,
|
||||
maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm"));
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
return vmResponseMap;
|
||||
}
|
||||
|
||||
|
||||
protected String networkUsage(final String privateIpAddress, final String option, final String ethName) {
|
||||
String args = null;
|
||||
if (option.equals("get")) {
|
||||
|
|
@ -5652,7 +5630,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return convertPowerState(runtimeInfo.getPowerState());
|
||||
}
|
||||
|
||||
|
||||
private static PowerState convertPowerState(VirtualMachinePowerState powerState) {
|
||||
return s_powerStatesTable.get(powerState);
|
||||
}
|
||||
|
|
@ -5730,8 +5707,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager());
|
||||
cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID);
|
||||
if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch || _guestTrafficInfo != null &&
|
||||
_guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
|
||||
if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch
|
||||
|| _guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
|
||||
cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP);
|
||||
}
|
||||
cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
|
||||
|
|
@ -5744,8 +5721,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
VmwareHypervisorHost hostMo = this.getHyperHost(context);
|
||||
_hostName = hostMo.getHyperHostName();
|
||||
|
||||
if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch ||
|
||||
_publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
|
||||
if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch
|
||||
|| _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
|
||||
_privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
|
||||
_vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster));
|
||||
}
|
||||
|
|
@ -5770,9 +5747,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
if (intObj != null)
|
||||
_portsPerDvPortGroup = intObj.intValue();
|
||||
|
||||
s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " +
|
||||
_publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " +
|
||||
_guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName());
|
||||
s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over "
|
||||
+ _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over "
|
||||
+ _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName());
|
||||
|
||||
Boolean boolObj = (Boolean)params.get("vmware.create.full.clone");
|
||||
if (boolObj != null && boolObj.booleanValue()) {
|
||||
|
|
@ -5792,7 +5769,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
int timeout = NumbersUtil.parseInt(value, 1440) * 1000;
|
||||
|
||||
storageNfsVersion = NfsSecondaryStorageResource.retrieveNfsVersionFromParams(params);
|
||||
_storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null, storageNfsVersion);
|
||||
_storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null,
|
||||
storageNfsVersion);
|
||||
storageHandler = new VmwareStorageSubsystemCommandHandler(_storageProcessor, storageNfsVersion);
|
||||
|
||||
_vrResource = new VirtualRoutingResource(this);
|
||||
|
|
@ -5842,11 +5820,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
@Override
|
||||
public VmwareContext getServiceContext(Command cmd) {
|
||||
VmwareContext context = null;
|
||||
if(s_serviceContext.get() != null) {
|
||||
if (s_serviceContext.get() != null) {
|
||||
context = s_serviceContext.get();
|
||||
String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username);
|
||||
// Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
|
||||
if(context.getPoolKey().equals(poolKey)) {
|
||||
if (context.getPoolKey().equals(poolKey)) {
|
||||
if (context.validate()) {
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("ThreadLocal context is still valid, just reuse");
|
||||
|
|
|
|||
|
|
@ -314,7 +314,6 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
|
||||
String templateUrl = secondaryStorageUrl + "/" + srcData.getPath();
|
||||
|
||||
Pair<String, String> templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName());
|
||||
|
||||
VmwareContext context = hostService.getServiceContext(cmd);
|
||||
|
|
@ -505,6 +504,9 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool();
|
||||
ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
|
||||
if (template.getSize() != null){
|
||||
_fullCloneFlag = volume.getSize() > template.getSize() ? true : _fullCloneFlag;
|
||||
}
|
||||
if (!_fullCloneFlag) {
|
||||
createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool);
|
||||
} else {
|
||||
|
|
@ -514,8 +516,8 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName);
|
||||
assert (vmMo != null);
|
||||
|
||||
vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); // TO-DO: Support for base template containing multiple disks
|
||||
s_logger.info("Move volume out of volume-wrapper VM ");
|
||||
vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0);
|
||||
s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName);
|
||||
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag);
|
||||
String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag);
|
||||
|
||||
|
|
@ -529,7 +531,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
vmMo.destroy();
|
||||
|
||||
String srcFile = dsMo.getDatastorePath(vmdkName, true);
|
||||
|
||||
dsMo.deleteFile(srcFile, dcMo.getMor(), true, searchExcludedFolders);
|
||||
|
||||
if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) {
|
||||
dsMo.deleteFolder(srcFile, dcMo.getMor());
|
||||
}
|
||||
}
|
||||
// restoreVM - move the new ROOT disk into corresponding VM folder
|
||||
VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName());
|
||||
|
|
@ -542,7 +549,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
|
|||
|
||||
VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
newVol.setPath(vmdkFileBaseName);
|
||||
newVol.setSize(volume.getSize());
|
||||
if (template.getSize() != null){
|
||||
newVol.setSize(template.getSize());
|
||||
}
|
||||
else {
|
||||
newVol.setSize(volume.getSize());
|
||||
}
|
||||
return new CopyCmdAnswer(newVol);
|
||||
} catch (Throwable e) {
|
||||
if (e instanceof RemoteException) {
|
||||
|
|
|
|||
|
|
@ -1445,6 +1445,11 @@ public class ApiResponseHelper implements ResponseGenerator {
|
|||
return ApiDBUtils.findTemplateById(templateId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DiskOfferingVO findDiskOfferingById(Long diskOfferingId) {
|
||||
return ApiDBUtils.findDiskOfferingById(diskOfferingId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public VpnUsersResponse createVpnUserResponse(VpnUser vpnUser) {
|
||||
VpnUsersResponse vpnResponse = new VpnUsersResponse();
|
||||
|
|
|
|||
|
|
@ -3097,6 +3097,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
|
|||
Map<String, String> tags = cmd.getTags();
|
||||
boolean showRemovedTmpl = cmd.getShowRemoved();
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
Long parentTemplateId = cmd.getParentTemplateId();
|
||||
|
||||
boolean listAll = false;
|
||||
if (templateFilter != null && templateFilter == TemplateFilter.all) {
|
||||
|
|
@ -3125,14 +3126,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
|
|||
return searchForTemplatesInternal(id, cmd.getTemplateName(), cmd.getKeyword(), templateFilter, false, null,
|
||||
cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, showDomr,
|
||||
cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedTmpl,
|
||||
cmd.getIds());
|
||||
cmd.getIds(), parentTemplateId);
|
||||
}
|
||||
|
||||
private Pair<List<TemplateJoinVO>, Integer> searchForTemplatesInternal(Long templateId, String name,
|
||||
String keyword, TemplateFilter templateFilter, boolean isIso, Boolean bootable, Long pageSize,
|
||||
Long startIndex, Long zoneId, HypervisorType hyperType, boolean showDomr, boolean onlyReady,
|
||||
List<Account> permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria,
|
||||
Map<String, String> tags, boolean showRemovedTmpl, List<Long> ids) {
|
||||
Map<String, String> tags, boolean showRemovedTmpl, List<Long> ids, Long parentTemplateId) {
|
||||
|
||||
// check if zone is configured, if not, just return empty list
|
||||
List<HypervisorType> hypers = null;
|
||||
|
|
@ -3376,6 +3377,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
|
|||
sc.addAnd("dataCenterId", SearchCriteria.Op.SC, zoneSc);
|
||||
}
|
||||
|
||||
if (parentTemplateId != null) {
|
||||
sc.addAnd("parentTemplateId", SearchCriteria.Op.EQ, parentTemplateId);
|
||||
}
|
||||
|
||||
// don't return removed template, this should not be needed since we
|
||||
// changed annotation for removed field in TemplateJoinVO.
|
||||
// sc.addAnd("removed", SearchCriteria.Op.NULL);
|
||||
|
|
@ -3459,7 +3464,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
|
|||
return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true,
|
||||
cmd.isBootable(), cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, true,
|
||||
cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedISO,
|
||||
null);
|
||||
null, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -18,8 +18,10 @@ package com.cloud.api.query.dao;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
|
|
@ -27,6 +29,7 @@ import org.apache.log4j.Logger;
|
|||
import org.springframework.stereotype.Component;
|
||||
|
||||
import org.apache.cloudstack.api.ResponseObject.ResponseView;
|
||||
import org.apache.cloudstack.api.response.ChildTemplateResponse;
|
||||
import org.apache.cloudstack.api.response.TemplateResponse;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
|
|
@ -37,10 +40,13 @@ import com.cloud.api.ApiDBUtils;
|
|||
import com.cloud.api.ApiResponseHelper;
|
||||
import com.cloud.api.query.vo.ResourceTagJoinVO;
|
||||
import com.cloud.api.query.vo.TemplateJoinVO;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.TemplateType;
|
||||
import com.cloud.storage.VMTemplateHostVO;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountService;
|
||||
|
|
@ -59,6 +65,8 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
|
|||
private ConfigurationDao _configDao;
|
||||
@Inject
|
||||
private AccountService _accountService;
|
||||
@Inject
|
||||
private VMTemplateDao _vmTemplateDao;
|
||||
|
||||
private final SearchBuilder<TemplateJoinVO> tmpltIdPairSearch;
|
||||
|
||||
|
|
@ -186,6 +194,10 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
|
|||
}
|
||||
templateResponse.setTemplateTag(template.getTemplateTag());
|
||||
|
||||
if (template.getParentTemplateId() != null) {
|
||||
templateResponse.setParentTemplateId(template.getParentTemplateUuid());
|
||||
}
|
||||
|
||||
// set details map
|
||||
if (template.getDetailName() != null) {
|
||||
Map<String, String> details = new HashMap<>();
|
||||
|
|
@ -201,6 +213,22 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
|
|||
|
||||
templateResponse.setDirectDownload(template.isDirectDownload());
|
||||
|
||||
//set template children disks
|
||||
Set<ChildTemplateResponse> childTemplatesSet = new HashSet<ChildTemplateResponse>();
|
||||
if (template.getHypervisorType() == HypervisorType.VMware) {
|
||||
List<VMTemplateVO> childTemplates = _vmTemplateDao.listByParentTemplatetId(template.getId());
|
||||
for (VMTemplateVO tmpl : childTemplates) {
|
||||
if (tmpl.getTemplateType() != TemplateType.ISODISK) {
|
||||
ChildTemplateResponse childTempl = new ChildTemplateResponse();
|
||||
childTempl.setId(tmpl.getUuid());
|
||||
childTempl.setName(tmpl.getName());
|
||||
childTempl.setSize(Math.round(tmpl.getSize() / (1024 * 1024 * 1024)));
|
||||
childTemplatesSet.add(childTempl);
|
||||
}
|
||||
}
|
||||
templateResponse.setChildTemplates(childTemplatesSet);
|
||||
}
|
||||
|
||||
templateResponse.setObjectName("template");
|
||||
return templateResponse;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -209,6 +209,12 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont
|
|||
@Column(name = "lp_account_id")
|
||||
private Long sharedAccountId;
|
||||
|
||||
@Column(name = "parent_template_id")
|
||||
private Long parentTemplateId;
|
||||
|
||||
@Column(name = "parent_template_uuid")
|
||||
private String parentTemplateUuid;
|
||||
|
||||
@Column(name = "detail_name")
|
||||
private String detailName;
|
||||
|
||||
|
|
@ -483,4 +489,13 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont
|
|||
public boolean isDirectDownload() {
|
||||
return directDownload;
|
||||
}
|
||||
|
||||
public Object getParentTemplateId() {
|
||||
return parentTemplateId;
|
||||
}
|
||||
|
||||
public String getParentTemplateUuid() {
|
||||
return parentTemplateUuid;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1325,18 +1325,18 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
|
|||
vm = _userVmService.createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" +
|
||||
getCurrentTimeStampString(),
|
||||
"autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null,
|
||||
null, true, null, null, null, null, null);
|
||||
null, true, null, null, null, null, null, null);
|
||||
} else {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
vm = _userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, null, null,
|
||||
owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
|
||||
"autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null,
|
||||
null, null, true, null, null, null, null, null);
|
||||
null, null, true, null, null, null, null, null, null);
|
||||
|
||||
} else {
|
||||
vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" +
|
||||
getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
|
||||
null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null);
|
||||
null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null);
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -84,6 +84,7 @@ import com.cloud.storage.TemplateProfile;
|
|||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.VMTemplateZoneVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.download.DownloadMonitor;
|
||||
import com.cloud.utils.UriUtils;
|
||||
|
|
@ -121,6 +122,8 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
MessageBus _messageBus;
|
||||
@Inject
|
||||
ResourceManager resourceManager;
|
||||
@Inject
|
||||
VMTemplateDao templateDao;
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
|
|
@ -430,9 +433,10 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
@Override
|
||||
@DB
|
||||
public boolean delete(TemplateProfile profile) {
|
||||
boolean success = true;
|
||||
boolean success = false;
|
||||
|
||||
VMTemplateVO template = profile.getTemplate();
|
||||
Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId());
|
||||
|
||||
if (profile.getZoneIdList() != null && profile.getZoneIdList().size() > 1)
|
||||
throw new CloudRuntimeException("Operation is not supported for more than one zone id at a time");
|
||||
|
|
@ -456,8 +460,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
for (TemplateDataStoreVO templateStore : templateStores) {
|
||||
if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
|
||||
String errorMsg = "Please specify a template that is not currently being downloaded.";
|
||||
s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() +
|
||||
"; cant' delete it.");
|
||||
s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; cant' delete it.");
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
}
|
||||
}
|
||||
|
|
@ -474,37 +477,78 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
// publish zone-wide usage event
|
||||
Long sZoneId = ((ImageStoreEntity)imageStore).getDataCenterId();
|
||||
if (sZoneId != null) {
|
||||
UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(), template.getUuid());
|
||||
UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(),
|
||||
template.getUuid());
|
||||
}
|
||||
|
||||
s_logger.info("Delete template from image store: " + imageStore.getName());
|
||||
AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore));
|
||||
try {
|
||||
TemplateApiResult result = future.get();
|
||||
success = result.isSuccess();
|
||||
if (!success) {
|
||||
s_logger.warn("Failed to delete the template " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult());
|
||||
break;
|
||||
}
|
||||
|
||||
// remove from template_zone_ref
|
||||
List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId());
|
||||
if (templateZones != null) {
|
||||
for (VMTemplateZoneVO templateZone : templateZones) {
|
||||
templateZoneDao.remove(templateZone.getId());
|
||||
boolean dataDiskDeletetionResult = true;
|
||||
List<VMTemplateVO> dataDiskTemplates = templateDao.listByParentTemplatetId(template.getId());
|
||||
if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) {
|
||||
s_logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template");
|
||||
for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) {
|
||||
s_logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName());
|
||||
AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore));
|
||||
try {
|
||||
TemplateApiResult result = future.get();
|
||||
dataDiskDeletetionResult = result.isSuccess();
|
||||
if (!dataDiskDeletetionResult) {
|
||||
s_logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: "
|
||||
+ result.getResult());
|
||||
break;
|
||||
}
|
||||
// Remove from template_zone_ref
|
||||
List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, dataDiskTemplate.getId());
|
||||
if (templateZones != null) {
|
||||
for (VMTemplateZoneVO templateZone : templateZones) {
|
||||
templateZoneDao.remove(templateZone.getId());
|
||||
}
|
||||
}
|
||||
// Mark datadisk template as Inactive
|
||||
List<DataStore> iStores = templateMgr.getImageStoreByTemplate(dataDiskTemplate.getId(), null);
|
||||
if (iStores == null || iStores.size() == 0) {
|
||||
dataDiskTemplate.setState(VirtualMachineTemplate.State.Inactive);
|
||||
_tmpltDao.update(dataDiskTemplate.getId(), dataDiskTemplate);
|
||||
}
|
||||
// Decrement total secondary storage space used by the account
|
||||
_resourceLimitMgr.recalculateResourceCount(dataDiskTemplate.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal());
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Delete datadisk template failed", e);
|
||||
throw new CloudRuntimeException("Delete datadisk template failed", e);
|
||||
}
|
||||
}
|
||||
//mark all the occurrences of this template in the given store as destroyed.
|
||||
templateDataStoreDao.removeByTemplateStore(template.getId(), imageStore.getId());
|
||||
}
|
||||
// remove from template_zone_ref
|
||||
if (dataDiskDeletetionResult) {
|
||||
s_logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName());
|
||||
AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore));
|
||||
try {
|
||||
TemplateApiResult result = future.get();
|
||||
success = result.isSuccess();
|
||||
if (!success) {
|
||||
s_logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult());
|
||||
break;
|
||||
}
|
||||
|
||||
} catch (InterruptedException e) {
|
||||
s_logger.debug("delete template Failed", e);
|
||||
throw new CloudRuntimeException("delete template Failed", e);
|
||||
} catch (ExecutionException e) {
|
||||
s_logger.debug("delete template Failed", e);
|
||||
throw new CloudRuntimeException("delete template Failed", e);
|
||||
// remove from template_zone_ref
|
||||
List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId());
|
||||
if (templateZones != null) {
|
||||
for (VMTemplateZoneVO templateZone : templateZones) {
|
||||
templateZoneDao.remove(templateZone.getId());
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
s_logger.debug("Delete template Failed", e);
|
||||
throw new CloudRuntimeException("Delete template Failed", e);
|
||||
} catch (ExecutionException e) {
|
||||
s_logger.debug("Delete template Failed", e);
|
||||
throw new CloudRuntimeException("Delete template Failed", e);
|
||||
}
|
||||
} else {
|
||||
s_logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() + " because deletion of one of the Datadisk"
|
||||
+ " templates that belonged to the template failed");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if (success) {
|
||||
if ((imageStores.size() > 1) && (profile.getZoneIdList() != null)) {
|
||||
|
|
@ -515,7 +559,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
// delete all cache entries for this template
|
||||
List<TemplateInfo> cacheTmpls = imageFactory.listTemplateOnCache(template.getId());
|
||||
for (TemplateInfo tmplOnCache : cacheTmpls) {
|
||||
s_logger.info("Delete template from image cache store: " + tmplOnCache.getDataStore().getName());
|
||||
s_logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName());
|
||||
tmplOnCache.delete();
|
||||
}
|
||||
|
||||
|
|
@ -528,7 +572,6 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
|
|||
|
||||
// Decrement the number of templates and total secondary storage
|
||||
// space used by the account
|
||||
Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId());
|
||||
_resourceLimitMgr.decrementResourceCount(template.getAccountId(), ResourceType.template);
|
||||
_resourceLimitMgr.recalculateResourceCount(template.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal());
|
||||
|
||||
|
|
|
|||
|
|
@ -776,12 +776,34 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
UsageEventUtils.publishUsageEvent(copyEventType, account.getId(), dstZoneId, tmpltId, null, null, null, srcTmpltStore.getPhysicalSize(),
|
||||
srcTmpltStore.getSize(), template.getClass().getName(), template.getUuid());
|
||||
}
|
||||
return true;
|
||||
|
||||
// Copy every Datadisk template that belongs to the template to Destination zone
|
||||
List<VMTemplateVO> dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId());
|
||||
if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) {
|
||||
for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) {
|
||||
s_logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName());
|
||||
TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore);
|
||||
AsyncCallFuture<TemplateApiResult> dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore);
|
||||
try {
|
||||
TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get();
|
||||
if (dataDiskCopyResult.isFailed()) {
|
||||
s_logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName()
|
||||
+ " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one");
|
||||
continue; // Continue to copy next Datadisk template
|
||||
}
|
||||
_tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId);
|
||||
_resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize());
|
||||
} catch (Exception ex) {
|
||||
s_logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName()
|
||||
+ " , will try copying the next one");
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one");
|
||||
}
|
||||
}
|
||||
return false;
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -800,6 +822,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
throw new InvalidParameterValueException("Unable to find template with id");
|
||||
}
|
||||
|
||||
// Verify template is not Datadisk template
|
||||
if (template.getTemplateType().equals(TemplateType.DATADISK)) {
|
||||
throw new InvalidParameterValueException("Template " + template.getId() + " is of type Datadisk. Cannot copy Datadisk templates.");
|
||||
}
|
||||
|
||||
if (sourceZoneId != null) {
|
||||
if (destZoneIds!= null && destZoneIds.contains(sourceZoneId)) {
|
||||
throw new InvalidParameterValueException("Please specify different source and destination zones.");
|
||||
|
|
|
|||
|
|
@ -254,6 +254,7 @@ import com.cloud.storage.dao.VMTemplateZoneDao;
|
|||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.snapshot.SnapshotManager;
|
||||
import com.cloud.tags.dao.ResourceTagDao;
|
||||
import com.cloud.template.TemplateApiService;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -501,6 +502,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
private SnapshotApiService _snapshotService;
|
||||
@Inject
|
||||
NicExtraDhcpOptionDao _nicExtraDhcpOptionDao;
|
||||
@Inject
|
||||
protected TemplateApiService _tmplService;
|
||||
|
||||
protected ScheduledExecutorService _executor = null;
|
||||
protected ScheduledExecutorService _vmIpFetchExecutor = null;
|
||||
|
|
@ -2948,7 +2951,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> securityGroupIdList,
|
||||
Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod,
|
||||
String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List<Long> affinityGroupIdList,
|
||||
Map<String, String> customParametes, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
|
||||
Map<String, String> customParametes, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
|
||||
StorageUnavailableException, ResourceAllocationException {
|
||||
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
|
|
@ -2996,7 +2999,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
|
||||
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
|
||||
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap);
|
||||
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -3005,7 +3008,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList,
|
||||
List<Long> securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor,
|
||||
HTTPMethod httpmethod, String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException,
|
||||
ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException {
|
||||
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
|
|
@ -3107,7 +3110,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
|
||||
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
|
||||
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap);
|
||||
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -3115,7 +3118,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList, Account owner,
|
||||
String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
|
||||
String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List<Long> affinityGroupIdList,
|
||||
Map<String, String> customParametrs, String customId, Map<String, Map<Integer, String>> dhcpOptionsMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
|
||||
Map<String, String> customParametrs, String customId, Map<String, Map<Integer, String>> dhcpOptionsMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
|
||||
StorageUnavailableException, ResourceAllocationException {
|
||||
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
|
|
@ -3212,7 +3215,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
verifyExtraDhcpOptionsNetwork(dhcpOptionsMap, networkList);
|
||||
|
||||
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData,
|
||||
sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap);
|
||||
sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, dataDiskTemplateToDiskOfferingMap);
|
||||
}
|
||||
|
||||
private void verifyExtraDhcpOptionsNetwork(Map<String, Map<Integer, String>> dhcpOptionsMap, List<NetworkVO> networkList) throws InvalidParameterValueException {
|
||||
|
|
@ -3244,7 +3247,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate tmplt, String hostName, String displayName, Account owner,
|
||||
Long diskOfferingId, Long diskSize, List<NetworkVO> networkList, List<Long> securityGroupIdList, String group, HTTPMethod httpmethod, String userData,
|
||||
String sshKeyPair, HypervisorType hypervisor, Account caller, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ResourceUnavailableException,
|
||||
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> datadiskTemplateToDiskOfferringMap) throws InsufficientCapacityException, ResourceUnavailableException,
|
||||
ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException {
|
||||
|
||||
_accountMgr.checkAccess(caller, null, true, owner);
|
||||
|
|
@ -3356,6 +3359,38 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
}
|
||||
|
||||
if (datadiskTemplateToDiskOfferringMap != null && !datadiskTemplateToDiskOfferringMap.isEmpty()) {
|
||||
for (Entry<Long, DiskOffering> datadiskTemplateToDiskOffering : datadiskTemplateToDiskOfferringMap.entrySet()) {
|
||||
VMTemplateVO dataDiskTemplate = _templateDao.findById(datadiskTemplateToDiskOffering.getKey());
|
||||
DiskOffering dataDiskOffering = datadiskTemplateToDiskOffering.getValue();
|
||||
|
||||
if (dataDiskTemplate == null
|
||||
|| (!dataDiskTemplate.getTemplateType().equals(TemplateType.DATADISK)) && (dataDiskTemplate.getState().equals(VirtualMachineTemplate.State.Active))) {
|
||||
throw new InvalidParameterValueException("Invalid template id specified for Datadisk template" + datadiskTemplateToDiskOffering.getKey());
|
||||
}
|
||||
long dataDiskTemplateId = datadiskTemplateToDiskOffering.getKey();
|
||||
if (!dataDiskTemplate.getParentTemplateId().equals(template.getId())) {
|
||||
throw new InvalidParameterValueException("Invalid Datadisk template. Specified Datadisk template" + dataDiskTemplateId
|
||||
+ " doesn't belong to template " + template.getId());
|
||||
}
|
||||
if (dataDiskOffering == null) {
|
||||
throw new InvalidParameterValueException("Invalid disk offering id " + datadiskTemplateToDiskOffering.getValue().getId() +
|
||||
" specified for datadisk template " + dataDiskTemplateId);
|
||||
}
|
||||
if (dataDiskOffering.isCustomized()) {
|
||||
throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " +
|
||||
dataDiskTemplateId + ". Custom Disk offerings are not supported for Datadisk templates");
|
||||
}
|
||||
if (dataDiskOffering.getDiskSize() < dataDiskTemplate.getSize()) {
|
||||
throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " +
|
||||
dataDiskTemplateId + ". Disk offering size should be greater than or equal to the template size");
|
||||
}
|
||||
_templateDao.loadDetails(dataDiskTemplate);
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.volume, 1);
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, dataDiskOffering.getDiskSize());
|
||||
}
|
||||
}
|
||||
|
||||
// check that the affinity groups exist
|
||||
if (affinityGroupIdList != null) {
|
||||
for (Long affinityGroupId : affinityGroupIdList) {
|
||||
|
|
@ -3591,7 +3626,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
|
||||
UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering,
|
||||
isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap);
|
||||
isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, datadiskTemplateToDiskOfferringMap);
|
||||
|
||||
// Assign instance to the group
|
||||
try {
|
||||
|
|
@ -3651,7 +3686,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplate template, final String hostName, final String displayName, final Account owner,
|
||||
final Long diskOfferingId, final Long diskSize, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard,
|
||||
final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap<String, NicProfile> networkNicMap,
|
||||
final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException {
|
||||
final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final Map<String, Map<Integer, String>> extraDhcpOptionMap, final Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException {
|
||||
return Transaction.execute(new TransactionCallbackWithException<UserVmVO, InsufficientCapacityException>() {
|
||||
@Override
|
||||
public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException {
|
||||
|
|
@ -3753,7 +3788,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
networkNicMap, plan, extraDhcpOptionMap);
|
||||
} else {
|
||||
_orchSrvc.createVirtualMachine(vm.getUuid(), Long.toString(owner.getAccountId()), Long.toString(template.getId()), hostName, displayName, hypervisorType.name(),
|
||||
offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap);
|
||||
offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -4017,6 +4052,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
s_logger.error("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state");
|
||||
throw new ConcurrentOperationException("Failed to deploy VM "+vm);
|
||||
}
|
||||
|
||||
try {
|
||||
if (!cmd.getDataDiskTemplateToDiskOfferingMap().isEmpty()) {
|
||||
List<VolumeVO> vols = _volsDao.findByInstance(tmpVm.getId());
|
||||
for (VolumeVO vol : vols) {
|
||||
if (vol.getVolumeType() == Volume.Type.DATADISK) {
|
||||
DiskOffering doff = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
|
||||
_volService.resizeVolumeOnHypervisor(vol.getId(), doff.getDiskSize(), tmpVm.getHostId(), vm.getInstanceName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
s_logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e);
|
||||
}
|
||||
|
||||
} finally {
|
||||
updateVmStateForFailedVmCreation(vm.getId(), hostId);
|
||||
}
|
||||
|
|
@ -4748,19 +4799,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
String sshKeyPairName = cmd.getSSHKeyPairName();
|
||||
Boolean displayVm = cmd.getDisplayVm();
|
||||
String keyboard = cmd.getKeyboard();
|
||||
Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap();
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
if (cmd.getNetworkIds() != null) {
|
||||
throw new InvalidParameterValueException("Can't specify network Ids in Basic zone");
|
||||
} else {
|
||||
vm = createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId,
|
||||
size , group , cmd.getHypervisor(), cmd.getHttpMethod(), userData , sshKeyPairName , cmd.getIpToNetworkMap(), addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(),
|
||||
cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap());
|
||||
cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
|
||||
}
|
||||
} else {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), getSecurityGroupIdList(cmd), owner, name,
|
||||
displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard,
|
||||
cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap());
|
||||
cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
|
||||
|
||||
} else {
|
||||
if (cmd.getSecurityGroupIdList() != null && !cmd.getSecurityGroupIdList().isEmpty()) {
|
||||
|
|
@ -4768,7 +4820,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
vm = createAdvancedVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), owner, name, displayName, diskOfferingId, size, group,
|
||||
cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(),
|
||||
cmd.getCustomId(), cmd.getDhcpOptionsMap());
|
||||
cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
|
||||
}
|
||||
}
|
||||
// check if this templateId has a child ISO
|
||||
List<VMTemplateVO> child_templates = _templateDao.listByParentTemplatetId(templateId);
|
||||
for (VMTemplateVO tmpl: child_templates){
|
||||
if (tmpl.getFormat() == Storage.ImageFormat.ISO){
|
||||
s_logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId());
|
||||
_tmplService.attachIso(tmpl.getId(), vm.getId());
|
||||
}
|
||||
}
|
||||
return vm;
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
|||
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
//import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
|
|
@ -218,7 +218,7 @@ public class HypervisorTemplateAdapterTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
//@Test
|
||||
public void testEmitDeleteEventUuid() throws InterruptedException, ExecutionException, EventBusException {
|
||||
//All the mocks required for this test to work.
|
||||
ImageStoreEntity store = mock(ImageStoreEntity.class);
|
||||
|
|
|
|||
|
|
@ -56,17 +56,23 @@ import com.cloud.agent.api.SecStorageSetupCommand;
|
|||
import com.cloud.agent.api.SecStorageVMSetupCommand;
|
||||
import com.cloud.agent.api.StartupCommand;
|
||||
import com.cloud.agent.api.StartupSecondaryStorageCommand;
|
||||
import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand;
|
||||
import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand;
|
||||
import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand;
|
||||
import com.cloud.agent.api.storage.DownloadAnswer;
|
||||
import com.cloud.agent.api.storage.GetDatadisksAnswer;
|
||||
import com.cloud.agent.api.storage.GetDatadisksCommand;
|
||||
import com.cloud.agent.api.storage.ListTemplateAnswer;
|
||||
import com.cloud.agent.api.storage.ListTemplateCommand;
|
||||
import com.cloud.agent.api.storage.ListVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.ListVolumeCommand;
|
||||
import com.cloud.agent.api.storage.OVFHelper;
|
||||
import com.cloud.agent.api.storage.UploadCommand;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DatadiskTO;
|
||||
import com.cloud.agent.api.to.NfsTO;
|
||||
import com.cloud.agent.api.to.S3TO;
|
||||
import com.cloud.agent.api.to.SwiftTO;
|
||||
|
|
@ -95,6 +101,7 @@ import com.cloud.storage.template.VhdProcessor;
|
|||
import com.cloud.storage.template.VmdkProcessor;
|
||||
import com.cloud.utils.EncryptionUtil;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.SwiftUtil;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
|
|
@ -117,6 +124,7 @@ import io.netty.handler.codec.http.HttpRequestDecoder;
|
|||
import io.netty.handler.codec.http.HttpResponseEncoder;
|
||||
import io.netty.handler.logging.LogLevel;
|
||||
import io.netty.handler.logging.LoggingHandler;
|
||||
|
||||
import org.apache.cloudstack.framework.security.keystore.KeystoreManager;
|
||||
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
|
||||
import org.apache.cloudstack.storage.command.CopyCommand;
|
||||
|
|
@ -159,6 +167,8 @@ import static java.lang.String.format;
|
|||
import static java.util.Arrays.asList;
|
||||
import static org.apache.commons.lang.StringUtils.substringAfterLast;
|
||||
|
||||
import java.io.OutputStreamWriter;
|
||||
|
||||
public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
|
||||
|
||||
public static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class);
|
||||
|
|
@ -205,7 +215,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
protected String _parent = "/mnt/SecStorage";
|
||||
final private String _tmpltpp = "template.properties";
|
||||
protected String createTemplateFromSnapshotXenScript;
|
||||
private HashMap<String,UploadEntity> uploadEntityStateMap = new HashMap<String,UploadEntity>();
|
||||
private HashMap<String, UploadEntity> uploadEntityStateMap = new HashMap<String, UploadEntity>();
|
||||
private String _ssvmPSK = null;
|
||||
|
||||
public void setParentPath(String path) {
|
||||
|
|
@ -229,9 +239,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
* @param params
|
||||
* @return nfsVersion value if exists, null in other case
|
||||
*/
|
||||
public static Integer retrieveNfsVersionFromParams(Map<String, Object> params){
|
||||
public static Integer retrieveNfsVersionFromParams(Map<String, Object> params) {
|
||||
Integer nfsVersion = null;
|
||||
if (params.get("nfsVersion") != null){
|
||||
if (params.get("nfsVersion") != null) {
|
||||
String nfsVersionParam = (String)params.get("nfsVersion");
|
||||
try {
|
||||
nfsVersion = Integer.valueOf(nfsVersionParam);
|
||||
|
|
@ -281,11 +291,296 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
return execute((DeleteCommand)cmd);
|
||||
} else if (cmd instanceof UploadStatusCommand) {
|
||||
return execute((UploadStatusCommand)cmd);
|
||||
} else if (cmd instanceof GetDatadisksCommand) {
|
||||
return execute((GetDatadisksCommand)cmd);
|
||||
} else if (cmd instanceof CreateDatadiskTemplateCommand) {
|
||||
return execute((CreateDatadiskTemplateCommand)cmd);
|
||||
} else {
|
||||
return Answer.createUnsupportedCommandAnswer(cmd);
|
||||
}
|
||||
}
|
||||
|
||||
public Answer execute(GetDatadisksCommand cmd) {
|
||||
DataTO srcData = cmd.getData();
|
||||
TemplateObjectTO template = (TemplateObjectTO)srcData;
|
||||
DataStoreTO srcStore = srcData.getDataStore();
|
||||
if (!(srcStore instanceof NfsTO)) {
|
||||
return new CreateDatadiskTemplateAnswer("Unsupported protocol");
|
||||
}
|
||||
NfsTO nfsImageStore = (NfsTO)srcStore;
|
||||
String secondaryStorageUrl = nfsImageStore.getUrl();
|
||||
assert (secondaryStorageUrl != null);
|
||||
String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath();
|
||||
Pair<String, String> templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName());
|
||||
String templateRelativeFolderPath = templateInfo.first();
|
||||
|
||||
try {
|
||||
String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion);
|
||||
s_logger.info("MDOVE Secondary storage mount point: " + secondaryMountPoint);
|
||||
|
||||
String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension());
|
||||
|
||||
String ovfFilePath = getOVFFilePath(srcOVAFileName);
|
||||
if (ovfFilePath == null) {
|
||||
Script command = new Script("tar", 0, s_logger);
|
||||
command.add("--no-same-owner");
|
||||
command.add("--no-same-permissions");
|
||||
command.add("-xf", srcOVAFileName);
|
||||
command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath);
|
||||
s_logger.info("Executing command: " + command.toString());
|
||||
String result = command.execute();
|
||||
if (result != null) {
|
||||
String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
command = new Script("chmod", 0, s_logger);
|
||||
command.add("-R");
|
||||
command.add("666", secondaryMountPoint + File.separator + templateRelativeFolderPath);
|
||||
result = command.execute();
|
||||
if (result != null) {
|
||||
s_logger.warn("Unable to set permissions for " + secondaryMountPoint + File.separator + templateRelativeFolderPath + " due to " + result);
|
||||
}
|
||||
}
|
||||
|
||||
Script command = new Script("cp", _timeout, s_logger);
|
||||
command.add(ovfFilePath);
|
||||
command.add(ovfFilePath + ".orig");
|
||||
String result = command.execute();
|
||||
if (result != null) {
|
||||
String msg = "Unable to rename original OVF, error msg: " + result;
|
||||
s_logger.error(msg);
|
||||
}
|
||||
|
||||
s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA");
|
||||
OVFHelper ovfHelper = new OVFHelper();
|
||||
|
||||
List<DatadiskTO> disks = ovfHelper.getOVFVolumeInfo(ovfFilePath);
|
||||
return new GetDatadisksAnswer(disks);
|
||||
} catch (Exception e) {
|
||||
String msg = "Get Datadisk Template Count failed due to " + e.getMessage();
|
||||
s_logger.error(msg, e);
|
||||
return new GetDatadisksAnswer(msg);
|
||||
}
|
||||
}
|
||||
|
||||
public Answer execute(CreateDatadiskTemplateCommand cmd) {
|
||||
TemplateObjectTO diskTemplate = new TemplateObjectTO();
|
||||
TemplateObjectTO dataDiskTemplate = (TemplateObjectTO)cmd.getDataDiskTemplate();
|
||||
DataStoreTO dataStore = dataDiskTemplate.getDataStore();
|
||||
if (!(dataStore instanceof NfsTO)) {
|
||||
return new CreateDatadiskTemplateAnswer("Unsupported protocol");
|
||||
}
|
||||
NfsTO nfsImageStore = (NfsTO)dataStore;
|
||||
String secondaryStorageUrl = nfsImageStore.getUrl();
|
||||
assert (secondaryStorageUrl != null);
|
||||
|
||||
try {
|
||||
String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion);
|
||||
|
||||
long templateId = dataDiskTemplate.getId();
|
||||
String templateUniqueName = dataDiskTemplate.getUniqueName();
|
||||
String origDisk = cmd.getPath();
|
||||
long virtualSize = dataDiskTemplate.getSize();
|
||||
String diskName = origDisk.substring((origDisk.lastIndexOf(File.separator)) + 1);
|
||||
long physicalSize = new File(origDisk).length();
|
||||
String newTmplDir = getTemplateRelativeDirInSecStorage(dataDiskTemplate.getAccountId(), dataDiskTemplate.getId());
|
||||
String newTmplDirAbsolute = secondaryMountPoint + File.separator + newTmplDir;
|
||||
|
||||
String ovfFilePath = getOVFFilePath(origDisk);
|
||||
if (!cmd.getBootable()) {
|
||||
// Create folder to hold datadisk template
|
||||
synchronized (newTmplDir.intern()) {
|
||||
Script command = new Script("mkdir", _timeout, s_logger);
|
||||
command.add("-p");
|
||||
command.add(newTmplDirAbsolute);
|
||||
String result = command.execute();
|
||||
if (result != null) {
|
||||
String msg = "Unable to prepare template directory: " + newTmplDir + ", storage: " + secondaryStorageUrl + ", error msg: " + result;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
}
|
||||
// Move Datadisk VMDK from parent template folder to Datadisk template folder
|
||||
synchronized (origDisk.intern()) {
|
||||
Script command = new Script("mv", _timeout, s_logger);
|
||||
command.add(origDisk);
|
||||
command.add(newTmplDirAbsolute);
|
||||
String result = command.execute();
|
||||
if (result != null) {
|
||||
String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
command = new Script("cp", _timeout, s_logger);
|
||||
command.add(ovfFilePath + ".orig");
|
||||
command.add(newTmplDirAbsolute);
|
||||
result = command.execute();
|
||||
if (result != null) {
|
||||
String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create OVF for the disk
|
||||
String newOvfFilePath = newTmplDirAbsolute + File.separator + ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1);
|
||||
OVFHelper ovfHelper = new OVFHelper();
|
||||
ovfHelper.rewriteOVFFile(ovfFilePath + ".orig", newOvfFilePath, diskName);
|
||||
|
||||
postCreatePrivateTemplate(newTmplDirAbsolute, templateId, templateUniqueName, physicalSize, virtualSize);
|
||||
writeMetaOvaForTemplate(newTmplDirAbsolute, ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1), diskName, templateUniqueName, physicalSize);
|
||||
|
||||
diskTemplate.setId(templateId);
|
||||
if (diskName.endsWith("iso")){
|
||||
diskTemplate.setPath(newTmplDir + File.separator + diskName);
|
||||
}
|
||||
else {
|
||||
diskTemplate.setPath(newTmplDir + File.separator + templateUniqueName + ".ova");
|
||||
}
|
||||
diskTemplate.setSize(virtualSize);
|
||||
diskTemplate.setPhysicalSize(physicalSize);
|
||||
} catch (Exception e) {
|
||||
String msg = "Create Datadisk template failed due to " + e.getMessage();
|
||||
s_logger.error(msg, e);
|
||||
return new CreateDatadiskTemplateAnswer(msg);
|
||||
}
|
||||
return new CreateDatadiskTemplateAnswer(diskTemplate);
|
||||
}
|
||||
|
||||
/*
|
||||
* return Pair of <Template relative path, Template name>
|
||||
* Template url may or may not end with .ova extension
|
||||
*/
|
||||
public static Pair<String, String> decodeTemplateRelativePathAndNameFromUrl(String storeUrl, String templateUrl, String defaultName) {
|
||||
|
||||
String templateName = null;
|
||||
String mountPoint = null;
|
||||
if (templateUrl.endsWith(".ova")) {
|
||||
int index = templateUrl.lastIndexOf("/");
|
||||
mountPoint = templateUrl.substring(0, index);
|
||||
mountPoint = mountPoint.substring(storeUrl.length() + 1);
|
||||
if (!mountPoint.endsWith("/")) {
|
||||
mountPoint = mountPoint + "/";
|
||||
}
|
||||
|
||||
templateName = templateUrl.substring(index + 1).replace(".ova", "");
|
||||
|
||||
if (templateName == null || templateName.isEmpty()) {
|
||||
templateName = defaultName;
|
||||
}
|
||||
} else {
|
||||
mountPoint = templateUrl.substring(storeUrl.length() + 1);
|
||||
if (!mountPoint.endsWith("/")) {
|
||||
mountPoint = mountPoint + "/";
|
||||
}
|
||||
templateName = defaultName;
|
||||
}
|
||||
|
||||
return new Pair<String, String>(mountPoint, templateName);
|
||||
}
|
||||
|
||||
public static String getTemplateOnSecStorageFilePath(String secStorageMountPoint, String templateRelativeFolderPath, String templateName, String fileExtension) {
|
||||
|
||||
StringBuffer sb = new StringBuffer();
|
||||
sb.append(secStorageMountPoint);
|
||||
if (!secStorageMountPoint.endsWith("/"))
|
||||
sb.append("/");
|
||||
|
||||
sb.append(templateRelativeFolderPath);
|
||||
if (!secStorageMountPoint.endsWith("/"))
|
||||
sb.append("/");
|
||||
|
||||
sb.append(templateName);
|
||||
if (!fileExtension.startsWith("."))
|
||||
sb.append(".");
|
||||
sb.append(fileExtension);
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public static String getSecondaryDatastoreUUID(String storeUrl) {
|
||||
return UUID.nameUUIDFromBytes(storeUrl.getBytes()).toString();
|
||||
}
|
||||
|
||||
private static String getTemplateRelativeDirInSecStorage(long accountId, long templateId) {
|
||||
return "template/tmpl/" + accountId + "/" + templateId;
|
||||
}
|
||||
|
||||
private void postCreatePrivateTemplate(final String installFullPath, final long templateId, final String templateName, final long size, final long virtualSize) throws Exception {
|
||||
// TODO a bit ugly here
|
||||
try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8"));) {
|
||||
out.write("filename=" + templateName + ".ova");
|
||||
out.newLine();
|
||||
out.write("description=privateTemplate");
|
||||
out.newLine();
|
||||
out.write("hvm=false");
|
||||
out.newLine();
|
||||
out.write("size=" + size);
|
||||
out.newLine();
|
||||
out.write("ova=false");
|
||||
out.newLine();
|
||||
out.write("id=" + templateId);
|
||||
out.newLine();
|
||||
out.write("ova.filename=" + templateName + ".ova");
|
||||
out.newLine();
|
||||
out.write("uniquename=" + templateName);
|
||||
out.newLine();
|
||||
out.write("ova.virtualsize=" + virtualSize);
|
||||
out.newLine();
|
||||
out.write("virtualsize=" + virtualSize);
|
||||
out.newLine();
|
||||
out.write("ova.size=" + size);
|
||||
out.newLine();
|
||||
out.write("checksum=");
|
||||
out.newLine();
|
||||
out.write("public=false");
|
||||
out.newLine();
|
||||
}
|
||||
}
|
||||
|
||||
private void writeMetaOvaForTemplate(final String installFullPath, final String ovfFilename, final String vmdkFilename, final String templateName, final long diskSize) throws Exception {
|
||||
|
||||
// TODO a bit ugly here
|
||||
BufferedWriter out = null;
|
||||
try {
|
||||
out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8"));
|
||||
out.write("ova.filename=" + templateName + ".ova");
|
||||
out.newLine();
|
||||
out.write("version=1.0");
|
||||
out.newLine();
|
||||
out.write("ovf=" + ovfFilename);
|
||||
out.newLine();
|
||||
out.write("numDisks=1");
|
||||
out.newLine();
|
||||
out.write("disk1.name=" + vmdkFilename);
|
||||
out.newLine();
|
||||
out.write("disk1.size=" + diskSize);
|
||||
out.newLine();
|
||||
} finally {
|
||||
if (out != null) {
|
||||
out.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private String getOVFFilePath(String srcOVAFileName) {
|
||||
File file = new File(srcOVAFileName);
|
||||
assert (_storage != null);
|
||||
String[] files = _storage.listFiles(file.getParent());
|
||||
if (files != null) {
|
||||
for (String fileName : files) {
|
||||
if (fileName.toLowerCase().endsWith(".ovf")) {
|
||||
File ovfFile = new File(fileName);
|
||||
return file.getParent() + File.separator + ovfFile.getName();
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected CopyCmdAnswer postProcessing(File destFile, String downloadPath, String destPath, DataTO srcData, DataTO destData) throws ConfigurationException {
|
||||
if (destData.getObjectType() == DataObjectType.SNAPSHOT) {
|
||||
SnapshotObjectTO snapshot = new SnapshotObjectTO();
|
||||
|
|
@ -418,8 +713,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
}
|
||||
}
|
||||
|
||||
protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO srcDataStore, TemplateObjectTO destData,
|
||||
NfsTO destDataStore) {
|
||||
protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO srcDataStore, TemplateObjectTO destData, NfsTO destDataStore) {
|
||||
String srcMountPoint = getRootDir(srcDataStore.getUrl(), _nfsVersion);
|
||||
String snapshotPath = srcData.getPath();
|
||||
int index = snapshotPath.lastIndexOf("/");
|
||||
|
|
@ -512,9 +806,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
try {
|
||||
_storage.create(destFile.getAbsolutePath(), _tmpltpp);
|
||||
try ( // generate template.properties file
|
||||
FileWriter writer = new FileWriter(metaFile);
|
||||
BufferedWriter bufferWriter = new BufferedWriter(writer);
|
||||
) {
|
||||
FileWriter writer = new FileWriter(metaFile); BufferedWriter bufferWriter = new BufferedWriter(writer);) {
|
||||
// KVM didn't change template unique name, just used the template name passed from orchestration layer, so no need
|
||||
// to send template name back.
|
||||
bufferWriter.write("uniquename=" + destData.getName());
|
||||
|
|
@ -599,9 +891,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData, (NfsTO)destDataStore);
|
||||
} else if (destDataStore instanceof SwiftTO) {
|
||||
//create template on the same data store
|
||||
CopyCmdAnswer answer =
|
||||
(CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
|
||||
(NfsTO)srcDataStore);
|
||||
CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
|
||||
(NfsTO)srcDataStore);
|
||||
if (!answer.getResult()) {
|
||||
return answer;
|
||||
}
|
||||
|
|
@ -616,9 +907,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
} else if (destDataStore instanceof S3TO) {
|
||||
//create template on the same data store
|
||||
CopyCmdAnswer answer =
|
||||
(CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
|
||||
(NfsTO)srcDataStore);
|
||||
CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
|
||||
(NfsTO)srcDataStore);
|
||||
if (!answer.getResult()) {
|
||||
return answer;
|
||||
}
|
||||
|
|
@ -731,10 +1021,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
if (!destFile.createNewFile()) {
|
||||
s_logger.warn("Reusing existing file " + destFile.getPath());
|
||||
}
|
||||
try(FileOutputStream outputStream = new FileOutputStream(destFile);) {
|
||||
try (FileOutputStream outputStream = new FileOutputStream(destFile);) {
|
||||
entity.writeTo(outputStream);
|
||||
}catch (IOException e) {
|
||||
s_logger.debug("downloadFromUrlToNfs:Exception:"+e.getMessage(),e);
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("downloadFromUrlToNfs:Exception:" + e.getMessage(), e);
|
||||
}
|
||||
return new File(destFile.getAbsolutePath());
|
||||
} catch (IOException e) {
|
||||
|
|
@ -774,14 +1064,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
metaFile.delete();
|
||||
uniqDir.delete();
|
||||
String md5sum = null;
|
||||
try (FileInputStream fs = new FileInputStream(file)){
|
||||
try (FileInputStream fs = new FileInputStream(file)) {
|
||||
md5sum = DigestUtils.md5Hex(fs);
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to get md5sum: " + file.getAbsoluteFile());
|
||||
}
|
||||
|
||||
DownloadAnswer answer =
|
||||
new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum);
|
||||
DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum);
|
||||
return answer;
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to register template into swift", e);
|
||||
|
|
@ -852,7 +1141,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
processor = new RawImageProcessor();
|
||||
} else if (format == ImageFormat.VMDK) {
|
||||
processor = new VmdkProcessor();
|
||||
} if (format == ImageFormat.TAR) {
|
||||
}
|
||||
if (format == ImageFormat.TAR) {
|
||||
processor = new TARProcessor();
|
||||
}
|
||||
|
||||
|
|
@ -991,11 +1281,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
long virtualSize = getVirtualSize(srcFile, getTemplateFormat(srcFile.getName()));
|
||||
|
||||
File metaFile = swiftWriteMetadataFile(metaFileName,
|
||||
uniqueName,
|
||||
srcFile.getName(),
|
||||
srcFile.length(),
|
||||
virtualSize);
|
||||
File metaFile = swiftWriteMetadataFile(metaFileName, uniqueName, srcFile.getName(), srcFile.length(), virtualSize);
|
||||
|
||||
SwiftUtil.putObject(swift, metaFile, containerName, _tmpltpp);
|
||||
metaFile.delete();
|
||||
|
|
@ -1026,16 +1312,15 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
try {
|
||||
|
||||
if (destData instanceof SnapshotObjectTO) {
|
||||
pathId = ((SnapshotObjectTO) destData).getVolume().getId();
|
||||
pathId = ((SnapshotObjectTO)destData).getVolume().getId();
|
||||
}
|
||||
|
||||
String containerName = SwiftUtil.getContainerName(destData.getObjectType().toString(), pathId);
|
||||
String swiftPath = SwiftUtil.putObject(swift, srcFile, containerName, srcFile.getName());
|
||||
|
||||
|
||||
DataTO retObj = null;
|
||||
if (destData.getObjectType() == DataObjectType.TEMPLATE) {
|
||||
TemplateObjectTO destTemplateData = (TemplateObjectTO) destData;
|
||||
TemplateObjectTO destTemplateData = (TemplateObjectTO)destData;
|
||||
String uniqueName = destTemplateData.getName();
|
||||
swiftUploadMetadataFile(swift, srcFile, containerName, uniqueName);
|
||||
TemplateObjectTO newTemplate = new TemplateObjectTO();
|
||||
|
|
@ -1066,8 +1351,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String swiftDownload(SwiftTO swift, String container, String rfilename, String lFullPath) {
|
||||
Script command = new Script("/bin/bash", s_logger);
|
||||
command.add("-c");
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
|
||||
swift.getUserName() + " -K " + swift.getKey() + " download " + container + " " + rfilename + " -o " + lFullPath);
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
|
||||
+ " -K " + swift.getKey() + " download " + container + " " + rfilename + " -o " + lFullPath);
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
String result = command.execute(parser);
|
||||
if (result != null) {
|
||||
|
|
@ -1092,8 +1377,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String swiftDownloadContainer(SwiftTO swift, String container, String ldir) {
|
||||
Script command = new Script("/bin/bash", s_logger);
|
||||
command.add("-c");
|
||||
command.add("cd " + ldir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
|
||||
swift.getUserName() + " -K " + swift.getKey() + " download " + container);
|
||||
command.add("cd " + ldir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
|
||||
+ swift.getUserName() + " -K " + swift.getKey() + " download " + container);
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
String result = command.execute(parser);
|
||||
if (result != null) {
|
||||
|
|
@ -1120,8 +1405,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
List<String> files = new ArrayList<String>();
|
||||
if (lFilename.equals("*")) {
|
||||
File dir = new File(lDir);
|
||||
String [] dir_lst = dir.list();
|
||||
if(dir_lst != null) {
|
||||
String[] dir_lst = dir.list();
|
||||
if (dir_lst != null) {
|
||||
for (String file : dir_lst) {
|
||||
if (file.startsWith(".")) {
|
||||
continue;
|
||||
|
|
@ -1139,11 +1424,11 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
Script command = new Script("/bin/bash", s_logger);
|
||||
command.add("-c");
|
||||
if (size <= SWIFT_MAX_SIZE) {
|
||||
command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " +
|
||||
swift.getAccount() + ":" + swift.getUserName() + " -K " + swift.getKey() + " upload " + container + " " + file);
|
||||
command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
|
||||
+ swift.getUserName() + " -K " + swift.getKey() + " upload " + container + " " + file);
|
||||
} else {
|
||||
command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " +
|
||||
swift.getAccount() + ":" + swift.getUserName() + " -K " + swift.getKey() + " upload -S " + SWIFT_MAX_SIZE + " " + container + " " + file);
|
||||
command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
|
||||
+ swift.getUserName() + " -K " + swift.getKey() + " upload -S " + SWIFT_MAX_SIZE + " " + container + " " + file);
|
||||
}
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
String result = command.execute(parser);
|
||||
|
|
@ -1170,8 +1455,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String[] swiftList(SwiftTO swift, String container, String rFilename) {
|
||||
Script command = new Script("/bin/bash", s_logger);
|
||||
command.add("-c");
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
|
||||
swift.getUserName() + " -K " + swift.getKey() + " list " + container + " " + rFilename);
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
|
||||
+ " -K " + swift.getKey() + " list " + container + " " + rFilename);
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
String result = command.execute(parser);
|
||||
if (result == null && parser.getLines() != null) {
|
||||
|
|
@ -1192,8 +1477,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String swiftDelete(SwiftTO swift, String container, String object) {
|
||||
Script command = new Script("/bin/bash", s_logger);
|
||||
command.add("-c");
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
|
||||
swift.getUserName() + " -K " + swift.getKey() + " delete " + container + " " + object);
|
||||
command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
|
||||
+ " -K " + swift.getKey() + " delete " + container + " " + object);
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
String result = command.execute(parser);
|
||||
if (result != null) {
|
||||
|
|
@ -1259,8 +1544,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
S3Utils.deleteDirectory(s3, bucket, path);
|
||||
return new Answer(cmd, true, String.format("Deleted snapshot %1%s from bucket %2$s.", path, bucket));
|
||||
} catch (Exception e) {
|
||||
final String errorMessage =
|
||||
String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
final String errorMessage = String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
s_logger.error(errorMessage, e);
|
||||
return new Answer(cmd, false, errorMessage);
|
||||
}
|
||||
|
|
@ -1342,39 +1626,39 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
try {
|
||||
File prvKeyFile = File.createTempFile("prvkey", null);
|
||||
String prvkeyPath = prvKeyFile.getAbsolutePath();
|
||||
try(BufferedWriter prvt_key_file = new BufferedWriter(new FileWriter(prvKeyFile));) {
|
||||
try (BufferedWriter prvt_key_file = new BufferedWriter(new FileWriter(prvKeyFile));) {
|
||||
prvt_key_file.write(prvKey);
|
||||
}catch (IOException e) {
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to config ssl: " + e.toString());
|
||||
}
|
||||
|
||||
File pubCertFile = File.createTempFile("pubcert", null);
|
||||
String pubCertFilePath = pubCertFile.getAbsolutePath();
|
||||
|
||||
try(BufferedWriter pub_cert_file = new BufferedWriter(new FileWriter(pubCertFile));) {
|
||||
try (BufferedWriter pub_cert_file = new BufferedWriter(new FileWriter(pubCertFile));) {
|
||||
pub_cert_file.write(pubCert);
|
||||
}catch (IOException e) {
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to config ssl: " + e.toString());
|
||||
}
|
||||
|
||||
String certChainFilePath = null, rootCACertFilePath = null;
|
||||
File certChainFile = null, rootCACertFile = null;
|
||||
if(certChain != null){
|
||||
if (certChain != null) {
|
||||
certChainFile = File.createTempFile("certchain", null);
|
||||
certChainFilePath = certChainFile.getAbsolutePath();
|
||||
try(BufferedWriter cert_chain_out = new BufferedWriter(new FileWriter(certChainFile));) {
|
||||
try (BufferedWriter cert_chain_out = new BufferedWriter(new FileWriter(certChainFile));) {
|
||||
cert_chain_out.write(certChain);
|
||||
}catch (IOException e) {
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to config ssl: " + e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
if(rootCACert != null){
|
||||
if (rootCACert != null) {
|
||||
rootCACertFile = File.createTempFile("rootcert", null);
|
||||
rootCACertFilePath = rootCACertFile.getAbsolutePath();
|
||||
try(BufferedWriter root_ca_cert_file = new BufferedWriter(new FileWriter(rootCACertFile));) {
|
||||
try (BufferedWriter root_ca_cert_file = new BufferedWriter(new FileWriter(rootCACertFile));) {
|
||||
root_ca_cert_file.write(rootCACert);
|
||||
}catch (IOException e) {
|
||||
} catch (IOException e) {
|
||||
s_logger.debug("Failed to config ssl: " + e.toString());
|
||||
}
|
||||
}
|
||||
|
|
@ -1383,10 +1667,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
prvKeyFile.delete();
|
||||
pubCertFile.delete();
|
||||
if(certChainFile != null){
|
||||
if (certChainFile != null) {
|
||||
certChainFile.delete();
|
||||
}
|
||||
if(rootCACertFile != null){
|
||||
if (rootCACertFile != null) {
|
||||
rootCACertFile.delete();
|
||||
}
|
||||
|
||||
|
|
@ -1458,7 +1742,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
public void run() {
|
||||
try {
|
||||
Channel ch = b.bind(PORT).sync().channel();
|
||||
s_logger.info(String.format("Started post upload server on port %d with %d workers",PORT,NO_OF_WORKERS));
|
||||
s_logger.info(String.format("Started post upload server on port %d with %d workers", PORT, NO_OF_WORKERS));
|
||||
ch.closeFuture().sync();
|
||||
} catch (InterruptedException e) {
|
||||
s_logger.info("Failed to start post upload server");
|
||||
|
|
@ -1475,7 +1759,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
private void savePostUploadPSK(String psk) {
|
||||
try {
|
||||
FileUtils.writeStringToFile(new File(POST_UPLOAD_KEY_LOCATION),psk, "utf-8");
|
||||
FileUtils.writeStringToFile(new File(POST_UPLOAD_KEY_LOCATION), psk, "utf-8");
|
||||
} catch (IOException ex) {
|
||||
s_logger.debug("Failed to copy PSK to the file.", ex);
|
||||
}
|
||||
|
|
@ -1533,8 +1817,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
S3Utils.deleteObject(s3, bucket, path);
|
||||
return new Answer(cmd, true, String.format("Deleted snapshot %1%s from bucket %2$s.", path, bucket));
|
||||
} catch (Exception e) {
|
||||
final String errorMessage =
|
||||
String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
final String errorMessage = String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
s_logger.error(errorMessage, e);
|
||||
return new Answer(cmd, false, errorMessage);
|
||||
}
|
||||
|
|
@ -1568,8 +1851,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
if (tmpFile == null) {
|
||||
continue;
|
||||
}
|
||||
try (FileReader fr = new FileReader(tmpFile);
|
||||
BufferedReader brf = new BufferedReader(fr);) {
|
||||
try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) {
|
||||
String line = null;
|
||||
String uniqName = null;
|
||||
Long size = null;
|
||||
|
|
@ -1580,7 +1862,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
uniqName = line.split("=")[1];
|
||||
} else if (line.startsWith("size=")) {
|
||||
physicalSize = Long.parseLong(line.split("=")[1]);
|
||||
} else if (line.startsWith("virtualsize=")){
|
||||
} else if (line.startsWith("virtualsize=")) {
|
||||
size = Long.parseLong(line.split("=")[1]);
|
||||
} else if (line.startsWith("filename=")) {
|
||||
name = line.split("=")[1];
|
||||
|
|
@ -1597,8 +1879,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
TemplateProp prop = new TemplateProp(uniqName, container + File.separator + name, size, physicalSize, true, false);
|
||||
tmpltInfos.put(uniqName, prop);
|
||||
}
|
||||
} catch (IOException ex)
|
||||
{
|
||||
} catch (IOException ex) {
|
||||
s_logger.debug("swiftListTemplate:Exception:" + ex.getMessage());
|
||||
continue;
|
||||
}
|
||||
|
|
@ -1797,7 +2078,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
uploadEntityStateMap.remove(entityUuid);
|
||||
return new UploadStatusAnswer(cmd, UploadStatus.ERROR, uploadEntity.getErrorMessage());
|
||||
} else if (uploadEntity.getUploadState() == UploadEntity.Status.COMPLETED) {
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
|
||||
answer.setVirtualSize(uploadEntity.getVirtualSize());
|
||||
answer.setInstallPath(uploadEntity.getTmpltPath());
|
||||
answer.setPhysicalSize(uploadEntity.getPhysicalSize());
|
||||
|
|
@ -1805,9 +2086,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
uploadEntityStateMap.remove(entityUuid);
|
||||
return answer;
|
||||
} else if (uploadEntity.getUploadState() == UploadEntity.Status.IN_PROGRESS) {
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.IN_PROGRESS);
|
||||
UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.IN_PROGRESS);
|
||||
long downloadedSize = FileUtils.sizeOfDirectory(new File(uploadEntity.getInstallPathPrefix()));
|
||||
int downloadPercent = (int) (100 * downloadedSize / uploadEntity.getContentLength());
|
||||
int downloadPercent = (int)(100 * downloadedSize / uploadEntity.getContentLength());
|
||||
answer.setDownloadPercent(Math.min(downloadPercent, 100));
|
||||
return answer;
|
||||
}
|
||||
|
|
@ -1868,7 +2149,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String absoluteTemplatePath = parent + relativeTemplatePath;
|
||||
File tmpltPath = new File(absoluteTemplatePath);
|
||||
File tmpltParent = null;
|
||||
if(tmpltPath.exists() && tmpltPath.isDirectory()) {
|
||||
if (tmpltPath.exists() && tmpltPath.isDirectory()) {
|
||||
tmpltParent = tmpltPath;
|
||||
} else {
|
||||
tmpltParent = tmpltPath.getParentFile();
|
||||
|
|
@ -1926,8 +2207,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
S3Utils.deleteDirectory(s3, bucket, path);
|
||||
return new Answer(cmd, true, String.format("Deleted template %1$s from bucket %2$s.", path, bucket));
|
||||
} catch (Exception e) {
|
||||
final String errorMessage =
|
||||
String.format("Failed to delete template %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
final String errorMessage = String.format("Failed to delete template %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
|
||||
s_logger.error(errorMessage, e);
|
||||
return new Answer(cmd, false, errorMessage);
|
||||
}
|
||||
|
|
@ -2450,7 +2730,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
}
|
||||
|
||||
protected void mount(String localRootPath, String remoteDevice, URI uri, Integer nfsVersion) {
|
||||
s_logger.debug("mount " + uri.toString() + " on " + localRootPath + ((nfsVersion != null) ? " nfsVersion="+nfsVersion : ""));
|
||||
s_logger.debug("mount " + uri.toString() + " on " + localRootPath + ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
|
||||
ensureLocalRootPathExists(localRootPath, uri);
|
||||
|
||||
if (mountExists(localRootPath, uri)) {
|
||||
|
|
@ -2467,8 +2747,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
protected void attemptMount(String localRootPath, String remoteDevice, URI uri, Integer nfsVersion) {
|
||||
String result;
|
||||
s_logger.debug("Make cmdline call to mount " + remoteDevice + " at " + localRootPath + " based on uri " + uri
|
||||
+ ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
|
||||
s_logger.debug("Make cmdline call to mount " + remoteDevice + " at " + localRootPath + " based on uri " + uri + ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
|
||||
Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger);
|
||||
|
||||
String scheme = uri.getScheme().toLowerCase();
|
||||
|
|
@ -2535,9 +2814,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
}
|
||||
|
||||
if (!foundUser || !foundPswd) {
|
||||
String errMsg =
|
||||
"Missing user and password from URI. Make sure they" + "are in the query string and separated by '&'. E.g. "
|
||||
+ "cifs://example.com/some_share?user=foo&password=bar";
|
||||
String errMsg = "Missing user and password from URI. Make sure they" + "are in the query string and separated by '&'. E.g. "
|
||||
+ "cifs://example.com/some_share?user=foo&password=bar";
|
||||
s_logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
|
@ -2708,7 +2986,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
private String getScriptLocation(UploadEntity.ResourceType resourceType) {
|
||||
|
||||
String scriptsDir = (String) _params.get("template.scripts.dir");
|
||||
String scriptsDir = (String)_params.get("template.scripts.dir");
|
||||
if (scriptsDir == null) {
|
||||
scriptsDir = "scripts/storage/secondary";
|
||||
}
|
||||
|
|
@ -2726,7 +3004,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
public UploadEntity createUploadEntity(String uuid, String metadata, long contentLength) {
|
||||
TemplateOrVolumePostUploadCommand cmd = getTemplateOrVolumePostUploadCmd(metadata);
|
||||
UploadEntity uploadEntity = null;
|
||||
if(cmd == null ){
|
||||
if (cmd == null) {
|
||||
String errorMessage = "unable decode and deserialize metadata.";
|
||||
updateStateMapWithError(uuid, errorMessage);
|
||||
throw new InvalidParameterValueException(errorMessage);
|
||||
|
|
@ -2784,21 +3062,21 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
long accountTemplateDirSize = 0;
|
||||
File accountTemplateDir = new File(rootDir + getTemplatePathForAccount(accountId));
|
||||
if(accountTemplateDir.exists()) {
|
||||
if (accountTemplateDir.exists()) {
|
||||
FileUtils.sizeOfDirectory(accountTemplateDir);
|
||||
}
|
||||
long accountVolumeDirSize = 0;
|
||||
File accountVolumeDir = new File(rootDir + getVolumePathForAccount(accountId));
|
||||
if(accountVolumeDir.exists()) {
|
||||
if (accountVolumeDir.exists()) {
|
||||
accountVolumeDirSize = FileUtils.sizeOfDirectory(accountVolumeDir);
|
||||
}
|
||||
long accountSnapshotDirSize = 0;
|
||||
File accountSnapshotDir = new File(rootDir + getSnapshotPathForAccount(accountId));
|
||||
if(accountSnapshotDir.exists()) {
|
||||
if (accountSnapshotDir.exists()) {
|
||||
accountSnapshotDirSize = FileUtils.sizeOfDirectory(accountSnapshotDir);
|
||||
}
|
||||
s_logger.debug("accountTemplateDirSize: " + accountTemplateDirSize + " accountSnapshotDirSize: " +accountSnapshotDirSize + " accountVolumeDirSize: " +
|
||||
accountVolumeDirSize);
|
||||
s_logger.debug(
|
||||
"accountTemplateDirSize: " + accountTemplateDirSize + " accountSnapshotDirSize: " + accountSnapshotDirSize + " accountVolumeDirSize: " + accountVolumeDirSize);
|
||||
|
||||
int accountDirSizeInGB = getSizeInGB(accountTemplateDirSize + accountSnapshotDirSize + accountVolumeDirSize);
|
||||
int defaultMaxAccountSecondaryStorageInGB = Integer.parseInt(cmd.getDefaultMaxAccountSecondaryStorage());
|
||||
|
|
@ -2845,12 +3123,12 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String fileSavedTempLocation = uploadEntity.getInstallPathPrefix() + "/" + filename;
|
||||
|
||||
String uploadedFileExtension = FilenameUtils.getExtension(filename);
|
||||
String userSelectedFormat= uploadEntity.getFormat().toString();
|
||||
if(uploadedFileExtension.equals("zip") || uploadedFileExtension.equals("bz2") || uploadedFileExtension.equals("gz")) {
|
||||
String userSelectedFormat = uploadEntity.getFormat().toString();
|
||||
if (uploadedFileExtension.equals("zip") || uploadedFileExtension.equals("bz2") || uploadedFileExtension.equals("gz")) {
|
||||
userSelectedFormat += "." + uploadedFileExtension;
|
||||
}
|
||||
String formatError = ImageStoreUtil.checkTemplateFormat(fileSavedTempLocation, userSelectedFormat);
|
||||
if(StringUtils.isNotBlank(formatError)) {
|
||||
if (StringUtils.isNotBlank(formatError)) {
|
||||
String errorString = "File type mismatch between uploaded file and selected format. Selected file format: " + userSelectedFormat + ". Received: " + formatError;
|
||||
s_logger.error(errorString);
|
||||
return errorString;
|
||||
|
|
@ -2858,7 +3136,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
|
||||
int imgSizeGigs = getSizeInGB(_storage.getSize(fileSavedTempLocation));
|
||||
int maxSize = uploadEntity.getMaxSizeInGB();
|
||||
if(imgSizeGigs > maxSize) {
|
||||
if (imgSizeGigs > maxSize) {
|
||||
String errorMessage = "Maximum file upload size exceeded. Physical file size: " + imgSizeGigs + "GB. Maximum allowed size: " + maxSize + "GB.";
|
||||
s_logger.error(errorMessage);
|
||||
return errorMessage;
|
||||
|
|
@ -2936,7 +3214,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
}
|
||||
|
||||
Map<String, Processor> processors = _dlMgr.getProcessors();
|
||||
for (Processor processor : processors.values()) {
|
||||
for (Processor processor : processors.values()) {
|
||||
FormatInfo info = null;
|
||||
try {
|
||||
info = processor.process(resourcePath, null, templateName);
|
||||
|
|
@ -2962,7 +3240,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
}
|
||||
|
||||
private String getPostUploadPSK() {
|
||||
if(_ssvmPSK == null ) {
|
||||
if (_ssvmPSK == null) {
|
||||
try {
|
||||
_ssvmPSK = FileUtils.readFileToString(new File(POST_UPLOAD_KEY_LOCATION), "utf-8");
|
||||
} catch (IOException e) {
|
||||
|
|
@ -2972,22 +3250,23 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
return _ssvmPSK;
|
||||
}
|
||||
|
||||
public void updateStateMapWithError(String uuid,String errorMessage) {
|
||||
UploadEntity uploadEntity=null;
|
||||
if (uploadEntityStateMap.get(uuid)!=null) {
|
||||
uploadEntity=uploadEntityStateMap.get(uuid);
|
||||
}else {
|
||||
uploadEntity= new UploadEntity();
|
||||
public void updateStateMapWithError(String uuid, String errorMessage) {
|
||||
UploadEntity uploadEntity = null;
|
||||
if (uploadEntityStateMap.get(uuid) != null) {
|
||||
uploadEntity = uploadEntityStateMap.get(uuid);
|
||||
} else {
|
||||
uploadEntity = new UploadEntity();
|
||||
}
|
||||
uploadEntity.setStatus(UploadEntity.Status.ERROR);
|
||||
uploadEntity.setErrorMessage(errorMessage);
|
||||
uploadEntityStateMap.put(uuid, uploadEntity);
|
||||
}
|
||||
|
||||
public void validatePostUploadRequest(String signature, String metadata, String timeout, String hostname,long contentLength, String uuid) throws InvalidParameterValueException{
|
||||
public void validatePostUploadRequest(String signature, String metadata, String timeout, String hostname, long contentLength, String uuid)
|
||||
throws InvalidParameterValueException {
|
||||
// check none of the params are empty
|
||||
if(StringUtils.isEmpty(signature) || StringUtils.isEmpty(metadata) || StringUtils.isEmpty(timeout)) {
|
||||
updateStateMapWithError(uuid,"signature, metadata and expires are compulsory fields.");
|
||||
if (StringUtils.isEmpty(signature) || StringUtils.isEmpty(metadata) || StringUtils.isEmpty(timeout)) {
|
||||
updateStateMapWithError(uuid, "signature, metadata and expires are compulsory fields.");
|
||||
throw new InvalidParameterValueException("signature, metadata and expires are compulsory fields.");
|
||||
}
|
||||
|
||||
|
|
@ -3000,15 +3279,15 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
String fullUrl = "https://" + hostname + "/upload/" + uuid;
|
||||
String computedSignature = EncryptionUtil.generateSignature(metadata + fullUrl + timeout, getPostUploadPSK());
|
||||
boolean isSignatureValid = computedSignature.equals(signature);
|
||||
if(!isSignatureValid) {
|
||||
updateStateMapWithError(uuid,"signature validation failed.");
|
||||
if (!isSignatureValid) {
|
||||
updateStateMapWithError(uuid, "signature validation failed.");
|
||||
throw new InvalidParameterValueException("signature validation failed.");
|
||||
}
|
||||
|
||||
//validate timeout
|
||||
DateTime timeoutDateTime = DateTime.parse(timeout, ISODateTimeFormat.dateTime());
|
||||
if(timeoutDateTime.isBeforeNow()) {
|
||||
updateStateMapWithError(uuid,"request not valid anymore.");
|
||||
if (timeoutDateTime.isBeforeNow()) {
|
||||
updateStateMapWithError(uuid, "request not valid anymore.");
|
||||
throw new InvalidParameterValueException("request not valid anymore.");
|
||||
}
|
||||
}
|
||||
|
|
@ -3018,7 +3297,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S
|
|||
try {
|
||||
Gson gson = new GsonBuilder().create();
|
||||
cmd = gson.fromJson(EncryptionUtil.decodeData(metadata, getPostUploadPSK()), TemplateOrVolumePostUploadCommand.class);
|
||||
} catch(Exception ex) {
|
||||
} catch (Exception ex) {
|
||||
s_logger.error("exception while decoding and deserialising metadata", ex);
|
||||
}
|
||||
return cmd;
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ class TestDeployVmRootSize(cloudstackTestCase):
|
|||
accountid=self.account.name,
|
||||
domainid=self.domain.id,
|
||||
serviceofferingid=self.services_offering_vmware.id,
|
||||
templateid=self.tempobj.id,
|
||||
templateid=self.template.id,
|
||||
rootdisksize=newrootsize
|
||||
)
|
||||
else:
|
||||
|
|
@ -379,7 +379,7 @@ class TestDeployVmRootSize(cloudstackTestCase):
|
|||
accountid=self.account.name,
|
||||
domainid=self.domain.id,
|
||||
serviceofferingid=self.services_offering_vmware.id,
|
||||
templateid=self.tempobj.id,
|
||||
templateid=self.template.id,
|
||||
rootdisksize=newrootsize
|
||||
)
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -1201,48 +1201,46 @@ class TestCopyDeleteTemplate(cloudstackTestCase):
|
|||
class TestCreateTemplateWithDirectDownload(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(self):
|
||||
self.testClient = super(TestCreateTemplateWithDirectDownload, self).getClsTestClient()
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
self.dbclient = self.testClient.getDbConnection()
|
||||
self._cleanup = []
|
||||
self.templates = []
|
||||
def setUpClass(cls):
|
||||
cls.testClient = super(TestCreateTemplateWithDirectDownload, cls).getClsTestClient()
|
||||
cls.apiclient = cls.testClient.getApiClient()
|
||||
cls.dbclient = cls.testClient.getDbConnection()
|
||||
cls._cleanup = []
|
||||
cls.templates = []
|
||||
|
||||
self.services = self.testClient.getParsedTestDataConfig()
|
||||
self.unsupportedHypervisor = False
|
||||
self.hypervisor = self.testClient.getHypervisorInfo()
|
||||
if self.hypervisor.lower() not in ['kvm']:
|
||||
cls.services = cls.testClient.getParsedTestDataConfig()
|
||||
cls.unsupportedHypervisor = False
|
||||
cls.hypervisor = cls.testClient.getHypervisorInfo()
|
||||
if cls.hypervisor.lower() not in ['kvm']:
|
||||
# Direct Download is only available for KVM hypervisor
|
||||
self.unsupportedHypervisor = True
|
||||
self.skipTest("Skipping test because unsupported hypervisor\
|
||||
%s" % self.hypervisor)
|
||||
cls.unsupportedHypervisor = True
|
||||
return
|
||||
|
||||
# Get Zone, Domain and templates
|
||||
self.domain = get_domain(self.apiclient)
|
||||
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
|
||||
self.services["mode"] = self.zone.networktype
|
||||
self.services["virtual_machine"]["zoneid"] = self.zone.id
|
||||
self.account = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"],
|
||||
cls.domain = get_domain(cls.apiclient)
|
||||
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
|
||||
cls.services["mode"] = cls.zone.networktype
|
||||
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
|
||||
cls.account = Account.create(
|
||||
cls.apiclient,
|
||||
cls.services["account"],
|
||||
admin=True,
|
||||
domainid=self.domain.id
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
self._cleanup.append(self.account)
|
||||
self.user = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"],
|
||||
domainid=self.domain.id
|
||||
cls._cleanup.append(cls.account)
|
||||
cls.user = Account.create(
|
||||
cls.apiclient,
|
||||
cls.services["account"],
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
self._cleanup.append(self.user)
|
||||
self.service_offering = ServiceOffering.create(
|
||||
self.apiclient,
|
||||
self.services["service_offerings"]["tiny"]
|
||||
cls._cleanup.append(cls.user)
|
||||
cls.service_offering = ServiceOffering.create(
|
||||
cls.apiclient,
|
||||
cls.services["service_offerings"]["tiny"]
|
||||
)
|
||||
self._cleanup.append(self.service_offering)
|
||||
cls._cleanup.append(cls.service_offering)
|
||||
|
||||
self.template = {
|
||||
cls.template = {
|
||||
"name": "tiny-kvm",
|
||||
"displaytext": "tiny kvm",
|
||||
"format": "QCOW2",
|
||||
|
|
@ -1251,8 +1249,8 @@ class TestCreateTemplateWithDirectDownload(cloudstackTestCase):
|
|||
"ispublic": "True",
|
||||
"isextractable": "True",
|
||||
"checksum": "{SHA-1}" + "6952e58f39b470bd166ace11ffd20bf479bed936",
|
||||
"hypervisor": self.hypervisor,
|
||||
"zoneid": self.zone.id,
|
||||
"hypervisor": cls.hypervisor,
|
||||
"zoneid": cls.zone.id,
|
||||
"ostype": "Other Linux (64-bit)",
|
||||
"directdownload": True
|
||||
}
|
||||
|
|
@ -1273,8 +1271,7 @@ class TestCreateTemplateWithDirectDownload(cloudstackTestCase):
|
|||
self.cleanup = []
|
||||
|
||||
if self.unsupportedHypervisor:
|
||||
self.skipTest("Skipping test because unsupported hypervisor\
|
||||
%s" % self.hypervisor)
|
||||
self.skipTest("Skipping test because unsupported hypervisor %s" % self.hypervisor)
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
|
|
@ -1356,4 +1353,4 @@ class TestCreateTemplateWithDirectDownload(cloudstackTestCase):
|
|||
|
||||
self.cleanup.append(virtual_machine)
|
||||
self.cleanup.append(tmpl)
|
||||
return
|
||||
return
|
||||
|
|
|
|||
|
|
@ -247,9 +247,9 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
elif list_volume_response[0].hypervisor.lower() == "hyperv":
|
||||
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
|
||||
elif list_volume_response[0].hypervisor.lower() == "vmware":
|
||||
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sda",size_to_verify=vol_sz)
|
||||
else:
|
||||
ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
|
||||
else:
|
||||
ret = checkVolumeSize(ssh_handle=ssh,size_to_verify=vol_sz)
|
||||
self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1]))
|
||||
self.virtual_machine.detach_volume(self.apiClient, volume)
|
||||
self.assertEqual(ret[0],SUCCESS,"Check if promised disk size actually available")
|
||||
|
|
|
|||
|
|
@ -462,7 +462,7 @@ class VirtualMachine:
|
|||
hostid=None, keypair=None, ipaddress=None, mode='default',
|
||||
method='GET', hypervisor=None, customcpunumber=None,
|
||||
customcpuspeed=None, custommemory=None, rootdisksize=None,
|
||||
rootdiskcontroller=None, macaddress=None):
|
||||
rootdiskcontroller=None, macaddress=None, datadisktemplate_diskoffering_list={}):
|
||||
"""Create the instance"""
|
||||
|
||||
cmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
|
|
@ -575,6 +575,13 @@ class VirtualMachine:
|
|||
if group:
|
||||
cmd.group = group
|
||||
|
||||
cmd.datadisktemplatetodiskofferinglist = []
|
||||
for datadisktemplate, diskoffering in datadisktemplate_diskoffering_list.items():
|
||||
cmd.datadisktemplatetodiskofferinglist.append({
|
||||
'datadisktemplateid': datadisktemplate,
|
||||
'diskofferingid': diskoffering
|
||||
})
|
||||
|
||||
# program default access to ssh
|
||||
if mode.lower() == 'basic':
|
||||
cls.ssh_access_group(apiclient, cmd)
|
||||
|
|
|
|||
|
|
@ -2186,6 +2186,7 @@ var dictionary = {"ICMP.code":"ICMP Code",
|
|||
"message.no.network.support.configuration.not.true":"You do not have any zone that has security group enabled. Thus, no additional network features. Please continue to step 5.",
|
||||
"message.no.projects":"You do not have any projects.<br/>Please create a new one from the projects section.",
|
||||
"message.no.projects.adminOnly":"You do not have any projects.<br/>Please ask your administrator to create a new project.",
|
||||
"message.no.datadisk":"The multidisk template has no data disk, please continue to next step.",
|
||||
"message.number.clusters":"<h2><span> # of </span> Clusters</h2>",
|
||||
"message.number.hosts":"<h2><span> # of </span> Hosts</h2>",
|
||||
"message.number.pods":"<h2><span> # of </span> Pods</h2>",
|
||||
|
|
|
|||
|
|
@ -376,21 +376,55 @@
|
|||
|
||||
// Step 4: Data disk offering
|
||||
function(args) {
|
||||
var isRequred = (args.currentData["select-template"] == "select-iso" ? true : false);
|
||||
var isRequired = (args.currentData["select-template"] == "select-iso" ? true : false);
|
||||
$.ajax({
|
||||
url: createURL("listDiskOfferings"),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
diskOfferingObjs = json.listdiskofferingsresponse.diskoffering;
|
||||
var multiDisks = false;
|
||||
if (!isRequired) {
|
||||
$.ajax({
|
||||
url: createURL("listTemplates"),
|
||||
data: {
|
||||
id: args.currentData.templateid,
|
||||
templatefilter: 'all'
|
||||
},
|
||||
dataType: "json",
|
||||
async: false,
|
||||
success: function(json) {
|
||||
var templateDataDisks = json.listtemplatesresponse.template[0].childtemplates;
|
||||
var count = 0; if (templateDataDisks && Object.keys(templateDataDisks).length > 0) {
|
||||
multiDisks = [];
|
||||
$.each(templateDataDisks, function(index, item) {
|
||||
count = count + 1;
|
||||
multiDisks.push({
|
||||
id: item.id,
|
||||
label: item.name,
|
||||
size: item.size,
|
||||
});
|
||||
});
|
||||
if (count == 0){
|
||||
multiDisks.push({
|
||||
id: "none",
|
||||
label: "No datadisk found",
|
||||
size: "0"
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
args.response.success({
|
||||
required: isRequred,
|
||||
required: isRequired,
|
||||
customFlag: 'iscustomized', // Field determines if custom slider is shown
|
||||
customIopsDoFlag: 'iscustomizediops',
|
||||
data: {
|
||||
diskOfferings: diskOfferingObjs
|
||||
},
|
||||
multiDisk: false
|
||||
multiDisk: multiDisks
|
||||
});
|
||||
}
|
||||
});
|
||||
|
|
@ -815,6 +849,15 @@
|
|||
}
|
||||
}
|
||||
|
||||
if (args.data["disk-offerings-multi"] != null && args.data["disk-offerings-multi"].length > 0) {
|
||||
$(args.data["disk-offerings-multi"]).each(function(index, disk) {
|
||||
var diskMap = {};
|
||||
diskMap['datadiskofferinglist[' + index + '].datadisktemplateid'] = disk.id;
|
||||
diskMap['datadiskofferinglist[' + index + '].diskofferingid'] = disk._diskOfferingId;
|
||||
$.extend(deployVmData, diskMap);
|
||||
});
|
||||
}
|
||||
|
||||
//step 5: select an affinity group
|
||||
var checkedAffinityGroupIdArray;
|
||||
if (typeof(args.data["affinity-groups"]) == "object" && args.data["affinity-groups"].length != null) { //args.data["affinity-groups"] is an array of string, e.g. ["2375f8cc-8a73-4b8d-9b26-50885a25ffe0", "27c60d2a-de7f-4bb7-96e5-a602cec681df","c6301d77-99b5-4e8a-85e2-3ea2ab31c342"],
|
||||
|
|
|
|||
|
|
@ -555,21 +555,43 @@
|
|||
|
||||
$step.find('.multi-disk-select-container').remove();
|
||||
$step.removeClass('custom-disk-size');
|
||||
$step.find('.main-desc, p.no-datadisk').remove();
|
||||
|
||||
if (args.required) {
|
||||
if (!multiDisk){
|
||||
if (args.required) {
|
||||
$step.find('.section.no-thanks')
|
||||
.hide();
|
||||
$step.addClass('required');
|
||||
} else {
|
||||
$step.find('.section.no-thanks')
|
||||
.show();
|
||||
$step.removeClass('required');
|
||||
}
|
||||
} else {
|
||||
$step.find('.section.no-thanks').hide();
|
||||
$step.addClass('required');
|
||||
} else {
|
||||
$step.find('.section.no-thanks').show();
|
||||
$step.removeClass('required');
|
||||
}
|
||||
|
||||
var $selectContainer = $step.find('.content .select-container:not(.multi-disk)');
|
||||
|
||||
if (multiDisk) { // Render as multiple groups for each disk
|
||||
if (multiDisk[0].id == "none"){
|
||||
$step.find('.select-container').append(
|
||||
$('<p>').addClass('no-datadisk').html(_l('message.no.datadisk'))
|
||||
);
|
||||
return;
|
||||
}
|
||||
var $multiDiskSelect = $('<div>').addClass('multi-disk-select-container');
|
||||
|
||||
$(multiDisk).map(function(index, disk) {
|
||||
var array_do = [];
|
||||
$.each(args.data.diskOfferings, function( key, value ) {
|
||||
if (value){
|
||||
if (value.disksize >= disk.size && value.name != "Custom"){
|
||||
array_do.push(value);
|
||||
}
|
||||
}
|
||||
})
|
||||
var $group = $('<div>').addClass('disk-select-group');
|
||||
var $header = $('<div>').addClass('disk-select-header').append(
|
||||
$('<div>').addClass('title').html(disk.label)
|
||||
|
|
@ -581,7 +603,7 @@
|
|||
})
|
||||
.prependTo($header);
|
||||
var $multiSelectContainer = $selectContainer.clone().append(
|
||||
makeSelects('diskofferingid.' + disk.id, args.data.diskOfferings, {
|
||||
makeSelects('diskofferingid.' + disk.id, array_do, {
|
||||
id: 'id',
|
||||
name: 'name',
|
||||
desc: 'displaytext'
|
||||
|
|
|
|||
|
|
@ -62,7 +62,12 @@ public class HypervisorUtilsTest {
|
|||
File file = new File(filePath);
|
||||
|
||||
long startTime = setupcheckVolumeFileForActivityFile(file, _minFileSize);
|
||||
HypervisorUtils.checkVolumeFileForActivity(filePath, timeoutSeconds, thresholdMilliseconds, _minFileSize);
|
||||
try {
|
||||
HypervisorUtils.checkVolumeFileForActivity(filePath, timeoutSeconds, thresholdMilliseconds, _minFileSize);
|
||||
} catch (CloudRuntimeException ex) {
|
||||
System.out.println("fail");
|
||||
return;
|
||||
}
|
||||
long duration = System.currentTimeMillis() - startTime;
|
||||
|
||||
Assert.assertFalse("Didn't block long enough, expected at least " + thresholdMilliseconds + " and got " + duration, duration < thresholdMilliseconds);
|
||||
|
|
|
|||
|
|
@ -67,6 +67,8 @@ import com.cloud.utils.db.GlobalLock;
|
|||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.net.NetUtils;
|
||||
import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion;
|
||||
import com.vmware.vim25.OvfCreateDescriptorParams;
|
||||
import com.vmware.vim25.OvfCreateDescriptorResult;
|
||||
import com.vmware.vim25.AlreadyExistsFaultMsg;
|
||||
import com.vmware.vim25.BoolPolicy;
|
||||
import com.vmware.vim25.CustomFieldStringValue;
|
||||
|
|
@ -90,9 +92,11 @@ import com.vmware.vim25.ManagedObjectReference;
|
|||
import com.vmware.vim25.MethodFault;
|
||||
import com.vmware.vim25.NumericRange;
|
||||
import com.vmware.vim25.ObjectContent;
|
||||
import com.vmware.vim25.OptionValue;
|
||||
import com.vmware.vim25.OvfCreateImportSpecParams;
|
||||
import com.vmware.vim25.OvfCreateImportSpecResult;
|
||||
import com.vmware.vim25.OvfFileItem;
|
||||
import com.vmware.vim25.OvfFile;
|
||||
import com.vmware.vim25.ParaVirtualSCSIController;
|
||||
import com.vmware.vim25.VMwareDVSConfigSpec;
|
||||
import com.vmware.vim25.VMwareDVSPortSetting;
|
||||
|
|
@ -102,6 +106,7 @@ import com.vmware.vim25.VMwareDVSPvlanMapEntry;
|
|||
import com.vmware.vim25.VirtualBusLogicController;
|
||||
import com.vmware.vim25.VirtualController;
|
||||
import com.vmware.vim25.VirtualDevice;
|
||||
import com.vmware.vim25.VirtualDisk;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpec;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
|
||||
import com.vmware.vim25.VirtualIDEController;
|
||||
|
|
@ -113,10 +118,13 @@ import com.vmware.vim25.VirtualMachineGuestOsIdentifier;
|
|||
import com.vmware.vim25.VirtualMachineVideoCard;
|
||||
import com.vmware.vim25.VirtualSCSIController;
|
||||
import com.vmware.vim25.VirtualSCSISharing;
|
||||
import com.vmware.vim25.VirtualMachineImportSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchTrunkVlanSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
|
||||
import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec;
|
||||
import java.io.FileWriter;
|
||||
import java.util.UUID;
|
||||
|
||||
public class HypervisorHostHelper {
|
||||
private static final Logger s_logger = Logger.getLogger(HypervisorHostHelper.class);
|
||||
|
|
@ -125,6 +133,8 @@ public class HypervisorHostHelper {
|
|||
|
||||
// make vmware-base loosely coupled with cloud-specific stuff, duplicate VLAN.UNTAGGED constant here
|
||||
private static final String UNTAGGED_VLAN_NAME = "untagged";
|
||||
private static final String VMDK_PACK_DIR = "ova";
|
||||
private static final String OVA_OPTION_KEY_BOOTDISK = "cloud.ova.bootdisk";
|
||||
|
||||
public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name, String instanceNameCustomField) {
|
||||
|
||||
|
|
@ -161,6 +171,10 @@ public class HypervisorHostHelper {
|
|||
return morDs;
|
||||
}
|
||||
|
||||
public static String getSecondaryDatastoreUUID(String storeUrl) {
|
||||
return UUID.nameUUIDFromBytes(storeUrl.getBytes()).toString();
|
||||
}
|
||||
|
||||
public static DatastoreMO getHyperHostDatastoreMO(VmwareHypervisorHost hyperHost, String datastoreName) throws Exception {
|
||||
ObjectContent[] ocs = hyperHost.getDatastorePropertiesOnHyperHost(new String[] {"name"});
|
||||
if (ocs != null && ocs.length > 0) {
|
||||
|
|
@ -1705,7 +1719,6 @@ public class HypervisorHostHelper {
|
|||
importSpecParams.setDiskProvisioning(diskOption); // diskOption: thin, thick, etc
|
||||
|
||||
String ovfDescriptor = removeOVFNetwork(HttpNfcLeaseMO.readOvfContent(ovfFilePath));
|
||||
|
||||
VmwareContext context = host.getContext();
|
||||
OvfCreateImportSpecResult ovfImportResult =
|
||||
context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(), importSpecParams);
|
||||
|
|
@ -1715,7 +1728,6 @@ public class HypervisorHostHelper {
|
|||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
if(!ovfImportResult.getError().isEmpty()) {
|
||||
for (LocalizedMethodFault fault : ovfImportResult.getError()) {
|
||||
s_logger.error("createImportSpec error: " + fault.getLocalizedMessage());
|
||||
|
|
@ -1755,17 +1767,18 @@ public class HypervisorHostHelper {
|
|||
for (OvfFileItem ovfFileItem : ovfImportResult.getFileItem()) {
|
||||
if (deviceKey.equals(ovfFileItem.getDeviceId())) {
|
||||
String absoluteFile = ovfFile.getParent() + File.separator + ovfFileItem.getPath();
|
||||
String urlToPost = deviceUrl.getUrl();
|
||||
urlToPost = resolveHostNameInUrl(dcMo, urlToPost);
|
||||
|
||||
context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate<Long>() {
|
||||
@Override
|
||||
public void action(Long param) {
|
||||
progressReporter.reportProgress((int)(param * 100 / totalBytes));
|
||||
}
|
||||
});
|
||||
|
||||
bytesAlreadyWritten += ovfFileItem.getSize();
|
||||
File f = new File(absoluteFile);
|
||||
if (f.exists()){
|
||||
String urlToPost = deviceUrl.getUrl();
|
||||
urlToPost = resolveHostNameInUrl(dcMo, urlToPost);
|
||||
context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate<Long>() {
|
||||
@Override
|
||||
public void action(Long param) {
|
||||
progressReporter.reportProgress((int)(param * 100 / totalBytes));
|
||||
}
|
||||
});
|
||||
bytesAlreadyWritten += ovfFileItem.getSize();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1773,7 +1786,7 @@ public class HypervisorHostHelper {
|
|||
String erroMsg = "File upload task failed to complete due to: " + e.getMessage();
|
||||
s_logger.error(erroMsg);
|
||||
importSuccess = false; // Set flag to cleanup the stale template left due to failed import operation, if any
|
||||
throw new Exception(erroMsg);
|
||||
throw new Exception(erroMsg, e);
|
||||
} catch (Throwable th) {
|
||||
String errorMsg = "throwable caught during file upload task: " + th.getMessage();
|
||||
s_logger.error(errorMsg);
|
||||
|
|
@ -1802,6 +1815,199 @@ public class HypervisorHostHelper {
|
|||
}
|
||||
}
|
||||
|
||||
public static List<Pair<String, Boolean>> readOVF(VmwareHypervisorHost host, String ovfFilePath, DatastoreMO dsMo) throws Exception {
|
||||
List<Pair<String, Boolean>> ovfVolumeInfos = new ArrayList<Pair<String, Boolean>>();
|
||||
List<String> files = new ArrayList<String>();
|
||||
|
||||
ManagedObjectReference morRp = host.getHyperHostOwnerResourcePool();
|
||||
assert (morRp != null);
|
||||
ManagedObjectReference morHost = host.getMor();
|
||||
String importEntityName = UUID.randomUUID().toString();
|
||||
OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams();
|
||||
importSpecParams.setHostSystem(morHost);
|
||||
importSpecParams.setLocale("US");
|
||||
importSpecParams.setEntityName(importEntityName);
|
||||
importSpecParams.setDeploymentOption("");
|
||||
|
||||
String ovfDescriptor = removeOVFNetwork(HttpNfcLeaseMO.readOvfContent(ovfFilePath));
|
||||
VmwareContext context = host.getContext();
|
||||
OvfCreateImportSpecResult ovfImportResult = context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(),
|
||||
importSpecParams);
|
||||
|
||||
if (ovfImportResult == null) {
|
||||
String msg = "createImportSpec() failed. ovfFilePath: " + ovfFilePath;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
if (!ovfImportResult.getError().isEmpty()) {
|
||||
for (LocalizedMethodFault fault : ovfImportResult.getError()) {
|
||||
s_logger.error("createImportSpec error: " + fault.getLocalizedMessage());
|
||||
}
|
||||
throw new CloudException("Failed to create an import spec from " + ovfFilePath + ". Check log for details.");
|
||||
}
|
||||
|
||||
if (!ovfImportResult.getWarning().isEmpty()) {
|
||||
for (LocalizedMethodFault fault : ovfImportResult.getError()) {
|
||||
s_logger.warn("createImportSpec warning: " + fault.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
|
||||
VirtualMachineImportSpec importSpec = (VirtualMachineImportSpec)ovfImportResult.getImportSpec();
|
||||
if (importSpec == null) {
|
||||
String msg = "createImportSpec() failed to create import specification for OVF template at " + ovfFilePath;
|
||||
s_logger.error(msg);
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
File ovfFile = new File(ovfFilePath);
|
||||
for (OvfFileItem ovfFileItem : ovfImportResult.getFileItem()) {
|
||||
String absFile = ovfFile.getParent() + File.separator + ovfFileItem.getPath();
|
||||
files.add(absFile);
|
||||
}
|
||||
|
||||
|
||||
int osDiskSeqNumber = 0;
|
||||
VirtualMachineConfigSpec config = importSpec.getConfigSpec();
|
||||
String paramVal = getOVFParamValue(config);
|
||||
if (paramVal != null && !paramVal.isEmpty()) {
|
||||
try {
|
||||
osDiskSeqNumber = getOsDiskFromOvfConf(config, paramVal);
|
||||
} catch (Exception e) {
|
||||
osDiskSeqNumber = 0;
|
||||
}
|
||||
}
|
||||
|
||||
int diskCount = 0;
|
||||
int deviceCount = 0;
|
||||
List<VirtualDeviceConfigSpec> deviceConfigList = config.getDeviceChange();
|
||||
for (VirtualDeviceConfigSpec deviceSpec : deviceConfigList) {
|
||||
Boolean osDisk = false;
|
||||
VirtualDevice device = deviceSpec.getDevice();
|
||||
if (device instanceof VirtualDisk) {
|
||||
if ((osDiskSeqNumber == 0 && diskCount == 0) || osDiskSeqNumber == deviceCount) {
|
||||
osDisk = true;
|
||||
}
|
||||
Pair<String, Boolean> ovfVolumeInfo = new Pair<String, Boolean>(files.get(diskCount), osDisk);
|
||||
ovfVolumeInfos.add(ovfVolumeInfo);
|
||||
diskCount++;
|
||||
}
|
||||
deviceCount++;
|
||||
}
|
||||
return ovfVolumeInfos;
|
||||
}
|
||||
|
||||
public static void createOvfFile(VmwareHypervisorHost host, String diskFileName, String ovfName, String datastorePath, String templatePath, long diskCapacity, long fileSize,
|
||||
ManagedObjectReference morDs) throws Exception {
|
||||
VmwareContext context = host.getContext();
|
||||
ManagedObjectReference morOvf = context.getServiceContent().getOvfManager();
|
||||
VirtualMachineMO workerVmMo = HypervisorHostHelper.createWorkerVM(host, new DatastoreMO(context, morDs), ovfName);
|
||||
if (workerVmMo == null)
|
||||
throw new Exception("Unable to find just-created worker VM");
|
||||
|
||||
String[] disks = {datastorePath + File.separator + diskFileName};
|
||||
try {
|
||||
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
|
||||
VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec();
|
||||
|
||||
// Reconfigure worker VM with datadisk
|
||||
VirtualDevice device = VmwareHelper.prepareDiskDevice(workerVmMo, null, -1, disks, morDs, -1, 1);
|
||||
deviceConfigSpec.setDevice(device);
|
||||
deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD);
|
||||
vmConfigSpec.getDeviceChange().add(deviceConfigSpec);
|
||||
workerVmMo.configureVm(vmConfigSpec);
|
||||
|
||||
// Write OVF descriptor file
|
||||
OvfCreateDescriptorParams ovfDescParams = new OvfCreateDescriptorParams();
|
||||
String deviceId = File.separator + workerVmMo.getMor().getValue() + File.separator + "VirtualIDEController0:0";
|
||||
OvfFile ovfFile = new OvfFile();
|
||||
ovfFile.setPath(diskFileName);
|
||||
ovfFile.setDeviceId(deviceId);
|
||||
ovfFile.setSize(fileSize);
|
||||
ovfFile.setCapacity(diskCapacity);
|
||||
ovfDescParams.getOvfFiles().add(ovfFile);
|
||||
OvfCreateDescriptorResult ovfCreateDescriptorResult = context.getService().createDescriptor(morOvf, workerVmMo.getMor(), ovfDescParams);
|
||||
|
||||
String ovfPath = templatePath + File.separator + ovfName + ".ovf";
|
||||
try {
|
||||
FileWriter out = new FileWriter(ovfPath);
|
||||
out.write(ovfCreateDescriptorResult.getOvfDescriptor());
|
||||
out.close();
|
||||
} catch (Exception e) {
|
||||
throw e;
|
||||
}
|
||||
} finally {
|
||||
workerVmMo.detachAllDisks();
|
||||
workerVmMo.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
public static int getOsDiskFromOvfConf(VirtualMachineConfigSpec config, String deviceLocation) {
|
||||
List<VirtualDeviceConfigSpec> deviceConfigList = config.getDeviceChange();
|
||||
int controllerKey = 0;
|
||||
int deviceSeqNumber = 0;
|
||||
int controllerNumber = 0;
|
||||
int deviceNodeNumber = 0;
|
||||
int controllerCount = 0;
|
||||
String[] virtualNodeInfo = deviceLocation.split(":");
|
||||
|
||||
if (deviceLocation.startsWith("scsi")) {
|
||||
controllerNumber = Integer.parseInt(virtualNodeInfo[0].substring(4)); // get substring excluding prefix scsi
|
||||
deviceNodeNumber = Integer.parseInt(virtualNodeInfo[1]);
|
||||
|
||||
for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
|
||||
VirtualDevice device = deviceConfig.getDevice();
|
||||
if (device instanceof VirtualSCSIController) {
|
||||
if (controllerNumber == controllerCount) { //((VirtualSCSIController)device).getBusNumber()) {
|
||||
controllerKey = device.getKey();
|
||||
break;
|
||||
}
|
||||
controllerCount++;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
controllerNumber = Integer.parseInt(virtualNodeInfo[0].substring(3)); // get substring excluding prefix ide
|
||||
deviceNodeNumber = Integer.parseInt(virtualNodeInfo[1]);
|
||||
controllerCount = 0;
|
||||
|
||||
for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
|
||||
VirtualDevice device = deviceConfig.getDevice();
|
||||
if (device instanceof VirtualIDEController) {
|
||||
if (controllerNumber == controllerCount) { //((VirtualIDEController)device).getBusNumber()) {
|
||||
// Only 2 IDE controllers supported and they will have bus numbers 0 and 1
|
||||
controllerKey = device.getKey();
|
||||
break;
|
||||
}
|
||||
controllerCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Get devices on this controller at specific device node.
|
||||
for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
|
||||
VirtualDevice device = deviceConfig.getDevice();
|
||||
if (device instanceof VirtualDisk) {
|
||||
if (controllerKey == device.getControllerKey() && deviceNodeNumber == device.getUnitNumber()) {
|
||||
break;
|
||||
}
|
||||
deviceSeqNumber++;
|
||||
}
|
||||
}
|
||||
return deviceSeqNumber;
|
||||
}
|
||||
|
||||
public static String getOVFParamValue(VirtualMachineConfigSpec config) {
|
||||
String paramVal = "";
|
||||
List<OptionValue> options = config.getExtraConfig();
|
||||
for (OptionValue option : options) {
|
||||
if (OVA_OPTION_KEY_BOOTDISK.equalsIgnoreCase(option.getKey())) {
|
||||
paramVal = (String)option.getValue();
|
||||
break;
|
||||
}
|
||||
}
|
||||
return paramVal;
|
||||
}
|
||||
|
||||
|
||||
public static String getScsiController(Pair<String, String> controllerInfo, String recommendedController) {
|
||||
String rootDiskController = controllerInfo.first();
|
||||
String dataDiskController = controllerInfo.second();
|
||||
|
|
|
|||
Loading…
Reference in New Issue