diff --git a/agent/bindir/libvirtqemuhook.in b/agent/bindir/libvirtqemuhook.in
index 598968bdb54..27e07119ccc 100755
--- a/agent/bindir/libvirtqemuhook.in
+++ b/agent/bindir/libvirtqemuhook.in
@@ -78,7 +78,9 @@ def handleMigrateBegin():
def executeCustomScripts(sysArgs):
- createDirectoryIfNotExists(customDir, customDirPermissions)
+ if not os.path.exists(customDir) or not os.path.isdir(customDir):
+ return
+
scripts = getCustomScriptsFromDirectory()
for scriptName in scripts:
@@ -127,12 +129,6 @@ def getCustomScriptsFromDirectory():
os.listdir(customDir)), key=lambda fileName: substringAfter(fileName, '_'))
-def createDirectoryIfNotExists(dir, permissions):
- if not os.path.exists(dir):
- logger.info('Directory %s does not exist; creating it.' % dir)
- os.makedirs(dir, permissions)
-
-
def substringAfter(s, delimiter):
return s.partition(delimiter)[2]
diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java
index 500724dd5a3..e5fbdd7a2ed 100644
--- a/agent/src/main/java/com/cloud/agent/Agent.java
+++ b/agent/src/main/java/com/cloud/agent/Agent.java
@@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.naming.ConfigurationException;
-import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificate;
import org.apache.cloudstack.agent.lb.SetupMSListAnswer;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
import org.apache.cloudstack.ca.PostCertificateRenewalCommand;
@@ -630,8 +629,6 @@ public class Agent implements HandlerFactory, IAgentControl {
if (Host.Type.Routing.equals(_resource.getType())) {
scheduleServicesRestartTask();
}
- } else if (cmd instanceof SetupDirectDownloadCertificate) {
- answer = setupDirectDownloadCertificate((SetupDirectDownloadCertificate) cmd);
} else if (cmd instanceof SetupMSListCommand) {
answer = setupManagementServerList((SetupMSListCommand) cmd);
} else {
@@ -683,31 +680,6 @@ public class Agent implements HandlerFactory, IAgentControl {
}
}
- private Answer setupDirectDownloadCertificate(SetupDirectDownloadCertificate cmd) {
- String certificate = cmd.getCertificate();
- String certificateName = cmd.getCertificateName();
- s_logger.info("Importing certificate " + certificateName + " into keystore");
-
- final File agentFile = PropertiesUtil.findConfigFile("agent.properties");
- if (agentFile == null) {
- return new Answer(cmd, false, "Failed to find agent.properties file");
- }
-
- final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME;
-
- String cerFile = agentFile.getParent() + "/" + certificateName + ".cer";
- Script.runSimpleBashScript(String.format("echo '%s' > %s", certificate, cerFile));
-
- String privatePasswordFormat = "sed -n '/keystore.passphrase/p' '%s' 2>/dev/null | sed 's/keystore.passphrase=//g' 2>/dev/null";
- String privatePasswordCmd = String.format(privatePasswordFormat, agentFile.getAbsolutePath());
- String privatePassword = Script.runSimpleBashScript(privatePasswordCmd);
-
- String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt";
- String importCmd = String.format(importCommandFormat, cerFile, keyStoreFile, certificateName, privatePassword);
- Script.runSimpleBashScript(importCmd);
- return new Answer(cmd, true, "Certificate " + certificateName + " imported");
- }
-
public Answer setupAgentKeystore(final SetupKeyStoreCommand cmd) {
final String keyStorePassword = cmd.getKeystorePassword();
final long validityDays = cmd.getValidityDays();
diff --git a/api/src/main/java/com/cloud/network/element/DhcpServiceProvider.java b/api/src/main/java/com/cloud/network/element/DhcpServiceProvider.java
index 12830f8cec0..3f530c23d34 100644
--- a/api/src/main/java/com/cloud/network/element/DhcpServiceProvider.java
+++ b/api/src/main/java/com/cloud/network/element/DhcpServiceProvider.java
@@ -37,4 +37,6 @@ public interface DhcpServiceProvider extends NetworkElement {
boolean removeDhcpSupportForSubnet(Network network) throws ResourceUnavailableException;
boolean setExtraDhcpOptions(Network network, long nicId, Map dhcpOptions);
+
+ boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException;
}
diff --git a/api/src/main/java/com/cloud/storage/MigrationOptions.java b/api/src/main/java/com/cloud/storage/MigrationOptions.java
new file mode 100644
index 00000000000..38c1ee87bbe
--- /dev/null
+++ b/api/src/main/java/com/cloud/storage/MigrationOptions.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.storage;
+
+import java.io.Serializable;
+
+public class MigrationOptions implements Serializable {
+
+ private String srcPoolUuid;
+ private Storage.StoragePoolType srcPoolType;
+ private Type type;
+ private String srcBackingFilePath;
+ private boolean copySrcTemplate;
+ private String srcVolumeUuid;
+ private int timeout;
+
+ public enum Type {
+ LinkedClone, FullClone
+ }
+
+ public MigrationOptions() {
+ }
+
+ public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcBackingFilePath, boolean copySrcTemplate) {
+ this.srcPoolUuid = srcPoolUuid;
+ this.srcPoolType = srcPoolType;
+ this.type = Type.LinkedClone;
+ this.srcBackingFilePath = srcBackingFilePath;
+ this.copySrcTemplate = copySrcTemplate;
+ }
+
+ public MigrationOptions(String srcPoolUuid, Storage.StoragePoolType srcPoolType, String srcVolumeUuid) {
+ this.srcPoolUuid = srcPoolUuid;
+ this.srcPoolType = srcPoolType;
+ this.type = Type.FullClone;
+ this.srcVolumeUuid = srcVolumeUuid;
+ }
+
+ public String getSrcPoolUuid() {
+ return srcPoolUuid;
+ }
+
+ public Storage.StoragePoolType getSrcPoolType() {
+ return srcPoolType;
+ }
+
+ public String getSrcBackingFilePath() {
+ return srcBackingFilePath;
+ }
+
+ public boolean isCopySrcTemplate() {
+ return copySrcTemplate;
+ }
+
+ public String getSrcVolumeUuid() {
+ return srcVolumeUuid;
+ }
+
+ public Type getType() {
+ return type;
+ }
+
+ public int getTimeout() {
+ return timeout;
+ }
+
+ public void setTimeout(int timeout) {
+ this.timeout = timeout;
+ }
+}
diff --git a/api/src/main/java/com/cloud/template/TemplateApiService.java b/api/src/main/java/com/cloud/template/TemplateApiService.java
index 7348547cee0..b62628560ae 100644
--- a/api/src/main/java/com/cloud/template/TemplateApiService.java
+++ b/api/src/main/java/com/cloud/template/TemplateApiService.java
@@ -24,6 +24,7 @@ import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd;
import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.iso.UpdateIsoCmd;
import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd;
@@ -45,10 +46,12 @@ public interface TemplateApiService {
VirtualMachineTemplate registerTemplate(RegisterTemplateCmd cmd) throws URISyntaxException, ResourceAllocationException;
- public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException;
+ GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException;
VirtualMachineTemplate registerIso(RegisterIsoCmd cmd) throws IllegalArgumentException, ResourceAllocationException;
+ GetUploadParamsResponse registerIsoForPostUpload(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException, MalformedURLException;
+
VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUnavailableException, ResourceAllocationException;
VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long storageId);
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificate.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java
old mode 100644
new mode 100755
similarity index 81%
rename from api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificate.java
rename to api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java
index 60d4262546c..c93fca2d300
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificate.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java
@@ -1,90 +1,87 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.cloudstack.api.command.admin.direct.download;
-
-import com.cloud.exception.ConcurrentOperationException;
-import com.cloud.exception.InsufficientCapacityException;
-import com.cloud.exception.ResourceAllocationException;
-import com.cloud.exception.ResourceUnavailableException;
-import com.cloud.exception.NetworkRuleConflictException;
-import org.apache.cloudstack.acl.RoleType;
-import org.apache.cloudstack.api.APICommand;
-import org.apache.cloudstack.api.ApiConstants;
-import org.apache.cloudstack.api.BaseCmd;
-import org.apache.cloudstack.api.Parameter;
-import org.apache.cloudstack.api.ServerApiException;
-import org.apache.cloudstack.api.ApiErrorCode;
-import org.apache.cloudstack.api.response.SuccessResponse;
-import org.apache.cloudstack.context.CallContext;
-import org.apache.cloudstack.direct.download.DirectDownloadManager;
-import org.apache.log4j.Logger;
-
-import javax.inject.Inject;
-
-@APICommand(name = UploadTemplateDirectDownloadCertificate.APINAME,
- description = "Upload a certificate for HTTPS direct template download on KVM hosts",
- responseObject = SuccessResponse.class,
- requestHasSensitiveInfo = true,
- responseHasSensitiveInfo = true,
- since = "4.11.0",
- authorized = {RoleType.Admin})
-public class UploadTemplateDirectDownloadCertificate extends BaseCmd {
-
- @Inject
- DirectDownloadManager directDownloadManager;
-
- private static final Logger LOG = Logger.getLogger(UploadTemplateDirectDownloadCertificate.class);
- public static final String APINAME = "uploadTemplateDirectDownloadCertificate";
-
- @Parameter(name = ApiConstants.CERTIFICATE, type = BaseCmd.CommandType.STRING, required = true, length = 65535,
- description = "SSL certificate")
- private String certificate;
-
- @Parameter(name = ApiConstants.NAME , type = BaseCmd.CommandType.STRING, required = true,
- description = "Name for the uploaded certificate")
- private String name;
-
- @Parameter(name = ApiConstants.HYPERVISOR, type = BaseCmd.CommandType.STRING, required = true, description = "Hypervisor type")
- private String hypervisor;
-
- @Override
- public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
- if (!hypervisor.equalsIgnoreCase("kvm")) {
- throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Currently supporting KVM hosts only");
- }
-
- SuccessResponse response = new SuccessResponse(getCommandName());
- try {
- LOG.debug("Uploading certificate " + name + " to agents for Direct Download");
- boolean result = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor);
- response.setSuccess(result);
- setResponseObject(response);
- } catch (Exception e) {
- throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
- }
- }
-
- @Override
- public String getCommandName() {
- return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
- }
-
- @Override
- public long getEntityOwnerId() {
- return CallContext.current().getCallingAccount().getId();
- }
-}
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.admin.direct.download;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.cloudstack.direct.download.DirectDownloadManager;
+import org.apache.log4j.Logger;
+
+import javax.inject.Inject;
+
+@APICommand(name = UploadTemplateDirectDownloadCertificateCmd.APINAME,
+ description = "Upload a certificate for HTTPS direct template download on KVM hosts",
+ responseObject = SuccessResponse.class,
+ requestHasSensitiveInfo = true,
+ responseHasSensitiveInfo = true,
+ since = "4.11.0",
+ authorized = {RoleType.Admin})
+public class UploadTemplateDirectDownloadCertificateCmd extends BaseCmd {
+
+ @Inject
+ DirectDownloadManager directDownloadManager;
+
+ private static final Logger LOG = Logger.getLogger(UploadTemplateDirectDownloadCertificateCmd.class);
+ public static final String APINAME = "uploadTemplateDirectDownloadCertificate";
+
+ @Parameter(name = ApiConstants.CERTIFICATE, type = BaseCmd.CommandType.STRING, required = true, length = 65535,
+ description = "SSL certificate")
+ private String certificate;
+
+ @Parameter(name = ApiConstants.NAME , type = BaseCmd.CommandType.STRING, required = true,
+ description = "Name for the uploaded certificate")
+ private String name;
+
+ @Parameter(name = ApiConstants.HYPERVISOR, type = BaseCmd.CommandType.STRING, required = true, description = "Hypervisor type")
+ private String hypervisor;
+
+ @Override
+ public void execute() {
+ if (!hypervisor.equalsIgnoreCase("kvm")) {
+ throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Currently supporting KVM hosts only");
+ }
+
+ try {
+ LOG.debug("Uploading certificate " + name + " to agents for Direct Download");
+ boolean result = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor);
+ SuccessResponse response = new SuccessResponse(getCommandName());
+ response.setSuccess(result);
+ setResponseObject(response);
+ } catch (Exception e) {
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+ }
+ }
+
+ @Override
+ public String getCommandName() {
+ return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ return CallContext.current().getCallingAccount().getId();
+ }
+}
+
+
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java
index d590081104a..d25d167636f 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java
@@ -86,6 +86,13 @@ public class ListPublicIpAddressesCmd extends BaseListTaggedResourcesCmd {
description = "lists all public IP addresses associated to the network specified")
private Long associatedNetworkId;
+ @Parameter(name = ApiConstants.NETWORK_ID,
+ type = CommandType.UUID,
+ entityType = NetworkResponse.class,
+ description = "lists all public IP addresses by source network ID",
+ since = "4.13.0")
+ private Long networkId;
+
@Parameter(name = ApiConstants.IS_SOURCE_NAT, type = CommandType.BOOLEAN, description = "list only source NAT IP addresses")
private Boolean isSourceNat;
@@ -133,6 +140,10 @@ public class ListPublicIpAddressesCmd extends BaseListTaggedResourcesCmd {
return associatedNetworkId;
}
+ public Long getNetworkId() {
+ return networkId;
+ }
+
public Boolean isSourceNat() {
return isSourceNat;
}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java
new file mode 100644
index 00000000000..92e3b979885
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.iso;
+
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.NetworkRuleConflictException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.exception.ResourceUnavailableException;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.AbstractGetUploadParamsCmd;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.GetUploadParamsResponse;
+import org.apache.cloudstack.api.response.GuestOSResponse;
+import org.apache.cloudstack.api.response.ZoneResponse;
+import org.apache.cloudstack.context.CallContext;
+
+import java.net.MalformedURLException;
+
+@APICommand(name = GetUploadParamsForIsoCmd.APINAME,
+ description = "upload an existing ISO into the CloudStack cloud.",
+ responseObject = GetUploadParamsResponse.class, since = "4.13",
+ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
+public class GetUploadParamsForIsoCmd extends AbstractGetUploadParamsCmd {
+
+ public static final String APINAME = "getUploadParamsForIso";
+
+ private static final String s_name = "postuploadisoresponse";
+
+ /////////////////////////////////////////////////////
+ //////////////// API parameters /////////////////////
+ /////////////////////////////////////////////////////
+
+ @Parameter(name = ApiConstants.BOOTABLE, type = BaseCmd.CommandType.BOOLEAN, description = "true if this ISO is bootable. If not passed explicitly its assumed to be true")
+ private Boolean bootable;
+
+ @Parameter(name = ApiConstants.DISPLAY_TEXT,
+ type = BaseCmd.CommandType.STRING,
+ required = true,
+ description = "the display text of the ISO. This is usually used for display purposes.",
+ length = 4096)
+ private String displayText;
+
+ @Parameter(name = ApiConstants.IS_FEATURED, type = BaseCmd.CommandType.BOOLEAN, description = "true if you want this ISO to be featured")
+ private Boolean featured;
+
+ @Parameter(name = ApiConstants.IS_PUBLIC,
+ type = BaseCmd.CommandType.BOOLEAN,
+ description = "true if you want to register the ISO to be publicly available to all users, false otherwise.")
+ private Boolean publicIso;
+
+ @Parameter(name = ApiConstants.IS_EXTRACTABLE, type = BaseCmd.CommandType.BOOLEAN, description = "true if the ISO or its derivatives are extractable; default is false")
+ private Boolean extractable;
+
+ @Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, required = true, description = "the name of the ISO")
+ private String isoName;
+
+ @Parameter(name = ApiConstants.OS_TYPE_ID,
+ type = BaseCmd.CommandType.UUID,
+ entityType = GuestOSResponse.class,
+ description = "the ID of the OS type that best represents the OS of this ISO. If the ISO is bootable this parameter needs to be passed")
+ private Long osTypeId;
+
+ @Parameter(name=ApiConstants.ZONE_ID, type= BaseCmd.CommandType.UUID, entityType = ZoneResponse.class,
+ required=true, description="the ID of the zone you wish to register the ISO to.")
+ protected Long zoneId;
+
+ /////////////////////////////////////////////////////
+ /////////////////// Accessors ///////////////////////
+ /////////////////////////////////////////////////////
+
+ public Boolean isBootable() {
+ return bootable;
+ }
+
+ public String getDisplayText() {
+ return displayText;
+ }
+
+ public Boolean isFeatured() {
+ return featured;
+ }
+
+ public Boolean isPublic() {
+ return publicIso;
+ }
+
+ public Boolean isExtractable() {
+ return extractable;
+ }
+
+ public String getIsoName() {
+ return isoName;
+ }
+
+ public Long getOsTypeId() {
+ return osTypeId;
+ }
+
+ public Long getZoneId() {
+ return zoneId;
+ }
+
+ /////////////////////////////////////////////////////
+ /////////////// API Implementation///////////////////
+ /////////////////////////////////////////////////////
+
+ @Override
+ public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
+ validateRequest();
+ try {
+ GetUploadParamsResponse response = _templateService.registerIsoForPostUpload(this);
+ response.setResponseName(getCommandName());
+ setResponseObject(response);
+ } catch (ResourceAllocationException | MalformedURLException e) {
+            s_logger.error("Exception while registering ISO", e);
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Exception while registering ISO: " + e.getMessage());
+ }
+ }
+
+ private void validateRequest() {
+ if (getZoneId() <= 0) {
+ throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid zoneid");
+ }
+ }
+
+ @Override
+ public String getCommandName() {
+ return s_name;
+ }
+
+ @Override
+ public long getEntityOwnerId() {
+ Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true);
+ if (accountId == null) {
+ return CallContext.current().getCallingAccount().getId();
+ }
+ return accountId;
+ }
+}
\ No newline at end of file
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java
index 8c2a7e41754..b77b9854590 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java
@@ -16,15 +16,6 @@
// under the License.
package org.apache.cloudstack.api.command.user.snapshot;
-import com.cloud.event.EventTypes;
-import com.cloud.exception.InvalidParameterValueException;
-import com.cloud.exception.PermissionDeniedException;
-import com.cloud.exception.ResourceAllocationException;
-import com.cloud.projects.Project;
-import com.cloud.storage.Snapshot;
-import com.cloud.storage.Volume;
-import com.cloud.user.Account;
-import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandJobType;
import org.apache.cloudstack.api.ApiConstants;
@@ -39,6 +30,16 @@ import org.apache.cloudstack.api.response.SnapshotResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.log4j.Logger;
+import com.cloud.event.EventTypes;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.PermissionDeniedException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.projects.Project;
+import com.cloud.storage.Snapshot;
+import com.cloud.storage.Volume;
+import com.cloud.user.Account;
+import com.cloud.utils.exception.CloudRuntimeException;
+
@APICommand(name = "createSnapshot", description = "Creates an instant snapshot of a volume.", responseObject = SnapshotResponse.class, entityType = {Snapshot.class},
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
@@ -171,7 +172,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
@Override
public String getEventDescription() {
- return "creating snapshot for volume: " + this._uuidMgr.getUuid(Volume.class, getVolumeId());
+ return "creating snapshot for volume: " + getVolumeUuid();
}
@Override
@@ -186,7 +187,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
setEntityId(snapshot.getId());
setEntityUuid(snapshot.getUuid());
} else {
- throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot");
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot for volume " + getVolumeUuid());
}
}
@@ -202,10 +203,10 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
response.setResponseName(getCommandName());
setResponseObject(response);
} else {
- throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeId());
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid());
}
} catch (Exception e) {
- throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeId());
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid());
}
}
@@ -249,4 +250,8 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
return asyncBackup;
}
}
+
+ protected String getVolumeUuid() {
+ return _uuidMgr.getUuid(Volume.class, getVolumeId());
+ }
}
diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java
index bc2beb6cb66..ceb63ab6e56 100644
--- a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java
+++ b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java
@@ -16,11 +16,12 @@
// under the License.
package org.apache.cloudstack.api.command.test;
-import com.cloud.storage.Snapshot;
-import com.cloud.storage.VolumeApiService;
-import com.cloud.user.Account;
-import com.cloud.user.AccountService;
-import junit.framework.TestCase;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.isNull;
+
import org.apache.cloudstack.api.ResponseGenerator;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotCmd;
@@ -32,11 +33,12 @@ import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.isNull;
+import com.cloud.storage.Snapshot;
+import com.cloud.storage.VolumeApiService;
+import com.cloud.user.Account;
+import com.cloud.user.AccountService;
+
+import junit.framework.TestCase;
public class CreateSnapshotCmdTest extends TestCase {
@@ -66,6 +68,11 @@ public class CreateSnapshotCmdTest extends TestCase {
public long getEntityOwnerId(){
return 1L;
}
+
+ @Override
+ protected String getVolumeUuid() {
+ return "123";
+ }
};
}
@@ -126,7 +133,7 @@ public class CreateSnapshotCmdTest extends TestCase {
try {
createSnapshotCmd.execute();
} catch (ServerApiException exception) {
- Assert.assertEquals("Failed to create snapshot due to an internal error creating snapshot for volume 1", exception.getDescription());
+ Assert.assertEquals("Failed to create snapshot due to an internal error creating snapshot for volume 123", exception.getDescription());
}
}
}
diff --git a/client/conf/ehcache.xml.in b/client/conf/ehcache.xml.in
index 19bfd0f6967..4f25978ed00 100755
--- a/client/conf/ehcache.xml.in
+++ b/client/conf/ehcache.xml.in
@@ -163,14 +163,6 @@ under the License.
used.
-->
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
diff --git a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
index 65e222df10a..982a386161b 100644
--- a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
+++ b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java
@@ -30,6 +30,7 @@ import com.cloud.upgrade.dao.DbUpgrade;
import com.cloud.upgrade.dao.Upgrade41000to41100;
import com.cloud.upgrade.dao.Upgrade41100to41110;
import com.cloud.upgrade.dao.Upgrade41110to41120;
+import com.cloud.upgrade.dao.Upgrade41120to41130;
import com.cloud.upgrade.dao.Upgrade41120to41200;
import com.cloud.upgrade.dao.Upgrade452to453;
import com.cloud.upgrade.dao.Upgrade453to460;
@@ -98,10 +99,11 @@ public class DatabaseUpgradeCheckerTest {
assertTrue(upgrades[0] instanceof Upgrade41000to41100);
assertTrue(upgrades[1] instanceof Upgrade41100to41110);
assertTrue(upgrades[2] instanceof Upgrade41110to41120);
- assertTrue(upgrades[3] instanceof Upgrade41120to41200);
+ assertTrue(upgrades[3] instanceof Upgrade41120to41130);
+ assertTrue(upgrades[4] instanceof Upgrade41120to41200);
assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange()));
- assertEquals(currentVersion.toString(), upgrades[3].getUpgradedVersion());
+ assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion());
}
diff --git a/engine/storage/configdrive/test/org/apache/cloudstack/storage/configdrive/ConfigDriveTest.java b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveTest.java
similarity index 100%
rename from engine/storage/configdrive/test/org/apache/cloudstack/storage/configdrive/ConfigDriveTest.java
rename to engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveTest.java
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
index 1bbe1770809..e42715a1e6d 100644
--- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
@@ -24,6 +24,8 @@ import java.util.Set;
import javax.inject.Inject;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
@@ -54,6 +56,7 @@ import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachineManager;
+import org.apache.commons.collections.MapUtils;
/**
* Extends {@link StorageSystemDataMotionStrategy}, allowing KVM hosts to migrate VMs with the ROOT volume on a non managed local storage pool.
@@ -77,14 +80,16 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
* Note that the super implementation (override) is called by {@link #canHandle(Map, Host, Host)} which ensures that {@link #internalCanHandle(Map)} will be executed only if the source host is KVM.
*/
@Override
- protected StrategyPriority internalCanHandle(Map volumeMap) {
- if (super.internalCanHandle(volumeMap) == StrategyPriority.CANT_HANDLE) {
- Set volumeInfoSet = volumeMap.keySet();
+ protected StrategyPriority internalCanHandle(Map volumeMap, Host srcHost, Host destHost) {
+ if (super.internalCanHandle(volumeMap, srcHost, destHost) == StrategyPriority.CANT_HANDLE) {
+ if (canHandleKVMNonManagedLiveNFSStorageMigration(volumeMap, srcHost, destHost) == StrategyPriority.CANT_HANDLE) {
+ Set volumeInfoSet = volumeMap.keySet();
- for (VolumeInfo volumeInfo : volumeInfoSet) {
- StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
- if (storagePoolVO.getPoolType() != StoragePoolType.Filesystem && storagePoolVO.getPoolType() != StoragePoolType.NetworkFilesystem) {
- return StrategyPriority.CANT_HANDLE;
+ for (VolumeInfo volumeInfo : volumeInfoSet) {
+ StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
+ if (storagePoolVO.getPoolType() != StoragePoolType.Filesystem && storagePoolVO.getPoolType() != StoragePoolType.NetworkFilesystem) {
+ return StrategyPriority.CANT_HANDLE;
+ }
}
}
return StrategyPriority.HYPERVISOR;
@@ -92,6 +97,52 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
return StrategyPriority.CANT_HANDLE;
}
+ /**
+ * Allow KVM live storage migration for non-managed storage when:
+ * - Source host and destination host are different, and are on the same cluster
+ * - Source and destination storage are NFS
+ * - Destination storage is cluster-wide
+ */
+ protected StrategyPriority canHandleKVMNonManagedLiveNFSStorageMigration(Map volumeMap,
+ Host srcHost, Host destHost) {
+ if (srcHost.getId() != destHost.getId() &&
+ srcHost.getClusterId().equals(destHost.getClusterId()) &&
+ isSourceNfsPrimaryStorage(volumeMap) &&
+ isDestinationNfsPrimaryStorageClusterWide(volumeMap)) {
+ return StrategyPriority.HYPERVISOR;
+ }
+ return StrategyPriority.CANT_HANDLE;
+ }
+
+ /**
+ * True if the volumes' source storage pools are NFS
+ */
+ protected boolean isSourceNfsPrimaryStorage(Map volumeMap) {
+ if (MapUtils.isNotEmpty(volumeMap)) {
+ for (VolumeInfo volumeInfo : volumeMap.keySet()) {
+ StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId());
+ return storagePoolVO != null &&
+ storagePoolVO.getPoolType() == Storage.StoragePoolType.NetworkFilesystem;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * True if destination storage is cluster-wide NFS
+ */
+ protected boolean isDestinationNfsPrimaryStorageClusterWide(Map volumeMap) {
+ if (MapUtils.isNotEmpty(volumeMap)) {
+ for (DataStore dataStore : volumeMap.values()) {
+ StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStore.getId());
+ return storagePoolVO != null &&
+ storagePoolVO.getPoolType() == Storage.StoragePoolType.NetworkFilesystem &&
+ storagePoolVO.getScope() == ScopeType.CLUSTER;
+ }
+ }
+ return false;
+ }
+
/**
* Configures a {@link MigrateDiskInfo} object configured for migrating a File System volume and calls rootImageProvisioning.
*/
@@ -135,7 +186,7 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot
*/
@Override
protected boolean shouldMigrateVolume(StoragePoolVO sourceStoragePool, Host destHost, StoragePoolVO destStoragePool) {
- return sourceStoragePool.getPoolType() == StoragePoolType.Filesystem;
+ return sourceStoragePool.getPoolType() == StoragePoolType.Filesystem || sourceStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem;
}
/**
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
index 1f3368fd1bb..45cd2954f15 100644
--- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java
@@ -18,62 +18,18 @@
*/
package org.apache.cloudstack.storage.motion;
-import com.cloud.agent.AgentManager;
-import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.storage.CopyVolumeAnswer;
-import com.cloud.agent.api.storage.CopyVolumeCommand;
-import com.cloud.agent.api.MigrateAnswer;
-import com.cloud.agent.api.MigrateCommand;
-import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
-import com.cloud.agent.api.ModifyTargetsAnswer;
-import com.cloud.agent.api.ModifyTargetsCommand;
-import com.cloud.agent.api.PrepareForMigrationCommand;
-import com.cloud.agent.api.storage.MigrateVolumeAnswer;
-import com.cloud.agent.api.storage.MigrateVolumeCommand;
-import com.cloud.agent.api.to.DataStoreTO;
-import com.cloud.agent.api.to.DataTO;
-import com.cloud.agent.api.to.DiskTO;
-import com.cloud.agent.api.to.NfsTO;
-import com.cloud.agent.api.to.VirtualMachineTO;
-import com.cloud.configuration.Config;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.exception.AgentUnavailableException;
-import com.cloud.exception.OperationTimedoutException;
-import com.cloud.host.Host;
-import com.cloud.host.HostVO;
-import com.cloud.host.dao.HostDao;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.resource.ResourceState;
-import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.DiskOfferingVO;
-import com.cloud.storage.Snapshot;
-import com.cloud.storage.SnapshotVO;
-import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.storage.Storage.StoragePoolType;
-import com.cloud.storage.StorageManager;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.VolumeDetailVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.DiskOfferingDao;
-import com.cloud.storage.dao.GuestOSCategoryDao;
-import com.cloud.storage.dao.GuestOSDao;
-import com.cloud.storage.dao.SnapshotDao;
-import com.cloud.storage.dao.SnapshotDetailsDao;
-import com.cloud.storage.dao.SnapshotDetailsVO;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.dao.VolumeDetailsDao;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.db.GlobalLock;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineManager;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.dao.VMInstanceDao;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
-import com.google.common.base.Preconditions;
+import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
@@ -86,6 +42,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
@@ -111,25 +68,73 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.MigrateAnswer;
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
+import com.cloud.agent.api.ModifyTargetsAnswer;
+import com.cloud.agent.api.ModifyTargetsCommand;
+import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand;
+import com.cloud.agent.api.storage.CopyVolumeAnswer;
+import com.cloud.agent.api.storage.CopyVolumeCommand;
+import com.cloud.agent.api.storage.MigrateVolumeAnswer;
+import com.cloud.agent.api.storage.MigrateVolumeCommand;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.agent.api.to.NfsTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.configuration.Config;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.resource.ResourceState;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.MigrationOptions;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Snapshot;
+import com.cloud.storage.SnapshotVO;
+import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateStoragePoolVO;
+import com.cloud.storage.VMTemplateStorageResourceAssoc;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.GuestOSCategoryDao;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.SnapshotDetailsDao;
+import com.cloud.storage.dao.SnapshotDetailsVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplatePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.google.common.base.Preconditions;
-import javax.inject.Inject;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-@Component
public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class);
private static final Random RANDOM = new Random(System.nanoTime());
@@ -176,6 +181,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
private StorageCacheManager cacheMgr;
@Inject
private EndPointSelector selector;
+ @Inject
+ VMTemplatePoolDao templatePoolDao;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
@@ -272,7 +279,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
@Override
public final StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) {
if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) {
- return internalCanHandle(volumeMap);
+ return internalCanHandle(volumeMap, srcHost, destHost);
}
return StrategyPriority.CANT_HANDLE;
}
@@ -280,7 +287,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
/**
* Handles migrating volumes on managed Storage.
*/
- protected StrategyPriority internalCanHandle(Map volumeMap) {
+ protected StrategyPriority internalCanHandle(Map volumeMap, Host srcHost, Host destHost) {
Set volumeInfoSet = volumeMap.keySet();
for (VolumeInfo volumeInfo : volumeInfoSet) {
@@ -299,6 +306,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (storagePoolVO.isManaged()) {
return StrategyPriority.HIGHEST;
}
+
}
return StrategyPriority.CANT_HANDLE;
}
@@ -1698,6 +1706,50 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return _snapshotDetailsDao.persist(snapshotDetails);
}
+ /**
+ * Return expected MigrationOptions for a linked clone volume live storage migration
+ */
+ protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
+ VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(destVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
+ boolean updateBackingFileReference = ref == null;
+ String backingFile = ref != null ? ref.getInstallPath() : srcVolumeBackingFile;
+ return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference);
+ }
+
+ /**
+ * Return expected MigrationOptions for a full clone volume live storage migration
+ */
+ protected MigrationOptions createFullCloneMigrationOptions(VolumeInfo srcVolumeInfo, VirtualMachineTO vmTO, Host srcHost, String srcPoolUuid, Storage.StoragePoolType srcPoolType) {
+ return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeInfo.getPath());
+ }
+
+ /**
+ * Prepare hosts for KVM live storage migration depending on volume type by setting MigrationOptions on destination volume:
+ * - Linked clones (backing file on disk): Decide if the template (backing file) should be copied to the destination storage prior to disk creation
+ * - Full clones (no backing file): Take a snapshot of the VM prior to disk creation
+ * Return this information
+ */
+ protected void setVolumeMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo,
+ VirtualMachineTO vmTO, Host srcHost, StoragePoolVO destStoragePool) {
+ if (!destStoragePool.isManaged()) {
+ String srcVolumeBackingFile = getVolumeBackingFile(srcVolumeInfo);
+
+ String srcPoolUuid = srcVolumeInfo.getDataStore().getUuid();
+ StoragePoolVO srcPool = _storagePoolDao.findById(srcVolumeInfo.getPoolId());
+ Storage.StoragePoolType srcPoolType = srcPool.getPoolType();
+
+ MigrationOptions migrationOptions;
+ if (StringUtils.isNotBlank(srcVolumeBackingFile)) {
+ migrationOptions = createLinkedCloneMigrationOptions(srcVolumeInfo, destVolumeInfo,
+ srcVolumeBackingFile, srcPoolUuid, srcPoolType);
+ } else {
+ migrationOptions = createFullCloneMigrationOptions(srcVolumeInfo, vmTO, srcHost, srcPoolUuid, srcPoolType);
+ }
+ migrationOptions.setTimeout(StorageManager.KvmStorageOnlineMigrationWait.value());
+ destVolumeInfo.setMigrationOptions(migrationOptions);
+ }
+ }
+
/**
* For each disk to migrate:
*
@@ -1716,7 +1768,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
throw new CloudRuntimeException("Invalid hypervisor type (only KVM supported for this operation at the time being)");
}
- verifyLiveMigrationMapForKVM(volumeDataStoreMap);
+ verifyLiveMigrationForKVM(volumeDataStoreMap, destHost);
VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
vmTO.setState(vmInstance.getState());
@@ -1725,6 +1777,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
Map migrateStorage = new HashMap<>();
Map srcVolumeInfoToDestVolumeInfo = new HashMap<>();
+ boolean managedStorageDestination = false;
for (Map.Entry entry : volumeDataStoreMap.entrySet()) {
VolumeInfo srcVolumeInfo = entry.getKey();
DataStore destDataStore = entry.getValue();
@@ -1749,15 +1802,23 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// move the volume from Ready to Migrating
destVolumeInfo.processEvent(Event.MigrationRequested);
+ setVolumeMigrationOptions(srcVolumeInfo, destVolumeInfo, vmTO, srcHost, destStoragePool);
+
// create a volume on the destination storage
destDataStore.getDriver().createAsync(destDataStore, destVolumeInfo, null);
+ managedStorageDestination = destStoragePool.isManaged();
+ String volumeIdentifier = managedStorageDestination ? destVolumeInfo.get_iScsiName() : destVolumeInfo.getUuid();
+
destVolume = _volumeDao.findById(destVolume.getId());
+ destVolume.setPath(volumeIdentifier);
setVolumePath(destVolume);
_volumeDao.update(destVolume.getId(), destVolume);
+ postVolumeCreationActions(srcVolumeInfo, destVolumeInfo, vmTO, srcHost);
+
destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION);
@@ -1766,9 +1827,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
String destPath = generateDestPath(destHost, destStoragePool, destVolumeInfo);
- MigrateCommand.MigrateDiskInfo migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
- migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
- migrateDiskInfoList.add(migrateDiskInfo);
+ MigrateCommand.MigrateDiskInfo migrateDiskInfo;
+ if (managedStorageDestination) {
+ migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath);
+ migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
+ migrateDiskInfoList.add(migrateDiskInfo);
+ } else {
+ migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
+ MigrateCommand.MigrateDiskInfo.DiskType.FILE,
+ MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
+ MigrateCommand.MigrateDiskInfo.Source.FILE,
+ connectHostToVolume(destHost, destVolumeInfo.getPoolId(), volumeIdentifier));
+ }
migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
@@ -1795,15 +1865,12 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(), destHost.getPrivateIpAddress(), isWindows, vmTO, true);
-
migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
-
migrateCommand.setMigrateStorage(migrateStorage);
migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);
+ migrateCommand.setMigrateStorageManaged(managedStorageDestination);
- String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
- boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
-
+ boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
migrateCommand.setAutoConvergence(kvmAutoConvergence);
MigrateAnswer migrateAnswer = (MigrateAnswer)agentManager.send(srcHost.getId(), migrateCommand);
@@ -1863,7 +1930,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
* Configures a {@link MigrateDiskInfo} object with disk type of BLOCK, Driver type RAW and Source DEV
*/
protected MigrateCommand.MigrateDiskInfo configureMigrateDiskInfo(VolumeInfo srcVolumeInfo, String destPath) {
- return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(), MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, MigrateCommand.MigrateDiskInfo.DriverType.RAW,
+ return new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
+ MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
+ MigrateCommand.MigrateDiskInfo.DriverType.RAW,
MigrateCommand.MigrateDiskInfo.Source.DEV, destPath);
}
@@ -1883,6 +1952,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
// This method is used by classes that extend this one
}
+ /*
+ * Return backing file for volume (if any), only for KVM volumes
+ */
+ private String getVolumeBackingFile(VolumeInfo srcVolumeInfo) {
+ if (srcVolumeInfo.getHypervisorType() == HypervisorType.KVM &&
+ srcVolumeInfo.getTemplateId() != null && srcVolumeInfo.getPoolId() != null) {
+ VMTemplateVO template = _vmTemplateDao.findById(srcVolumeInfo.getTemplateId());
+ if (template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) {
+ VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
+ return ref != null ? ref.getInstallPath() : null;
+ }
+ }
+ return null;
+ }
+
private void handlePostMigration(boolean success, Map srcVolumeInfoToDestVolumeInfo, VirtualMachineTO vmTO, Host destHost) {
if (!success) {
try {
@@ -2046,10 +2130,40 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
return modifyTargetsAnswer.getConnectedPaths();
}
+ /**
+ * Update reference on template_spool_ref table of copied template to destination storage
+ */
+ protected void updateCopiedTemplateReference(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) {
+ VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId());
+ VMTemplateStoragePoolVO newRef = new VMTemplateStoragePoolVO(destVolumeInfo.getPoolId(), ref.getTemplateId());
+ newRef.setDownloadPercent(100);
+ newRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
+ newRef.setState(ObjectInDataStoreStateMachine.State.Ready);
+ newRef.setTemplateSize(ref.getTemplateSize());
+ newRef.setLocalDownloadPath(ref.getLocalDownloadPath());
+ newRef.setInstallPath(ref.getInstallPath());
+ templatePoolDao.persist(newRef);
+ }
+
+ /**
+ * Handle post destination volume creation actions depending on the migrating volume type: full clone or linked clone
+ */
+ protected void postVolumeCreationActions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, VirtualMachineTO vmTO, Host srcHost) {
+ MigrationOptions migrationOptions = destVolumeInfo.getMigrationOptions();
+ if (migrationOptions != null) {
+ if (migrationOptions.getType() == MigrationOptions.Type.LinkedClone && migrationOptions.isCopySrcTemplate()) {
+ updateCopiedTemplateReference(srcVolumeInfo, destVolumeInfo);
+ }
+ }
+ }
+
/*
- * At a high level: The source storage cannot be managed and the destination storage must be managed.
+ * At a high level: The source storage cannot be managed, and
+ * the destination storage pools must be either all managed or all non-managed, never mixed.
*/
- private void verifyLiveMigrationMapForKVM(Map volumeDataStoreMap) {
+ protected void verifyLiveMigrationForKVM(Map volumeDataStoreMap, Host destHost) {
+ Boolean storageTypeConsistency = null;
+ Map sourcePools = new HashMap<>();
for (Map.Entry entry : volumeDataStoreMap.entrySet()) {
VolumeInfo volumeInfo = entry.getKey();
@@ -2070,6 +2184,47 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
if (destStoragePoolVO == null) {
throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located.");
}
+
+ if (storageTypeConsistency == null) {
+ storageTypeConsistency = destStoragePoolVO.isManaged();
+ } else if (storageTypeConsistency != destStoragePoolVO.isManaged()) {
+ throw new CloudRuntimeException("Destination storage pools must be either all managed or all not managed");
+ }
+
+ if (!destStoragePoolVO.isManaged()) {
+ if (destStoragePoolVO.getPoolType() == StoragePoolType.NetworkFilesystem &&
+ destStoragePoolVO.getScope() != ScopeType.CLUSTER) {
+ throw new CloudRuntimeException("KVM live storage migrations currently support cluster-wide " +
+ "not managed NFS destination storage");
+ }
+ if (!sourcePools.containsKey(srcStoragePoolVO.getUuid())) {
+ sourcePools.put(srcStoragePoolVO.getUuid(), srcStoragePoolVO.getPoolType());
+ }
+ }
+ }
+ verifyDestinationStorage(sourcePools, destHost);
+ }
+
+ /**
+ * Perform storage validation on destination host for KVM live storage migrations.
+ * Validate that the volumes' source storage pools are mounted on the destination host prior to the migration
+ * @throws CloudRuntimeException if any source storage pool is not mounted on the destination host
+ */
+ private void verifyDestinationStorage(Map sourcePools, Host destHost) {
+ if (MapUtils.isNotEmpty(sourcePools)) {
+ LOGGER.debug("Verifying source pools are already available on destination host " + destHost.getUuid());
+ CheckStorageAvailabilityCommand cmd = new CheckStorageAvailabilityCommand(sourcePools);
+ try {
+ Answer answer = agentManager.send(destHost.getId(), cmd);
+ if (answer == null || !answer.getResult()) {
+ throw new CloudRuntimeException("Storage verification failed on host "
+ + destHost.getUuid() +": " + answer.getDetails());
+ }
+ } catch (AgentUnavailableException | OperationTimedoutException e) {
+ e.printStackTrace();
+ throw new CloudRuntimeException("Cannot perform storage verification on host " + destHost.getUuid() +
+ "due to: " + e.getMessage());
+ }
}
}
diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
index 89a278349f2..5b8d3aff2b8 100644
--- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
+++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
@@ -21,6 +21,10 @@ package org.apache.cloudstack.storage.motion;
import java.util.HashMap;
import java.util.Map;
+import com.cloud.host.Host;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
@@ -38,6 +42,7 @@ import org.apache.cloudstack.storage.image.store.ImageStoreImpl;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.junit.Assert;
+import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InOrder;
@@ -53,7 +58,6 @@ import com.cloud.agent.api.MigrateCommand;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.CloudException;
import com.cloud.exception.OperationTimedoutException;
-import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.DataStoreRole;
@@ -66,6 +70,9 @@ import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VirtualMachineManager;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
@RunWith(MockitoJUnitRunner.class)
public class KvmNonManagedStorageSystemDataMotionTest {
@@ -90,6 +97,36 @@ public class KvmNonManagedStorageSystemDataMotionTest {
@InjectMocks
private KvmNonManagedStorageDataMotionStrategy kvmNonManagedStorageDataMotionStrategy;
+ @Mock
+ VolumeInfo volumeInfo1;
+ @Mock
+ VolumeInfo volumeInfo2;
+ @Mock
+ DataStore dataStore1;
+ @Mock
+ DataStore dataStore2;
+ @Mock
+ DataStore dataStore3;
+ @Mock
+ StoragePoolVO pool1;
+ @Mock
+ StoragePoolVO pool2;
+ @Mock
+ StoragePoolVO pool3;
+ @Mock
+ Host host1;
+ @Mock
+ Host host2;
+
+ Map migrationMap;
+
+ private static final Long POOL_1_ID = 1L;
+ private static final Long POOL_2_ID = 2L;
+ private static final Long POOL_3_ID = 3L;
+ private static final Long HOST_1_ID = 1L;
+ private static final Long HOST_2_ID = 2L;
+ private static final Long CLUSTER_ID = 1L;
+
@Test
public void canHandleTestExpectHypervisorStrategyForKvm() {
canHandleExpectCannotHandle(HypervisorType.KVM, 1, StrategyPriority.HYPERVISOR);
@@ -109,12 +146,13 @@ public class KvmNonManagedStorageSystemDataMotionTest {
private void canHandleExpectCannotHandle(HypervisorType hypervisorType, int times, StrategyPriority expectedStrategyPriority) {
HostVO srcHost = new HostVO("sourceHostUuid");
+ HostVO destHost = new HostVO("destHostUuid");
srcHost.setHypervisorType(hypervisorType);
- Mockito.doReturn(StrategyPriority.HYPERVISOR).when(kvmNonManagedStorageDataMotionStrategy).internalCanHandle(new HashMap<>());
+ Mockito.doReturn(StrategyPriority.HYPERVISOR).when(kvmNonManagedStorageDataMotionStrategy).internalCanHandle(new HashMap<>(), srcHost, destHost);
- StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.canHandle(new HashMap<>(), srcHost, new HostVO("destHostUuid"));
+ StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.canHandle(new HashMap<>(), srcHost, destHost);
- Mockito.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).internalCanHandle(new HashMap<>());
+ Mockito.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).internalCanHandle(new HashMap<>(), srcHost, destHost);
Assert.assertEquals(expectedStrategyPriority, strategyPriority);
}
@@ -123,7 +161,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Map volumeMap = configureTestInternalCanHandle(false, storagePoolTypeArray[i]);
- StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap);
+ StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap, new HostVO("sourceHostUuid"), new HostVO("destHostUuid"));
if (storagePoolTypeArray[i] == StoragePoolType.Filesystem || storagePoolTypeArray[i] == StoragePoolType.NetworkFilesystem) {
Assert.assertEquals(StrategyPriority.HYPERVISOR, strategyPriority);
} else {
@@ -137,7 +175,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Map volumeMap = configureTestInternalCanHandle(true, storagePoolTypeArray[i]);
- StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap);
+ StrategyPriority strategyPriority = kvmNonManagedStorageDataMotionStrategy.internalCanHandle(volumeMap, null, null);
Assert.assertEquals(StrategyPriority.CANT_HANDLE, strategyPriority);
}
}
@@ -202,7 +240,7 @@ public class KvmNonManagedStorageSystemDataMotionTest {
for (int i = 0; i < storagePoolTypes.length; i++) {
Mockito.doReturn(storagePoolTypes[i]).when(sourceStoragePool).getPoolType();
boolean result = kvmNonManagedStorageDataMotionStrategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
- if (storagePoolTypes[i] == StoragePoolType.Filesystem) {
+ if (storagePoolTypes[i] == StoragePoolType.Filesystem || storagePoolTypes[i] == StoragePoolType.NetworkFilesystem) {
Assert.assertTrue(result);
} else {
Assert.assertFalse(result);
@@ -330,4 +368,102 @@ public class KvmNonManagedStorageSystemDataMotionTest {
verifyInOrder.verify(kvmNonManagedStorageDataMotionStrategy, Mockito.times(times)).sendCopyCommand(Mockito.eq(destHost), Mockito.any(TemplateObjectTO.class),
Mockito.any(TemplateObjectTO.class), Mockito.eq(destDataStore));
}
+
+ @Before
+ public void setUp() {
+ migrationMap = new HashMap<>();
+ migrationMap.put(volumeInfo1, dataStore2);
+ migrationMap.put(volumeInfo2, dataStore2);
+
+ when(volumeInfo1.getPoolId()).thenReturn(POOL_1_ID);
+ when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(pool1);
+ when(pool1.isManaged()).thenReturn(false);
+ when(dataStore2.getId()).thenReturn(POOL_2_ID);
+ when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(pool2);
+ when(pool2.isManaged()).thenReturn(true);
+ when(volumeInfo1.getDataStore()).thenReturn(dataStore1);
+
+ when(volumeInfo2.getPoolId()).thenReturn(POOL_1_ID);
+ when(volumeInfo2.getDataStore()).thenReturn(dataStore1);
+
+ when(dataStore1.getId()).thenReturn(POOL_1_ID);
+ when(pool1.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ when(pool2.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ when(pool2.getScope()).thenReturn(ScopeType.CLUSTER);
+
+ when(dataStore3.getId()).thenReturn(POOL_3_ID);
+ when(primaryDataStoreDao.findById(POOL_3_ID)).thenReturn(pool3);
+ when(pool3.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ when(pool3.getScope()).thenReturn(ScopeType.CLUSTER);
+ when(host1.getId()).thenReturn(HOST_1_ID);
+ when(host1.getClusterId()).thenReturn(CLUSTER_ID);
+ when(host1.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
+ when(host2.getId()).thenReturn(HOST_2_ID);
+ when(host2.getClusterId()).thenReturn(CLUSTER_ID);
+ when(host2.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
+ }
+
+ @Test
+ public void canHandleKVMLiveStorageMigrationSameHost() {
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host1);
+ assertEquals(StrategyPriority.CANT_HANDLE, priority);
+ }
+
+ @Test
+ public void canHandleKVMLiveStorageMigrationInterCluster() {
+ when(host2.getClusterId()).thenReturn(5L);
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
+ assertEquals(StrategyPriority.CANT_HANDLE, priority);
+ }
+
+ @Test
+ public void canHandleKVMLiveStorageMigration() {
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
+ assertEquals(StrategyPriority.HYPERVISOR, priority);
+ }
+
+ @Test
+ public void canHandleKVMLiveStorageMigrationMultipleSources() {
+ when(volumeInfo1.getDataStore()).thenReturn(dataStore2);
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
+ assertEquals(StrategyPriority.HYPERVISOR, priority);
+ }
+
+ @Test
+ public void canHandleKVMLiveStorageMigrationMultipleDestination() {
+ migrationMap.put(volumeInfo2, dataStore3);
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
+ assertEquals(StrategyPriority.HYPERVISOR, priority);
+ }
+
+ @Test
+ public void testCanHandleLiveMigrationUnmanagedStorage() {
+ when(pool2.isManaged()).thenReturn(false);
+ StrategyPriority priority = kvmNonManagedStorageDataMotionStrategy.canHandleKVMNonManagedLiveNFSStorageMigration(migrationMap, host1, host2);
+ assertEquals(StrategyPriority.HYPERVISOR, priority);
+ }
+
+ @Test
+ public void testVerifyLiveMigrationMapForKVM() {
+ kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
+ }
+
+ @Test(expected = CloudRuntimeException.class)
+ public void testVerifyLiveMigrationMapForKVMNotExistingSource() {
+ when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(null);
+ kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
+ }
+
+ @Test(expected = CloudRuntimeException.class)
+ public void testVerifyLiveMigrationMapForKVMNotExistingDest() {
+ when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(null);
+ kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
+ }
+
+ @Test(expected = CloudRuntimeException.class)
+ public void testVerifyLiveMigrationMapForKVMMixedManagedUnmanagedStorage() {
+ when(pool1.isManaged()).thenReturn(true);
+ when(pool2.isManaged()).thenReturn(false);
+ kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2);
+ }
}
diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java
index 197e66ce5b0..1b383d91574 100644
--- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java
+++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java
@@ -51,8 +51,8 @@ import com.cloud.host.HostVO;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ImageStore;
import com.cloud.storage.Storage;
-import com.cloud.storage.Volume;
import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
@RunWith(MockitoJUnitRunner.class)
@@ -60,14 +60,14 @@ public class StorageSystemDataMotionStrategyTest {
@Spy
@InjectMocks
- private StorageSystemDataMotionStrategy storageSystemDataMotionStrategy;
+ private StorageSystemDataMotionStrategy strategy;
@Mock
private VolumeObject volumeObjectSource;
@Mock
private DataObject dataObjectDestination;
@Mock
- private PrimaryDataStore primaryDataStoreSourceStore;
+ private PrimaryDataStore sourceStore;
@Mock
private ImageStore destinationStore;
@Mock
@@ -75,26 +75,26 @@ public class StorageSystemDataMotionStrategyTest {
@Before
public void setUp() throws Exception {
- primaryDataStoreSourceStore = mock(PrimaryDataStoreImpl.class);
+ sourceStore = mock(PrimaryDataStoreImpl.class);
destinationStore = mock(ImageStoreImpl.class);
volumeObjectSource = mock(VolumeObject.class);
dataObjectDestination = mock(VolumeObject.class);
- initMocks(storageSystemDataMotionStrategy);
+ initMocks(strategy);
}
@Test
public void cantHandleSecondary() {
- doReturn(primaryDataStoreSourceStore).when(volumeObjectSource).getDataStore();
- doReturn(DataStoreRole.Primary).when(primaryDataStoreSourceStore).getRole();
+ doReturn(sourceStore).when(volumeObjectSource).getDataStore();
+ doReturn(DataStoreRole.Primary).when(sourceStore).getRole();
doReturn(destinationStore).when(dataObjectDestination).getDataStore();
doReturn(DataStoreRole.Image).when((DataStore)destinationStore).getRole();
- doReturn(primaryDataStoreSourceStore).when(volumeObjectSource).getDataStore();
+ doReturn(sourceStore).when(volumeObjectSource).getDataStore();
doReturn(destinationStore).when(dataObjectDestination).getDataStore();
StoragePoolVO storeVO = new StoragePoolVO();
doReturn(storeVO).when(primaryDataStoreDao).findById(0l);
- assertTrue(storageSystemDataMotionStrategy.canHandle(volumeObjectSource, dataObjectDestination) == StrategyPriority.CANT_HANDLE);
+ assertTrue(strategy.canHandle(volumeObjectSource, dataObjectDestination) == StrategyPriority.CANT_HANDLE);
}
@Test
@@ -135,7 +135,7 @@ public class StorageSystemDataMotionStrategyTest {
Mockito.doReturn(storagePool0).when(primaryDataStoreDao).findById(0l);
Mockito.doReturn(storagePool1).when(primaryDataStoreDao).findById(1l);
- StrategyPriority strategyPriority = storageSystemDataMotionStrategy.internalCanHandle(volumeMap);
+ StrategyPriority strategyPriority = strategy.internalCanHandle(volumeMap, new HostVO("srcHostUuid"), new HostVO("destHostUuid"));
Assert.assertEquals(expectedStrategyPriority, strategyPriority);
}
@@ -146,7 +146,7 @@ public class StorageSystemDataMotionStrategyTest {
StoragePoolType[] storagePoolTypeArray = StoragePoolType.values();
for (int i = 0; i < storagePoolTypeArray.length; i++) {
Mockito.doReturn(storagePoolTypeArray[i]).when(sourceStoragePool).getPoolType();
- boolean result = storageSystemDataMotionStrategy.isStoragePoolTypeOfFile(sourceStoragePool);
+ boolean result = strategy.isStoragePoolTypeOfFile(sourceStoragePool);
if (sourceStoragePool.getPoolType() == StoragePoolType.Filesystem) {
Assert.assertTrue(result);
} else {
@@ -161,19 +161,19 @@ public class StorageSystemDataMotionStrategyTest {
HostVO destHost = new HostVO("guid");
Mockito.doReturn("iScsiName").when(destVolumeInfo).get_iScsiName();
Mockito.doReturn(0l).when(destVolumeInfo).getPoolId();
- Mockito.doReturn("expected").when(storageSystemDataMotionStrategy).connectHostToVolume(destHost, 0l, "iScsiName");
+ Mockito.doReturn("expected").when(strategy).connectHostToVolume(destHost, 0l, "iScsiName");
- String expected = storageSystemDataMotionStrategy.generateDestPath(destHost, Mockito.mock(StoragePoolVO.class), destVolumeInfo);
+ String expected = strategy.generateDestPath(destHost, Mockito.mock(StoragePoolVO.class), destVolumeInfo);
Assert.assertEquals(expected, "expected");
- Mockito.verify(storageSystemDataMotionStrategy).connectHostToVolume(destHost, 0l, "iScsiName");
+ Mockito.verify(strategy).connectHostToVolume(destHost, 0l, "iScsiName");
}
@Test
public void configureMigrateDiskInfoTest() {
VolumeObject srcVolumeInfo = Mockito.spy(new VolumeObject());
Mockito.doReturn("volume path").when(srcVolumeInfo).getPath();
- MigrateCommand.MigrateDiskInfo migrateDiskInfo = storageSystemDataMotionStrategy.configureMigrateDiskInfo(srcVolumeInfo, "destPath");
+ MigrateCommand.MigrateDiskInfo migrateDiskInfo = strategy.configureMigrateDiskInfo(srcVolumeInfo, "destPath");
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, migrateDiskInfo.getDiskType());
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.DriverType.RAW, migrateDiskInfo.getDriverType());
Assert.assertEquals(MigrateCommand.MigrateDiskInfo.Source.DEV, migrateDiskInfo.getSource());
@@ -187,7 +187,7 @@ public class StorageSystemDataMotionStrategyTest {
String volumePath = "iScsiName";
volume.set_iScsiName(volumePath);
- storageSystemDataMotionStrategy.setVolumePath(volume);
+ strategy.setVolumePath(volume);
Assert.assertEquals(volumePath, volume.getPath());
}
@@ -200,8 +200,9 @@ public class StorageSystemDataMotionStrategyTest {
StoragePoolType[] storagePoolTypes = StoragePoolType.values();
for (int i = 0; i < storagePoolTypes.length; i++) {
Mockito.doReturn(storagePoolTypes[i]).when(sourceStoragePool).getPoolType();
- boolean result = storageSystemDataMotionStrategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
+ boolean result = strategy.shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool);
Assert.assertTrue(result);
}
}
-}
+
+}
\ No newline at end of file
diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
index 45e3941a5ec..097b85477da 100644
--- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
+++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
@@ -409,8 +409,15 @@ public class TemplateServiceImpl implements TemplateService {
_templateDao.update(tmplt.getId(), tmlpt);
if (tmplt.getState() == VirtualMachineTemplate.State.NotUploaded || tmplt.getState() == VirtualMachineTemplate.State.UploadInProgress) {
+ VirtualMachineTemplate.Event event = VirtualMachineTemplate.Event.OperationSucceeded;
+ // For multi-disk OVA, check and create data disk templates
+ if (tmplt.getFormat().equals(ImageFormat.OVA)) {
+ if (!createOvaDataDiskTemplates(_templateFactory.getTemplate(tmlpt.getId(), store))) {
+ event = VirtualMachineTemplate.Event.OperationFailed;
+ }
+ }
try {
- stateMachine.transitTo(tmplt, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
+ stateMachine.transitTo(tmplt, event, null, _templateDao);
} catch (NoTransitionException e) {
s_logger.error("Unexpected state transition exception for template " + tmplt.getName() + ". Details: " + e.getMessage());
}
@@ -701,7 +708,7 @@ public class TemplateServiceImpl implements TemplateService {
return null;
}
- // Check if OVA contains additional data disks. If yes, create Datadisk templates for each of the additional datadisk present in the OVA
+ // For multi-disk OVA, check and create data disk templates
if (template.getFormat().equals(ImageFormat.OVA)) {
if (!createOvaDataDiskTemplates(template)) {
template.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
@@ -729,8 +736,8 @@ public class TemplateServiceImpl implements TemplateService {
return null;
}
-
- protected boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) {
+ @Override
+ public boolean createOvaDataDiskTemplates(TemplateInfo parentTemplate) {
try {
// Get Datadisk template (if any) for OVA
List dataDiskTemplates = new ArrayList();
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
index 5117b7cb84f..062e89a4247 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java
@@ -90,6 +90,7 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating);
stateMachines.addTransition(State.Allocated, Event.DestroyRequested, State.Destroying);
stateMachines.addTransition(State.Allocated, Event.OperationFailed, State.Failed);
+ stateMachines.addTransition(State.Allocated, Event.OperationSuccessed, State.Ready);
stateMachines.addTransition(State.Creating, Event.OperationFailed, State.Allocated);
stateMachines.addTransition(State.Creating, Event.OperationSuccessed, State.Ready);
stateMachines.addTransition(State.Ready, Event.CopyingRequested, State.Copying);
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
index 85d95240617..d62a0baa04a 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -20,6 +20,7 @@ import java.util.Date;
import javax.inject.Inject;
+import com.cloud.storage.MigrationOptions;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
@@ -72,6 +73,7 @@ public class VolumeObject implements VolumeInfo {
@Inject
DiskOfferingDao diskOfferingDao;
private Object payload;
+ private MigrationOptions migrationOptions;
public VolumeObject() {
_volStateMachine = Volume.State.getStateMachine();
@@ -315,6 +317,16 @@ public class VolumeObject implements VolumeInfo {
return null;
}
+ @Override
+ public MigrationOptions getMigrationOptions() {
+ return migrationOptions;
+ }
+
+ @Override
+ public void setMigrationOptions(MigrationOptions migrationOptions) {
+ this.migrationOptions = migrationOptions;
+ }
+
public void update() {
volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = volumeDao.findById(volumeVO.getId());
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
index d2d0029701a..8265f951f8a 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java
@@ -40,6 +40,7 @@ import com.cloud.user.Account;
import com.cloud.utils.db.DB;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
@@ -72,6 +73,11 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
throw new CloudRuntimeException("Baremetal doesn't support ISO template");
}
+ @Override
+ public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
+ throw new CloudRuntimeException("Baremetal doesn't support ISO template");
+ }
+
private void templateCreateUsage(VMTemplateVO template, long dcId) {
if (template.getAccountId() != Account.ACCOUNT_ID_SYSTEM) {
UsageEventVO usageEvent =
diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java
index 5696048b847..807babcb09f 100644
--- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java
+++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java
@@ -185,4 +185,9 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv
return false;
}
+ @Override
+ public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException {
+ return false;
+ }
+
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
index 51dbd9234ac..c3e3e6edf41 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
@@ -34,46 +34,67 @@ public class MigrateKVMAsync implements Callable {
private String vmName = "";
private String destIp = "";
private boolean migrateStorage;
+ private boolean migrateStorageManaged;
private boolean autoConvergence;
- /**
- * Do not pause the domain during migration. The domain's memory will be transferred to the destination host while the domain is running. The migration may never converge if the domain is changing its memory faster then it can be transferred. The domain can be manually paused anytime during migration using virDomainSuspend.
- * @value 1
- * @see Libvirt virDomainMigrateFlags documentation
- */
+ // Libvirt Migrate Flags reference:
+ // https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainMigrateFlags
+
+ // Do not pause the domain during migration. The domain's memory will be
+ // transferred to the destination host while the domain is running. The migration
+ // may never converge if the domain is changing its memory faster than it can be
+ // transferred. The domain can be manually paused anytime during migration using
+ // virDomainSuspend.
private static final long VIR_MIGRATE_LIVE = 1L;
- /**
- * Migrate full disk images in addition to domain's memory. By default only non-shared non-readonly disk images are transferred. The VIR_MIGRATE_PARAM_MIGRATE_DISKS parameter can be used to specify which disks should be migrated. This flag and VIR_MIGRATE_NON_SHARED_INC are mutually exclusive.
- * @value 64
- * @see Libvirt virDomainMigrateFlags documentation
- */
+
+ // Define the domain as persistent on the destination host after successful
+ // migration. If the domain was persistent on the source host and
+ // VIR_MIGRATE_UNDEFINE_SOURCE is not used, it will end up persistent on both
+ // hosts.
+ private static final long VIR_MIGRATE_PERSIST_DEST = 8L;
+
+ // Migrate full disk images in addition to domain's memory. By default only
+ // non-shared non-readonly disk images are transferred. The
+ // VIR_MIGRATE_PARAM_MIGRATE_DISKS parameter can be used to specify which disks
+ // should be migrated. This flag and VIR_MIGRATE_NON_SHARED_INC are mutually
+ // exclusive.
private static final long VIR_MIGRATE_NON_SHARED_DISK = 64L;
- /**
- * Compress migration data. The compression methods can be specified using VIR_MIGRATE_PARAM_COMPRESSION. A hypervisor default method will be used if this parameter is omitted. Individual compression methods can be tuned via their specific VIR_MIGRATE_PARAM_COMPRESSION_* parameters.
- * @value 2048
- * @see Libvirt virDomainMigrateFlags documentation
- */
+
+ // Migrate disk images in addition to domain's memory. This is similar to
+ // VIR_MIGRATE_NON_SHARED_DISK, but only the top level of each disk's backing chain
+ // is copied. That is, the rest of the backing chain is expected to be present on
+ // the destination and to be exactly the same as on the source host. This flag and
+ // VIR_MIGRATE_NON_SHARED_DISK are mutually exclusive.
+ private static final long VIR_MIGRATE_NON_SHARED_INC = 128L;
+
+ // Compress migration data. The compression methods can be specified using
+ // VIR_MIGRATE_PARAM_COMPRESSION. A hypervisor default method will be used if this
+ // parameter is omitted. Individual compression methods can be tuned via their
+ // specific VIR_MIGRATE_PARAM_COMPRESSION_* parameters.
private static final long VIR_MIGRATE_COMPRESSED = 2048L;
- /**
- * Enable algorithms that ensure a live migration will eventually converge. This usually means the domain will be slowed down to make sure it does not change its memory faster than a hypervisor can transfer the changed memory to the destination host. VIR_MIGRATE_PARAM_AUTO_CONVERGE_* parameters can be used to tune the algorithm.
- * @value 8192
- * @see Libvirt virDomainMigrateFlags documentation
- */
+
+ // Enable algorithms that ensure a live migration will eventually converge.
+ // This usually means the domain will be slowed down to make sure it does not
+ // change its memory faster than a hypervisor can transfer the changed memory to
+ // the destination host. VIR_MIGRATE_PARAM_AUTO_CONVERGE_* parameters can be used
+ // to tune the algorithm.
private static final long VIR_MIGRATE_AUTO_CONVERGE = 8192L;
- /**
- * Libvirt 1.0.3 supports compression flag for migration.
- */
+ // Libvirt 1.0.3 supports compression flag for migration.
private static final int LIBVIRT_VERSION_SUPPORTS_MIGRATE_COMPRESSED = 1000003;
+ // Libvirt 1.2.3 supports auto converge.
+ private static final int LIBVIRT_VERSION_SUPPORTS_AUTO_CONVERGE = 1002003;
+
public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml,
- final boolean migrateStorage, final boolean autoConvergence, final String vmName, final String destIp) {
+ final boolean migrateStorage, final boolean migrateStorageManaged, final boolean autoConvergence, final String vmName, final String destIp) {
this.libvirtComputingResource = libvirtComputingResource;
this.dm = dm;
this.dconn = dconn;
this.dxml = dxml;
this.migrateStorage = migrateStorage;
+ this.migrateStorageManaged = migrateStorageManaged;
this.autoConvergence = autoConvergence;
this.vmName = vmName;
this.destIp = destIp;
@@ -84,15 +105,20 @@ public class MigrateKVMAsync implements Callable {
long flags = VIR_MIGRATE_LIVE;
if (dconn.getLibVirVersion() >= LIBVIRT_VERSION_SUPPORTS_MIGRATE_COMPRESSED) {
- flags += VIR_MIGRATE_COMPRESSED;
+ flags |= VIR_MIGRATE_COMPRESSED;
}
if (migrateStorage) {
- flags += VIR_MIGRATE_NON_SHARED_DISK;
+ if (migrateStorageManaged) {
+ flags |= VIR_MIGRATE_NON_SHARED_DISK;
+ } else {
+ flags |= VIR_MIGRATE_PERSIST_DEST;
+ flags |= VIR_MIGRATE_NON_SHARED_INC;
+ }
}
- if (autoConvergence && dconn.getLibVirVersion() >= 1002003) {
- flags += VIR_MIGRATE_AUTO_CONVERGE;
+ if (autoConvergence && dconn.getLibVirVersion() >= LIBVIRT_VERSION_SUPPORTS_AUTO_CONVERGE) {
+ flags |= VIR_MIGRATE_AUTO_CONVERGE;
}
return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java
new file mode 100644
index 00000000000..5022e01f21c
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java
@@ -0,0 +1,61 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
+import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.storage.Storage;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.log4j.Logger;
+
+import java.util.Map;
+
+@ResourceWrapper(handles = CheckStorageAvailabilityCommand.class)
+public class LibvirtCheckStorageAvailabilityWrapper extends CommandWrapper {
+
+ private static final Logger s_logger = Logger.getLogger(LibvirtCheckStorageAvailabilityWrapper.class);
+
+ @Override
+ public Answer execute(CheckStorageAvailabilityCommand command, LibvirtComputingResource resource) {
+ KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr();
+ Map poolsMap = command.getPoolsMap();
+
+ for (String poolUuid : poolsMap.keySet()) {
+ Storage.StoragePoolType type = poolsMap.get(poolUuid);
+ s_logger.debug("Checking if storage pool " + poolUuid + " (" + type + ") is mounted on this host");
+ try {
+ KVMStoragePool storagePool = storagePoolMgr.getStoragePool(type, poolUuid);
+ if (storagePool == null) {
+ s_logger.info("Storage pool " + poolUuid + " is not available");
+ return new Answer(command, false, "Storage pool " + poolUuid + " not available");
+ }
+ } catch (CloudRuntimeException e) {
+ s_logger.info("Storage pool " + poolUuid + " is not available");
+ return new Answer(command, e);
+ }
+ }
+ return new Answer(command);
+ }
+}
\ No newline at end of file
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index 0c1370e2551..5bf8d25e949 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@ -147,9 +147,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, migrateStorage,
+ final Callable worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc,
+ migrateStorage, migrateStorageManaged,
command.isAutoConvergence(), vmName, command.getDestinationIp());
final Future migrateThread = executor.submit(worker);
executor.shutdown();
@@ -356,7 +358,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapperThe source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value.
*
*/
- protected String replaceStorage(String xmlDesc, Map migrateStorage)
+ protected String replaceStorage(String xmlDesc, Map migrateStorage,
+ boolean migrateStorageManaged)
throws IOException, ParserConfigurationException, SAXException, TransformerException {
InputStream in = IOUtils.toInputStream(xmlDesc);
@@ -398,7 +401,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper {
+
+ private static final String temporaryCertFilePrefix = "CSCERTIFICATE";
+
+ private static final Logger s_logger = Logger.getLogger(LibvirtSetupDirectDownloadCertificateCommandWrapper.class);
+
+ /**
+ * Retrieve agent.properties file
+ */
+ private File getAgentPropertiesFile() throws FileNotFoundException {
+ final File agentFile = PropertiesUtil.findConfigFile("agent.properties");
+ if (agentFile == null) {
+ throw new FileNotFoundException("Failed to find agent.properties file");
+ }
+ return agentFile;
+ }
+
+ /**
+ * Get the property 'keystore.passphrase' value from agent.properties file
+ */
+ private String getKeystorePassword(File agentFile) {
+ String pass = null;
+ if (agentFile != null) {
+ try {
+ pass = PropertiesUtil.loadFromFile(agentFile).getProperty(KeyStoreUtils.KS_PASSPHRASE_PROPERTY);
+ } catch (IOException e) {
+ s_logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage());
+ }
+ }
+ return pass;
+ }
+
+ /**
+ * Get keystore path
+ */
+ private String getKeyStoreFilePath(File agentFile) {
+ return agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME;
+ }
+
+ /**
+ * Import certificate from temporary file into keystore
+ */
+ private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String privatePassword) {
+ s_logger.debug("Importing certificate from temporary file to keystore");
+ String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt";
+ String importCmd = String.format(importCommandFormat, tempCerFilePath, keyStoreFile, certificateName, privatePassword);
+ int result = Script.runSimpleBashScriptForExitValue(importCmd);
+ if (result != 0) {
+ s_logger.debug("Certificate " + certificateName + " not imported as it already exist on keystore");
+ }
+ }
+
+ /**
+ * Create temporary file and return its path
+ */
+ private String createTemporaryFile(File agentFile, String certificateName, String certificate) {
+ String tempCerFilePath = String.format("%s/%s-%s",
+ agentFile.getParent(), temporaryCertFilePrefix, certificateName);
+ s_logger.debug("Creating temporary certificate file into: " + tempCerFilePath);
+ int result = Script.runSimpleBashScriptForExitValue(String.format("echo '%s' > %s", certificate, tempCerFilePath));
+ if (result != 0) {
+ throw new CloudRuntimeException("Could not create the certificate file on path: " + tempCerFilePath);
+ }
+ return tempCerFilePath;
+ }
+
+ /**
+ * Remove temporary file
+ */
+ private void cleanupTemporaryFile(String temporaryFile) {
+ s_logger.debug("Cleaning up temporary certificate file");
+ Script.runSimpleBashScript("rm -f " + temporaryFile);
+ }
+
+ @Override
+ public Answer execute(SetupDirectDownloadCertificateCommand cmd, LibvirtComputingResource serverResource) {
+ String certificate = cmd.getCertificate();
+ String certificateName = cmd.getCertificateName();
+
+ try {
+ File agentFile = getAgentPropertiesFile();
+ String privatePassword = getKeystorePassword(agentFile);
+ if (isBlank(privatePassword)) {
+ return new Answer(cmd, false, "No password found for keystore: " + KeyStoreUtils.KS_FILENAME);
+ }
+
+ final String keyStoreFile = getKeyStoreFilePath(agentFile);
+ String temporaryFile = createTemporaryFile(agentFile, certificateName, certificate);
+ importCertificate(temporaryFile, keyStoreFile, certificateName, privatePassword);
+ cleanupTemporaryFile(temporaryFile);
+ } catch (FileNotFoundException | CloudRuntimeException e) {
+ s_logger.error("Error while setting up certificate " + certificateName, e);
+ return new Answer(cmd, false, e.getMessage());
+ }
+
+ return new Answer(cmd, true, "Certificate " + certificateName + " imported");
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
index efb51fb82cc..8d1ae771b29 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
@@ -427,7 +427,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
}
@Override
- public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
+ public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
}
@@ -440,4 +440,9 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
public boolean createFolder(String uuid, String path) {
throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
}
+
+ @Override
+ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
+ return null;
+ }
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
index 91cfc4ef582..c1f73d7a088 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
@@ -394,9 +394,15 @@ public class KVMStoragePoolManager {
return adaptor.copyPhysicalDisk(disk, name, destPool, timeout);
}
- public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
+ public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
StorageAdaptor adaptor = getStorageAdaptor(destPool.getType());
- return adaptor.createDiskFromSnapshot(snapshot, snapshotName, name, destPool);
+ return adaptor.createDiskFromSnapshot(snapshot, snapshotName, name, destPool, timeout);
+ }
+
+ public KVMPhysicalDisk createDiskWithTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size,
+ KVMStoragePool destPool, int timeout) {
+ StorageAdaptor adaptor = getStorageAdaptor(destPool.getType());
+ return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout);
}
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 83a7a12d22b..9a2fd275dd3 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -36,19 +36,12 @@ import java.util.UUID;
import javax.naming.ConfigurationException;
-import com.cloud.agent.direct.download.DirectTemplateDownloader;
-import com.cloud.agent.direct.download.DirectTemplateDownloader.DirectTemplateInformation;
-import com.cloud.agent.direct.download.HttpDirectTemplateDownloader;
-import com.cloud.agent.direct.download.MetalinkDirectTemplateDownloader;
-import com.cloud.agent.direct.download.NfsDirectTemplateDownloader;
-import com.cloud.agent.direct.download.HttpsDirectTemplateDownloader;
-import com.cloud.exception.InvalidParameterValueException;
-import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
+import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand;
+import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.MetalinkDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand;
-import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.storage.command.AttachAnswer;
import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
@@ -95,7 +88,14 @@ import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.S3TO;
+import com.cloud.agent.direct.download.DirectTemplateDownloader;
+import com.cloud.agent.direct.download.DirectTemplateDownloader.DirectTemplateInformation;
+import com.cloud.agent.direct.download.HttpDirectTemplateDownloader;
+import com.cloud.agent.direct.download.HttpsDirectTemplateDownloader;
+import com.cloud.agent.direct.download.MetalinkDirectTemplateDownloader;
+import com.cloud.agent.direct.download.NfsDirectTemplateDownloader;
import com.cloud.exception.InternalErrorException;
+import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
@@ -105,6 +105,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.storage.JavaStorageLayer;
+import com.cloud.storage.MigrationOptions;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageLayer;
@@ -114,9 +115,9 @@ import com.cloud.storage.template.Processor.FormatInfo;
import com.cloud.storage.template.QCOW2Processor;
import com.cloud.storage.template.TemplateLocation;
import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.storage.S3.S3Utils;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
+import com.cloud.utils.storage.S3.S3Utils;
public class KVMStorageProcessor implements StorageProcessor {
private static final Logger s_logger = Logger.getLogger(KVMStorageProcessor.class);
@@ -1352,6 +1353,32 @@ public class KVMStorageProcessor implements StorageProcessor {
}
}
+ /**
+ * Create volume with backing file (linked clone)
+ */
+ protected KVMPhysicalDisk createLinkedCloneVolume(MigrationOptions migrationOptions, KVMStoragePool srcPool, KVMStoragePool primaryPool, VolumeObjectTO volume, PhysicalDiskFormat format, int timeout) {
+ String srcBackingFilePath = migrationOptions.getSrcBackingFilePath();
+ boolean copySrcTemplate = migrationOptions.isCopySrcTemplate();
+ KVMPhysicalDisk srcTemplate = srcPool.getPhysicalDisk(srcBackingFilePath);
+ KVMPhysicalDisk destTemplate;
+ if (copySrcTemplate) {
+ KVMPhysicalDisk copiedTemplate = storagePoolMgr.copyPhysicalDisk(srcTemplate, srcTemplate.getName(), primaryPool, 10000 * 1000);
+ destTemplate = primaryPool.getPhysicalDisk(copiedTemplate.getPath());
+ } else {
+ destTemplate = primaryPool.getPhysicalDisk(srcBackingFilePath);
+ }
+ return storagePoolMgr.createDiskWithTemplateBacking(destTemplate, volume.getUuid(), format, volume.getSize(),
+ primaryPool, timeout);
+ }
+
+ /**
+ * Create full clone volume from VM snapshot
+ */
+ protected KVMPhysicalDisk createFullCloneVolume(MigrationOptions migrationOptions, VolumeObjectTO volume, KVMStoragePool primaryPool, PhysicalDiskFormat format) {
+ s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + volume.getSize() + " and format: " + format);
+ return primaryPool.createPhysicalDisk(volume.getUuid(), format, volume.getProvisioningType(), volume.getSize());
+ }
+
@Override
public Answer createVolume(final CreateObjectCommand cmd) {
final VolumeObjectTO volume = (VolumeObjectTO)cmd.getData();
@@ -1369,8 +1396,23 @@ public class KVMStorageProcessor implements StorageProcessor {
} else {
format = PhysicalDiskFormat.valueOf(volume.getFormat().toString().toUpperCase());
}
- vol = primaryPool.createPhysicalDisk(volume.getUuid(), format,
- volume.getProvisioningType(), disksize);
+
+ MigrationOptions migrationOptions = volume.getMigrationOptions();
+ if (migrationOptions != null) {
+ String srcStoreUuid = migrationOptions.getSrcPoolUuid();
+ StoragePoolType srcPoolType = migrationOptions.getSrcPoolType();
+ KVMStoragePool srcPool = storagePoolMgr.getStoragePool(srcPoolType, srcStoreUuid);
+ int timeout = migrationOptions.getTimeout();
+
+ if (migrationOptions.getType() == MigrationOptions.Type.LinkedClone) {
+ vol = createLinkedCloneVolume(migrationOptions, srcPool, primaryPool, volume, format, timeout);
+ } else if (migrationOptions.getType() == MigrationOptions.Type.FullClone) {
+ vol = createFullCloneVolume(migrationOptions, volume, primaryPool, format);
+ }
+ } else {
+ vol = primaryPool.createPhysicalDisk(volume.getUuid(), format,
+ volume.getProvisioningType(), disksize);
+ }
final VolumeObjectTO newVol = new VolumeObjectTO();
if(vol != null) {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
index dc8083f0eea..f858a4f1577 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
@@ -95,6 +95,33 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
return true;
}
+ @Override
+ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size,
+ KVMStoragePool destPool, int timeout) {
+ s_logger.info("Creating volume " + name + " with template backing " + template.getName() + " in pool " + destPool.getUuid() +
+ " (" + destPool.getType().toString() + ") with size " + size);
+
+ KVMPhysicalDisk disk = null;
+ String destPath = destPool.getLocalPath().endsWith("/") ?
+ destPool.getLocalPath() + name :
+ destPool.getLocalPath() + "/" + name;
+
+ if (destPool.getType() == StoragePoolType.NetworkFilesystem) {
+ try {
+ if (format == PhysicalDiskFormat.QCOW2) {
+ QemuImg qemu = new QemuImg(timeout);
+ QemuImgFile destFile = new QemuImgFile(destPath, format);
+ destFile.setSize(size);
+ QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat());
+ qemu.create(destFile, backingFile);
+ }
+ } catch (QemuImgException e) {
+ s_logger.error("Failed to create " + destPath + " due to a failed executing of qemu-img: " + e.getMessage());
+ }
+ }
+ return disk;
+ }
+
public StorageVol getVolume(StoragePool pool, String volName) {
StorageVol vol = null;
@@ -914,7 +941,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
case SPARSE:
case FAT:
QemuImgFile srcFile = new QemuImgFile(template.getPath(), template.getFormat());
- qemu.convert(srcFile, destFile, options);
+ qemu.convert(srcFile, destFile, options, null);
break;
}
} else if (format == PhysicalDiskFormat.RAW) {
@@ -927,7 +954,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
}
QemuImg qemu = new QemuImg(timeout);
Map options = new HashMap();
- qemu.convert(sourceFile, destFile, options);
+ qemu.convert(sourceFile, destFile, options, null);
}
} catch (QemuImgException e) {
s_logger.error("Failed to create " + disk.getPath() +
@@ -1302,8 +1329,35 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
}
@Override
- public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
- return null;
+ public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
+ s_logger.info("Creating volume " + name + " from snapshot " + snapshotName + " in pool " + destPool.getUuid() +
+ " (" + destPool.getType().toString() + ")");
+
+ PhysicalDiskFormat format = snapshot.getFormat();
+ long size = snapshot.getSize();
+ String destPath = destPool.getLocalPath().endsWith("/") ?
+ destPool.getLocalPath() + name :
+ destPool.getLocalPath() + "/" + name;
+
+ if (destPool.getType() == StoragePoolType.NetworkFilesystem) {
+ try {
+ if (format == PhysicalDiskFormat.QCOW2) {
+ QemuImg qemu = new QemuImg(timeout);
+ QemuImgFile destFile = new QemuImgFile(destPath, format);
+ if (size > snapshot.getVirtualSize()) {
+ destFile.setSize(size);
+ } else {
+ destFile.setSize(snapshot.getVirtualSize());
+ }
+ QemuImgFile srcFile = new QemuImgFile(snapshot.getPath(), snapshot.getFormat());
+ qemu.convert(srcFile, destFile, snapshotName);
+ }
+ } catch (QemuImgException e) {
+ s_logger.error("Failed to create " + destPath +
+ " due to a failed executing of qemu-img: " + e.getMessage());
+ }
+ }
+ return destPool.getPhysicalDisk(name);
}
@Override
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java
index 596582db34d..309308ae9c1 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java
@@ -294,7 +294,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
}
@Override
- public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) {
+ public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) {
throw new UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration.");
}
@@ -313,6 +313,11 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
return true;
}
+ @Override
+ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
+ return null;
+ }
+
@Override
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, ProvisioningType provisioningType, long size) {
return null;
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
index 2c1ed233b40..a3c1387aa6b 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
@@ -66,11 +66,19 @@ public interface StorageAdaptor {
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout);
- public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool);
+ public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout);
public boolean refresh(KVMStoragePool pool);
public boolean deleteStoragePool(KVMStoragePool pool);
public boolean createFolder(String uuid, String path);
+
+ /**
+ * Creates disk using template backing.
+ * Precondition: Template is on destPool
+ */
+ KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template,
+ String name, PhysicalDiskFormat format, long size,
+ KVMStoragePool destPool, int timeout);
}
diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java
index eb1eeea2546..481dcdcd406 100644
--- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java
+++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java
@@ -20,17 +20,24 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.NotImplementedException;
+
import com.cloud.storage.Storage;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;
-import org.apache.commons.lang.NotImplementedException;
public class QemuImg {
/* The qemu-img binary. We expect this to be in $PATH */
public String _qemuImgPath = "qemu-img";
+ private String cloudQemuImgPath = "cloud-qemu-img";
private int timeout;
+ private String getQemuImgPathScript = String.format("which %s >& /dev/null; " +
+ "if [ $? -gt 0 ]; then echo \"%s\"; else echo \"%s\"; fi",
+ cloudQemuImgPath, _qemuImgPath, cloudQemuImgPath);
+
/* Shouldn't we have KVMPhysicalDisk and LibvirtVMDef read this? */
public static enum PhysicalDiskFormat {
RAW("raw"), QCOW2("qcow2"), VMDK("vmdk"), FILE("file"), RBD("rbd"), SHEEPDOG("sheepdog"), HTTP("http"), HTTPS("https"), TAR("tar"), DIR("dir");
@@ -220,10 +227,18 @@ public class QemuImg {
* @param options
* Options for the convert. Takes a Map with key value
* pairs which are passed on to qemu-img without validation.
+ * @param snapshotName
+ * If it is provided, convertion uses it as parameter
* @return void
*/
- public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map options) throws QemuImgException {
- final Script script = new Script(_qemuImgPath, timeout);
+ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile,
+ final Map options, final String snapshotName) throws QemuImgException {
+ Script script = new Script(_qemuImgPath, timeout);
+ if (StringUtils.isNotBlank(snapshotName)) {
+ String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript);
+ script = new Script(qemuPath, timeout);
+ }
+
script.add("convert");
// autodetect source format. Sometime int he future we may teach KVMPhysicalDisk about more formats, then we can explicitly pass them if necessary
//s.add("-f");
@@ -242,6 +257,13 @@ public class QemuImg {
script.add(optionsStr);
}
+ if (StringUtils.isNotBlank(snapshotName)) {
+ script.add("-f");
+ script.add(srcFile.getFormat().toString());
+ script.add("-s");
+ script.add(snapshotName);
+ }
+
script.add(srcFile.getFileName());
script.add(destFile.getFileName());
@@ -269,7 +291,26 @@ public class QemuImg {
* @return void
*/
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile) throws QemuImgException {
- this.convert(srcFile, destFile, null);
+ this.convert(srcFile, destFile, null, null);
+ }
+
+ /**
+ * Convert a image from source to destination
+ *
+ * This method calls 'qemu-img convert' and takes three objects
+ * as an argument.
+ *
+ *
+ * @param srcFile
+ * The source file
+ * @param destFile
+ * The destination file
+ * @param snapshotName
+ * The snapshot name
+ * @return void
+ */
+ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, String snapshotName) throws QemuImgException {
+ this.convert(srcFile, destFile, null, snapshotName);
}
/**
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
index eaab179726a..5289481b66d 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
@@ -19,16 +19,22 @@
package com.cloud.hypervisor.kvm.resource.wrapper;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.InputStream;
+import java.io.IOException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
import java.util.Scanner;
import org.apache.cloudstack.utils.linux.MemStat;
import java.util.Map;
-import java.util.HashMap;
import org.apache.commons.io.IOUtils;
+
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.TransformerException;
+
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -40,7 +46,9 @@ import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
+import org.xml.sax.SAXException;
+import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DiskType;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo.DriverType;
@@ -310,6 +318,114 @@ public class LibvirtMigrateCommandWrapperTest {
PowerMockito.whenNew(Scanner.class).withAnyArguments().thenReturn(scanner);
}
+ private static final String sourcePoolUuid = "07eb495b-5590-3877-9fb7-23c6e9a40d40";
+ private static final String destPoolUuid = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
+ private static final String disk1SourceFilename = "981ab1dc-40f4-41b5-b387-6539aeddbf47";
+ private static final String disk2SourceFilename = "bf8621b3-027c-497d-963b-06319650f048";
+ private static final String sourceMultidiskDomainXml =
+ "\n" +
+ " i-2-3-VM\n" +
+ " 91860126-7dda-4876-ac1e-48d06cd4b2eb\n" +
+ " Apple Mac OS X 10.6 (32-bit)\n" +
+ " 524288\n" +
+ " 524288\n" +
+ " 1\n" +
+ " \n" +
+ " 250\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " Apache Software Foundation\n" +
+ " CloudStack KVM Hypervisor\n" +
+ " 91860126-7dda-4876-ac1e-48d06cd4b2eb\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " hvm\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " destroy\n" +
+ " restart\n" +
+ " destroy\n" +
+ " \n" +
+ " /usr/libexec/qemu-kvm\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " e8141f63b5364a7f8cbb\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " bf8621b3027c497d963b\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ "\n";
+
@Test
public void testReplaceIpForVNCInDescFile() {
final String targetIp = "192.168.22.21";
@@ -488,7 +604,7 @@ public class LibvirtMigrateCommandWrapperTest {
MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, "sourctest");
mapMigrateStorage.put("/mnt/812ea6a3-7ad0-30f4-9cab-01e3f2985b98/4650a2f7-fce5-48e2-beaa-bcdf063194e6", diskInfo);
- final String result = libvirtMigrateCmdWrapper.replaceStorage(fullfile, mapMigrateStorage);
+ final String result = libvirtMigrateCmdWrapper.replaceStorage(fullfile, mapMigrateStorage, true);
InputStream in = IOUtils.toInputStream(result);
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
@@ -499,4 +615,25 @@ public class LibvirtMigrateCommandWrapperTest {
assertXpath(doc, "/domain/devices/disk/driver/@type", "raw");
}
+ public void testReplaceStorageXmlDiskNotManagedStorage() throws ParserConfigurationException, TransformerException, SAXException, IOException {
+ final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
+ String destDisk1FileName = "XXXXXXXXXXXXXX";
+ String destDisk2FileName = "YYYYYYYYYYYYYY";
+ String destDisk1Path = String.format("/mnt/%s/%s", destPoolUuid, destDisk1FileName);
+ MigrateCommand.MigrateDiskInfo migrateDisk1Info = new MigrateCommand.MigrateDiskInfo(disk1SourceFilename,
+ MigrateCommand.MigrateDiskInfo.DiskType.FILE, MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
+ MigrateCommand.MigrateDiskInfo.Source.FILE, destDisk1Path);
+ String destDisk2Path = String.format("/mnt/%s/%s", destPoolUuid, destDisk2FileName);
+ MigrateCommand.MigrateDiskInfo migrateDisk2Info = new MigrateCommand.MigrateDiskInfo(disk2SourceFilename,
+ MigrateCommand.MigrateDiskInfo.DiskType.FILE, MigrateCommand.MigrateDiskInfo.DriverType.QCOW2,
+ MigrateCommand.MigrateDiskInfo.Source.FILE, destDisk2Path);
+ Map migrateStorage = new HashMap<>();
+ migrateStorage.put(disk1SourceFilename, migrateDisk1Info);
+ migrateStorage.put(disk2SourceFilename, migrateDisk2Info);
+ String newXml = lw.replaceStorage(sourceMultidiskDomainXml, migrateStorage, false);
+ assertTrue(newXml.contains(destDisk1Path));
+ assertTrue(newXml.contains(destDisk2Path));
+ assertFalse(newXml.contains("/mnt/" + sourcePoolUuid + "/" + disk1SourceFilename));
+ assertFalse(newXml.contains("/mnt/" + sourcePoolUuid + "/" + disk2SourceFilename));
+ }
}
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
index 10c3feb2609..4777b738391 100644
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
@@ -157,11 +157,11 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co
}
public static final ConfigKey VmwareReserveCpu = new ConfigKey(Boolean.class, "vmware.reserve.cpu", "Advanced", "false",
- "Specify whether or not to reserve CPU when not overprovisioning, In case of cpu overprovisioning we will always reserve cpu.", true, ConfigKey.Scope.Cluster,
+ "Specify whether or not to reserve CPU when deploying an instance.", true, ConfigKey.Scope.Cluster,
null);
public static final ConfigKey VmwareReserveMemory = new ConfigKey(Boolean.class, "vmware.reserve.mem", "Advanced", "false",
- "Specify whether or not to reserve memory when not overprovisioning, In case of memory overprovisioning we will always reserve memory.", true,
+ "Specify whether or not to reserve memory when deploying an instance.", true,
ConfigKey.Scope.Cluster, null);
protected ConfigKey VmwareEnableNestedVirtualization = new ConfigKey(Boolean.class, "vmware.nested.virtualization", "Advanced", "false",
diff --git a/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessorTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessorTest.java
similarity index 100%
rename from plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessorTest.java
rename to plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessorTest.java
diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java
index 62ca29c7e44..4771441e9e7 100644
--- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java
+++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java
@@ -379,4 +379,9 @@ public class ContrailElementImpl extends AdapterBase
public boolean setExtraDhcpOptions(Network network, long nicId, Map dhcpOptions) {
return false;
}
+
+ @Override
+ public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) {
+ return false;
+ }
}
diff --git a/plugins/network-elements/juniper-srx/src/main/java/com/cloud/network/resource/JuniperSrxResource.java b/plugins/network-elements/juniper-srx/src/main/java/com/cloud/network/resource/JuniperSrxResource.java
index 20031e3227f..8ada819c7a3 100644
--- a/plugins/network-elements/juniper-srx/src/main/java/com/cloud/network/resource/JuniperSrxResource.java
+++ b/plugins/network-elements/juniper-srx/src/main/java/com/cloud/network/resource/JuniperSrxResource.java
@@ -2078,11 +2078,11 @@ public class JuniperSrxResource implements ServerResource {
xml = replaceXmlValue(xml, "rule-set", _privateZone);
xml = replaceXmlValue(xml, "from-zone", _privateZone);
xml = replaceXmlValue(xml, "rule-name", ruleName_private);
- }
- if (!sendRequestAndCheckResponse(command, xml, "name", ruleName_private))
- {
- throw new ExecutionException("Failed to delete trust static NAT rule from public IP " + publicIp + " to private IP " + privateIp);
+ if (!sendRequestAndCheckResponse(command, xml, "name", ruleName_private))
+ {
+ throw new ExecutionException("Failed to delete trust static NAT rule from public IP " + publicIp + " to private IP " + privateIp);
+ }
}
return true;
}
@@ -3568,6 +3568,7 @@ public class JuniperSrxResource implements ServerResource {
case CHECK_IF_EXISTS:
case CHECK_IF_IN_USE:
+ case CHECK_PRIVATE_IF_EXISTS:
assert (keyAndValue != null && keyAndValue.length == 2) : "If the SrxCommand is " + command + ", both a key and value must be specified.";
key = keyAndValue[0];
diff --git a/plugins/user-authenticators/ldap/test/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java
similarity index 100%
rename from plugins/user-authenticators/ldap/test/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java
rename to plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java
diff --git a/plugins/user-authenticators/ldap/test/org/apache/cloudstack/ldap/LdapAuthenticatorTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapAuthenticatorTest.java
similarity index 100%
rename from plugins/user-authenticators/ldap/test/org/apache/cloudstack/ldap/LdapAuthenticatorTest.java
rename to plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapAuthenticatorTest.java
diff --git a/scripts/util/keystore-cert-import b/scripts/util/keystore-cert-import
index 424ab4a718e..459f8366ee4 100755
--- a/scripts/util/keystore-cert-import
+++ b/scripts/util/keystore-cert-import
@@ -38,9 +38,6 @@ if [ -z "${KS_PASS// }" ]; then
exit 1
fi
-# Use a new keystore file
-NEW_KS_FILE="$KS_FILE.new"
-
# Import certificate
if [ ! -z "${CERT// }" ]; then
echo "$CERT" > "$CERT_FILE"
@@ -54,8 +51,8 @@ fi
# Import cacerts into the keystore
awk '/-----BEGIN CERTIFICATE-----?/{n++}{print > "cloudca." n }' "$CACERT_FILE"
for caChain in $(ls cloudca.*); do
- keytool -delete -noprompt -alias "$caChain" -keystore "$NEW_KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
- keytool -import -noprompt -storepass "$KS_PASS" -trustcacerts -alias "$caChain" -file "$caChain" -keystore "$NEW_KS_FILE" > /dev/null 2>&1
+ keytool -delete -noprompt -alias "$caChain" -keystore "$KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
+ keytool -import -noprompt -storepass "$KS_PASS" -trustcacerts -alias "$caChain" -file "$caChain" -keystore "$KS_FILE" > /dev/null 2>&1
done
rm -f cloudca.*
@@ -63,21 +60,19 @@ rm -f cloudca.*
if [ ! -z "${PRIVKEY// }" ]; then
echo "$PRIVKEY" > "$PRIVKEY_FILE"
# Re-initialize keystore when private key is provided
- keytool -delete -noprompt -alias "$ALIAS" -keystore "$NEW_KS_FILE" -storepass "$KS_PASS" 2>/dev/null || true
- openssl pkcs12 -export -name "$ALIAS" -in "$CERT_FILE" -inkey "$PRIVKEY_FILE" -out "$NEW_KS_FILE.p12" -password pass:"$KS_PASS" > /dev/null 2>&1
- keytool -importkeystore -srckeystore "$NEW_KS_FILE.p12" -destkeystore "$NEW_KS_FILE" -srcstoretype PKCS12 -alias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
+ keytool -delete -noprompt -alias "$ALIAS" -keystore "$KS_FILE" -storepass "$KS_PASS" 2>/dev/null || true
+ openssl pkcs12 -export -name "$ALIAS" -in "$CERT_FILE" -inkey "$PRIVKEY_FILE" -out "$KS_FILE.p12" -password pass:"$KS_PASS" > /dev/null 2>&1
+ keytool -importkeystore -srckeystore "$KS_FILE.p12" -destkeystore "$KS_FILE" -srcstoretype PKCS12 -alias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
else
# Import certificate into the keystore
- keytool -import -storepass "$KS_PASS" -alias "$ALIAS" -file "$CERT_FILE" -keystore "$NEW_KS_FILE" > /dev/null 2>&1 || true
+ keytool -import -storepass "$KS_PASS" -alias "$ALIAS" -file "$CERT_FILE" -keystore "$KS_FILE" > /dev/null 2>&1 || true
# Export private key from keystore
rm -f "$PRIVKEY_FILE"
- keytool -importkeystore -srckeystore "$NEW_KS_FILE" -destkeystore "$NEW_KS_FILE.p12" -deststoretype PKCS12 -srcalias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
- openssl pkcs12 -in "$NEW_KS_FILE.p12" -nodes -nocerts -nomac -password pass:"$KS_PASS" 2>/dev/null | openssl rsa -out "$PRIVKEY_FILE" > /dev/null 2>&1
+ keytool -importkeystore -srckeystore "$KS_FILE" -destkeystore "$KS_FILE.p12" -deststoretype PKCS12 -srcalias "$ALIAS" -deststorepass "$KS_PASS" -destkeypass "$KS_PASS" -srcstorepass "$KS_PASS" -srckeypass "$KS_PASS" > /dev/null 2>&1
+ openssl pkcs12 -in "$KS_FILE.p12" -nodes -nocerts -nomac -password pass:"$KS_PASS" 2>/dev/null | openssl rsa -out "$PRIVKEY_FILE" > /dev/null 2>&1
fi
-# Commit the new keystore
-rm -f "$NEW_KS_FILE.p12"
-mv -f "$NEW_KS_FILE" "$KS_FILE"
+rm -f "$KS_FILE.p12"
# Secure libvirtd on cert import
if [ -f "$LIBVIRTD_FILE" ]; then
diff --git a/scripts/util/keystore-setup b/scripts/util/keystore-setup
index ce963363c1d..65f04c48d57 100755
--- a/scripts/util/keystore-setup
+++ b/scripts/util/keystore-setup
@@ -17,7 +17,7 @@
# under the License.
PROPS_FILE="$1"
-KS_FILE="$2.new"
+KS_FILE="$2"
KS_PASS="$3"
KS_VALIDITY="$4"
CSR_FILE="$5"
@@ -35,8 +35,10 @@ if [ -f "$PROPS_FILE" ]; then
fi
fi
-# Generate keystore
-rm -f "$KS_FILE"
+if [ -f "$KS_FILE" ]; then
+ keytool -delete -noprompt -alias "$ALIAS" -keystore "$KS_FILE" -storepass "$KS_PASS" > /dev/null 2>&1 || true
+fi
+
CN=$(hostname --fqdn)
keytool -genkey -storepass "$KS_PASS" -keypass "$KS_PASS" -alias "$ALIAS" -keyalg RSA -validity "$KS_VALIDITY" -dname cn="$CN",ou="cloudstack",o="cloudstack",c="cloudstack" -keystore "$KS_FILE" > /dev/null 2>&1
diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
deleted file mode 100755
index 07eb072e82c..00000000000
--- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java
+++ /dev/null
@@ -1,1444 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.snapshot;
-
-import com.cloud.agent.api.Answer;
-import com.cloud.utils.db.GlobalLock;
-import com.cloud.agent.api.Command;
-import com.cloud.agent.api.DeleteSnapshotsDirCommand;
-import com.cloud.alert.AlertManager;
-import com.cloud.api.commands.ListRecurringSnapshotScheduleCmd;
-import com.cloud.api.query.MutualExclusiveIdsManagerBase;
-import com.cloud.configuration.Config;
-import com.cloud.configuration.Resource.ResourceType;
-import com.cloud.dc.ClusterVO;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.domain.dao.DomainDao;
-import com.cloud.event.ActionEvent;
-import com.cloud.event.ActionEventUtils;
-import com.cloud.event.EventTypes;
-import com.cloud.event.EventVO;
-import com.cloud.event.UsageEventUtils;
-import com.cloud.exception.InvalidParameterValueException;
-import com.cloud.exception.PermissionDeniedException;
-import com.cloud.exception.ResourceAllocationException;
-import com.cloud.exception.StorageUnavailableException;
-import com.cloud.host.HostVO;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.projects.Project.ListProjectResourcesCriteria;
-import com.cloud.resource.ResourceManager;
-import com.cloud.server.ResourceTag.ResourceObjectType;
-import com.cloud.storage.CreateSnapshotPayload;
-import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.ScopeType;
-import com.cloud.storage.Snapshot;
-import com.cloud.storage.Snapshot.Type;
-import com.cloud.storage.SnapshotPolicyVO;
-import com.cloud.storage.SnapshotScheduleVO;
-import com.cloud.storage.SnapshotVO;
-import com.cloud.storage.Storage;
-import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.storage.StorageManager;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.SnapshotDao;
-import com.cloud.storage.dao.SnapshotPolicyDao;
-import com.cloud.storage.dao.SnapshotScheduleDao;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.template.TemplateConstants;
-import com.cloud.tags.ResourceTagVO;
-import com.cloud.tags.dao.ResourceTagDao;
-import com.cloud.user.Account;
-import com.cloud.user.AccountManager;
-import com.cloud.user.AccountVO;
-import com.cloud.user.DomainManager;
-import com.cloud.user.ResourceLimitService;
-import com.cloud.user.User;
-import com.cloud.user.dao.AccountDao;
-import com.cloud.utils.DateUtil;
-import com.cloud.utils.DateUtil.IntervalType;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.Pair;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.db.DB;
-import com.cloud.utils.db.Filter;
-import com.cloud.utils.db.JoinBuilder;
-import com.cloud.utils.db.SearchBuilder;
-import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.UserVmVO;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.dao.UserVmDao;
-import com.cloud.vm.snapshot.VMSnapshot;
-import com.cloud.vm.snapshot.VMSnapshotVO;
-import com.cloud.vm.snapshot.dao.VMSnapshotDao;
-import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd;
-import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd;
-import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd;
-import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd;
-import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd;
-import org.apache.cloudstack.context.CallContext;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
-import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
-import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy;
-import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation;
-import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
-import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
-import org.apache.cloudstack.framework.config.ConfigKey;
-import org.apache.cloudstack.framework.config.Configurable;
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
-import org.apache.cloudstack.managed.context.ManagedContextRunnable;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-@Component
-public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implements SnapshotManager, SnapshotApiService, Configurable {
- private static final Logger s_logger = Logger.getLogger(SnapshotManagerImpl.class);
- @Inject
- VMTemplateDao _templateDao;
- @Inject
- UserVmDao _vmDao;
- @Inject
- VolumeDao _volsDao;
- @Inject
- AccountDao _accountDao;
- @Inject
- SnapshotDao _snapshotDao;
- @Inject
- SnapshotDataStoreDao _snapshotStoreDao;
- @Inject
- PrimaryDataStoreDao _storagePoolDao;
- @Inject
- SnapshotPolicyDao _snapshotPolicyDao = null;
- @Inject
- SnapshotScheduleDao _snapshotScheduleDao;
- @Inject
- DomainDao _domainDao;
- @Inject
- StorageManager _storageMgr;
- @Inject
- SnapshotScheduler _snapSchedMgr;
- @Inject
- AccountManager _accountMgr;
- @Inject
- AlertManager _alertMgr;
- @Inject
- ClusterDao _clusterDao;
- @Inject
- ResourceLimitService _resourceLimitMgr;
- @Inject
- DomainManager _domainMgr;
- @Inject
- ResourceTagDao _resourceTagDao;
- @Inject
- ConfigurationDao _configDao;
- @Inject
- VMSnapshotDao _vmSnapshotDao;
- @Inject
- DataStoreManager dataStoreMgr;
- @Inject
- SnapshotService snapshotSrv;
- @Inject
- VolumeDataFactory volFactory;
- @Inject
- SnapshotDataFactory snapshotFactory;
- @Inject
- EndPointSelector _epSelector;
- @Inject
- ResourceManager _resourceMgr;
- @Inject
- StorageStrategyFactory _storageStrategyFactory;
-
- private int _totalRetries;
- private int _pauseInterval;
- private int snapshotBackupRetries, snapshotBackupRetryInterval;
-
- private ScheduledExecutorService backupSnapshotExecutor;
-
- @Override
- public String getConfigComponentName() {
- return SnapshotManager.class.getSimpleName();
- }
-
- @Override
- public ConfigKey>[] getConfigKeys() {
- return new ConfigKey>[] {BackupRetryAttempts, BackupRetryInterval, SnapshotHourlyMax, SnapshotDailyMax, SnapshotMonthlyMax, SnapshotWeeklyMax, usageSnapshotSelection, BackupSnapshotAfterTakingSnapshot};
- }
-
- @Override
- public Answer sendToPool(Volume vol, Command cmd) {
- StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId());
- long[] hostIdsToTryFirst = null;
-
- Long vmHostId = getHostIdForSnapshotOperation(vol);
-
- if (vmHostId != null) {
- hostIdsToTryFirst = new long[] {vmHostId};
- }
-
- List hostIdsToAvoid = new ArrayList();
- for (int retry = _totalRetries; retry >= 0; retry--) {
- try {
- Pair result = _storageMgr.sendToPool(pool, hostIdsToTryFirst, hostIdsToAvoid, cmd);
- if (result.second().getResult()) {
- return result.second();
- }
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("The result for " + cmd.getClass().getName() + " is " + result.second().getDetails() + " through " + result.first());
- }
- hostIdsToAvoid.add(result.first());
- } catch (StorageUnavailableException e1) {
- s_logger.warn("Storage unavailable ", e1);
- return null;
- }
-
- try {
- Thread.sleep(_pauseInterval * 1000);
- } catch (InterruptedException e) {
- s_logger.debug("[ignored] interupted while retry cmd.");
- }
-
- s_logger.debug("Retrying...");
- }
-
- s_logger.warn("After " + _totalRetries + " retries, the command " + cmd.getClass().getName() + " did not succeed.");
-
- return null;
- }
-
- @Override
- public Long getHostIdForSnapshotOperation(Volume vol) {
- VMInstanceVO vm = _vmDao.findById(vol.getInstanceId());
- if (vm != null) {
- if (vm.getHostId() != null) {
- return vm.getHostId();
- } else if (vm.getLastHostId() != null) {
- return vm.getLastHostId();
- }
- }
- return null;
- }
-
- @Override
- public Snapshot revertSnapshot(Long snapshotId) {
- SnapshotVO snapshot = _snapshotDao.findById(snapshotId);
- if (snapshot == null) {
- throw new InvalidParameterValueException("No such snapshot");
- }
-
- VolumeVO volume = _volsDao.findById(snapshot.getVolumeId());
- if (volume.getState() != Volume.State.Ready) {
- throw new InvalidParameterValueException("The volume is not in Ready state.");
- }
-
- Long instanceId = volume.getInstanceId();
-
- // If this volume is attached to an VM, then the VM needs to be in the stopped state
- // in order to revert the volume
- if (instanceId != null) {
- UserVmVO vm = _vmDao.findById(instanceId);
- if (vm.getState() != State.Stopped && vm.getState() != State.Shutdowned) {
- throw new InvalidParameterValueException("The VM the specified disk is attached to is not in the shutdown state.");
- }
- // If target VM has associated VM snapshots then don't allow to revert from snapshot
- List vmSnapshots = _vmSnapshotDao.findByVm(instanceId);
- if (vmSnapshots.size() > 0) {
- throw new InvalidParameterValueException("Unable to revert snapshot for VM, please remove VM snapshots before reverting VM from snapshot");
- }
- }
-
- DataStoreRole dataStoreRole = getDataStoreRole(snapshot, _snapshotStoreDao, dataStoreMgr);
-
- SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotId, dataStoreRole);
- if (snapshotInfo == null) {
- throw new CloudRuntimeException("snapshot:" + snapshotId + " not exist in data store");
- }
-
- SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT);
-
- if (snapshotStrategy == null) {
- s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'");
- return null;
- }
-
- boolean result = snapshotStrategy.revertSnapshot(snapshotInfo);
- if (result) {
- // update volume size and primary storage count
- _resourceLimitMgr.decrementResourceCount(snapshot.getAccountId(), ResourceType.primary_storage, new Long(volume.getSize() - snapshot.getSize()));
- volume.setSize(snapshot.getSize());
- _volsDao.update(volume.getId(), volume);
- return snapshotInfo;
- }
- return null;
- }
-
- @Override
- @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_POLICY_UPDATE, eventDescription = "updating snapshot policy", async = true)
- public SnapshotPolicy updateSnapshotPolicy(UpdateSnapshotPolicyCmd cmd) {
-
- Long id = cmd.getId();
- String customUUID = cmd.getCustomId();
- Boolean display = cmd.getDisplay();
-
- SnapshotPolicyVO policyVO = _snapshotPolicyDao.findById(id);
- if(display != null){
- boolean previousDisplay = policyVO.isDisplay();
- policyVO.setDisplay(display);
- _snapSchedMgr.scheduleOrCancelNextSnapshotJobOnDisplayChange(policyVO, previousDisplay);
- }
-
- if(customUUID != null)
- policyVO.setUuid(customUUID);
-
- _snapshotPolicyDao.update(id, policyVO);
-
- return policyVO;
-
- }
-
- @Override
- @DB
- @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "creating snapshot", async = true)
- public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner) {
- VolumeInfo volume = volFactory.getVolume(volumeId);
- if (volume == null) {
- throw new InvalidParameterValueException("No such volume exist");
- }
-
- if (volume.getState() != Volume.State.Ready) {
- throw new InvalidParameterValueException("Volume is not in ready state");
- }
-
-
- // does the caller have the authority to act on this volume
- _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
-
- SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
- if(snapshot == null)
- {
- s_logger.debug("Failed to create snapshot");
- throw new CloudRuntimeException("Failed to create snapshot");
- }
- try {
- postCreateSnapshot(volumeId, snapshot.getId(), policyId);
- //Check if the snapshot was removed while backingUp. If yes, do not log snapshot create usage event
- SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId());
- if (freshSnapshot != null) {
- UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(),
- null, null, volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid());
- }
- _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot);
-
- } catch (Exception e) {
- s_logger.debug("Failed to create snapshot", e);
- throw new CloudRuntimeException("Failed to create snapshot", e);
- }
-
- return snapshot;
- }
-
- @Override
- public Snapshot backupSnapshot(Long snapshotId) {
- SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image);
- if (snapshot != null) {
- throw new CloudRuntimeException("Already in the backup snapshot:" + snapshotId);
- }
-
- return snapshotSrv.backupSnapshot(snapshot);
- }
-
- @Override
- public Snapshot backupSnapshotFromVmSnapshot(Long snapshotId, Long vmId, Long volumeId, Long vmSnapshotId) {
- VMInstanceVO vm = _vmDao.findById(vmId);
- if (vm == null) {
- throw new InvalidParameterValueException("Creating snapshot failed due to vm:" + vmId + " doesn't exist");
- }
- if (! HypervisorType.KVM.equals(vm.getHypervisorType())) {
- throw new InvalidParameterValueException("Unsupported hypervisor type " + vm.getHypervisorType() + ". This supports KVM only");
- }
-
- VMSnapshotVO vmSnapshot = _vmSnapshotDao.findById(vmSnapshotId);
- if (vmSnapshot == null) {
- throw new InvalidParameterValueException("Creating snapshot failed due to vmSnapshot:" + vmSnapshotId + " doesn't exist");
- }
- // check vmsnapshot permissions
- Account caller = CallContext.current().getCallingAccount();
- _accountMgr.checkAccess(caller, null, true, vmSnapshot);
-
- SnapshotVO snapshot = _snapshotDao.findById(snapshotId);
- if (snapshot == null) {
- throw new InvalidParameterValueException("Creating snapshot failed due to snapshot:" + snapshotId + " doesn't exist");
- }
-
- VolumeInfo volume = volFactory.getVolume(volumeId);
- if (volume == null) {
- throw new InvalidParameterValueException("Creating snapshot failed due to volume:" + volumeId + " doesn't exist");
- }
-
- if (volume.getState() != Volume.State.Ready) {
- throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
- }
-
- DataStore store = volume.getDataStore();
- SnapshotDataStoreVO parentSnapshotDataStoreVO = _snapshotStoreDao.findParent(store.getRole(), store.getId(), volumeId);
- if (parentSnapshotDataStoreVO != null) {
- //Double check the snapshot is removed or not
- SnapshotVO parentSnap = _snapshotDao.findById(parentSnapshotDataStoreVO.getSnapshotId());
- if (parentSnap != null && parentSnapshotDataStoreVO.getInstallPath() != null && parentSnapshotDataStoreVO.getInstallPath().equals(vmSnapshot.getName())) {
- throw new InvalidParameterValueException("Creating snapshot failed due to snapshot : " + parentSnap.getUuid() + " is created from the same vm snapshot");
- }
- }
- SnapshotInfo snapshotInfo = this.snapshotFactory.getSnapshot(snapshotId, store);
- snapshotInfo = (SnapshotInfo) store.create(snapshotInfo);
- SnapshotDataStoreVO snapshotOnPrimaryStore = this._snapshotStoreDao.findBySnapshot(snapshot.getId(), store.getRole());
- snapshotOnPrimaryStore.setState(ObjectInDataStoreStateMachine.State.Ready);
- snapshotOnPrimaryStore.setInstallPath(vmSnapshot.getName());
- _snapshotStoreDao.update(snapshotOnPrimaryStore.getId(), snapshotOnPrimaryStore);
- snapshot.setState(Snapshot.State.CreatedOnPrimary);
- _snapshotDao.update(snapshot.getId(), snapshot);
-
- snapshotInfo = this.snapshotFactory.getSnapshot(snapshotId, store);
-
- Long snapshotOwnerId = vm.getAccountId();
-
- try {
- SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.BACKUP);
- if (snapshotStrategy == null) {
- throw new CloudRuntimeException("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'");
- }
- snapshotInfo = snapshotStrategy.backupSnapshot(snapshotInfo);
-
- } catch(Exception e) {
- s_logger.debug("Failed to backup snapshot from vm snapshot", e);
- _resourceLimitMgr.decrementResourceCount(snapshotOwnerId, ResourceType.snapshot);
- _resourceLimitMgr.decrementResourceCount(snapshotOwnerId, ResourceType.secondary_storage, new Long(volume.getSize()));
- throw new CloudRuntimeException("Failed to backup snapshot from vm snapshot", e);
- }
- return snapshotInfo;
- }
-
- @Override
- public SnapshotVO getParentSnapshot(VolumeInfo volume) {
- long preId = _snapshotDao.getLastSnapshot(volume.getId(), DataStoreRole.Primary);
-
- SnapshotVO preSnapshotVO = null;
- if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) {
- preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId);
- }
-
- return preSnapshotVO;
- }
-
- private Long getSnapshotUserId() {
- Long userId = CallContext.current().getCallingUserId();
- if (userId == null) {
- return User.UID_SYSTEM;
- }
- return userId;
- }
-
- private void postCreateSnapshot(Long volumeId, Long snapshotId, Long policyId) {
- Long userId = getSnapshotUserId();
- SnapshotVO snapshot = _snapshotDao.findById(snapshotId);
- if (policyId != Snapshot.MANUAL_POLICY_ID) {
- SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, true);
- if (snapshotSchedule !=null) {
- snapshotSchedule.setSnapshotId(snapshotId);
- _snapshotScheduleDao.update(snapshotSchedule.getId(), snapshotSchedule);
- }
- }
-
- if (snapshot != null && snapshot.isRecursive()) {
- postCreateRecurringSnapshotForPolicy(userId, volumeId, snapshotId, policyId);
- }
- }
-
- private void postCreateRecurringSnapshotForPolicy(long userId, long volumeId, long snapshotId, long policyId) {
- // Use count query
- SnapshotVO spstVO = _snapshotDao.findById(snapshotId);
- Type type = spstVO.getRecurringType();
- int maxSnaps = type.getMax();
-
- List snaps = listSnapsforVolumeTypeNotDestroyed(volumeId, type);
- SnapshotPolicyVO policy = _snapshotPolicyDao.findById(policyId);
- if (policy != null && policy.getMaxSnaps() < maxSnaps) {
- maxSnaps = policy.getMaxSnaps();
- }
- while (snaps.size() > maxSnaps && snaps.size() > 1) {
- SnapshotVO oldestSnapshot = snaps.get(0);
- long oldSnapId = oldestSnapshot.getId();
- if (policy != null) {
- s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". Deleting oldest snapshot: " + oldSnapId);
- }
- if (deleteSnapshot(oldSnapId)) {
- //log Snapshot delete event
- ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE,
- "Successfully deleted oldest snapshot: " + oldSnapId, 0);
- }
- snaps.remove(oldestSnapshot);
- }
- }
-
- @Override
- @DB
- @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_DELETE, eventDescription = "deleting snapshot", async = true)
- public boolean deleteSnapshot(long snapshotId) {
- Account caller = CallContext.current().getCallingAccount();
-
- // Verify parameters
- SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId);
-
- if (snapshotCheck == null) {
- throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId);
- }
-
- if (snapshotCheck.getState() == Snapshot.State.Destroyed) {
- throw new InvalidParameterValueException("Snapshot with id: " + snapshotId + " is already destroyed");
- }
-
- _accountMgr.checkAccess(caller, null, true, snapshotCheck);
-
- SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, SnapshotOperation.DELETE);
-
- if (snapshotStrategy == null) {
- s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshotId + "'");
-
- return false;
- }
-
- DataStoreRole dataStoreRole = getDataStoreRole(snapshotCheck, _snapshotStoreDao, dataStoreMgr);
-
- SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole);
-
- try {
- boolean result = snapshotStrategy.deleteSnapshot(snapshotId);
-
- if (result) {
- if (snapshotCheck.getState() == Snapshot.State.BackedUp) {
- UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), snapshotCheck.getDataCenterId(), snapshotId,
- snapshotCheck.getName(), null, null, 0L, snapshotCheck.getClass().getName(), snapshotCheck.getUuid());
- }
-
- if (snapshotCheck.getState() != Snapshot.State.Error && snapshotCheck.getState() != Snapshot.State.Destroyed) {
- _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.snapshot);
- }
-
- if (snapshotCheck.getState() == Snapshot.State.BackedUp) {
- if (snapshotStoreRef != null) {
- _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize()));
- }
- }
- }
-
- return result;
- } catch (Exception e) {
- s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString());
-
- throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString());
- }
- }
-
- @Override
- public String getSecondaryStorageURL(SnapshotVO snapshot) {
- SnapshotDataStoreVO snapshotStore = _snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image);
- if (snapshotStore != null) {
- DataStore store = dataStoreMgr.getDataStore(snapshotStore.getDataStoreId(), DataStoreRole.Image);
- if (store != null) {
- return store.getUri();
- }
- }
- throw new CloudRuntimeException("Can not find secondary storage hosting the snapshot");
- }
-
- @Override
- public Pair, Integer> listSnapshots(ListSnapshotsCmd cmd) {
- Long volumeId = cmd.getVolumeId();
- String name = cmd.getSnapshotName();
- Long id = cmd.getId();
- String keyword = cmd.getKeyword();
- String snapshotTypeStr = cmd.getSnapshotType();
- String intervalTypeStr = cmd.getIntervalType();
- Map tags = cmd.getTags();
- Long zoneId = cmd.getZoneId();
- Account caller = CallContext.current().getCallingAccount();
- List permittedAccounts = new ArrayList();
-
- // Verify parameters
- if (volumeId != null) {
- VolumeVO volume = _volsDao.findById(volumeId);
- if (volume != null) {
- _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
- }
- }
-
- List ids = getIdsListFromCmd(cmd.getId(), cmd.getIds());
-
- Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null);
- _accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false);
- Long domainId = domainIdRecursiveListProject.first();
- Boolean isRecursive = domainIdRecursiveListProject.second();
- ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third();
-
- Filter searchFilter = new Filter(SnapshotVO.class, "created", false, cmd.getStartIndex(), cmd.getPageSizeVal());
- SearchBuilder sb = _snapshotDao.createSearchBuilder();
- _accountMgr.buildACLSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria);
-
- sb.and("statusNEQ", sb.entity().getState(), SearchCriteria.Op.NEQ); //exclude those Destroyed snapshot, not showing on UI
- sb.and("volumeId", sb.entity().getVolumeId(), SearchCriteria.Op.EQ);
- sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE);
- sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
- sb.and("idIN", sb.entity().getId(), SearchCriteria.Op.IN);
- sb.and("snapshotTypeEQ", sb.entity().getsnapshotType(), SearchCriteria.Op.IN);
- sb.and("snapshotTypeNEQ", sb.entity().getsnapshotType(), SearchCriteria.Op.NEQ);
- sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ);
-
- if (tags != null && !tags.isEmpty()) {
- SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder();
- for (int count = 0; count < tags.size(); count++) {
- tagSearch.or().op("key" + String.valueOf(count), tagSearch.entity().getKey(), SearchCriteria.Op.EQ);
- tagSearch.and("value" + String.valueOf(count), tagSearch.entity().getValue(), SearchCriteria.Op.EQ);
- tagSearch.cp();
- }
- tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ);
- sb.groupBy(sb.entity().getId());
- sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER);
- }
-
- SearchCriteria sc = sb.create();
- _accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria);
-
- sc.setParameters("statusNEQ", Snapshot.State.Destroyed);
-
- if (volumeId != null) {
- sc.setParameters("volumeId", volumeId);
- }
-
- if (tags != null && !tags.isEmpty()) {
- int count = 0;
- sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.Snapshot.toString());
- for (String key : tags.keySet()) {
- sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key);
- sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key));
- count++;
- }
- }
-
- if (zoneId != null) {
- sc.setParameters("dataCenterId", zoneId);
- }
-
- setIdsListToSearchCriteria(sc, ids);
-
- if (name != null) {
- sc.setParameters("name", "%" + name + "%");
- }
-
- if (id != null) {
- sc.setParameters("id", id);
- }
-
- if (keyword != null) {
- SearchCriteria ssc = _snapshotDao.createSearchCriteria();
- ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%");
- sc.addAnd("name", SearchCriteria.Op.SC, ssc);
- }
-
- if (snapshotTypeStr != null) {
- Type snapshotType = SnapshotVO.getSnapshotType(snapshotTypeStr);
- if (snapshotType == null) {
- throw new InvalidParameterValueException("Unsupported snapshot type " + snapshotTypeStr);
- }
- if (snapshotType == Type.RECURRING) {
- sc.setParameters("snapshotTypeEQ", Type.HOURLY.ordinal(), Type.DAILY.ordinal(), Type.WEEKLY.ordinal(), Type.MONTHLY.ordinal());
- } else {
- sc.setParameters("snapshotTypeEQ", snapshotType.ordinal());
- }
- } else if (intervalTypeStr != null && volumeId != null) {
- Type type = SnapshotVO.getSnapshotType(intervalTypeStr);
- if (type == null) {
- throw new InvalidParameterValueException("Unsupported snapstho interval type " + intervalTypeStr);
- }
- sc.setParameters("snapshotTypeEQ", type.ordinal());
- } else {
- // Show only MANUAL and RECURRING snapshot types
- sc.setParameters("snapshotTypeNEQ", Snapshot.Type.TEMPLATE.ordinal());
- }
-
- Pair, Integer> result = _snapshotDao.searchAndCount(sc, searchFilter);
- return new Pair, Integer>(result.first(), result.second());
- }
-
- @Override
- public boolean deleteSnapshotDirsForAccount(long accountId) {
-
- List volumes = _volsDao.findByAccount(accountId);
- // The above call will list only non-destroyed volumes.
- // So call this method before marking the volumes as destroyed.
- // i.e Call them before the VMs for those volumes are destroyed.
- boolean success = true;
- for (VolumeVO volume : volumes) {
- if (volume.getPoolId() == null) {
- continue;
- }
- Long volumeId = volume.getId();
- Long dcId = volume.getDataCenterId();
- if (_snapshotDao.listByVolumeIdIncludingRemoved(volumeId).isEmpty()) {
- // This volume doesn't have any snapshots. Nothing do delete.
- continue;
- }
- List ssHosts = dataStoreMgr.getImageStoresByScope(new ZoneScope(dcId));
- for (DataStore ssHost : ssHosts) {
- String snapshotDir = TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR + "/" + accountId + "/" + volumeId;
- DeleteSnapshotsDirCommand cmd = new DeleteSnapshotsDirCommand(ssHost.getTO(), snapshotDir);
- EndPoint ep = _epSelector.select(ssHost);
- Answer answer = null;
- if (ep == null) {
- String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
- s_logger.error(errMsg);
- answer = new Answer(cmd, false, errMsg);
- } else {
- answer = ep.sendMessage(cmd);
- }
- if ((answer != null) && answer.getResult()) {
- s_logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId);
- } else {
- success = false;
- if (answer != null) {
- s_logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri());
- s_logger.error(answer.getDetails());
- }
- }
- }
-
- // Either way delete the snapshots for this volume.
- List snapshots = listSnapsforVolume(volumeId);
- for (SnapshotVO snapshot : snapshots) {
- SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE);
- if (snapshotStrategy == null) {
- s_logger.error("Unable to find snaphot strategy to handle snapshot with id '" + snapshot.getId() + "'");
- continue;
- }
- SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image);
-
- if (snapshotStrategy.deleteSnapshot(snapshot.getId())) {
- if (Type.MANUAL == snapshot.getRecurringType()) {
- _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.snapshot);
- if (snapshotStoreRef != null) {
- _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize()));
- }
- }
-
- // Log event after successful deletion
- UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshot.getAccountId(), volume.getDataCenterId(), snapshot.getId(),
- snapshot.getName(), null, null, volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid());
- }
- }
- }
-
- // Returns true if snapshotsDir has been deleted for all volumes.
- return success;
- }
-
- @Override
- @DB
- @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_POLICY_CREATE, eventDescription = "creating snapshot policy")
- public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policyOwner) {
- Long volumeId = cmd.getVolumeId();
- boolean display = cmd.isDisplay();
- SnapshotPolicyVO policy = null;
- VolumeVO volume = _volsDao.findById(cmd.getVolumeId());
- if (volume == null) {
- throw new InvalidParameterValueException("Failed to create snapshot policy, unable to find a volume with id " + volumeId);
- }
-
- _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
-
- // If display is false we don't actually schedule snapshots.
- if (volume.getState() != Volume.State.Ready && display) {
- throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() +
- ". Cannot take snapshot.");
- }
-
- if (volume.getTemplateId() != null) {
- VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
- if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) {
- throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported");
- }
- }
-
- AccountVO owner = _accountDao.findById(volume.getAccountId());
- Long instanceId = volume.getInstanceId();
- if (instanceId != null) {
- // It is not detached, but attached to a VM
- if (_vmDao.findById(instanceId) == null) {
- // It is not a UserVM but a SystemVM or DomR
- throw new InvalidParameterValueException("Failed to create snapshot policy, snapshots of volumes attached to System or router VM are not allowed");
- }
- }
- IntervalType intvType = DateUtil.IntervalType.getIntervalType(cmd.getIntervalType());
- if (intvType == null) {
- throw new InvalidParameterValueException("Unsupported interval type " + cmd.getIntervalType());
- }
- Type type = getSnapshotType(intvType);
-
- TimeZone timeZone = TimeZone.getTimeZone(cmd.getTimezone());
- String timezoneId = timeZone.getID();
- if (!timezoneId.equals(cmd.getTimezone())) {
- s_logger.warn("Using timezone: " + timezoneId + " for running this snapshot policy as an equivalent of " + cmd.getTimezone());
- }
- try {
- DateUtil.getNextRunTime(intvType, cmd.getSchedule(), timezoneId, null);
- } catch (Exception e) {
- throw new InvalidParameterValueException("Invalid schedule: " + cmd.getSchedule() + " for interval type: " + cmd.getIntervalType());
- }
-
- if (cmd.getMaxSnaps() <= 0) {
- throw new InvalidParameterValueException("maxSnaps should be greater than 0");
- }
-
- int intervalMaxSnaps = type.getMax();
- if (cmd.getMaxSnaps() > intervalMaxSnaps) {
- throw new InvalidParameterValueException("maxSnaps exceeds limit: " + intervalMaxSnaps + " for interval type: " + cmd.getIntervalType());
- }
-
- // Verify that max doesn't exceed domain and account snapshot limits in case display is on
- if(display){
- long accountLimit = _resourceLimitMgr.findCorrectResourceLimitForAccount(owner, ResourceType.snapshot);
- long domainLimit = _resourceLimitMgr.findCorrectResourceLimitForDomain(_domainMgr.getDomain(owner.getDomainId()), ResourceType.snapshot);
- int max = cmd.getMaxSnaps().intValue();
- if (!_accountMgr.isRootAdmin(owner.getId())&& ((accountLimit != -1 && max > accountLimit) || (domainLimit != -1 && max > domainLimit))) {
- String message = "domain/account";
- if (owner.getType() == Account.ACCOUNT_TYPE_PROJECT) {
- message = "domain/project";
- }
-
- throw new InvalidParameterValueException("Max number of snapshots shouldn't exceed the " + message + " level snapshot limit");
- }
- }
-
- final GlobalLock createSnapshotPolicyLock = GlobalLock.getInternLock("createSnapshotPolicy_" + volumeId);
- boolean isLockAcquired = createSnapshotPolicyLock.lock(5);
- if (isLockAcquired) {
- s_logger.debug("Acquired lock for creating snapshot policy of volume : " + volume.getName());
- try {
- policy = _snapshotPolicyDao.findOneByVolumeInterval(volumeId, intvType);
- if (policy == null) {
- policy = new SnapshotPolicyVO(volumeId, cmd.getSchedule(), timezoneId, intvType, cmd.getMaxSnaps(), display);
- policy = _snapshotPolicyDao.persist(policy);
- _snapSchedMgr.scheduleNextSnapshotJob(policy);
- } else {
- boolean previousDisplay = policy.isDisplay();
- policy.setSchedule(cmd.getSchedule());
- policy.setTimezone(timezoneId);
- policy.setInterval((short)intvType.ordinal());
- policy.setMaxSnaps(cmd.getMaxSnaps());
- policy.setActive(true);
- policy.setDisplay(display);
- _snapshotPolicyDao.update(policy.getId(), policy);
- _snapSchedMgr.scheduleOrCancelNextSnapshotJobOnDisplayChange(policy, previousDisplay);
- }
- } finally {
- createSnapshotPolicyLock.unlock();
- }
-
- // TODO - Make createSnapshotPolicy - BaseAsyncCreate and remove this.
- CallContext.current().putContextParameter(SnapshotPolicy.class, policy.getUuid());
- return policy;
- } else {
- s_logger.warn("Unable to acquire lock for creating snapshot policy of volume : " + volume.getName());
- return null;
- }
- }
-
- protected boolean deletePolicy(long userId, Long policyId) {
- SnapshotPolicyVO snapshotPolicy = _snapshotPolicyDao.findById(policyId);
- _snapSchedMgr.removeSchedule(snapshotPolicy.getVolumeId(), snapshotPolicy.getId());
- return _snapshotPolicyDao.remove(policyId);
- }
-
- @Override
- public Pair, Integer> listPoliciesforVolume(ListSnapshotPoliciesCmd cmd) {
- Long volumeId = cmd.getVolumeId();
- boolean display = cmd.isDisplay();
- Long id = cmd.getId();
- Pair, Integer> result = null;
- // TODO - Have a better way of doing this.
- if(id != null){
- result = _snapshotPolicyDao.listAndCountById(id, display, null);
- if(result != null && result.first() != null && !result.first().isEmpty()){
- SnapshotPolicyVO snapshotPolicy = result.first().get(0);
- volumeId = snapshotPolicy.getVolumeId();
- }
- }
- VolumeVO volume = _volsDao.findById(volumeId);
- if (volume == null) {
- throw new InvalidParameterValueException("Unable to find a volume with id " + volumeId);
- }
- _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
- if(result != null)
- return new Pair, Integer>(result.first(), result.second());
- result = _snapshotPolicyDao.listAndCountByVolumeId(volumeId, display);
- return new Pair, Integer>(result.first(), result.second());
- }
-
- private List listPoliciesforVolume(long volumeId) {
- return _snapshotPolicyDao.listByVolumeId(volumeId);
- }
-
- private List listSnapsforVolume(long volumeId) {
- return _snapshotDao.listByVolumeId(volumeId);
- }
-
- private List listSnapsforVolumeTypeNotDestroyed(long volumeId, Type type) {
- return _snapshotDao.listByVolumeIdTypeNotDestroyed(volumeId, type);
- }
-
- @Override
- public void deletePoliciesForVolume(Long volumeId) {
- List policyInstances = listPoliciesforVolume(volumeId);
- for (SnapshotPolicyVO policyInstance : policyInstances) {
- Long policyId = policyInstance.getId();
- deletePolicy(1L, policyId);
- }
- // We also want to delete the manual snapshots scheduled for this volume
- // We can only delete the schedules in the future, not the ones which are already executing.
- SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, Snapshot.MANUAL_POLICY_ID, false);
- if (snapshotSchedule != null) {
- _snapshotScheduleDao.expunge(snapshotSchedule.getId());
- }
- }
-
- @Override
- public List findRecurringSnapshotSchedule(ListRecurringSnapshotScheduleCmd cmd) {
- Long volumeId = cmd.getVolumeId();
- Long policyId = cmd.getSnapshotPolicyId();
- Account account = CallContext.current().getCallingAccount();
-
- // Verify parameters
- VolumeVO volume = _volsDao.findById(volumeId);
- if (volume == null) {
- throw new InvalidParameterValueException("Failed to list snapshot schedule, unable to find a volume with id " + volumeId);
- }
-
- if (account != null) {
- long volAcctId = volume.getAccountId();
- if (_accountMgr.isAdmin(account.getId())) {
- Account userAccount = _accountDao.findById(Long.valueOf(volAcctId));
- if (!_domainDao.isChildDomain(account.getDomainId(), userAccount.getDomainId())) {
- throw new PermissionDeniedException("Unable to list snapshot schedule for volume " + volumeId + ", permission denied.");
- }
- } else if (account.getId() != volAcctId) {
- throw new PermissionDeniedException("Unable to list snapshot schedule, account " + account.getAccountName() + " does not own volume id " + volAcctId);
- }
- }
-
- // List only future schedules, not past ones.
- List snapshotSchedules = new ArrayList();
- if (policyId == null) {
- List policyInstances = listPoliciesforVolume(volumeId);
- for (SnapshotPolicyVO policyInstance : policyInstances) {
- SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyInstance.getId(), false);
- snapshotSchedules.add(snapshotSchedule);
- }
- } else {
- snapshotSchedules.add(_snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, false));
- }
- return snapshotSchedules;
- }
-
- private Type getSnapshotType(Long policyId) {
- if (policyId.equals(Snapshot.MANUAL_POLICY_ID)) {
- return Type.MANUAL;
- } else {
- SnapshotPolicyVO spstPolicyVO = _snapshotPolicyDao.findById(policyId);
- IntervalType intvType = DateUtil.getIntervalType(spstPolicyVO.getInterval());
- return getSnapshotType(intvType);
- }
- }
-
- private Type getSnapshotType(IntervalType intvType) {
- if (intvType.equals(IntervalType.HOURLY)) {
- return Type.HOURLY;
- } else if (intvType.equals(IntervalType.DAILY)) {
- return Type.DAILY;
- } else if (intvType.equals(IntervalType.WEEKLY)) {
- return Type.WEEKLY;
- } else if (intvType.equals(IntervalType.MONTHLY)) {
- return Type.MONTHLY;
- }
- return null;
- }
-
- private boolean hostSupportSnapsthotForVolume(HostVO host, VolumeInfo volume) {
- if (host.getHypervisorType() != HypervisorType.KVM) {
- return true;
- }
-
- //Turn off snapshot by default for KVM if the volume attached to vm that is not in the Stopped/Destroyed state,
- //unless it is set in the global flag
- Long vmId = volume.getInstanceId();
- if (vmId != null) {
- VMInstanceVO vm = _vmDao.findById(vmId);
- if (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Destroyed) {
- boolean snapshotEnabled = Boolean.parseBoolean(_configDao.getValue("kvm.snapshot.enabled"));
- if (!snapshotEnabled) {
- s_logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm);
- return false;
- }
- }
- }
-
- // Determine host capabilities
- String caps = host.getCapabilities();
-
- if (caps != null) {
- String[] tokens = caps.split(",");
- for (String token : tokens) {
- if (token.contains("snapshot")) {
- return true;
- }
- }
- }
- return false;
- }
-
- private boolean supportedByHypervisor(VolumeInfo volume) {
- HypervisorType hypervisorType;
- StoragePoolVO storagePool = _storagePoolDao.findById(volume.getDataStore().getId());
- ScopeType scope = storagePool.getScope();
- if (scope.equals(ScopeType.ZONE)) {
- hypervisorType = storagePool.getHypervisor();
- } else {
- hypervisorType = volume.getHypervisorType();
- }
-
- if (hypervisorType.equals(HypervisorType.Ovm)) {
- throw new InvalidParameterValueException("Ovm won't support taking snapshot");
- }
-
- if (hypervisorType.equals(HypervisorType.KVM)) {
- List hosts = null;
- if (scope.equals(ScopeType.CLUSTER)) {
- ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId());
- hosts = _resourceMgr.listAllHostsInCluster(cluster.getId());
- } else if (scope.equals(ScopeType.ZONE)) {
- hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, volume.getDataCenterId());
- }
- if (hosts != null && !hosts.isEmpty()) {
- HostVO host = hosts.get(0);
- if (!hostSupportSnapsthotForVolume(host, volume)) {
- throw new CloudRuntimeException("KVM Snapshot is not supported: " + host.getId());
- }
- }
- }
-
- // if volume is attached to a vm in destroyed or expunging state; disallow
- if (volume.getInstanceId() != null) {
- UserVmVO userVm = _vmDao.findById(volume.getInstanceId());
- if (userVm != null) {
- if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) {
- throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() +
- " is in " + userVm.getState().toString() + " state");
- }
-
- if (userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) {
- List activeSnapshots =
- _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp);
- if (activeSnapshots.size() > 0) {
- throw new InvalidParameterValueException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later");
- }
- }
-
- List activeVMSnapshots =
- _vmSnapshotDao.listByInstanceId(userVm.getId(), VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging);
- if (activeVMSnapshots.size() > 0) {
- throw new CloudRuntimeException("There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later");
- }
- }
- }
-
- return true;
- }
-
- @Override
- @DB
- public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationException {
- CreateSnapshotPayload payload = (CreateSnapshotPayload)volume.getpayload();
-
- updateSnapshotPayload(volume.getPoolId(), payload);
-
- Long snapshotId = payload.getSnapshotId();
- Account snapshotOwner = payload.getAccount();
- SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, volume.getDataStore());
- snapshot.addPayload(payload);
- try {
- SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE);
-
- if (snapshotStrategy == null) {
- throw new CloudRuntimeException("Can't find snapshot strategy to deal with snapshot:" + snapshotId);
- }
-
- SnapshotInfo snapshotOnPrimary = snapshotStrategy.takeSnapshot(snapshot);
- boolean backupSnapToSecondary = BackupSnapshotAfterTakingSnapshot.value() == null ||
- BackupSnapshotAfterTakingSnapshot.value();
-
- if (backupSnapToSecondary) {
- backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary);
- } else {
- if(s_logger.isDebugEnabled()) {
- s_logger.debug("skipping backup of snapshot " + snapshotId + " to secondary due to configuration");
- }
- snapshotOnPrimary.markBackedUp();
- }
-
- try {
- postCreateSnapshot(volume.getId(), snapshotId, payload.getSnapshotPolicyId());
-
- DataStoreRole dataStoreRole = getDataStoreRole(snapshot, _snapshotStoreDao, dataStoreMgr);
-
- SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole);
- if(snapshotStoreRef == null) {
- // The snapshot was not backed up to secondary. Find the snap on primary
- snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary);
- if(snapshotStoreRef == null) {
- throw new CloudRuntimeException("Could not find snapshot");
- }
- }
- UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(),
- null, null, snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid());
-
- // Correct the resource count of snapshot in case of delta snapshots.
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize()));
- } catch (Exception e) {
- s_logger.debug("post process snapshot failed", e);
- }
- } catch (CloudRuntimeException cre) {
- if(s_logger.isDebugEnabled()) {
- s_logger.debug("Failed to create snapshot" + cre.getLocalizedMessage());
- }
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot);
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize()));
- throw cre;
- } catch (Exception e) {
- if(s_logger.isDebugEnabled()) {
- s_logger.debug("Failed to create snapshot", e);
- }
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot);
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize()));
- throw new CloudRuntimeException("Failed to create snapshot", e);
- }
- return snapshot;
- }
-
- protected void backupSnapshotToSecondary(boolean asyncBackup, SnapshotStrategy snapshotStrategy, SnapshotInfo snapshotOnPrimary) {
- if (asyncBackup) {
- backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshotOnPrimary, snapshotBackupRetries - 1, snapshotStrategy), 0, TimeUnit.SECONDS);
- } else {
- SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshotOnPrimary);
- if (backupedSnapshot != null) {
- snapshotStrategy.postSnapshotCreation(snapshotOnPrimary);
- }
- }
- }
-
- protected class BackupSnapshotTask extends ManagedContextRunnable {
- SnapshotInfo snapshot;
- int attempts;
- SnapshotStrategy snapshotStrategy;
-
- public BackupSnapshotTask(SnapshotInfo snap, int maxRetries, SnapshotStrategy strategy) {
- snapshot = snap;
- attempts = maxRetries;
- snapshotStrategy = strategy;
- }
-
- @Override
- protected void runInContext() {
- try {
- s_logger.debug("Value of attempts is " + (snapshotBackupRetries-attempts));
-
- SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshot);
-
- if (backupedSnapshot != null) {
- snapshotStrategy.postSnapshotCreation(snapshot);
- }
- } catch (final Exception e) {
- if (attempts >= 0) {
- s_logger.debug("Backing up of snapshot failed, for snapshot with ID "+snapshot.getSnapshotId()+", left with "+attempts+" more attempts");
- backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy), snapshotBackupRetryInterval, TimeUnit.SECONDS);
- } else {
- s_logger.debug("Done with "+snapshotBackupRetries+" attempts in backing up of snapshot with ID "+snapshot.getSnapshotId());
- snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot);
- }
- }
- }
- }
-
- private void updateSnapshotPayload(long storagePoolId, CreateSnapshotPayload payload) {
- StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
-
- if (storagePoolVO.isManaged()) {
- Snapshot.LocationType locationType = payload.getLocationType();
-
- if (locationType == null) {
- payload.setLocationType(Snapshot.LocationType.PRIMARY);
- }
- }
- else {
- payload.setLocationType(null);
- }
- }
-
- private static DataStoreRole getDataStoreRole(Snapshot snapshot, SnapshotDataStoreDao snapshotStoreDao, DataStoreManager dataStoreMgr) {
- SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);
-
- if (snapshotStore == null) {
- return DataStoreRole.Image;
- }
-
- long storagePoolId = snapshotStore.getDataStoreId();
- DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
-
- Map mapCapabilities = dataStore.getDriver().getCapabilities();
-
- if (mapCapabilities != null) {
- String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
- Boolean supportsStorageSystemSnapshots = new Boolean(value);
-
- if (supportsStorageSystemSnapshots) {
- return DataStoreRole.Primary;
- }
- }
-
- return DataStoreRole.Image;
- }
-
- @Override
- public boolean configure(String name, Map params) throws ConfigurationException {
-
- String value = _configDao.getValue(Config.BackupSnapshotWait.toString());
-
- Type.HOURLY.setMax(SnapshotHourlyMax.value());
- Type.DAILY.setMax(SnapshotDailyMax.value());
- Type.WEEKLY.setMax(SnapshotWeeklyMax.value());
- Type.MONTHLY.setMax(SnapshotMonthlyMax.value());
- _totalRetries = NumbersUtil.parseInt(_configDao.getValue("total.retries"), 4);
- _pauseInterval = 2 * NumbersUtil.parseInt(_configDao.getValue("ping.interval"), 60);
-
- snapshotBackupRetries = BackupRetryAttempts.value();
- snapshotBackupRetryInterval = BackupRetryInterval.value();
- backupSnapshotExecutor = Executors.newScheduledThreadPool(10, new NamedThreadFactory("BackupSnapshotTask"));
- s_logger.info("Snapshot Manager is configured.");
-
- return true;
- }
-
- @Override
- public boolean start() {
- //destroy snapshots in destroying state
- List snapshots = _snapshotDao.listAllByStatus(Snapshot.State.Destroying);
- for (SnapshotVO snapshotVO : snapshots) {
- try {
- if (!deleteSnapshot(snapshotVO.getId())) {
- s_logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid());
- }
- } catch (Exception e) {
- s_logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid());
- }
- }
- return true;
- }
-
- @Override
- public boolean stop() {
- backupSnapshotExecutor.shutdown();
- return true;
- }
-
- @Override
- public boolean deleteSnapshotPolicies(DeleteSnapshotPoliciesCmd cmd) {
- Long policyId = cmd.getId();
- List policyIds = cmd.getIds();
- Long userId = getSnapshotUserId();
-
- if ((policyId == null) && (policyIds == null)) {
- throw new InvalidParameterValueException("No policy id (or list of ids) specified.");
- }
-
- if (policyIds == null) {
- policyIds = new ArrayList();
- policyIds.add(policyId);
- } else if (policyIds.size() <= 0) {
- // Not even sure how this is even possible
- throw new InvalidParameterValueException("There are no policy ids");
- }
-
- for (Long policy : policyIds) {
- SnapshotPolicyVO snapshotPolicyVO = _snapshotPolicyDao.findById(policy);
- if (snapshotPolicyVO == null) {
- throw new InvalidParameterValueException("Policy id given: " + policy + " does not exist");
- }
- VolumeVO volume = _volsDao.findById(snapshotPolicyVO.getVolumeId());
- if (volume == null) {
- throw new InvalidParameterValueException("Policy id given: " + policy + " does not belong to a valid volume");
- }
-
- _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
- }
-
- boolean success = true;
-
- if (policyIds.contains(Snapshot.MANUAL_POLICY_ID)) {
- throw new InvalidParameterValueException("Invalid Policy id given: " + Snapshot.MANUAL_POLICY_ID);
- }
-
- for (Long pId : policyIds) {
- if (!deletePolicy(userId, pId)) {
- success = false;
- s_logger.warn("Failed to delete snapshot policy with Id: " + policyId);
- return success;
- }
- }
-
- return success;
- }
-
- @Override
- public boolean canOperateOnVolume(Volume volume) {
- List snapshots = _snapshotDao.listByStatus(volume.getId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp);
- if (snapshots.size() > 0) {
- return false;
- }
- return true;
- }
-
- @Override
- public void cleanupSnapshotsByVolume(Long volumeId) {
- List infos = snapshotFactory.getSnapshots(volumeId, DataStoreRole.Primary);
- for(SnapshotInfo info: infos) {
- try {
- if(info != null) {
- snapshotSrv.deleteSnapshot(info);
- }
- } catch(CloudRuntimeException e) {
- String msg = "Cleanup of Snapshot with uuid " + info.getUuid() + " in primary storage is failed. Ignoring";
- s_logger.warn(msg);
- }
- }
- }
-
- @Override
- public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException {
- Account caller = CallContext.current().getCallingAccount();
- VolumeInfo volume = volFactory.getVolume(volumeId);
- supportedByHypervisor(volume);
-
- // Verify permissions
- _accountMgr.checkAccess(caller, null, true, volume);
- Type snapshotType = getSnapshotType(policyId);
- Account owner = _accountMgr.getAccount(volume.getAccountId());
-
- try {
- _resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot);
- _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue());
- } catch (ResourceAllocationException e) {
- if (snapshotType != Type.MANUAL) {
- String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots";
- s_logger.warn(msg);
- _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg,
- "Snapshot resource limit exceeded for account id : " + owner.getId() +
- ". Failed to create recurring snapshots; please use updateResourceLimit to increase the limit");
- }
- throw e;
- }
-
- // Determine the name for this snapshot
- // Snapshot Name: VMInstancename + volumeName + timeString
- String timeString = DateUtil.getDateDisplayString(DateUtil.GMT_TIMEZONE, new Date(), DateUtil.YYYYMMDD_FORMAT);
-
- VMInstanceVO vmInstance = _vmDao.findById(volume.getInstanceId());
- String vmDisplayName = "detached";
- if (vmInstance != null) {
- vmDisplayName = vmInstance.getHostName();
- }
- if (snapshotName == null)
- snapshotName = vmDisplayName + "_" + volume.getName() + "_" + timeString;
-
- HypervisorType hypervisorType = HypervisorType.None;
- StoragePoolVO storagePool = _storagePoolDao.findById(volume.getDataStore().getId());
- if (storagePool.getScope() == ScopeType.ZONE) {
- hypervisorType = storagePool.getHypervisor();
-
- // at the time being, managed storage only supports XenServer, ESX(i), and KVM (i.e. not Hyper-V), so the VHD file type can be mapped to XenServer
- if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType)) {
- if (ImageFormat.VHD.equals(volume.getFormat())) {
- hypervisorType = HypervisorType.XenServer;
- }
- else if (ImageFormat.OVA.equals(volume.getFormat())) {
- hypervisorType = HypervisorType.VMware;
- }
- else if (ImageFormat.QCOW2.equals(volume.getFormat())) {
- hypervisorType = HypervisorType.KVM;
- }
- }
- } else {
- hypervisorType = volume.getHypervisorType();
- }
-
- SnapshotVO snapshotVO =
- new SnapshotVO(volume.getDataCenterId(), volume.getAccountId(), volume.getDomainId(), volume.getId(), volume.getDiskOfferingId(), snapshotName,
- (short)snapshotType.ordinal(), snapshotType.name(), volume.getSize(), volume.getMinIops(), volume.getMaxIops(), hypervisorType, locationType);
-
- SnapshotVO snapshot = _snapshotDao.persist(snapshotVO);
- if (snapshot == null) {
- throw new CloudRuntimeException("Failed to create snapshot for volume: " + volume.getId());
- }
- _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
- _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, new Long(volume.getSize()));
- return snapshot;
- }
-}
diff --git a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java
index c67611a7ccb..8252a2a5a18 100644
--- a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java
+++ b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java
@@ -77,6 +77,7 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase domainVln = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanRange.getId());
+ List<DomainVlanMapVO> domainVlan = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanRange.getId());
// Check for domain wide pool. It will have an entry for domain_vlan_map.
- if (domainVln != null && !domainVln.isEmpty()) {
+ if (domainVlan != null && !domainVlan.isEmpty()) {
isDomainSpecific = true;
}
@@ -4052,10 +4052,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
forSystemVms = ip.isForSystemVms();
final Long allocatedToAccountId = ip.getAllocatedToAccountId();
if (allocatedToAccountId != null) {
- final Account accountAllocatedTo = _accountMgr.getActiveAccountById(allocatedToAccountId);
- if (!accountAllocatedTo.getAccountName().equalsIgnoreCase(accountName)) {
+ if (vlanOwner != null && allocatedToAccountId != vlanOwner.getId()) {
throw new InvalidParameterValueException(ip.getAddress() + " Public IP address in range is allocated to another account ");
}
+ final Account accountAllocatedTo = _accountMgr.getActiveAccountById(allocatedToAccountId);
if (vlanOwner == null && domain != null && domain.getId() != accountAllocatedTo.getDomainId()){
throw new InvalidParameterValueException(ip.getAddress()
+ " Public IP address in range is allocated to another domain/account ");
@@ -4116,9 +4116,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
}
boolean isDomainSpecific = false;
- final List<DomainVlanMapVO> domainVln = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanDbId);
+ final List<DomainVlanMapVO> domainVlan = _domainVlanMapDao.listDomainVlanMapsByVlan(vlanDbId);
// Check for domain wide pool. It will have an entry for domain_vlan_map.
- if (domainVln != null && !domainVln.isEmpty()) {
+ if (domainVlan != null && !domainVlan.isEmpty()) {
isDomainSpecific = true;
}
@@ -4171,7 +4171,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
// decrement resource count for dedicated public ip's
_resourceLimitMgr.decrementResourceCount(acctVln.get(0).getAccountId(), ResourceType.public_ip, new Long(ips.size()));
return true;
- } else if (isDomainSpecific && _domainVlanMapDao.remove(domainVln.get(0).getId())) {
+ } else if (isDomainSpecific && _domainVlanMapDao.remove(domainVlan.get(0).getId())) {
s_logger.debug("Remove the vlan from domain_vlan_map successfully.");
return true;
} else {
diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java
index 0b8b40b1b9b..6c9bcace90f 100644
--- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java
+++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java
@@ -210,6 +210,12 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
return null;
}
+ // Set cluster GUID based on cluster ID if null
+ if (cluster.getGuid() == null) {
+ cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
+ _clusterDao.update(clusterId, cluster);
+ }
+
Map> resources = new HashMap>();
Map details = new HashMap();
if (!uri.getScheme().equals("http")) {
@@ -230,8 +236,9 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
if (existingHosts != null) {
for (HostVO existingHost : existingHosts) {
if (existingHost.getGuid().toLowerCase().startsWith(guid.toLowerCase())) {
- s_logger.debug("Skipping " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid());
- return null;
+ final String msg = "Skipping host " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid() + " with ID " + existingHost.getUuid();
+ s_logger.debug(msg);
+ throw new CloudRuntimeException(msg);
}
}
}
@@ -326,12 +333,6 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
details.put("guid", connectedHost.getGuid());
- // place a place holder guid derived from cluster ID
- if (cluster.getGuid() == null) {
- cluster.setGuid(UUID.nameUUIDFromBytes(String.valueOf(clusterId).getBytes()).toString());
- _clusterDao.update(clusterId, cluster);
- }
-
// save user name and password
_hostDao.loadDetails(connectedHost);
Map hostDetails = connectedHost.getDetails();
diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java
index 862ccfe69e6..b78dcfdbb5d 100644
--- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java
+++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java
@@ -895,6 +895,7 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ
@Override
public boolean release(final Network network, final NicProfile nic, final VirtualMachineProfile vm, final ReservationContext context) throws ConcurrentOperationException,
ResourceUnavailableException {
+ removeDhcpEntry(network, nic, vm);
return true;
}
@@ -946,6 +947,34 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ
return false;
}
+ @Override
+ public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vmProfile) throws ResourceUnavailableException {
+ boolean result = true;
+ if (canHandle(network, Service.Dhcp)) {
+ if (vmProfile.getType() != VirtualMachine.Type.User) {
+ return false;
+ }
+
+ final List<DomainRouterVO> routers = _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER);
+
+ if (CollectionUtils.isEmpty(routers)) {
+ throw new ResourceUnavailableException("Can't find at least one router!", DataCenter.class, network.getDataCenterId());
+ }
+
+ final DataCenterVO dcVO = _dcDao.findById(network.getDataCenterId());
+ final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO);
+
+ for (final DomainRouterVO domainRouterVO : routers) {
+ if (domainRouterVO.getState() != VirtualMachine.State.Running) {
+ continue;
+ }
+
+ result = result && networkTopology.removeDhcpEntry(network, nic, vmProfile, domainRouterVO);
+ }
+ }
+ return result;
+ }
+
@Override
public boolean removeDnsSupportForSubnet(Network network) throws ResourceUnavailableException {
// Ignore if virtual router is already dhcp provider
diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java
index 829e07dc697..9ea2b984264 100644
--- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java
+++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java
@@ -211,7 +211,7 @@ public class CommandSetupHelper {
cmds.addCommand("users", cmd);
}
- public void createDhcpEntryCommand(final VirtualRouter router, final UserVm vm, final NicVO nic, final Commands cmds) {
+ public void createDhcpEntryCommand(final VirtualRouter router, final UserVm vm, final NicVO nic, boolean remove, final Commands cmds) {
final DhcpEntryCommand dhcpCommand = new DhcpEntryCommand(nic.getMacAddress(), nic.getIPv4Address(), vm.getHostName(), nic.getIPv6Address(),
_networkModel.getExecuteInSeqNtwkElmtCmd());
@@ -229,6 +229,7 @@ public class CommandSetupHelper {
dhcpCommand.setDefaultDns(ipaddress);
dhcpCommand.setDuid(NetUtils.getDuidLL(nic.getMacAddress()));
dhcpCommand.setDefault(nic.isDefaultNic());
+ dhcpCommand.setRemove(remove);
dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId()));
dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName());
@@ -622,7 +623,7 @@ public class CommandSetupHelper {
final NicVO nic = _nicDao.findByNtwkIdAndInstanceId(guestNetworkId, vm.getId());
if (nic != null) {
s_logger.debug("Creating dhcp entry for vm " + vm + " on domR " + router + ".");
- createDhcpEntryCommand(router, vm, nic, cmds);
+ createDhcpEntryCommand(router, vm, nic, false, cmds);
}
}
}
diff --git a/server/src/main/java/com/cloud/network/rules/DhcpEntryRules.java b/server/src/main/java/com/cloud/network/rules/DhcpEntryRules.java
index c4a91f42686..530cf0aea87 100644
--- a/server/src/main/java/com/cloud/network/rules/DhcpEntryRules.java
+++ b/server/src/main/java/com/cloud/network/rules/DhcpEntryRules.java
@@ -36,6 +36,8 @@ public class DhcpEntryRules extends RuleApplier {
private final VirtualMachineProfile _profile;
private final DeployDestination _destination;
+ private boolean remove;
+
private NicVO _nicVo;
private UserVmVO _userVM;
@@ -77,4 +79,12 @@ public class DhcpEntryRules extends RuleApplier {
public UserVmVO getUserVM() {
return _userVM;
}
+
+ public boolean isRemove() {
+ return remove;
+ }
+
+ public void setRemove(boolean remove) {
+ this.remove = remove;
+ }
}
\ No newline at end of file
diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java
index b211cab27a2..2c5892ed70e 100644
--- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java
@@ -37,7 +37,6 @@ import javax.crypto.spec.SecretKeySpec;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import com.cloud.storage.ScopeType;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@ -66,7 +65,7 @@ import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd;
import org.apache.cloudstack.api.command.admin.config.ListHypervisorCapabilitiesCmd;
import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd;
import org.apache.cloudstack.api.command.admin.config.UpdateHypervisorCapabilitiesCmd;
-import org.apache.cloudstack.api.command.admin.direct.download.UploadTemplateDirectDownloadCertificate;
+import org.apache.cloudstack.api.command.admin.direct.download.UploadTemplateDirectDownloadCertificateCmd;
import org.apache.cloudstack.api.command.admin.domain.CreateDomainCmd;
import org.apache.cloudstack.api.command.admin.domain.DeleteDomainCmd;
import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd;
@@ -217,10 +216,10 @@ import org.apache.cloudstack.api.command.admin.usage.AddTrafficTypeCmd;
import org.apache.cloudstack.api.command.admin.usage.DeleteTrafficMonitorCmd;
import org.apache.cloudstack.api.command.admin.usage.DeleteTrafficTypeCmd;
import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd;
-import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficMonitorsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementorsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypesCmd;
+import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.ListUsageTypesCmd;
import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd;
import org.apache.cloudstack.api.command.admin.usage.UpdateTrafficTypeCmd;
@@ -338,6 +337,7 @@ import org.apache.cloudstack.api.command.user.iso.CopyIsoCmd;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
import org.apache.cloudstack.api.command.user.iso.DetachIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.ListIsoPermissionsCmd;
import org.apache.cloudstack.api.command.user.iso.ListIsosCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
@@ -634,6 +634,7 @@ import com.cloud.storage.GuestOSHypervisor;
import com.cloud.storage.GuestOSHypervisorVO;
import com.cloud.storage.GuestOSVO;
import com.cloud.storage.GuestOsCategory;
+import com.cloud.storage.ScopeType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
@@ -1848,6 +1849,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
final Object keyword = cmd.getKeyword();
final Long physicalNetworkId = cmd.getPhysicalNetworkId();
final Long associatedNetworkId = cmd.getAssociatedNetworkId();
+ final Long sourceNetworkId = cmd.getNetworkId();
final Long zone = cmd.getZoneId();
final String address = cmd.getIpAddress();
final Long vlan = cmd.getVlanId();
@@ -1893,7 +1895,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
sb.and("vlanDbId", sb.entity().getVlanId(), SearchCriteria.Op.EQ);
sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
sb.and("physicalNetworkId", sb.entity().getPhysicalNetworkId(), SearchCriteria.Op.EQ);
- sb.and("associatedNetworkIdEq", sb.entity().getAssociatedWithNetworkId(), SearchCriteria.Op.EQ);
+ sb.and("associatedNetworkId", sb.entity().getAssociatedWithNetworkId(), SearchCriteria.Op.EQ);
+ sb.and("sourceNetworkId", sb.entity().getSourceNetworkId(), SearchCriteria.Op.EQ);
sb.and("isSourceNat", sb.entity().isSourceNat(), SearchCriteria.Op.EQ);
sb.and("isStaticNat", sb.entity().isOneToOneNat(), SearchCriteria.Op.EQ);
sb.and("vpcId", sb.entity().getVpcId(), SearchCriteria.Op.EQ);
@@ -1991,7 +1994,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
if (associatedNetworkId != null) {
- sc.setParameters("associatedNetworkIdEq", associatedNetworkId);
+ sc.setParameters("associatedNetworkId", associatedNetworkId);
+ }
+
+ if (sourceNetworkId != null) {
+ sc.setParameters("sourceNetworkId", sourceNetworkId);
}
if (forDisplay != null) {
@@ -3068,8 +3075,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(ReleasePodIpCmdByAdmin.class);
cmdList.add(CreateManagementNetworkIpRangeCmd.class);
cmdList.add(DeleteManagementNetworkIpRangeCmd.class);
- cmdList.add(UploadTemplateDirectDownloadCertificate.class);
+ cmdList.add(UploadTemplateDirectDownloadCertificateCmd.class);
cmdList.add(ListMgmtsCmd.class);
+ cmdList.add(GetUploadParamsForIsoCmd.class);
// Out-of-band management APIs for admins
cmdList.add(EnableOutOfBandManagementForHostCmd.class);
diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java
index 10406b569db..2b39518f8b8 100755
--- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java
+++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java
@@ -25,17 +25,14 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import com.cloud.configuration.Resource;
-import com.cloud.event.EventTypes;
-import com.cloud.event.UsageEventUtils;
-import com.cloud.user.ResourceLimitService;
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
@@ -48,6 +45,8 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
import com.cloud.agent.Listener;
import com.cloud.agent.api.AgentControlAnswer;
@@ -56,6 +55,9 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.StartupCommand;
import com.cloud.alert.AlertManager;
+import com.cloud.configuration.Resource;
+import com.cloud.event.EventTypes;
+import com.cloud.event.UsageEventUtils;
import com.cloud.exception.ConnectionException;
import com.cloud.host.Host;
import com.cloud.host.Status;
@@ -65,6 +67,7 @@ import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.ResourceLimitService;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.db.Transaction;
@@ -102,6 +105,12 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
private AlertManager _alertMgr;
@Inject
private VMTemplateZoneDao _vmTemplateZoneDao;
+ @Inject
+ private DataStoreManager dataStoreManager;
+ @Inject
+ private TemplateDataFactory templateFactory;
+ @Inject
+ private TemplateService templateService;
private long _nodeId;
private ScheduledExecutorService _executor = null;
@@ -110,7 +119,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
static final ConfigKey<Integer> UploadMonitoringInterval = new ConfigKey<Integer>("Advanced", Integer.class, "upload.monitoring.interval", "60",
"Interval (in seconds) to check the status of volumes that are uploaded using HTTP POST request", true);
- static final ConfigKey<Integer> UploadOperationTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "upload.operation.timeout", "10",
+ static final ConfigKey<Integer> UploadOperationTimeout = new ConfigKey<Integer>("Advanced", Integer.class, "upload.operation.timeout", "60",
"Time (in minutes) to wait before abandoning volume upload using HTTP POST request", true);
@Override
@@ -395,6 +404,20 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
VMTemplateVO templateUpdate = _templateDao.createForUpdate();
templateUpdate.setSize(answer.getVirtualSize());
_templateDao.update(tmpTemplate.getId(), templateUpdate);
+ // For multi-disk OVA, check and create data disk templates
+ if (tmpTemplate.getFormat().equals(Storage.ImageFormat.OVA)) {
+ final DataStore store = dataStoreManager.getDataStore(templateDataStore.getDataStoreId(), templateDataStore.getDataStoreRole());
+ final TemplateInfo templateInfo = templateFactory.getTemplate(tmpTemplate.getId(), store);
+ if (!templateService.createOvaDataDiskTemplates(templateInfo)) {
+ tmpTemplateDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.ABANDONED);
+ tmpTemplateDataStore.setState(State.Failed);
+ stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao);
+ msg = "Multi-disk OVA template " + tmpTemplate.getUuid() + " failed to process data disks";
+ s_logger.error(msg);
+ sendAlert = true;
+ break;
+ }
+ }
stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationSucceeded, null, _templateDao);
_resourceLimitMgr.incrementResourceCount(template.getAccountId(), Resource.ResourceType.secondary_storage, answer.getVirtualSize());
//publish usage event
diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
index ae7df6556e3..79343ab4725 100644
--- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
@@ -2489,8 +2489,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public ConfigKey<?>[] getConfigKeys() {
- return new ConfigKey<?>[] { StorageCleanupInterval, StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled,
- KvmStorageOfflineMigrationWait, KvmStorageOnlineMigrationWait, MaxNumberOfManagedClusteredFileSystems, PRIMARY_STORAGE_DOWNLOAD_WAIT};
+ return new ConfigKey<?>[]{
+ StorageCleanupInterval,
+ StorageCleanupDelay,
+ StorageCleanupEnabled,
+ TemplateCleanupEnabled,
+ KvmStorageOfflineMigrationWait,
+ KvmStorageOnlineMigrationWait,
+ KvmAutoConvergence,
+ MaxNumberOfManagedClusteredFileSystems,
+ PRIMARY_STORAGE_DOWNLOAD_WAIT
+ };
}
@Override
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 0ff8effaf8b..07ba903020d 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -72,8 +72,6 @@ import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
@@ -120,6 +118,7 @@ import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotApiService;
@@ -254,7 +253,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@Inject
private StorageManager storageMgr;
@Inject
- private StoragePoolDetailsDao storagePoolDetailsDao;
+ private StoragePoolTagsDao storagePoolTagsDao;
@Inject
private StorageUtil storageUtil;
@@ -2076,11 +2075,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// OfflineVmwareMigration: check storage tags on disk(offering)s in comparison to destination storage pool
// OfflineVmwareMigration: if no match return a proper error now
DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId());
- if(diskOffering.equals(null)) {
- throw new CloudRuntimeException("volume '" + vol.getUuid() +"', has no diskoffering. Migration target cannot be checked.");
+ if (diskOffering.equals(null)) {
+ throw new CloudRuntimeException("volume '" + vol.getUuid() + "', has no diskoffering. Migration target cannot be checked.");
}
- if(! doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
- throw new CloudRuntimeException("Migration target has no matching tags for volume '" +vol.getName() + "(" + vol.getUuid() + ")'");
+ if (!doesTargetStorageSupportDiskOffering(destPool, diskOffering)) {
+ throw new CloudRuntimeException(String.format("Migration target pool [%s, tags:%s] has no matching tags for volume [%s, uuid:%s, tags:%s]", destPool.getName(),
+ getStoragePoolTags(destPool), vol.getName(), vol.getUuid(), diskOffering.getTags()));
}
if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
@@ -2275,15 +2275,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
* Retrieves the storage pool tags as a {@link String}. If the storage pool does not have tags we return a null value.
*/
protected String getStoragePoolTags(StoragePool destPool) {
- List<StoragePoolDetailVO> storagePoolDetails = storagePoolDetailsDao.listDetails(destPool.getId());
- if (CollectionUtils.isEmpty(storagePoolDetails)) {
+ List<String> destPoolTags = storagePoolTagsDao.getStoragePoolTags(destPool.getId());
+ if (CollectionUtils.isEmpty(destPoolTags)) {
+ if (CollectionUtils.isEmpty(destPoolTags)) {
return null;
}
- String storageTags = "";
- for (StoragePoolDetailVO storagePoolDetailVO : storagePoolDetails) {
- storageTags = storageTags + storagePoolDetailVO.getName() + ",";
- }
- return storageTags.substring(0, storageTags.length() - 1);
+ return StringUtils.join(destPoolTags, ",");
}
private Volume orchestrateMigrateVolume(VolumeVO volume, StoragePool destPool, boolean liveMigrateVolume, DiskOfferingVO newDiskOffering) {
diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java
index fd16a3cca78..dc71b365353 100644
--- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java
+++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java
@@ -297,7 +297,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu
tmpSnapshotScheduleVO = _snapshotScheduleDao.acquireInLockTable(snapshotScheId);
final Long eventId =
ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, volume.getAccountId(), EventTypes.EVENT_SNAPSHOT_CREATE, "creating snapshot for volume Id:" +
- volumeId, true, 0);
+ volume.getUuid(), true, 0);
final Map<String, String> params = new HashMap<String, String>();
params.put(ApiConstants.VOLUME_ID, "" + volumeId);
diff --git a/server/src/main/java/com/cloud/storage/upload/params/IsoUploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/IsoUploadParams.java
new file mode 100644
index 00000000000..7c2b5799e96
--- /dev/null
+++ b/server/src/main/java/com/cloud/storage/upload/params/IsoUploadParams.java
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage.upload.params;
+
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.Storage;
+
+public class IsoUploadParams extends UploadParamsBase {
+
+ public IsoUploadParams(long userId, String name, String displayText, Boolean isPublic, Boolean isFeatured,
+ Boolean isExtractable, Long osTypeId, Long zoneId, Boolean bootable, long ownerId) {
+ super(userId, name, displayText, isPublic, isFeatured, isExtractable, osTypeId, zoneId, bootable, ownerId);
+ setIso(true);
+ setBits(64);
+ setFormat(Storage.ImageFormat.ISO.toString());
+ setHypervisorType(Hypervisor.HypervisorType.None);
+ setRequiresHVM(true);
+ }
+}
diff --git a/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java
new file mode 100644
index 00000000000..9dede2ee5a3
--- /dev/null
+++ b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage.upload.params;
+
+import com.cloud.hypervisor.Hypervisor;
+
+import java.util.Map;
+
+public class TemplateUploadParams extends UploadParamsBase {
+
+ public TemplateUploadParams(long userId, String name, String displayText,
+ Integer bits, Boolean passwordEnabled, Boolean requiresHVM,
+ Boolean isPublic, Boolean featured,
+ Boolean isExtractable, String format, Long guestOSId,
+ Long zoneId, Hypervisor.HypervisorType hypervisorType, String chksum,
+ String templateTag, long templateOwnerId,
+ Map<String, String> details, Boolean sshkeyEnabled,
+ Boolean isDynamicallyScalable, Boolean isRoutingType) {
+ super(userId, name, displayText, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable,
+ format, guestOSId, zoneId, hypervisorType, chksum, templateTag, templateOwnerId, details,
+ sshkeyEnabled, isDynamicallyScalable, isRoutingType);
+ setBootable(true);
+ }
+}
diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java
new file mode 100644
index 00000000000..0d42b760b6d
--- /dev/null
+++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage.upload.params;
+
+import com.cloud.hypervisor.Hypervisor;
+
+import java.util.Map;
+
+public interface UploadParams {
+ boolean isIso();
+ long getUserId();
+ String getName();
+ String getDisplayText();
+ Integer getBits();
+ boolean isPasswordEnabled();
+ boolean requiresHVM();
+ String getUrl();
+ boolean isPublic();
+ boolean isFeatured();
+ boolean isExtractable();
+ String getFormat();
+ Long getGuestOSId();
+ Long getZoneId();
+ Hypervisor.HypervisorType getHypervisorType();
+ String getChecksum();
+ boolean isBootable();
+ String getTemplateTag();
+ long getTemplateOwnerId();
+ Map<String, String> getDetails();
+ boolean isSshKeyEnabled();
+ String getImageStoreUuid();
+ boolean isDynamicallyScalable();
+ boolean isRoutingType();
+ boolean isDirectDownload();
+}
diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java
new file mode 100644
index 00000000000..67b04f7b480
--- /dev/null
+++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java
@@ -0,0 +1,240 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage.upload.params;
+
+import com.cloud.hypervisor.Hypervisor;
+
+import java.util.Map;
+
+public abstract class UploadParamsBase implements UploadParams {
+
+ private boolean isIso;
+ private long userId;
+ private String name;
+ private String displayText;
+ private Integer bits;
+ private boolean passwordEnabled;
+ private boolean requiresHVM;
+ private boolean isPublic;
+ private boolean featured;
+ private boolean isExtractable;
+ private String format;
+ private Long guestOSId;
+ private Long zoneId;
+ private Hypervisor.HypervisorType hypervisorType;
+ private String checksum;
+ private boolean bootable;
+ private String templateTag;
+ private long templateOwnerId;
+ private Map details;
+ private boolean sshkeyEnabled;
+ private boolean isDynamicallyScalable;
+ private boolean isRoutingType;
+
+ UploadParamsBase(long userId, String name, String displayText,
+ Integer bits, boolean passwordEnabled, boolean requiresHVM,
+ boolean isPublic, boolean featured,
+ boolean isExtractable, String format, Long guestOSId,
+ Long zoneId, Hypervisor.HypervisorType hypervisorType, String checksum,
+ String templateTag, long templateOwnerId,
+ Map details, boolean sshkeyEnabled,
+ boolean isDynamicallyScalable, boolean isRoutingType) {
+ this.userId = userId;
+ this.name = name;
+ this.displayText = displayText;
+ this.bits = bits;
+ this.passwordEnabled = passwordEnabled;
+ this.requiresHVM = requiresHVM;
+ this.isPublic = isPublic;
+ this.featured = featured;
+ this.isExtractable = isExtractable;
+ this.format = format;
+ this.guestOSId = guestOSId;
+ this.zoneId = zoneId;
+ this.hypervisorType = hypervisorType;
+ this.checksum = checksum;
+ this.templateTag = templateTag;
+ this.templateOwnerId = templateOwnerId;
+ this.details = details;
+ this.sshkeyEnabled = sshkeyEnabled;
+ this.isDynamicallyScalable = isDynamicallyScalable;
+ this.isRoutingType = isRoutingType;
+ }
+
+ UploadParamsBase(long userId, String name, String displayText, boolean isPublic, boolean isFeatured,
+ boolean isExtractable, Long osTypeId, Long zoneId, boolean bootable, long ownerId) {
+ this.userId = userId;
+ this.name = name;
+ this.displayText = displayText;
+ this.isPublic = isPublic;
+ this.featured = isFeatured;
+ this.isExtractable = isExtractable;
+ this.guestOSId = osTypeId;
+ this.zoneId = zoneId;
+ this.bootable = bootable;
+ this.templateOwnerId = ownerId;
+ }
+
+ @Override
+ public boolean isIso() {
+ return isIso;
+ }
+
+ @Override
+ public long getUserId() {
+ return userId;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public String getDisplayText() {
+ return displayText;
+ }
+
+ @Override
+ public Integer getBits() {
+ return bits;
+ }
+
+ @Override
+ public boolean isPasswordEnabled() {
+ return passwordEnabled;
+ }
+
+ @Override
+ public boolean requiresHVM() {
+ return requiresHVM;
+ }
+
+ @Override
+ public String getUrl() {
+ return null;
+ }
+
+ @Override
+ public boolean isPublic() {
+ return isPublic;
+ }
+
+ @Override
+ public boolean isFeatured() {
+ return featured;
+ }
+
+ @Override
+ public boolean isExtractable() {
+ return isExtractable;
+ }
+
+ @Override
+ public String getFormat() {
+ return format;
+ }
+
+ @Override
+ public Long getGuestOSId() {
+ return guestOSId;
+ }
+
+ @Override
+ public Long getZoneId() {
+ return zoneId;
+ }
+
+ @Override
+ public Hypervisor.HypervisorType getHypervisorType() {
+ return hypervisorType;
+ }
+
+ @Override
+ public String getChecksum() {
+ return checksum;
+ }
+
+ @Override
+ public boolean isBootable() {
+ return bootable;
+ }
+
+ @Override
+ public String getTemplateTag() {
+ return templateTag;
+ }
+
+ @Override
+ public long getTemplateOwnerId() {
+ return templateOwnerId;
+ }
+
+ @Override
+ public Map getDetails() {
+ return details;
+ }
+
+ @Override
+ public boolean isSshKeyEnabled() {
+ return sshkeyEnabled;
+ }
+
+ @Override
+ public String getImageStoreUuid() {
+ return null;
+ }
+
+ @Override
+ public boolean isDynamicallyScalable() {
+ return isDynamicallyScalable;
+ }
+
+ @Override
+ public boolean isRoutingType() {
+ return isRoutingType;
+ }
+
+ @Override
+ public boolean isDirectDownload() {
+ return false;
+ }
+
+ void setIso(boolean iso) {
+ isIso = iso;
+ }
+
+ void setBootable(boolean bootable) {
+ this.bootable = bootable;
+ }
+
+ void setBits(Integer bits) {
+ this.bits = bits;
+ }
+
+ void setFormat(String format) {
+ this.format = format;
+ }
+
+ void setRequiresHVM(boolean requiresHVM) {
+ this.requiresHVM = requiresHVM;
+ }
+
+ void setHypervisorType(Hypervisor.HypervisorType hypervisorType) {
+ this.hypervisorType = hypervisorType;
+ }
+}
diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java
index 8aa21661675..e2db31a16c9 100644
--- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java
+++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java
@@ -35,6 +35,7 @@ import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionStatus;
import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer;
import org.apache.cloudstack.agent.directdownload.CheckUrlCommand;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
@@ -166,6 +167,15 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
return profile;
}
+ @Override
+ public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
+ TemplateProfile profile = super.prepare(cmd);
+
+ // Check that the resource limit for secondary storage won't be exceeded
+ _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(cmd.getEntityOwnerId()), ResourceType.secondary_storage);
+ return profile;
+ }
+
@Override
public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException {
TemplateProfile profile = super.prepare(cmd);
diff --git a/server/src/main/java/com/cloud/template/TemplateAdapter.java b/server/src/main/java/com/cloud/template/TemplateAdapter.java
index 595de66ed17..c048ceaf1fc 100644
--- a/server/src/main/java/com/cloud/template/TemplateAdapter.java
+++ b/server/src/main/java/com/cloud/template/TemplateAdapter.java
@@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd;
@@ -51,29 +52,31 @@ public interface TemplateAdapter extends Adapter {
}
}
- public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException;
+ TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException;
- public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException;
+ TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException;
- public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException;
+ TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException;
- public VMTemplateVO create(TemplateProfile profile);
+ TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException;
- public List createTemplateForPostUpload(TemplateProfile profile);
+ VMTemplateVO create(TemplateProfile profile);
- public TemplateProfile prepareDelete(DeleteTemplateCmd cmd);
+ List createTemplateForPostUpload(TemplateProfile profile);
- public TemplateProfile prepareDelete(DeleteIsoCmd cmd);
+ TemplateProfile prepareDelete(DeleteTemplateCmd cmd);
- public TemplateProfile prepareExtractTemplate(ExtractTemplateCmd cmd);
+ TemplateProfile prepareDelete(DeleteIsoCmd cmd);
- public boolean delete(TemplateProfile profile);
+ TemplateProfile prepareExtractTemplate(ExtractTemplateCmd cmd);
- public TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
+ boolean delete(TemplateProfile profile);
+
+ TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName,
Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload) throws ResourceAllocationException;
- public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
+ TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum,
Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable,
TemplateType templateType, boolean directDownload) throws ResourceAllocationException;
diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java
index dc4074c957f..0e88c147f51 100644
--- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java
+++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java
@@ -23,8 +23,13 @@ import java.util.Map;
import javax.inject.Inject;
+import com.cloud.storage.upload.params.IsoUploadParams;
+import com.cloud.storage.upload.params.TemplateUploadParams;
+import com.cloud.storage.upload.params.UploadParams;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang3.BooleanUtils;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.ApiConstants;
@@ -284,35 +289,55 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
}
- @Override
- public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
+ /**
+ * Prepare upload parameters internal method for templates and ISOs local upload
+ */
+ private TemplateProfile prepareUploadParamsInternal(UploadParams params) throws ResourceAllocationException {
//check if the caller can operate with the template owner
Account caller = CallContext.current().getCallingAccount();
- Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId());
+ Account owner = _accountMgr.getAccount(params.getTemplateOwnerId());
_accountMgr.checkAccess(caller, null, true, owner);
- boolean isRouting = (cmd.isRoutingType() == null) ? false : cmd.isRoutingType();
-
List zoneList = null;
- Long zoneId = cmd.getZoneId();
// ignore passed zoneId if we are using region wide image store
List stores = _imgStoreDao.findRegionImageStores();
if (!(stores != null && stores.size() > 0)) {
zoneList = new ArrayList<>();
- zoneList.add(zoneId);
+ zoneList.add(params.getZoneId());
}
- HypervisorType hypervisorType = HypervisorType.getType(cmd.getHypervisor());
- if(hypervisorType == HypervisorType.None) {
- throw new InvalidParameterValueException("Hypervisor Type: " + cmd.getHypervisor() + " is invalid. Supported Hypervisor types are "
- + EnumUtils.listValues(HypervisorType.values()).replace("None, ", ""));
+ if(!params.isIso() && params.getHypervisorType() == HypervisorType.None) {
+ throw new InvalidParameterValueException("Hypervisor Type: " + params.getHypervisorType() + " is invalid. Supported Hypervisor types are "
+ + EnumUtils.listValues(HypervisorType.values()).replace("None, ", ""));
}
- return prepare(false, CallContext.current().getCallingUserId(), cmd.getName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(),
- cmd.getRequiresHvm(), null, cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneList,
- hypervisorType, cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null,
- cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER, false);
+ return prepare(params.isIso(), params.getUserId(), params.getName(), params.getDisplayText(), params.getBits(),
+ params.isPasswordEnabled(), params.requiresHVM(), params.getUrl(), params.isPublic(), params.isFeatured(),
+ params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList,
+ params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner,
+ params.getDetails(), params.isSshKeyEnabled(), params.getImageStoreUuid(),
+ params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload());
+ }
+ @Override
+ public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException {
+ UploadParams params = new TemplateUploadParams(CallContext.current().getCallingUserId(), cmd.getName(),
+ cmd.getDisplayText(), cmd.getBits(), BooleanUtils.toBoolean(cmd.isPasswordEnabled()),
+ BooleanUtils.toBoolean(cmd.getRequiresHvm()), BooleanUtils.toBoolean(cmd.isPublic()),
+ BooleanUtils.toBoolean(cmd.isFeatured()), BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getFormat(), cmd.getOsTypeId(),
+ cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(),
+ cmd.getTemplateTag(), cmd.getEntityOwnerId(), cmd.getDetails(), BooleanUtils.toBoolean(cmd.isSshKeyEnabled()),
+ BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()));
+ return prepareUploadParamsInternal(params);
+ }
+
+ @Override
+ public TemplateProfile prepare(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException {
+ UploadParams params = new IsoUploadParams(CallContext.current().getCallingUserId(), cmd.getName(),
+ cmd.getDisplayText(), BooleanUtils.toBoolean(cmd.isPublic()), BooleanUtils.toBoolean(cmd.isFeatured()),
+ BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getOsTypeId(),
+ cmd.getZoneId(), BooleanUtils.toBoolean(cmd.isBootable()), cmd.getEntityOwnerId());
+ return prepareUploadParamsInternal(params);
}
@Override
diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java
index df3b59b5ff7..9beeb7bc010 100755
--- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java
+++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java
@@ -43,6 +43,7 @@ import com.google.common.base.Joiner;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
+import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
@@ -349,11 +350,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
}
- @Override
- @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating post upload template")
- public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException {
- TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor()));
- TemplateProfile profile = adapter.prepare(cmd);
+ /**
+ * Internal register template or ISO method - post local upload
+ * @param adapter
+ * @param profile
+ */
+ private GetUploadParamsResponse registerPostUploadInternal(TemplateAdapter adapter,
+ TemplateProfile profile) throws MalformedURLException {
+
List payload = adapter.createTemplateForPostUpload(profile);
if(CollectionUtils.isNotEmpty(payload)) {
@@ -403,6 +407,21 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
}
+ @Override
+ @ActionEvent(eventType = EventTypes.EVENT_ISO_CREATE, eventDescription = "creating post upload iso")
+ public GetUploadParamsResponse registerIsoForPostUpload(GetUploadParamsForIsoCmd cmd) throws ResourceAllocationException, MalformedURLException {
+ TemplateAdapter adapter = getAdapter(HypervisorType.None);
+ TemplateProfile profile = adapter.prepare(cmd);
+ return registerPostUploadInternal(adapter, profile);
+ }
+
+ @Override
+ @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating post upload template")
+ public GetUploadParamsResponse registerTemplateForPostUpload(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException, MalformedURLException {
+ TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor()));
+ TemplateProfile profile = adapter.prepare(cmd);
+ return registerPostUploadInternal(adapter, profile);
+ }
@Override
public DataStore getImageStore(String storeUuid, Long zoneId) {
@@ -558,6 +577,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
if (vm.getIsoId() != null) {
Map storageForDisks = dest.getStorageForDisks();
Long poolId = null;
+ TemplateInfo template;
if (MapUtils.isNotEmpty(storageForDisks)) {
for (StoragePool storagePool : storageForDisks.values()) {
if (poolId != null && storagePool.getId() != poolId) {
@@ -565,8 +585,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
}
poolId = storagePool.getId();
}
+ template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId);
+ } else {
+ template = _tmplFactory.getTemplate(vm.getIsoId(), DataStoreRole.Primary, dest.getDataCenter().getId());
}
- TemplateInfo template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId);
+
if (template == null){
s_logger.error("Failed to prepare ISO on secondary or cache storage");
throw new CloudRuntimeException("Failed to prepare ISO on secondary or cache storage");
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 72df11a261d..00e241c07e1 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -302,7 +302,7 @@ import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.snapshot.VMSnapshotManager;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
-
+import com.google.common.base.Strings;
public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable {
private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class);
@@ -1460,6 +1460,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
newNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid(), vmInstance.isDisplay());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(),
oldNicIdString, oldNetworkOfferingId, null, 0L, VirtualMachine.class.getName(), vmInstance.getUuid(), vmInstance.isDisplay());
+
+ if (vmInstance.getState() != State.Stopped) {
+ try {
+ VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vmInstance);
+ User callerUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId());
+ ReservationContext context = new ReservationContextImpl(null, null, callerUser, caller);
+ DeployDestination dest = new DeployDestination(dc, null, null, null);
+ _networkMgr.prepare(vmProfile, dest, context);
+ } catch (final Exception e) {
+ s_logger.info("Got exception: ", e);
+ }
+ }
+
return _vmDao.findById(vmInstance.getId());
}
@@ -4419,10 +4432,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
- List nics = _nicDao.listByVmId(vm.getId());
- for (NicVO nic : nics) {
- NetworkVO network = _networkDao.findById(nic.getNetworkId());
- if (network.getTrafficType() == TrafficType.Guest) {
+ final List nics = _nicDao.listByVmId(vm.getId());
+ for (final NicVO nic : nics) {
+ final NetworkVO network = _networkDao.findById(nic.getNetworkId());
+ if (network != null && network.getTrafficType() == TrafficType.Guest) {
+ final String nicIp = Strings.isNullOrEmpty(nic.getIPv4Address()) ? nic.getIPv6Address() : nic.getIPv4Address();
+ if (!Strings.isNullOrEmpty(nicIp)) {
+ NicProfile nicProfile = new NicProfile(nic.getIPv4Address(), nic.getIPv6Address(), nic.getMacAddress());
+ nicProfile.setId(nic.getId());
+ _networkMgr.cleanupNicDhcpDnsEntry(network, profile, nicProfile);
+ }
if (nic.getBroadcastUri() != null && nic.getBroadcastUri().getScheme().equals("pvlan")) {
NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), 0, false, "pvlan-nic");
setupVmForPvlan(false, vm.getHostId(), nicProfile);
diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java
index c1ffc5ef473..99860934cd5 100644
--- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java
+++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java
@@ -41,6 +41,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.security.cert.CertificateExpiredException;
+import java.security.cert.CertificateNotYetValidException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -50,6 +54,7 @@ import java.util.Collections;
import java.util.stream.Collectors;
import javax.inject.Inject;
+import com.cloud.utils.security.CertificateHelper;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand.DownloadProtocol;
@@ -57,7 +62,7 @@ import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.MetalinkDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand;
import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand;
-import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificate;
+import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificateCommand;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@@ -69,6 +74,7 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.log4j.Logger;
+import sun.security.x509.X509CertImpl;
public class DirectDownloadManagerImpl extends ManagerBase implements DirectDownloadManager {
@@ -79,17 +85,17 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
protected final static String LINE_SEPARATOR = "\n";
@Inject
- VMTemplateDao vmTemplateDao;
+ private VMTemplateDao vmTemplateDao;
@Inject
- PrimaryDataStoreDao primaryDataStoreDao;
+ private PrimaryDataStoreDao primaryDataStoreDao;
@Inject
- HostDao hostDao;
+ private HostDao hostDao;
@Inject
- AgentManager agentManager;
+ private AgentManager agentManager;
@Inject
- VMTemplatePoolDao vmTemplatePoolDao;
+ private VMTemplatePoolDao vmTemplatePoolDao;
@Inject
- DataStoreManager dataStoreManager;
+ private DataStoreManager dataStoreManager;
@Override
public List> getCommands() {
@@ -313,17 +319,76 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
.collect(Collectors.toList());
}
- @Override
- public boolean uploadCertificateToHosts(String certificateCer, String certificateName, String hypervisor) {
- HypervisorType hypervisorType = HypervisorType.getType(hypervisor);
- List hosts = getRunningHostsToUploadCertificate(hypervisorType);
- if (CollectionUtils.isNotEmpty(hosts)) {
- for (HostVO host : hosts) {
- if (!uploadCertificate(certificateCer, certificateName, host.getId())) {
- throw new CloudRuntimeException("Uploading certificate " + certificateName + " failed on host: " + host.getId());
- }
+ /**
+ * Return pretified PEM certificate
+ */
+ protected String getPretifiedCertificate(String certificateCer) {
+ String cert = certificateCer.replaceAll("(.{64})", "$1\n");
+ if (!cert.startsWith(BEGIN_CERT) && !cert.endsWith(END_CERT)) {
+ cert = BEGIN_CERT + LINE_SEPARATOR + cert + LINE_SEPARATOR + END_CERT;
+ }
+ return cert;
+ }
+
+ /**
+ * Generate and return certificate from the string
+ * @throws CloudRuntimeException if the certificate is not well formed
+ */
+ private Certificate getCertificateFromString(String certificatePem) {
+ try {
+ return CertificateHelper.buildCertificate(certificatePem);
+ } catch (CertificateException e) {
+ e.printStackTrace();
+ throw new CloudRuntimeException("Cannot parse the certificate provided, please provide a PEM certificate. Error: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Perform sanity of string parsed certificate
+ */
+ protected void certificateSanity(String certificatePem) {
+ Certificate certificate = getCertificateFromString(certificatePem);
+
+ if (certificate instanceof X509CertImpl) {
+ X509CertImpl x509Cert = (X509CertImpl) certificate;
+ try {
+ x509Cert.checkValidity();
+ } catch (CertificateExpiredException | CertificateNotYetValidException e) {
+ String msg = "Certificate is invalid. Please provide a valid certificate. Error: " + e.getMessage();
+ s_logger.error(msg);
+ throw new CloudRuntimeException(msg);
+ }
+ if (x509Cert.getSubjectDN() != null) {
+ s_logger.debug("Valid certificate for domain name: " + x509Cert.getSubjectDN().getName());
}
}
+ }
+
+ @Override
+ public boolean uploadCertificateToHosts(String certificateCer, String alias, String hypervisor) {
+ if (alias != null && (alias.equalsIgnoreCase("cloud") || alias.startsWith("cloudca"))) {
+ throw new CloudRuntimeException("Please provide a different alias name for the certificate");
+ }
+
+ HypervisorType hypervisorType = HypervisorType.getType(hypervisor);
+ List hosts = getRunningHostsToUploadCertificate(hypervisorType);
+
+ String certificatePem = getPretifiedCertificate(certificateCer);
+ certificateSanity(certificatePem);
+
+ s_logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts");
+ int hostCount = 0;
+ if (CollectionUtils.isNotEmpty(hosts)) {
+ for (HostVO host : hosts) {
+ if (!uploadCertificate(certificatePem, alias, host.getId())) {
+ String msg = "Could not upload certificate " + alias + " on host: " + host.getName() + " (" + host.getUuid() + ")";
+ s_logger.error(msg);
+ throw new CloudRuntimeException(msg);
+ }
+ hostCount++;
+ }
+ }
+ s_logger.info("Certificate was successfully uploaded to " + hostCount + " hosts");
return true;
}
@@ -331,14 +396,19 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
* Upload and import certificate to hostId on keystore
*/
protected boolean uploadCertificate(String certificate, String certificateName, long hostId) {
- String cert = certificate.replaceAll("(.{64})", "$1\n");
- final String prettified_cert = BEGIN_CERT + LINE_SEPARATOR + cert + LINE_SEPARATOR + END_CERT;
- SetupDirectDownloadCertificate cmd = new SetupDirectDownloadCertificate(prettified_cert, certificateName);
+ s_logger.debug("Uploading certificate: " + certificateName + " to host " + hostId);
+ SetupDirectDownloadCertificateCommand cmd = new SetupDirectDownloadCertificateCommand(certificate, certificateName);
Answer answer = agentManager.easySend(hostId, cmd);
if (answer == null || !answer.getResult()) {
+ String msg = "Certificate " + certificateName + " could not be added to host " + hostId;
+ if (answer != null) {
+ msg += " due to: " + answer.getDetails();
+ }
+ s_logger.error(msg);
return false;
}
s_logger.info("Certificate " + certificateName + " successfully uploaded to host: " + hostId);
return true;
}
+
}
diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java
index f456fcee177..f35f1425f81 100644
--- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java
+++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java
@@ -172,6 +172,21 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology {
return applyRules(network, router, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper(dhcpRules));
}
+ @Override
+ public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException {
+ s_logger.debug("REMOVE VPC DHCP ENTRY RULES");
+
+ final String typeString = "dhcp entry";
+ final Long podId = null;
+ final boolean isPodLevelException = false;
+ final boolean failWhenDisconnect = false;
+
+ final DhcpEntryRules dhcpRules = new DhcpEntryRules(network, nic, profile, null);
+ dhcpRules.setRemove(true);
+
+ return applyRules(network, virtualRouter, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper(dhcpRules));
+ }
+
@Override
public boolean associatePublicIP(final Network network, final List extends PublicIpAddress> ipAddresses, final VirtualRouter router)
throws ResourceUnavailableException {
diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java
index b5283dacfeb..c21b6d7e9ad 100644
--- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java
+++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java
@@ -80,8 +80,9 @@ public class AdvancedNetworkVisitor extends BasicNetworkVisitor {
final Commands commands = new Commands(Command.OnError.Stop);
final NicVO nicVo = dhcp.getNicVo();
final UserVmVO userVM = dhcp.getUserVM();
+ final boolean remove = dhcp.isRemove();
- _commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, commands);
+ _commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, remove, commands);
return _networkGeneralHelper.sendCommandsToRouter(router, commands);
}
diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java
index 0869b2289ca..a7fbe317828 100644
--- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java
+++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java
@@ -442,4 +442,25 @@ public class BasicNetworkTopology implements NetworkTopology {
}
return result;
}
+
+ @Override
+ public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException {
+ s_logger.debug("REMOVING DHCP ENTRY RULE");
+
+ final String typeString = "dhcp entry";
+ final Long podId = profile.getVirtualMachine().getPodIdToDeployIn();
+ boolean isPodLevelException = false;
+
+ if (podId != null && profile.getVirtualMachine().getType() == VirtualMachine.Type.User && network.getTrafficType() == TrafficType.Guest
+ && network.getGuestType() == Network.GuestType.Shared) {
+ isPodLevelException = true;
+ }
+
+ final boolean failWhenDisconnect = false;
+
+ final DhcpEntryRules dhcpRules = new DhcpEntryRules(network, nic, profile, null);
+ dhcpRules.setRemove(true);
+
+ return applyRules(network, virtualRouter, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper<RuleApplier>(dhcpRules));
+ }
}
\ No newline at end of file
diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java
index 31493dc02bf..8efb16320cf 100644
--- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java
+++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java
@@ -196,9 +196,10 @@ public class BasicNetworkVisitor extends NetworkTopologyVisitor {
final NicVO nicVo = dhcp.getNicVo();
final UserVmVO userVM = dhcp.getUserVM();
final DeployDestination destination = dhcp.getDestination();
+ final boolean remove = dhcp.isRemove();
if (router.getPodIdToDeployIn().longValue() == destination.getPod().getId()) {
- _commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, commands);
+ _commandSetupHelper.createDhcpEntryCommand(router, userVM, nicVo, remove, commands);
return _networkGeneralHelper.sendCommandsToRouter(router, commands);
}
diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java
index 5190d5e2f5d..fa76f8375f9 100644
--- a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java
+++ b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java
@@ -87,4 +87,6 @@ public interface NetworkTopology {
boolean applyRules(final Network network, final VirtualRouter router, final String typeString, final boolean isPodLevelException, final Long podId,
final boolean failWhenDisconnect, RuleApplierWrapper<RuleApplier> ruleApplier) throws ResourceUnavailableException;
+
+ boolean removeDhcpEntry(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter virtualRouter) throws ResourceUnavailableException;
}
\ No newline at end of file
diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
index 693b437079b..bb5599f0a99 100644
--- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
+++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
@@ -49,8 +49,6 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
@@ -79,6 +77,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Grouping;
import com.cloud.serializer.GsonHelper;
import com.cloud.storage.Volume.Type;
+import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.user.Account;
@@ -146,7 +145,7 @@ public class VolumeApiServiceImplTest {
@Mock
private HostDao _hostDao;
@Mock
- private StoragePoolDetailsDao storagePoolDetailsDao;
+ private StoragePoolTagsDao storagePoolTagsDao;
private DetachVolumeCmd detachCmd = new DetachVolumeCmd();
private Class<?> _detachCmdClass = detachCmd.getClass();
@@ -516,26 +515,25 @@ public class VolumeApiServiceImplTest {
@Test
public void getStoragePoolTagsTestStorageWithoutTags() {
- Mockito.when(storagePoolDetailsDao.listDetails(storagePoolMockId)).thenReturn(new ArrayList<>());
+ Mockito.when(storagePoolTagsDao.getStoragePoolTags(storagePoolMockId)).thenReturn(new ArrayList<>());
String returnedStoragePoolTags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);
Assert.assertNull(returnedStoragePoolTags);
-
}
@Test
public void getStoragePoolTagsTestStorageWithTags() {
- ArrayList<StoragePoolDetailVO> tags = new ArrayList<>();
- StoragePoolDetailVO tag1 = new StoragePoolDetailVO(1l, "tag1", "value", true);
- StoragePoolDetailVO tag2 = new StoragePoolDetailVO(1l, "tag2", "value", true);
- StoragePoolDetailVO tag3 = new StoragePoolDetailVO(1l, "tag3", "value", true);
+ ArrayList<String> tags = new ArrayList<>();
+ String tag1 = "tag1";
+ String tag2 = "tag2";
+ String tag3 = "tag3";
tags.add(tag1);
tags.add(tag2);
tags.add(tag3);
- Mockito.when(storagePoolDetailsDao.listDetails(storagePoolMockId)).thenReturn(tags);
+ Mockito.when(storagePoolTagsDao.getStoragePoolTags(storagePoolMockId)).thenReturn(tags);
String returnedStoragePoolTags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);
diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java
index 57ecbd1fdc6..b3027f23486 100644
--- a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java
+++ b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java
@@ -925,6 +925,10 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches
return false;
}
+ @Override
+ public void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProfile, NicProfile nicProfile) {
+ }
+
@Override
public void finalizeUpdateInSequence(Network network, boolean success) {
return;
diff --git a/server/src/test/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImplTest.java
index 59405998f9f..5082500a9ae 100644
--- a/server/src/test/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImplTest.java
+++ b/server/src/test/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImplTest.java
@@ -20,6 +20,7 @@ package org.apache.cloudstack.direct.download;
import com.cloud.agent.AgentManager;
import com.cloud.host.dao.HostDao;
+import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand.DownloadProtocol;
import org.junit.Assert;
import org.junit.Before;
@@ -50,6 +51,26 @@ public class DirectDownloadManagerImplTest {
private static final String HTTP_HEADER_2 = "Accept-Encoding";
private static final String HTTP_VALUE_2 = "gzip";
+ private static final String VALID_CERTIFICATE =
+ "MIIDSzCCAjMCFDa0LoW+1O8/cEwCI0nIqfl8c1TLMA0GCSqGSIb3DQEBCwUAMGEx\n" +
+ "CzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoM\n" +
+ "AkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNT\n" +
+ "MCAXDTE5MDQyNDE1NTIzNVoYDzIwOTgwOTE1MTU1MjM1WjBhMQswCQYDVQQGEwJD\n" +
+ "UzELMAkGA1UECAwCQ1MxCzAJBgNVBAcMAkNTMQswCQYDVQQKDAJDUzELMAkGA1UE\n" +
+ "CwwCQ1MxCzAJBgNVBAMMAkNTMREwDwYJKoZIhvcNAQkBFgJDUzCCASIwDQYJKoZI\n" +
+ "hvcNAQEBBQADggEPADCCAQoCggEBAKstLRcMGCo6+2hojRMjEuuimnWp27yfYhDU\n" +
+ "w/Cj03MJe/KCOhwsDqX82QNIr/bNtLdFf2ZJEUQd08sLLlHeUy9y5aOcxt9SGx2j\n" +
+ "xolqO4MBL7BW3dklO0IvjaEfBeFP6udz8ajeVur/iPPZb2Edd0zlXuHvDozfQisv\n" +
+ "bpuJImnTUVx0ReCXP075PBGvlqQXW2uEht+E/w3H8/2rra3JFV6J5xc77KyQSq2t\n" +
+ "1+2ZU7PJiy/rppXf5rjTvNm6ydfag8/av7lcgs2ntdkK4koAmkmROhAwNonlL7cD\n" +
+ "xIC83cKOqOFiQXSwr1IgoLf7zBNafKoTlSb/ev6Zt18BXEMLGpkCAwEAATANBgkq\n" +
+ "hkiG9w0BAQsFAAOCAQEAVS5uWZRz2m3yx7EUQm47RTMW5WMXU4pI8D+N5WZ9xubY\n" +
+ "OqtU3r2OAYpfL/QO8iT7jcqNYGoDqe8ZjEaNvfxiTG8cOI6TSXhKBG6hjSaSFQSH\n" +
+ "OZ5mfstM36y/3ENFh6JCJ2ao1rgWSbfDRyAaHuvt6aCkaV6zRq2OMEgoJqZSgwxL\n" +
+ "QO230xa2hYgKXOePMVZyHFA2oKJtSOc3jCke9Y8zDUwm0McGdMRBD8tVB0rcaOqQ\n" +
+ "0PlDLjB9sQuhhLu8vjdgbznmPbUmMG7JN0yhT1eJbIX5ImXyh0DoTwiaGcYwW6Sq\n" +
+ "YodjXACsC37xaQXAPYBiaAs4iI80TJSx1DVFO1LV0g==";
+
@Before
public void setUp() {
}
@@ -103,4 +124,16 @@ public class DirectDownloadManagerImplTest {
Map<String, String> headers = manager.getHeadersFromDetails(details);
Assert.assertTrue(headers.isEmpty());
}
+
+ @Test
+ public void testCertificateSanityValidCertificate() {
+ String pretifiedCertificate = manager.getPretifiedCertificate(VALID_CERTIFICATE);
+ manager.certificateSanity(pretifiedCertificate);
+ }
+
+ @Test(expected = CloudRuntimeException.class)
+ public void testCertificateSanityInvalidCertificate() {
+ String pretifiedCertificate = manager.getPretifiedCertificate(VALID_CERTIFICATE + "xxx");
+ manager.certificateSanity(pretifiedCertificate);
+ }
}
diff --git a/systemvm/agent/js/ajaxviewer.js b/systemvm/agent/js/ajaxviewer.js
index f160abc9b58..1a44b3e7c1f 100644
--- a/systemvm/agent/js/ajaxviewer.js
+++ b/systemvm/agent/js/ajaxviewer.js
@@ -1431,7 +1431,9 @@ AjaxViewer.prototype = {
if(e.shiftLeft)
modifiers |= AjaxViewer.LEFT_SHIFT_MASK;
- if(e.metaKey)
+ // Don't pass meta key modifier filter if control key is pressed.
+ // For more details see https://github.com/apache/cloudstack/issues/3229
+ if(e.metaKey && !e.ctrlKey)
modifiers |= AjaxViewer.META_KEY_MASK;
return modifiers;
diff --git a/systemvm/debian/opt/cloud/bin/configure.py b/systemvm/debian/opt/cloud/bin/configure.py
index 253eb7c57fe..a7f297edbb2 100755
--- a/systemvm/debian/opt/cloud/bin/configure.py
+++ b/systemvm/debian/opt/cloud/bin/configure.py
@@ -858,7 +858,7 @@ class CsForwardingRules(CsDataBag):
rule['protocol'],
rule['protocol'],
public_fwports,
- hex(int(public_fwinterface[3:]))
+ hex(100 + int(public_fwinterface[3:]))
)
fw6 = "-A PREROUTING -d %s/32 -i %s -p %s -m %s --dport %s -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" % \
(
@@ -922,12 +922,12 @@ class CsForwardingRules(CsDataBag):
if device is None:
raise Exception("Ip address %s has no device in the ips databag" % rule["public_ip"])
- self.fw.append(["mangle", "",
- "-I PREROUTING -s %s/32 -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" %
+ self.fw.append(["mangle", "front",
+ "-A PREROUTING -s %s/32 -m state --state NEW -j CONNMARK --save-mark --nfmask 0xffffffff --ctmask 0xffffffff" %
rule["internal_ip"]])
- self.fw.append(["mangle", "",
- "-I PREROUTING -s %s/32 -m state --state NEW -j MARK --set-xmark %s/0xffffffff" %
- (rule["internal_ip"], hex(int(device[len("eth"):])))])
+ self.fw.append(["mangle", "front",
+ "-A PREROUTING -s %s/32 -m state --state NEW -j MARK --set-xmark %s/0xffffffff" %
+ (rule["internal_ip"], hex(100 + int(device[len("eth"):])))])
self.fw.append(["nat", "front",
"-A PREROUTING -d %s/32 -j DNAT --to-destination %s" % (rule["public_ip"], rule["internal_ip"])])
self.fw.append(["nat", "front",
diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py
index ab0cee60039..8e678251fe3 100755
--- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py
+++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py
@@ -258,7 +258,7 @@ class CsIP:
def __init__(self, dev, config):
self.dev = dev
- self.dnum = hex(int(dev[3:]))
+ self.dnum = hex(100 + int(dev[3:]))
self.iplist = {}
self.address = {}
self.list()
@@ -518,12 +518,11 @@ class CsIP:
if method == "add":
if not self.config.is_vpc():
- # treat the first IP on a interface as special case to set up the routing rules
- if self.get_type() in ["public"] and (len(self.iplist) == 1):
- CsHelper.execute("sudo ip route add throw " + self.config.address().dbag['eth0'][0]['network'] + " table " + tableName + " proto static")
- CsHelper.execute("sudo ip route add throw " + self.config.address().dbag['eth1'][0]['network'] + " table " + tableName + " proto static")
+ if self.get_type() in ["public"]:
+ route.set_route("table %s throw %s proto static" % (tableName, self.config.address().dbag['eth0'][0]['network']))
+ route.set_route("table %s throw %s proto static" % (tableName, self.config.address().dbag['eth1'][0]['network']))
- # add 'defaul via gateway' rule in the device specific routing table
+ # add 'default via gateway' rule in the device specific routing table
if "gateway" in self.address and self.address["gateway"] and self.address["gateway"] != "None":
route.add_route(self.dev, self.address["gateway"])
if "network" in self.address and self.address["network"]:
diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py
index ecbc12f5a6f..e7abb902046 100755
--- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py
+++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py
@@ -16,6 +16,7 @@
# under the License.
import CsHelper
import logging
+import os
from netaddr import *
from random import randint
from CsGuestNetwork import CsGuestNetwork
@@ -46,8 +47,8 @@ class CsDhcp(CsDataBag):
for item in self.dbag:
if item == "id":
continue
- self.add(self.dbag[item])
- self.write_hosts()
+ if not self.dbag[item]['remove']:
+ self.add(self.dbag[item])
self.configure_server()
@@ -64,6 +65,8 @@ class CsDhcp(CsDataBag):
if restart_dnsmasq:
self.delete_leases()
+ self.write_hosts()
+
if not self.cl.is_redundant() or self.cl.is_master():
if restart_dnsmasq:
CsHelper.service("dnsmasq", "restart")
@@ -114,10 +117,26 @@ class CsDhcp(CsDataBag):
idx += 1
def delete_leases(self):
+ macs_dhcphosts = []
try:
- open(LEASES, 'w').close()
- except IOError:
- return
+ logging.info("Attempting to delete entries from dnsmasq.leases file for VMs which are not on dhcphosts file")
+ for host in open(DHCP_HOSTS):
+ macs_dhcphosts.append(host.split(',')[0])
+
+ removed = 0
+ for leaseline in open(LEASES):
+ lease = leaseline.split(' ')
+ mac = lease[1]
+ ip = lease[2]
+ if mac not in macs_dhcphosts:
+ cmd = "dhcp_release $(ip route get %s | grep eth | head -1 | awk '{print $3}') %s %s" % (ip, ip, mac)
+ logging.info(cmd)
+ CsHelper.execute(cmd)
+ removed = removed + 1
+ self.del_host(ip)
+ logging.info("Deleted %s entries from dnsmasq.leases file" % str(removed))
+ except Exception as e:
+ logging.error("Caught error while trying to delete entries from dnsmasq.leases file: %s" % e)
def preseed(self):
self.add_host("127.0.0.1", "localhost %s" % CsHelper.get_hostname())
@@ -170,3 +189,7 @@ class CsDhcp(CsDataBag):
def add_host(self, ip, hosts):
self.hosts[ip] = hosts
+
+ def del_host(self, ip):
+ if ip in self.hosts:
+ self.hosts.pop(ip)
diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py
index 3ade4a2a979..25a4a1a9438 100755
--- a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py
+++ b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py
@@ -351,6 +351,33 @@ class CsRedundant(object):
interfaces = [interface for interface in self.address.get_interfaces() if interface.is_public()]
CsHelper.reconfigure_interfaces(self.cl, interfaces)
+
+ public_devices = list(set([interface.get_device() for interface in interfaces]))
+ if len(public_devices) > 1:
+ # Handle specific failures when multiple public interfaces
+
+ public_devices.sort()
+
+ # Ensure the default route is added, or outgoing traffic from VMs with static NAT on
+ # the subsequent interfaces will go from the wrong IP
+ route = CsRoute()
+ dev = ''
+ for interface in interfaces:
+ if dev == interface.get_device():
+ continue
+ dev = interface.get_device()
+ gateway = interface.get_gateway()
+ if gateway:
+ route.add_route(dev, gateway)
+
+ # The first public interface has a static MAC address between VRs. Subsequent ones don't,
+ # so an ARP announcement is needed on failover
+ for device in public_devices[1:]:
+ logging.info("Sending garp messages for IPs on %s" % device)
+ for interface in interfaces:
+ if interface.get_device() == device:
+ CsHelper.execute("arping -I %s -U %s -c 1" % (device, interface.get_ip()))
+
logging.info("Router switched to master mode")
def _collect_ignore_ips(self):
diff --git a/systemvm/debian/opt/cloud/bin/cs_dhcp.py b/systemvm/debian/opt/cloud/bin/cs_dhcp.py
index 88b4b7568c5..bb2ff7b07c3 100755
--- a/systemvm/debian/opt/cloud/bin/cs_dhcp.py
+++ b/systemvm/debian/opt/cloud/bin/cs_dhcp.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
+import logging
from netaddr import *
@@ -26,16 +27,14 @@ def merge(dbag, data):
del(dbag[data['ipv4_address']])
else:
remove_keys = set()
- for key, entry in dbag.iteritems():
- if key != 'id' and entry['host_name'] == data['host_name']:
- remove_keys.add(key)
- break
-
for key, entry in dbag.iteritems():
if key != 'id' and entry['mac_address'] == data['mac_address']:
remove_keys.add(key)
break
+ if data['remove'] and key not in remove_keys:
+ remove_keys.add(key)
+
for remove_key in remove_keys:
del(dbag[remove_key])
diff --git a/test/integration/smoke/test_direct_download.py b/test/integration/smoke/test_direct_download.py
new file mode 100644
index 00000000000..65117f97f8c
--- /dev/null
+++ b/test/integration/smoke/test_direct_download.py
@@ -0,0 +1,227 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" Test for Direct Downloads of Templates and ISOs
+"""
+# Import Local Modules
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import (cleanup_resources)
+from marvin.lib.base import (ServiceOffering,
+ NetworkOffering,
+ Network,
+ Template,
+ VirtualMachine)
+from marvin.lib.common import (get_pod,
+ get_zone)
+from nose.plugins.attrib import attr
+from marvin.cloudstackAPI import uploadTemplateDirectDownloadCertificate
+from marvin.lib.decoratorGenerators import skipTestIf
+
+
+class TestUploadDirectDownloadCertificates(cloudstackTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.testClient = super(TestUploadDirectDownloadCertificates, cls).getClsTestClient()
+ cls.apiclient = cls.testClient.getApiClient()
+ cls.hypervisor = cls.testClient.getHypervisorInfo()
+ cls.dbclient = cls.testClient.getDbConnection()
+ cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+ cls.pod = get_pod(cls.apiclient, cls.zone.id)
+ cls.services = cls.testClient.getParsedTestDataConfig()
+
+ cls._cleanup = []
+ cls.hypervisorNotSupported = False
+ if cls.hypervisor.lower() not in ['kvm', 'lxc']:
+ cls.hypervisorNotSupported = True
+
+ if not cls.hypervisorNotSupported:
+ cls.certificates = {
+ "expired": "MIIDSTCCAjECFDi8s70TWFhwVN9cj67RJoAF99c8MA0GCSqGSIb3DQEBCwUAMGExCzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoMAkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNTMB4XDTE5MDQyNDE1NTQxM1oXDTE5MDQyMjE1NTQxM1owYTELMAkGA1UEBhMCQ1MxCzAJBgNVBAgMAkNTMQswCQYDVQQHDAJDUzELMAkGA1UECgwCQ1MxCzAJBgNVBAsMAkNTMQswCQYDVQQDDAJDUzERMA8GCSqGSIb3DQEJARYCQ1MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrLS0XDBgqOvtoaI0TIxLropp1qdu8n2IQ1MPwo9NzCXvygjocLA6l/NkDSK/2zbS3RX9mSRFEHdPLCy5R3lMvcuWjnMbfUhsdo8aJajuDAS+wVt3ZJTtCL42hHwXhT+rnc/Go3lbq/4jz2W9hHXdM5V7h7w6M30IrL26biSJp01FcdEXglz9O+TwRr5akF1trhIbfhP8Nx/P9q62tyRVeiecXO+yskEqtrdftmVOzyYsv66aV3+a407zZusnX2oPP2r+5XILNp7XZCuJKAJpJkToQMDaJ5S+3A8SAvN3CjqjhYkF0sK9SIKC3+8wTWnyqE5Um/3r+mbdfAVxDCxqZAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAG/R9sJ2pFbu35MliIJIhWkwP7FeP/7gYCNvOXFt6vVGXmcOwuw9WGBxsmsGESQRB4+NnJFjyGQ1Ck+ps5XRRMizyvq6bCQxVuC5M+vYS4J0q8YoL0RJ20pN9iwTsosZjSEKmfUlVgsufqCG2nyusV71LSaQU6f/bylJcJkKwGUhThExh+PVLZ66H5cF4/SzuK6WzWnj5p6+YX8TP+qPUkXN1mapgVKfVMo6mqLsH+eLKH+zqdy5ZZ5znNSbJFgHufYbEFlutTaxHEvKNMEgMCFkFGiyPwRuD6oaPnZFquJLh/mBZOLogpxVD5v20AcUTANtbXSlPaqOnEQFcbiVCb8=",
+ "invalid": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "valid": "MIIDSzCCAjMCFDa0LoW+1O8/cEwCI0nIqfl8c1TLMA0GCSqGSIb3DQEBCwUAMGExCzAJBgNVBAYTAkNTMQswCQYDVQQIDAJDUzELMAkGA1UEBwwCQ1MxCzAJBgNVBAoMAkNTMQswCQYDVQQLDAJDUzELMAkGA1UEAwwCQ1MxETAPBgkqhkiG9w0BCQEWAkNTMCAXDTE5MDQyNDE1NTIzNVoYDzIwOTgwOTE1MTU1MjM1WjBhMQswCQYDVQQGEwJDUzELMAkGA1UECAwCQ1MxCzAJBgNVBAcMAkNTMQswCQYDVQQKDAJDUzELMAkGA1UECwwCQ1MxCzAJBgNVBAMMAkNTMREwDwYJKoZIhvcNAQkBFgJDUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKstLRcMGCo6+2hojRMjEuuimnWp27yfYhDUw/Cj03MJe/KCOhwsDqX82QNIr/bNtLdFf2ZJEUQd08sLLlHeUy9y5aOcxt9SGx2jxolqO4MBL7BW3dklO0IvjaEfBeFP6udz8ajeVur/iPPZb2Edd0zlXuHvDozfQisvbpuJImnTUVx0ReCXP075PBGvlqQXW2uEht+E/w3H8/2rra3JFV6J5xc77KyQSq2t1+2ZU7PJiy/rppXf5rjTvNm6ydfag8/av7lcgs2ntdkK4koAmkmROhAwNonlL7cDxIC83cKOqOFiQXSwr1IgoLf7zBNafKoTlSb/ev6Zt18BXEMLGpkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAVS5uWZRz2m3yx7EUQm47RTMW5WMXU4pI8D+N5WZ9xubYOqtU3r2OAYpfL/QO8iT7jcqNYGoDqe8ZjEaNvfxiTG8cOI6TSXhKBG6hjSaSFQSHOZ5mfstM36y/3ENFh6JCJ2ao1rgWSbfDRyAaHuvt6aCkaV6zRq2OMEgoJqZSgwxLQO230xa2hYgKXOePMVZyHFA2oKJtSOc3jCke9Y8zDUwm0McGdMRBD8tVB0rcaOqQ0PlDLjB9sQuhhLu8vjdgbznmPbUmMG7JN0yhT1eJbIX5ImXyh0DoTwiaGcYwW6SqYodjXACsC37xaQXAPYBiaAs4iI80TJSx1DVFO1LV0g=="
+ }
+
+ return
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cleanup_resources(cls.apiclient, cls._cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+ self.cleanup = []
+ return
+
+ def tearDown(self):
+ try:
+ cleanup_resources(self.apiclient, self.cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ @skipTestIf("hypervisorNotSupported")
+ @attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
+ def test_01_sanity_check_on_certificates(self):
+ """Test Verify certificates before uploading to KVM hosts
+ """
+
+ # Validate the following
+ # 1. Invalid certificates cannot be uploaded to hosts for direct downloads
+ # 2. Expired certificates cannot be uploaded to hosts for direct downloads
+
+ cmd = uploadTemplateDirectDownloadCertificate.uploadTemplateDirectDownloadCertificateCmd()
+ cmd.hypervisor = self.hypervisor
+ cmd.name = "marvin-test-verify-certs"
+ cmd.certificate = self.certificates["invalid"]
+
+ invalid_cert_uploadFails = False
+ expired_cert_upload_fails = False
+ try:
+ self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
+ self.fail("Invalid certificate must not be uploaded")
+ except Exception as e:
+ invalid_cert_uploadFails = True
+
+ cmd.certificate = self.certificates["expired"]
+ try:
+ self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
+ self.fail("Expired certificate must not be uploaded")
+ except Exception as e:
+ expired_cert_upload_fails = True
+
+ self.assertTrue(invalid_cert_uploadFails and expired_cert_upload_fails,
+ "Invalid or expired certificates must not be uploaded")
+ return
+
+ @skipTestIf("hypervisorNotSupported")
+ @attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
+ def test_02_upload_direct_download_certificates(self):
+ """Test Upload certificates to KVM hosts for direct download
+ """
+
+ # Validate the following
+ # 1. Valid certificates are uploaded to hosts
+
+ cmd = uploadTemplateDirectDownloadCertificate.uploadTemplateDirectDownloadCertificateCmd()
+ cmd.hypervisor = self.hypervisor
+ cmd.name = "marvin-test-verify-certs"
+ cmd.certificate = self.certificates["valid"]
+
+ try:
+ self.apiclient.uploadTemplateDirectDownloadCertificate(cmd)
+ except Exception as e:
+ self.fail("Valid certificate must be uploaded")
+
+ return
+
+
+class TestDirectDownloadTemplates(cloudstackTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.testClient = super(TestDirectDownloadTemplates, cls).getClsTestClient()
+ cls.apiclient = cls.testClient.getApiClient()
+ cls.hypervisor = cls.testClient.getHypervisorInfo()
+ cls.dbclient = cls.testClient.getDbConnection()
+ cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+ cls.pod = get_pod(cls.apiclient, cls.zone.id)
+ cls.services = cls.testClient.getParsedTestDataConfig()
+
+ cls._cleanup = []
+ cls.hypervisorNotSupported = False
+ if cls.hypervisor.lower() not in ['kvm', 'lxc']:
+ cls.hypervisorNotSupported = True
+
+ if not cls.hypervisorNotSupported:
+ cls.services["test_templates"]["kvm"]["directdownload"] = "true"
+ cls.template = Template.register(cls.apiclient, cls.services["test_templates"]["kvm"],
+ zoneid=cls.zone.id, hypervisor=cls.hypervisor)
+ cls._cleanup.append(cls.template)
+
+ cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.services["virtual_machine"]["template"] = cls.template.id
+ cls.services["virtual_machine"]["hypervisor"] = cls.hypervisor
+ cls.service_offering = ServiceOffering.create(
+ cls.apiclient,
+ cls.services["service_offerings"]["tiny"]
+ )
+ cls._cleanup.append(cls.service_offering)
+ cls.network_offering = NetworkOffering.create(
+ cls.apiclient,
+ cls.services["l2-network_offering"],
+ )
+ cls.network_offering.update(cls.apiclient, state='Enabled')
+ cls.services["network"]["networkoffering"] = cls.network_offering.id
+ cls.l2_network = Network.create(
+ cls.apiclient,
+ cls.services["l2-network"],
+ zoneid=cls.zone.id,
+ networkofferingid=cls.network_offering.id
+ )
+ cls._cleanup.append(cls.l2_network)
+ cls._cleanup.append(cls.network_offering)
+ return
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cleanup_resources(cls.apiclient, cls._cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+ self.cleanup = []
+ return
+
+ def tearDown(self):
+ try:
+ cleanup_resources(self.apiclient, self.cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ @skipTestIf("hypervisorNotSupported")
+ @attr(tags=["advanced", "basic", "eip", "advancedns", "sg"], required_hardware="false")
+ def test_01_deploy_vm_from_direct_download_template(self):
+ """Test Deploy VM from direct download template
+ """
+
+ # Validate the following
+ # 1. Register direct download template
+ # 2. Deploy VM from direct download template
+
+ vm = VirtualMachine.create(
+ self.apiclient,
+ self.services["virtual_machine"],
+ serviceofferingid=self.service_offering.id,
+ networkids=self.l2_network.id
+ )
+ self.assertEqual(
+ vm.state,
+ "Running",
+ "Check VM deployed from direct download template is running"
+ )
+ self.cleanup.append(vm)
+ return
diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index 6e9a877cbc1..7b67637a0aa 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -309,9 +309,9 @@ class TestSnapshotRootDisk(cloudstackTestCase):
# Migrate volume to new Primary Storage
Volume.migrate(self.apiclient,
- storageid=storage.id,
- volumeid=vol.id
- )
+ storageid=storage.id,
+ volumeid=vol.id
+ )
volume_response = list_volumes(
self.apiclient,
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index b7a996f1d72..23aa1611012 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -24,10 +24,10 @@ from marvin.cloudstackAPI import (recoverVirtualMachine,
detachIso,
provisionCertificate,
updateConfiguration,
- migrateVirtualMachine)
-from marvin.lib.utils import (cleanup_resources,
- validateList,
- SshClient)
+ migrateVirtualMachine,
+ migrateVirtualMachineWithVolume)
+from marvin.lib.utils import *
+
from marvin.lib.base import (Account,
ServiceOffering,
VirtualMachine,
@@ -1309,3 +1309,207 @@ class TestMigrateVMwithVolume(cloudstackTestCase):
vol = Volume.list(self.apiclient, volume=volume1.id)[0]
self.assertEqual(vol.storageid, target_pool.id, "Storage pool was not the same as expected")
+
+
+class TestKVMLiveMigration(cloudstackTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ testClient = super(TestKVMLiveMigration, cls).getClsTestClient()
+ cls.apiclient = testClient.getApiClient()
+ cls.services = testClient.getParsedTestDataConfig()
+ cls.hypervisor = testClient.getHypervisorInfo()
+ cls._cleanup = []
+
+ # Get Zone, Domain and templates
+ domain = get_domain(cls.apiclient)
+ cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
+ cls.services['mode'] = cls.zone.networktype
+ cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+ 0].__dict__
+ cls.management_ip = cls.config.__dict__["mgtSvr"][0].__dict__["mgtSvrIp"]
+
+ template = get_template(
+ cls.apiclient,
+ cls.zone.id,
+ cls.services["ostype"]
+ )
+ if template == FAILED:
+ assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
+
+ # Set Zones and disk offerings
+ cls.services["small"]["zoneid"] = cls.zone.id
+ cls.services["small"]["template"] = template.id
+
+ cls.services["iso1"]["zoneid"] = cls.zone.id
+
+ # Create VMs, NAT Rules etc
+ cls.account = Account.create(
+ cls.apiclient,
+ cls.services["account"],
+ domainid=domain.id
+ )
+
+ cls.small_offering = ServiceOffering.create(
+ cls.apiclient,
+ cls.services["service_offerings"]["small"]
+ )
+
+ cls._cleanup = [
+ cls.small_offering,
+ cls.account
+ ]
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.apiclient = super(TestKVMLiveMigration, cls).getClsTestClient().getApiClient()
+ try:
+ cleanup_resources(cls.apiclient, cls._cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+ self.cleanup = []
+
+ if self.hypervisor.lower() not in ["kvm"]:
+ self.skipTest("VM Live Migration with Volumes is not supported on other than KVM")
+
+ self.hosts = Host.list(
+ self.apiclient,
+ zoneid=self.zone.id,
+ type='Routing',
+ hypervisor='KVM')
+
+ if len(self.hosts) < 2:
+ self.skipTest("Requires at least two hosts for performing migration related tests")
+
+ def tearDown(self):
+ try:
+ cleanup_resources(self.apiclient, self.cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+
+ def get_target_host(self, virtualmachineid):
+ target_hosts = Host.listForMigration(self.apiclient,
+ virtualmachineid=virtualmachineid)
+ if len(target_hosts) < 1:
+ self.skipTest("No target hosts found")
+
+ return target_hosts[0]
+
+ def get_target_pool(self, volid):
+ target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
+
+ if len(target_pools) < 1:
+ self.skipTest("Not enough storage pools found")
+
+ return target_pools[0]
+
+ def get_vm_volumes(self, id):
+ return Volume.list(self.apiclient, virtualmachineid=id, listall=True)
+
+ def deploy_vm(self):
+ return VirtualMachine.create(
+ self.apiclient,
+ self.services["small"],
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ serviceofferingid=self.small_offering.id,
+ mode=self.services["mode"])
+
+ def create_volume(self):
+ small_disk_offering = DiskOffering.list(self.apiclient, name='Small')[0]
+
+ return Volume.create(
+ self.apiclient,
+ self.services,
+ account=self.account.name,
+ diskofferingid=small_disk_offering.id,
+ domainid=self.account.domainid,
+ zoneid=self.zone.id
+ )
+
+ @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
+ def test_01_migrate_VM_and_root_volume(self):
+ """Test VM will be migrated with its root volume"""
+ # Validates the following:
+ # 1. Deploys a VM
+ # 2. Migrates the VM and the root volume to another host and storage pool
+ # 3. Asserts migration success and checks for location
+
+ vm = self.deploy_vm()
+
+ root_volume = self.get_vm_volumes(vm.id)[0]
+
+ target_pool = self.get_target_pool(root_volume.id)
+
+ target_host = self.get_target_host(vm.id)
+
+ cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
+
+ cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)}]
+
+ cmd.virtualmachineid = vm.id
+ cmd.hostid = target_host.id
+
+ response = self.apiclient.migrateVirtualMachineWithVolume(cmd)
+
+ self.assertEqual(response.hostid, target_host.id)
+
+ self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid,
+ target_pool.id,
+ "Pool ID was not as expected")
+
+ @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg", "security"], required_hardware="false")
+ def test_02_migrate_VM_with_two_data_disks(self):
+ """Test VM will be migrated with its root volume and two attached data disks"""
+ # Validate the following
+ # 1. Deploys a VM and attaches 2 data disks
+ # 2. Finds suitable host for migration
+ # 3. Finds suitable storage pool for volumes
+ # 4. Migrate the VM to new host and storage pool and assert migration successful
+
+ vm = self.deploy_vm()
+
+ volume1 = self.create_volume()
+ volume2 = self.create_volume()
+
+ vm.attach_volume(self.apiclient, volume1)
+ vm.attach_volume(self.apiclient, volume2)
+
+ root_volume = self.get_vm_volumes(vm.id)[0]
+
+ target_pool = self.get_target_pool(root_volume.id)
+ volume1.target_pool = self.get_target_pool(volume1.id)
+ volume2.target_pool = self.get_target_pool(volume2.id)
+
+ target_host = self.get_target_host(vm.id)
+
+ cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
+
+ cmd.migrateto = [{"volume": str(root_volume.id), "pool": str(target_pool.id)},
+ {"volume": str(volume1.id), "pool": str(volume1.target_pool.id)},
+ {"volume": str(volume2.id), "pool": str(volume2.target_pool.id)}]
+ cmd.virtualmachineid = vm.id
+ cmd.hostid = target_host.id
+
+ response = self.apiclient.migrateVirtualMachineWithVolume(cmd)
+
+ self.assertEqual(Volume.list(self.apiclient, id=root_volume.id)[0].storageid,
+ target_pool.id,
+ "Pool ID not as expected")
+
+ self.assertEqual(Volume.list(self.apiclient, id=volume1.id)[0].storageid,
+ volume1.target_pool.id,
+ "Pool ID not as expected")
+
+ self.assertEqual(Volume.list(self.apiclient, id=volume2.id)[0].storageid,
+ volume2.target_pool.id,
+ "Pool ID not as expected")
+
+ self.assertEqual(response.hostid,
+ target_host.id,
+ "HostID not as expected")
+
diff --git a/ui/config.js b/ui/config.js
new file mode 100644
index 00000000000..f715360fd0d
--- /dev/null
+++ b/ui/config.js
@@ -0,0 +1,40 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Define custom options configurable by admins for UI
+cloudStackOptions = {
+ aboutText: "label.app.name", // This is the text shown in the 'About' box
+ aboutTitle: "label.about.app", // This is the Application 'Title' shown in the 'About' box
+ docTitle: "label.app.name", // This is the Application 'Title' shown on browser tab.
+
+ helpURL: "http://docs.cloudstack.apache.org/", // This is the URL that opens when users click Help
+ keyboardOptions: {
+ "us": "label.standard.us.keyboard",
+ "uk": "label.uk.keyboard",
+ "fr": "label.french.azerty.keyboard",
+ "jp": "label.japanese.keyboard",
+ "sc": "label.simplified.chinese.keyboard"
+ },
+ hiddenFields: {
+ "metrics.zones":[], // Options - "name", "state", "clusters", "cpuused", "cpuallocated", "memused", "memallocated"
+ "metrics.clusters": [], // Options - "name", "state", "hosts", "cpuused", "cpuallocated", "memused", "memallocated"
+ "metrics.hosts": [], // Options - "name", "state", "powerstate", "instances", "cpuused", "memused", "network"
+ "metrics.storagepool": [], // Options - "name", "property", "disk",
+ "metrics.instances": [], // Options - "name", "state", "ipaddress", "zonename", "cpuused", "memused", "network", "disk"
+ "metrics.volumes": [] // Options - "name", "state", "vmname", "sizegb", "physicalsize", "utilization", "storagetype", "storage"
+ }
+};
diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css
index 5ef4a5844e1..0d17cdc6a98 100644
--- a/ui/css/cloudstack3.css
+++ b/ui/css/cloudstack3.css
@@ -565,18 +565,18 @@ body.login {
}
.blocking-overlay {
+ opacity: 0.7;
position: absolute;
- width: 100%;
- height: 100%;
- left: 0px;
- top: 0px;
- background: #F2F2F2;
z-index: 500;
+ top: 0;
+ left: 0;
+ width: 100%;
/*+opacity:70%;*/
+ height: 100%;
+ background: #f2f2f2;
filter: alpha(opacity=70);
-ms-filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=70);
-moz-opacity: 0.7;
- opacity: 0.7;
}
.loading-overlay {
@@ -1555,11 +1555,12 @@ div.list-view td.state.notsuitable-storage-migration-required span {
display: none !important;
}
-.quick-view-tooltip .detail-view .ui-tabs-panel {
+.quick-view-tooltip .detail-view.ui-tabs div.ui-tabs-panel {
display: inline-block;
float: left;
width: 100% !important;
height: auto;
+ background-color: inherit;
overflow: hidden;
}
@@ -4206,8 +4207,6 @@ textarea {
}
.dashboard.admin .dashboard-container {
- margin: 0 0 11px;
- padding: 0 8px 18px 0;
/*+border-radius:3px;*/
border: 1px solid #c8c2c2;
border-radius: 3px;
@@ -4218,7 +4217,7 @@ textarea {
}
.dashboard.admin .dashboard-container.sub {
- width: 468px;
+ width: 49.5%;
}
.dashboard.admin .dashboard-container.sub .button.view-all,
@@ -4274,13 +4273,15 @@ textarea {
/**** Head*/
.dashboard.admin .dashboard-container.head {
float: left;
- width: 966px;
- height: 431px;
+ box-sizing: border-box;
+ width: 100%;
margin: 9px 0 0;
+ padding: 0 0;
}
.dashboard.admin .dashboard-container .top {
float: left;
+ box-sizing: border-box;
width: 100%;
margin: 0;
padding: 4px 4px 8px;
@@ -4334,29 +4335,22 @@ textarea {
/**** Charts / stats*/
.dashboard.admin .zone-stats {
position: relative;
- top: 0;
- left: 0;
- width: 974px;
- /*+placement:shift 0px 0px;*/
- height: 416px;
+ width: 100%;
+ padding: 11px 0;
overflow: auto;
overflow-x: hidden;
}
.dashboard.admin .zone-stats ul {
position: relative;
- /*+placement:shift -2px 11px;*/
- top: 11px;
- left: -2px;
- width: 996px;
+ width: 100%;
}
.dashboard.admin .zone-stats ul li {
- position: absolute;
position: relative;
z-index: 1;
float: left;
- width: 488px;
+ width: 50%;
height: 79px;
font-size: 14px;
cursor: pointer;
@@ -4533,20 +4527,23 @@ textarea {
.dashboard.admin .dashboard-container.sub.alerts {
position: relative;
float: left;
- height: 170px;
- margin: 0 12px 0 0;
+ box-sizing: border-box;
+ height: 190px;
overflow: hidden;
}
-.dashboard.admin .dashboard-container.sub.alerts.last {
- margin-right: 0;
+.dashboard.admin .dashboard-container.sub.alerts.first {
+ margin-right: 1%;
+}
+
+.dashboard.admin .dashboard-container.sub.alerts .top {
+ height: 18%;
}
.dashboard.admin .dashboard-container.sub.alerts ul {
position: relative;
- width: 468px;
- height: 100%;
- margin: 0 0 0 8px;
+ width: 100%;
+ height: 82%;
overflow-y: scroll;
}
@@ -7597,7 +7594,6 @@ textarea {
div.toolbar,
.multi-wizard.zone-wizard .select-container .field .select-array-item {
display: block;
- height: 1px;
}
div.toolbar:after,
@@ -7818,7 +7814,7 @@ div.toolbar:after,
.ui-dialog .list-view .toolbar {
top: 50px;
- width: 854px;
+ width: 100%;
}
div.panel.ui-dialog div.list-view div.fixed-header {
diff --git a/ui/css/src/scss/cloudstack3.scss b/ui/css/src/scss/cloudstack3.scss
index 5a20e0db7c7..0d523cad937 100644
--- a/ui/css/src/scss/cloudstack3.scss
+++ b/ui/css/src/scss/cloudstack3.scss
@@ -29,6 +29,7 @@
@import 'components/login';
@import 'components/dialog-about';
+@import 'components/blocking-overlay';
@import 'components/loading-overlay';
@import 'components/install-wizzard';
@import 'components/notifications';
diff --git a/ui/css/src/scss/components/blocking-overlay.scss b/ui/css/src/scss/components/blocking-overlay.scss
new file mode 100644
index 00000000000..d4167e55552
--- /dev/null
+++ b/ui/css/src/scss/components/blocking-overlay.scss
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+.blocking-overlay {
+ opacity: 0.7;
+ position: absolute;
+ z-index: $z-index-loading-overlay;
+ top: 0;
+ left: 0;
+ width: 100%;
+ /*+opacity:70%;*/
+ height: 100%;
+ background: #f2f2f2;
+ filter: alpha(opacity=70);
+ -ms-filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=70);
+ -moz-opacity: 0.7;
+}
diff --git a/ui/css/src/scss/components/dashboard.scss b/ui/css/src/scss/components/dashboard.scss
index 049a71eb4e5..1051dba498a 100644
--- a/ui/css/src/scss/components/dashboard.scss
+++ b/ui/css/src/scss/components/dashboard.scss
@@ -24,8 +24,6 @@
}
.dashboard.admin .dashboard-container {
- margin: 0 0 11px;
- padding: 0 8px 18px 0;
/*+border-radius:3px;*/
border: 1px solid #c8c2c2;
border-radius: 3px;
@@ -36,7 +34,7 @@
}
.dashboard.admin .dashboard-container.sub {
- width: 468px;
+ width: 49.5%;
}
.dashboard.admin .dashboard-container.sub .button.view-all,
@@ -95,13 +93,15 @@
/**** Head*/
.dashboard.admin .dashboard-container.head {
float: left;
- width: 966px;
- height: 431px;
+ box-sizing: border-box;
+ width: 100%;
margin: 9px 0 0;
+ padding: 0 0;
}
.dashboard.admin .dashboard-container .top {
float: left;
+ box-sizing: border-box;
width: 100%;
margin: 0;
padding: 4px 4px 8px;
@@ -155,29 +155,22 @@
/**** Charts / stats*/
.dashboard.admin .zone-stats {
position: relative;
- top: 0;
- left: 0;
- width: 974px;
- /*+placement:shift 0px 0px;*/
- height: 416px;
+ width: 100%;
+ padding: 11px 0;
overflow: auto;
overflow-x: hidden;
}
.dashboard.admin .zone-stats ul {
position: relative;
- /*+placement:shift -2px 11px;*/
- top: 11px;
- left: -2px;
- width: 996px;
+ width: 100%;
}
.dashboard.admin .zone-stats ul li {
- position: absolute;
position: relative;
z-index: $z-index-standard;
float: left;
- width: 488px;
+ width: 50%;
height: 79px;
font-size: 14px;
cursor: pointer;
@@ -354,20 +347,23 @@
.dashboard.admin .dashboard-container.sub.alerts {
position: relative;
float: left;
- height: 170px;
- margin: 0 12px 0 0;
+ box-sizing: border-box;
+ height: 190px;
overflow: hidden;
}
-.dashboard.admin .dashboard-container.sub.alerts.last {
- margin-right: 0;
+.dashboard.admin .dashboard-container.sub.alerts.first {
+ margin-right: 1%;
}
+.dashboard.admin .dashboard-container.sub.alerts .top {
+ height: 18%;
+}
+
.dashboard.admin .dashboard-container.sub.alerts ul {
position: relative;
- width: 468px;
- height: 100%;
- margin: 0 0 0 8px;
+ width: 100%;
+ height: 82%;
overflow-y: scroll;
}
diff --git a/ui/css/src/scss/components/list-view.scss b/ui/css/src/scss/components/list-view.scss
index 51d6ac2d443..17d506bf9e3 100644
--- a/ui/css/src/scss/components/list-view.scss
+++ b/ui/css/src/scss/components/list-view.scss
@@ -24,7 +24,7 @@
.ui-dialog .list-view .toolbar {
top: 50px;
- width: 854px;
+ width: 100%;
}
div.panel.ui-dialog div.list-view div.fixed-header {
diff --git a/ui/css/src/scss/components/multi-wizzard.scss b/ui/css/src/scss/components/multi-wizzard.scss
index 3b74f739c27..7c4908a75ef 100644
--- a/ui/css/src/scss/components/multi-wizzard.scss
+++ b/ui/css/src/scss/components/multi-wizzard.scss
@@ -670,11 +670,11 @@
margin-top: 9px !important;
}
-.multi-wizard.instance-wizard .custom-disk-size .select-container {
+.multi-wizard.instance-wizard .custom-slider-container .select-container {
height: 279px;
}
-.multi-wizard.instance-wizard .custom-disk-size .select-container {
+.multi-wizard.instance-wizard .custom-slider-container .select-container {
height: 213px;
margin: -7px 6px 0 8px;
/*+border-radius:6px;*/
@@ -773,7 +773,11 @@
font-size: 10px;
}
-.instance-wizard .step.data-disk-offering.custom-disk-size .select-container {
+.instance-wizard .step.data-disk-offering.custom-slider-container .select-container {
+ height: 272px;
+}
+
+.instance-wizard .step.service-offering.custom-slider-container .select-container {
height: 272px;
}
@@ -781,11 +785,15 @@
height: 240px;
}
-.instance-wizard .step.data-disk-offering.custom-disk-size.custom-iops-do .select-container {
+.instance-wizard .step.data-disk-offering.custom-slider-container.custom-iops-do .select-container {
height: 176px;
}
-.instance-wizard .step.data-disk-offering.required.custom-disk-size .select-container {
+.instance-wizard .step.service-offering.required.custom-slider-container .select-container {
+ height: 315px;
+}
+
+.instance-wizard .step.data-disk-offering.required.custom-slider-container .select-container {
height: 315px;
}
@@ -793,7 +801,7 @@
height: 295px;
}
-.instance-wizard .step.data-disk-offering.required.custom-disk-size.custom-iops-do .select-container {
+.instance-wizard .step.data-disk-offering.required.custom-slider-container.custom-iops-do .select-container {
height: 223px;
}
@@ -2209,7 +2217,6 @@
div.toolbar,
.multi-wizard.zone-wizard .select-container .field .select-array-item {
display: block;
- height: 1px;
}
div.toolbar:after,
diff --git a/ui/css/src/scss/components/quick-view-tooltip.scss b/ui/css/src/scss/components/quick-view-tooltip.scss
index 51bf0325ec9..68932e0c00a 100644
--- a/ui/css/src/scss/components/quick-view-tooltip.scss
+++ b/ui/css/src/scss/components/quick-view-tooltip.scss
@@ -92,11 +92,12 @@
display: none !important;
}
-.quick-view-tooltip .detail-view .ui-tabs-panel {
+.quick-view-tooltip .detail-view.ui-tabs div.ui-tabs-panel {
display: inline-block;
float: left;
width: 100% !important;
height: auto;
+ background-color: inherit;
overflow: hidden;
}
diff --git a/ui/index.html b/ui/index.html
index e725b8153de..8645fc252c8 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -311,9 +311,9 @@
+ ():
@@ -431,7 +431,7 @@
- ()
+ ()
@@ -440,7 +440,7 @@
- ()
+ ()
@@ -575,7 +575,7 @@
- ()
+ ()
@@ -1429,12 +1429,12 @@
-
+ :
-
+ :
@@ -1495,7 +1495,7 @@
@@ -1512,7 +1512,7 @@
@@ -1520,13 +1520,13 @@
|
- :
+ :
|
|
- :
+ :
|
@@ -1762,6 +1762,7 @@
+
@@ -1920,5 +1920,8 @@
+
+
+