diff --git a/api/src/main/java/com/cloud/agent/api/to/DiskTO.java b/api/src/main/java/com/cloud/agent/api/to/DiskTO.java index 7b3d10bc4db..d22df2df172 100644 --- a/api/src/main/java/com/cloud/agent/api/to/DiskTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/DiskTO.java @@ -40,6 +40,7 @@ public class DiskTO { public static final String VMDK = "vmdk"; public static final String EXPAND_DATASTORE = "expandDatastore"; public static final String TEMPLATE_RESIGN = "templateResign"; + public static final String SECRET_CONSUMER_DETAIL = "storageMigrateSecretConsumer"; private DataTO data; private Long diskSeq; diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java index e5a3889ff18..7563bc3b742 100644 --- a/api/src/main/java/com/cloud/host/Host.java +++ b/api/src/main/java/com/cloud/host/Host.java @@ -53,6 +53,7 @@ public interface Host extends StateObject, Identity, Partition, HAResour } } public static final String HOST_UEFI_ENABLE = "host.uefi.enable"; + public static final String HOST_VOLUME_ENCRYPTION = "host.volume.encryption"; /** * @return name of the machine. 
diff --git a/api/src/main/java/com/cloud/offering/DiskOffering.java b/api/src/main/java/com/cloud/offering/DiskOffering.java index fd21118e4a2..13ba9a31394 100644 --- a/api/src/main/java/com/cloud/offering/DiskOffering.java +++ b/api/src/main/java/com/cloud/offering/DiskOffering.java @@ -153,4 +153,8 @@ public interface DiskOffering extends InfrastructureEntity, Identity, InternalId void setCacheMode(DiskCacheMode cacheMode); Type getType(); + + boolean getEncrypt(); + + void setEncrypt(boolean encrypt); } diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index d44628a198a..f053620ba51 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -130,32 +130,34 @@ public class Storage { } public static enum StoragePoolType { - Filesystem(false, true), // local directory - NetworkFilesystem(true, true), // NFS - IscsiLUN(true, false), // shared LUN, with a clusterfs overlay - Iscsi(true, false), // for e.g., ZFS Comstar - ISO(false, false), // for iso image - LVM(false, false), // XenServer local LVM SR - CLVM(true, false), - RBD(true, true), // http://libvirt.org/storage.html#StorageBackendRBD - SharedMountPoint(true, false), - VMFS(true, true), // VMware VMFS storage - PreSetup(true, true), // for XenServer, Storage Pool is set up by customers. 
- EXT(false, true), // XenServer local EXT SR - OCFS2(true, false), - SMB(true, false), - Gluster(true, false), - PowerFlex(true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) - ManagedNFS(true, false), - Linstor(true, true), - DatastoreCluster(true, true); // for VMware, to abstract pool of clusters + Filesystem(false, true, true), // local directory + NetworkFilesystem(true, true, true), // NFS + IscsiLUN(true, false, false), // shared LUN, with a clusterfs overlay + Iscsi(true, false, false), // for e.g., ZFS Comstar + ISO(false, false, false), // for iso image + LVM(false, false, false), // XenServer local LVM SR + CLVM(true, false, false), + RBD(true, true, false), // http://libvirt.org/storage.html#StorageBackendRBD + SharedMountPoint(true, false, true), + VMFS(true, true, false), // VMware VMFS storage + PreSetup(true, true, false), // for XenServer, Storage Pool is set up by customers. + EXT(false, true, false), // XenServer local EXT SR + OCFS2(true, false, false), + SMB(true, false, false), + Gluster(true, false, false), + PowerFlex(true, true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) + ManagedNFS(true, false, false), + Linstor(true, true, false), + DatastoreCluster(true, true, false); // for VMware, to abstract pool of clusters private final boolean shared; private final boolean overprovisioning; + private final boolean encryption; - StoragePoolType(boolean shared, boolean overprovisioning) { + StoragePoolType(boolean shared, boolean overprovisioning, boolean encryption) { this.shared = shared; this.overprovisioning = overprovisioning; + this.encryption = encryption; } public boolean isShared() { @@ -165,6 +167,8 @@ public class Storage { public boolean supportsOverProvisioning() { return overprovisioning; } + + public boolean supportsEncryption() { return encryption; } } public static List getNonSharedStoragePoolTypes() { diff --git a/api/src/main/java/com/cloud/storage/Volume.java 
b/api/src/main/java/com/cloud/storage/Volume.java index a863c3e989b..32d1f3f54be 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -243,4 +243,12 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba boolean isDisplay(); boolean isDeployAsIs(); + + public Long getPassphraseId(); + + public void setPassphraseId(Long id); + + public String getEncryptFormat(); + + public void setEncryptFormat(String encryptFormat); } diff --git a/api/src/main/java/com/cloud/vm/DiskProfile.java b/api/src/main/java/com/cloud/vm/DiskProfile.java index 175a92afaf9..7e0484de0e5 100644 --- a/api/src/main/java/com/cloud/vm/DiskProfile.java +++ b/api/src/main/java/com/cloud/vm/DiskProfile.java @@ -42,6 +42,7 @@ public class DiskProfile { private Long iopsReadRate; private Long iopsWriteRate; private String cacheMode; + private boolean requiresEncryption; private HypervisorType hyperType; @@ -61,6 +62,12 @@ public class DiskProfile { this.volumeId = volumeId; } + public DiskProfile(long volumeId, Volume.Type type, String name, long diskOfferingId, long size, String[] tags, boolean useLocalStorage, boolean recreatable, + Long templateId, boolean requiresEncryption) { + this(volumeId, type, name, diskOfferingId, size, tags, useLocalStorage, recreatable, templateId); + this.requiresEncryption = requiresEncryption; + } + public DiskProfile(Volume vol, DiskOffering offering, HypervisorType hyperType) { this(vol.getId(), vol.getVolumeType(), @@ -73,6 +80,7 @@ public class DiskProfile { null); this.hyperType = hyperType; this.provisioningType = offering.getProvisioningType(); + this.requiresEncryption = offering.getEncrypt() || vol.getPassphraseId() != null; } public DiskProfile(DiskProfile dp) { @@ -227,4 +235,8 @@ public class DiskProfile { public String getCacheMode() { return cacheMode; } + + public boolean requiresEncryption() { return requiresEncryption; } + + public void setEncryption(boolean 
encrypt) { this.requiresEncryption = encrypt; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 7580bc238d7..4f80e47b9e9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -105,6 +105,9 @@ public class ApiConstants { public static final String CUSTOM_JOB_ID = "customjobid"; public static final String CURRENT_START_IP = "currentstartip"; public static final String CURRENT_END_IP = "currentendip"; + public static final String ENCRYPT = "encrypt"; + public static final String ENCRYPT_ROOT = "encryptroot"; + public static final String ENCRYPTION_SUPPORTED = "encryptionsupported"; public static final String MIN_IOPS = "miniops"; public static final String MAX_IOPS = "maxiops"; public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index 4a25cef26c2..45a258f144d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ -160,9 +160,14 @@ public class CreateDiskOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.STORAGE_POLICY, type = CommandType.UUID, entityType = VsphereStoragePoliciesResponse.class,required = false, description = "Name of the storage policy defined at vCenter, this is applicable only for VMware", since = "4.15") private Long storagePolicy; + @Parameter(name = ApiConstants.ENCRYPT, type = CommandType.BOOLEAN, required=false, description = "Volumes using this offering should be encrypted") + private Boolean encrypt; + @Parameter(name = ApiConstants.DETAILS, type = 
CommandType.MAP, description = "details to specify disk offering parameters", since = "4.16") private Map details; + + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -199,6 +204,13 @@ public class CreateDiskOfferingCmd extends BaseCmd { return maxIops; } + public boolean getEncrypt() { + if (encrypt == null) { + return false; + } + return encrypt; + } + public List getDomainIds() { if (CollectionUtils.isNotEmpty(domainIds)) { Set set = new LinkedHashSet<>(domainIds); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index b30156097f3..e7a3fb562ff 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -227,6 +227,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { description = "true if virtual machine needs to be dynamically scalable of cpu or memory") protected Boolean isDynamicScalingEnabled; + @Parameter(name = ApiConstants.ENCRYPT_ROOT, type = CommandType.BOOLEAN, description = "VMs using this offering require root volume encryption", since="4.16") + private Boolean encryptRoot; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -449,6 +452,14 @@ public class CreateServiceOfferingCmd extends BaseCmd { return isDynamicScalingEnabled == null ? 
true : isDynamicScalingEnabled; } + + public boolean getEncryptRoot() { + if (encryptRoot != null) { + return encryptRoot; + } + return false; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java index 92b8676b469..d01d85d3928 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java @@ -50,6 +50,12 @@ public class ListDiskOfferingsCmd extends BaseListDomainResourcesCmd { since = "4.13") private Long zoneId; + @Parameter(name = ApiConstants.ENCRYPT, + type = CommandType.BOOLEAN, + description = "listed offerings support disk encryption", + since = "4.16") + private Boolean encrypt; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -66,6 +72,8 @@ public class ListDiskOfferingsCmd extends BaseListDomainResourcesCmd { return zoneId; } + public Boolean getEncrypt() { return encrypt; } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java index 91cac0937d4..517b678d36d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java @@ -83,6 +83,12 @@ 
public class ListServiceOfferingsCmd extends BaseListDomainResourcesCmd { since = "4.15") private Integer cpuSpeed; + @Parameter(name = ApiConstants.ENCRYPT_ROOT, + type = CommandType.BOOLEAN, + description = "listed offerings support root disk encryption", + since = "4.16") + private Boolean encryptRoot; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -123,6 +129,8 @@ public class ListServiceOfferingsCmd extends BaseListDomainResourcesCmd { return cpuSpeed; } + public Boolean getEncryptRoot() { return encryptRoot; } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index c9215703880..1cc806f6f8e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -226,6 +226,10 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Snapshot from volume [%s] was not found in database.", getVolumeUuid())); } } catch (Exception e) { + if (e.getCause() instanceof UnsupportedOperationException) { + throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, String.format("Failed to create snapshot due to unsupported operation: %s", e.getCause().getMessage())); + } + String errorMessage = "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid(); s_logger.error(errorMessage, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); diff --git 
a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java index 5f61060a014..8054405e6cd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/DiskOfferingResponse.java @@ -156,6 +156,10 @@ public class DiskOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "the vsphere storage policy tagged to the disk offering in case of VMware", since = "4.15") private String vsphereStoragePolicy; + @SerializedName(ApiConstants.ENCRYPT) + @Param(description = "Whether disks using this offering will be encrypted on primary storage") + private Boolean encrypt; + @SerializedName(ApiConstants.DETAILS) @Param(description = "additional key/value details tied with this disk offering", since = "4.16.1") private Map details; @@ -369,6 +373,8 @@ public class DiskOfferingResponse extends BaseResponseWithAnnotations { this.vsphereStoragePolicy = vsphereStoragePolicy; } + public void setEncrypt(Boolean encrypt) { this.encrypt = encrypt; } + public void setDetails(Map details) { this.details = details; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index fcf0870bcdd..69c25c5fa79 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -270,6 +270,10 @@ public class HostResponse extends BaseResponseWithAnnotations { @Param(description = "true if the host has capability to support UEFI boot") private Boolean uefiCapabilty; + @SerializedName(ApiConstants.ENCRYPTION_SUPPORTED) + @Param(description = "true if the host supports encryption") + private Boolean encryptionSupported; + @Override public String getObjectId() { return this.getId(); @@ -540,6 +544,13 @@ 
public class HostResponse extends BaseResponseWithAnnotations { this.setUefiCapabilty(new Boolean(false)); // in case of existing host which is not scanned for UEFI capability } + if (detailsCopy.containsKey(Host.HOST_VOLUME_ENCRYPTION)) { + this.setEncryptionSupported(Boolean.parseBoolean((String) detailsCopy.get(Host.HOST_VOLUME_ENCRYPTION))); + detailsCopy.remove(Host.HOST_VOLUME_ENCRYPTION); + } else { + this.setEncryptionSupported(Boolean.FALSE); // default + } + this.details = detailsCopy; } @@ -725,4 +736,8 @@ public class HostResponse extends BaseResponseWithAnnotations { public void setUefiCapabilty(Boolean hostCapability) { this.uefiCapabilty = hostCapability; } + + public void setEncryptionSupported(Boolean encryptionSupported) { this.encryptionSupported = encryptionSupported; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index ea9d8eef7a0..224ff0b6577 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -208,6 +208,10 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "true if virtual machine needs to be dynamically scalable of cpu or memory", since = "4.16") private Boolean dynamicScalingEnabled; + @SerializedName(ApiConstants.ENCRYPT_ROOT) + @Param(description = "true if virtual machine root disk will be encrypted on storage", since = "4.16") + private Boolean encryptRoot; + public ServiceOfferingResponse() { } @@ -486,4 +490,6 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { public void setDynamicScalingEnabled(Boolean dynamicScalingEnabled) { this.dynamicScalingEnabled = dynamicScalingEnabled; } + + public void setEncryptRoot(Boolean encrypt) { this.encryptRoot = encrypt; } } diff --git 
a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java index be84cce152d..552ffb85aaf 100644 --- a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolAnswer.java @@ -30,14 +30,18 @@ public class ModifyStoragePoolAnswer extends Answer { private Map templateInfo; private String localDatastoreName; private String poolType; - private List datastoreClusterChildren = new ArrayList<>();; + private List datastoreClusterChildren = new ArrayList<>(); public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map tInfo) { + this(cmd, capacityBytes, availableBytes, tInfo, null); + } + + public ModifyStoragePoolAnswer(ModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map tInfo, Map details) { super(cmd); result = true; - poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes); + poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes, details); templateInfo = tInfo; } diff --git a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java index c2ab0ab7f36..ad05fe1d615 100644 --- a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java @@ -20,6 +20,7 @@ package com.cloud.agent.api; import java.io.File; +import java.util.Map; import java.util.UUID; import com.cloud.agent.api.to.StorageFilerTO; @@ -32,6 +33,7 @@ public class ModifyStoragePoolCommand extends Command { private StorageFilerTO pool; private String localPath; private String storagePath; + private Map details; public 
ModifyStoragePoolCommand(boolean add, StoragePool pool, String localPath) { this.add = add; @@ -39,6 +41,11 @@ public class ModifyStoragePoolCommand extends Command { this.localPath = localPath; } + public ModifyStoragePoolCommand(boolean add, StoragePool pool, String localPath, Map details) { + this(add, pool, localPath); + this.details = details; + } + public ModifyStoragePoolCommand(boolean add, StoragePool pool) { this(add, pool, LOCAL_PATH_PREFIX + File.separator + UUID.nameUUIDFromBytes((pool.getHostAddress() + pool.getPath()).getBytes())); } @@ -67,6 +74,14 @@ public class ModifyStoragePoolCommand extends Command { return storagePath; } + public void setDetails(Map details) { + this.details = details; + } + + public Map getDetails() { + return details; + } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/com/cloud/agent/api/storage/ResizeVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/storage/ResizeVolumeCommand.java index 70d4d3ebab4..db867698e91 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/ResizeVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/ResizeVolumeCommand.java @@ -20,8 +20,11 @@ package com.cloud.agent.api.storage; import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.to.StorageFilerTO; +import java.util.Arrays; + public class ResizeVolumeCommand extends Command { private String path; private StorageFilerTO pool; @@ -35,6 +38,10 @@ public class ResizeVolumeCommand extends Command { private boolean managed; private String iScsiName; + @LogLevel(LogLevel.Log4jLevel.Off) + private byte[] passphrase; + private String encryptFormat; + protected ResizeVolumeCommand() { } @@ -48,6 +55,13 @@ public class ResizeVolumeCommand extends Command { this.managed = false; } + public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance, + String chainInfo, 
byte[] passphrase, String encryptFormat) { + this(path, pool, currentSize, newSize, shrinkOk, vmInstance, chainInfo); + this.passphrase = passphrase; + this.encryptFormat = encryptFormat; + } + public ResizeVolumeCommand(String path, StorageFilerTO pool, Long currentSize, Long newSize, boolean shrinkOk, String vmInstance, String chainInfo) { this(path, pool, currentSize, newSize, shrinkOk, vmInstance); this.chainInfo = chainInfo; @@ -89,6 +103,16 @@ public class ResizeVolumeCommand extends Command { public String getChainInfo() {return chainInfo; } + public String getEncryptFormat() { return encryptFormat; } + + public byte[] getPassphrase() { return passphrase; } + + public void clearPassphrase() { + if (this.passphrase != null) { + Arrays.fill(this.passphrase, (byte) 0); + } + } + /** * {@inheritDoc} */ diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 36c35e57273..7fb12d9afd0 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.to; +import com.cloud.agent.api.LogLevel; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import com.cloud.agent.api.to.DataObjectType; @@ -30,6 +31,8 @@ import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; +import java.util.Arrays; + public class VolumeObjectTO implements DataTO { private String uuid; private Volume.Type volumeType; @@ -68,6 +71,10 @@ public class VolumeObjectTO implements DataTO { private String updatedDataStoreUUID; private String vSphereStoragePolicyId; + @LogLevel(LogLevel.Log4jLevel.Off) + private byte[] passphrase; + private String encryptFormat; + public VolumeObjectTO() { } @@ -110,6 +117,8 @@ public class VolumeObjectTO implements DataTO { this.directDownload 
= volume.isDirectDownload(); this.deployAsIs = volume.isDeployAsIs(); this.vSphereStoragePolicyId = volume.getvSphereStoragePolicyId(); + this.passphrase = volume.getPassphrase(); + this.encryptFormat = volume.getEncryptFormat(); } public String getUuid() { @@ -357,4 +366,18 @@ public class VolumeObjectTO implements DataTO { public void setvSphereStoragePolicyId(String vSphereStoragePolicyId) { this.vSphereStoragePolicyId = vSphereStoragePolicyId; } + + public String getEncryptFormat() { return encryptFormat; } + + public void setEncryptFormat(String encryptFormat) { this.encryptFormat = encryptFormat; } + + public byte[] getPassphrase() { return passphrase; } + + public void setPassphrase(byte[] passphrase) { this.passphrase = passphrase; } + + public void clearPassphrase() { + if (this.passphrase != null) { + Arrays.fill(this.passphrase, (byte) 0); + } + } } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java index ec272501998..6f6e79d067e 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java @@ -23,14 +23,22 @@ import java.util.List; public interface EndPointSelector { EndPoint select(DataObject srcData, DataObject destData); + EndPoint select(DataObject srcData, DataObject destData, boolean encryptionSupportRequired); + EndPoint select(DataObject srcData, DataObject destData, StorageAction action); + EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionSupportRequired); + EndPoint select(DataObject object); EndPoint select(DataStore store); + EndPoint select(DataObject object, boolean encryptionSupportRequired); + EndPoint select(DataObject object, StorageAction action); + EndPoint select(DataObject 
object, StorageAction action, boolean encryptionSupportRequired); + List selectAll(DataStore store); List findAllEndpointsForScope(DataStore store); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java index eafc3b7e85c..a22b66af193 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java @@ -92,4 +92,6 @@ public interface VolumeInfo extends DataObject, Volume { String getDeployAsIsConfiguration(); public String getvSphereStoragePolicyId(); + + public byte[] getPassphrase(); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index c00273ee762..821809b39f4 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -267,7 +267,7 @@ public interface StorageManager extends StorageService { boolean registerHostListener(String providerUuid, HypervisorHostListener listener); - void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 9c1e998459d..e42720266f9 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -37,6 +37,8 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.secret.dao.PassphraseDao; +import org.apache.cloudstack.secret.PassphraseVO; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; @@ -233,6 +235,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati private SecondaryStorageVmDao secondaryStorageVmDao; @Inject VolumeApiService _volumeApiService; + @Inject + PassphraseDao _passphraseDao; private final StateMachine2 _volStateMachine; protected List _storagePoolAllocators; @@ -266,7 +270,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati // Find a destination storage pool with the specified criteria DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId()); DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.isUseLocalStorage(), diskOffering.isRecreatable(), null); + diskOffering.isUseLocalStorage(), diskOffering.isRecreatable(), null, (diskOffering.getEncrypt() || volume.getPassphraseId() != null)); dskCh.setHyperType(dataDiskHyperType); storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering); @@ -300,6 +304,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati newVol.setInstanceId(oldVol.getInstanceId()); newVol.setRecreatable(oldVol.isRecreatable()); newVol.setFormat(oldVol.getFormat()); + + if (oldVol.getPassphraseId() 
!= null) { + PassphraseVO passphrase = _passphraseDao.persist(new PassphraseVO()); + passphrase.clearPassphrase(); + newVol.setPassphraseId(passphrase.getId()); + } + return _volsDao.persist(newVol); } @@ -414,6 +425,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati Pair pod = null; DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId()); + if (diskOffering.getEncrypt()) { + VolumeVO vol = (VolumeVO) volume; + volume = setPassphraseForVolumeEncryption(vol); + } DataCenter dc = _entityMgr.findById(DataCenter.class, volume.getDataCenterId()); DiskProfile dskCh = new DiskProfile(volume, diskOffering, snapshot.getHypervisorType()); @@ -576,6 +591,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } protected DiskProfile createDiskCharacteristics(VolumeInfo volume, VirtualMachineTemplate template, DataCenter dc, DiskOffering diskOffering) { + boolean requiresEncryption = diskOffering.getEncrypt() || volume.getPassphraseId() != null; if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { TemplateDataStoreVO ss = _vmTemplateStoreDao.findByTemplateZoneDownloadStatus(template.getId(), dc.getId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (ss == null) { @@ -583,10 +599,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), diskOffering.isUseLocalStorage(), - diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? 
template.getId() : null, requiresEncryption); } else { return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.isUseLocalStorage(), diskOffering.isRecreatable(), null); + diskOffering.isUseLocalStorage(), diskOffering.isRecreatable(), null, requiresEncryption); } } @@ -640,8 +656,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering); } - if (diskOffering != null && diskOffering.isCustomized()) { - dskCh.setSize(size); + if (diskOffering != null) { + if (diskOffering.isCustomized()) { + dskCh.setSize(size); + } + + VolumeVO vol = _volsDao.findById(volume.getId()); + if (diskOffering.getEncrypt()) { + setPassphraseForVolumeEncryption(vol); + volume = volFactory.getVolume(volume.getId()); + } } dskCh.setHyperType(hyperType); @@ -679,7 +703,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati throw new CloudRuntimeException("create volume failed:" + result.getResult()); } } - return result.getVolume(); } catch (InterruptedException e) { s_logger.error("create volume failed", e); @@ -1524,6 +1547,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); } if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) { + DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId()); + if (diskOffering.getEncrypt()) { + vol = setPassphraseForVolumeEncryption(vol); + } newVol = vol; } else { newVol = switchVolume(vol, vm); @@ -1631,6 +1658,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati return new Pair(newVol, destPool); } + private VolumeVO setPassphraseForVolumeEncryption(VolumeVO volume) { + if (volume.getPassphraseId() != null) 
{ + return volume; + } + s_logger.debug("Creating passphrase for the volume: " + volume.getName()); + long startTime = System.currentTimeMillis(); + PassphraseVO passphrase = _passphraseDao.persist(new PassphraseVO()); + passphrase.clearPassphrase(); + volume.setPassphraseId(passphrase.getId()); + long finishTime = System.currentTimeMillis(); + s_logger.debug("Creating and persisting passphrase took: " + (finishTime - startTime) + " ms for the volume: " + volume.toString()); + return _volsDao.persist(volume); + } + @Override public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException { diff --git a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java index 952d3014209..0e6bf0a431b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/DiskOfferingVO.java @@ -138,6 +138,8 @@ public class DiskOfferingVO implements DiskOffering { @Column(name = "iops_write_rate_max_length") private Long iopsWriteRateMaxLength; + @Column(name = "encrypt") + private boolean encrypt; @Column(name = "cache_mode", updatable = true, nullable = false) @Enumerated(value = EnumType.STRING) @@ -585,7 +587,14 @@ public class DiskOfferingVO implements DiskOffering { return hypervisorSnapshotReserve; } + @Override + public boolean getEncrypt() { return encrypt; } + + @Override + public void setEncrypt(boolean encrypt) { this.encrypt = encrypt; } + public boolean isShared() { return !useLocalStorage; } + } diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index 1d8611625b7..43b3fc4191e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ 
-32,11 +32,12 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @Entity @Table(name = "volumes") @@ -170,6 +171,12 @@ public class VolumeVO implements Volume { @Transient private boolean deployAsIs; + @Column(name = "passphrase_id") + private Long passphraseId; + + @Column(name = "encrypt_format") + private String encryptFormat; + // Real Constructor public VolumeVO(Type type, String name, long dcId, long domainId, long accountId, long diskOfferingId, Storage.ProvisioningType provisioningType, long size, @@ -496,7 +503,7 @@ public class VolumeVO implements Volume { @Override public String toString() { - return new StringBuilder("Vol[").append(id).append("|vm=").append(instanceId).append("|").append(volumeType).append("]").toString(); + return new StringBuilder("Vol[").append(id).append("|name=").append(name).append("|vm=").append(instanceId).append("|").append(volumeType).append("]").toString(); } @Override @@ -648,4 +655,12 @@ public class VolumeVO implements Volume { public String getVolumeDescription(){ return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "name", "uuid"); } + + public Long getPassphraseId() { return passphraseId; } + + public void setPassphraseId(Long id) { this.passphraseId = id; } + + public String getEncryptFormat() { return encryptFormat; } + + public void setEncryptFormat(String encryptFormat) { this.encryptFormat = encryptFormat; } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java index 
9eb623a7bd6..417695b8f2d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java @@ -102,6 +102,13 @@ public interface VolumeDao extends GenericDao, StateDao findIncludingRemovedByZone(long zoneId); + /** + * Lists all volumes using a given passphrase ID + * @param passphraseId + * @return list of volumes + */ + List listVolumesByPassphraseId(long passphraseId); + /** * Gets the Total Primary Storage space allocated for an account * diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index d934f80dc4e..d6160c4d586 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -25,6 +25,7 @@ import java.util.List; import javax.inject.Inject; +import org.apache.cloudstack.secret.dao.PassphraseDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -68,6 +69,8 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected GenericSearchBuilder secondaryStorageSearch; @Inject ResourceTagDao _tagsDao; + @Inject + PassphraseDao _passphraseDao; protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? 
and v.mirror_state = ?"; // need to account for zone-wide primary storage where storage_pool has @@ -373,6 +376,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol AllFieldsSearch.and("updateTime", AllFieldsSearch.entity().getUpdated(), SearchCriteria.Op.LT); AllFieldsSearch.and("updatedCount", AllFieldsSearch.entity().getUpdatedCount(), Op.EQ); AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), Op.EQ); + AllFieldsSearch.and("passphraseId", AllFieldsSearch.entity().getPassphraseId(), Op.EQ); AllFieldsSearch.done(); RootDiskStateSearch = createSearchBuilder(); @@ -656,16 +660,25 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } + @Override + public List listVolumesByPassphraseId(long passphraseId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("passphraseId", passphraseId); + return listBy(sc); + } + @Override @DB public boolean remove(Long id) { TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); + s_logger.debug(String.format("Removing volume %s from DB", id)); VolumeVO entry = findById(id); if (entry != null) { _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume); } boolean result = super.remove(id); + txn.commit(); return result; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/secret/PassphraseVO.java b/engine/schema/src/main/java/org/apache/cloudstack/secret/PassphraseVO.java new file mode 100644 index 00000000000..44557b97abb --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/secret/PassphraseVO.java @@ -0,0 +1,55 @@ +package org.apache.cloudstack.secret; + +import com.cloud.utils.db.Encrypt; +import com.cloud.utils.exception.CloudRuntimeException; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import java.security.NoSuchAlgorithmException; +import 
java.security.SecureRandom; +import java.util.Arrays; +import java.util.Base64; + +@Entity +@Table(name = "passphrase") +public class PassphraseVO { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private Long id; + + @Column(name = "passphrase") + @Encrypt + private byte[] passphrase; + + public PassphraseVO() { + try { + SecureRandom random = SecureRandom.getInstanceStrong(); + byte[] temporary = new byte[48]; // 48 byte random passphrase buffer + this.passphrase = new byte[64]; // 48 byte random passphrase as base64 for usability + random.nextBytes(temporary); + Base64.getEncoder().encode(temporary, this.passphrase); + Arrays.fill(temporary, (byte) 0); // clear passphrase from buffer + } catch (NoSuchAlgorithmException ex) { + throw new CloudRuntimeException("Volume encryption requested but system is missing specified algorithm to generate passphrase", ex); + } + } + + public PassphraseVO(PassphraseVO existing) { + this.passphrase = existing.getPassphrase(); + } + + public void clearPassphrase() { + if (this.passphrase != null) { + Arrays.fill(this.passphrase, (byte) 0); + } + } + + public byte[] getPassphrase() { return this.passphrase; } + + public Long getId() { return this.id; } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDao.java b/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDao.java new file mode 100644 index 00000000000..fa3b1c2d2bd --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDao.java @@ -0,0 +1,7 @@ +package org.apache.cloudstack.secret.dao; + +import org.apache.cloudstack.secret.PassphraseVO; +import com.cloud.utils.db.GenericDao; + +public interface PassphraseDao extends GenericDao { +} \ No newline at end of file diff --git a/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDaoImpl.java new file
mode 100644 index 00000000000..f54d1e8f8b8 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/secret/dao/PassphraseDaoImpl.java @@ -0,0 +1,7 @@ +package org.apache.cloudstack.secret.dao; + +import org.apache.cloudstack.secret.PassphraseVO; +import com.cloud.utils.db.GenericDaoBase; + +public class PassphraseDaoImpl extends GenericDaoBase implements PassphraseDao { +} diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index 508b01c2b57..3447657ae3a 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -295,4 +295,5 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610-cleanup.sql index 9db01dd374a..9993611d497 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610-cleanup.sql @@ -17,4 +17,4 @@ --; -- Schema upgrade cleanup from 4.16.0.0 to 4.16.1.0 ---; \ No newline at end of file +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql index f5934ef8756..894a6a59e07 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql @@ -208,3 +208,182 @@ CREATE VIEW `cloud`.`event_view` AS `cloud`.`projects` ON projects.project_account_id = event.account_id LEFT JOIN `cloud`.`event` eve ON event.start_id = eve.id; + +-- Add passphrase table +CREATE TABLE IF NOT EXISTS `cloud`.`passphrase` ( + `id` bigint unsigned NOT 
NULL auto_increment, + `passphrase` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Add foreign key procedure to link volumes to passphrase table +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_FOREIGN_KEY`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_FOREIGN_KEY` ( + IN in_table_name VARCHAR(200), + IN in_foreign_table_name VARCHAR(200), + IN in_foreign_column_name VARCHAR(200) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1005 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' ADD CONSTRAINT '); SET @ddl = CONCAT(@ddl, 'fk_', in_foreign_table_name, '_', in_foreign_column_name); SET @ddl = CONCAT(@ddl, ' FOREIGN KEY (', in_foreign_table_name, '_', in_foreign_column_name, ')'); SET @ddl = CONCAT(@ddl, ' REFERENCES ', in_foreign_table_name, '(', in_foreign_column_name, ')'); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; + +-- Add passphrase column to volumes table +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'passphrase_id', 'bigint unsigned DEFAULT NULL COMMENT ''encryption passphrase id'' '); +CALL `cloud`.`IDEMPOTENT_ADD_FOREIGN_KEY`('cloud.volumes', 'passphrase', 'id'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'encrypt_format', 'varchar(64) DEFAULT NULL COMMENT ''encryption format'' '); + +-- Add encrypt column to disk_offering +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.disk_offering', 'encrypt', 'tinyint(1) DEFAULT 0 COMMENT ''volume encrypt requested'' '); + +DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; +CREATE VIEW `cloud`.`disk_offering_view` AS +SELECT + `disk_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`disk_size` AS `disk_size`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`created` 
AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`customized` AS `customized`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`sort_key` AS `sort_key`, + `disk_offering`.`type` AS `type`, + `disk_offering`.`display_offering` AS `display_offering`, + `disk_offering`.`state` AS `state`, + `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`, + `disk_offering`.`encrypt` AS `encrypt`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name +FROM + `cloud`.`disk_offering` + LEFT JOIN + `cloud`.`disk_offering_details` AS `domain_details` ON `domain_details`.`offering_id` 
= `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`disk_offering_details` AS `zone_details` ON `zone_details`.`offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + LEFT JOIN + `cloud`.`disk_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`offering_id` = `disk_offering`.`id` + AND `vsphere_storage_policy`.`name` = 'storagepolicy' +WHERE + `disk_offering`.`state`='Active' +GROUP BY + `disk_offering`.`id`; + +-- Add encrypt field to service_offering_view +DROP VIEW IF EXISTS `cloud`.`service_offering_view`; +CREATE VIEW `cloud`.`service_offering_view` AS + SELECT + `service_offering`.`id` AS `id`, + `disk_offering`.`uuid` AS `uuid`, + `disk_offering`.`name` AS `name`, + `disk_offering`.`display_text` AS `display_text`, + `disk_offering`.`provisioning_type` AS `provisioning_type`, + `disk_offering`.`created` AS `created`, + `disk_offering`.`tags` AS `tags`, + `disk_offering`.`removed` AS `removed`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `disk_offering`.`system_use` AS `system_use`, + `disk_offering`.`customized_iops` AS `customized_iops`, + `disk_offering`.`min_iops` AS `min_iops`, + `disk_offering`.`max_iops` AS `max_iops`, + `disk_offering`.`hv_ss_reserve` AS `hv_ss_reserve`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_read_rate_max` AS `bytes_read_rate_max`, + `disk_offering`.`bytes_read_rate_max_length` AS `bytes_read_rate_max_length`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`bytes_write_rate_max` AS `bytes_write_rate_max`, + `disk_offering`.`bytes_write_rate_max_length` AS `bytes_write_rate_max_length`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + 
`disk_offering`.`iops_read_rate_max` AS `iops_read_rate_max`, + `disk_offering`.`iops_read_rate_max_length` AS `iops_read_rate_max_length`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`iops_write_rate_max` AS `iops_write_rate_max`, + `disk_offering`.`iops_write_rate_max_length` AS `iops_write_rate_max_length`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `disk_offering`.`disk_size` AS `root_disk_size`, + `disk_offering`.`encrypt` AS `encrypt_root`, + `service_offering`.`cpu` AS `cpu`, + `service_offering`.`speed` AS `speed`, + `service_offering`.`ram_size` AS `ram_size`, + `service_offering`.`nw_rate` AS `nw_rate`, + `service_offering`.`mc_rate` AS `mc_rate`, + `service_offering`.`ha_enabled` AS `ha_enabled`, + `service_offering`.`limit_cpu_use` AS `limit_cpu_use`, + `service_offering`.`host_tag` AS `host_tag`, + `service_offering`.`default_use` AS `default_use`, + `service_offering`.`vm_type` AS `vm_type`, + `service_offering`.`sort_key` AS `sort_key`, + `service_offering`.`is_volatile` AS `is_volatile`, + `service_offering`.`deployment_planner` AS `deployment_planner`, + `service_offering`.`dynamic_scaling_enabled` AS `dynamic_scaling_enabled`, + `vsphere_storage_policy`.`value` AS `vsphere_storage_policy`, + GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, + GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, + GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, + GROUP_CONCAT(DISTINCT(domain.path)) AS domain_path, + GROUP_CONCAT(DISTINCT(zone.id)) AS zone_id, + GROUP_CONCAT(DISTINCT(zone.uuid)) AS zone_uuid, + GROUP_CONCAT(DISTINCT(zone.name)) AS zone_name, + IFNULL(`min_compute_details`.`value`, `cpu`) AS min_cpu, + IFNULL(`max_compute_details`.`value`, `cpu`) AS max_cpu, + IFNULL(`min_memory_details`.`value`, `ram_size`) AS min_memory, + IFNULL(`max_memory_details`.`value`, `ram_size`) AS max_memory + FROM + `cloud`.`service_offering` + INNER JOIN + `cloud`.`disk_offering_view` AS `disk_offering` ON service_offering.id = 
disk_offering.id + LEFT JOIN + `cloud`.`service_offering_details` AS `domain_details` ON `domain_details`.`service_offering_id` = `disk_offering`.`id` AND `domain_details`.`name`='domainid' + LEFT JOIN + `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `zone_details` ON `zone_details`.`service_offering_id` = `disk_offering`.`id` AND `zone_details`.`name`='zoneid' + LEFT JOIN + `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, `zone_details`.`value`) + LEFT JOIN + `cloud`.`service_offering_details` AS `min_compute_details` ON `min_compute_details`.`service_offering_id` = `disk_offering`.`id` + AND `min_compute_details`.`name` = 'mincpunumber' + LEFT JOIN + `cloud`.`service_offering_details` AS `max_compute_details` ON `max_compute_details`.`service_offering_id` = `disk_offering`.`id` + AND `max_compute_details`.`name` = 'maxcpunumber' + LEFT JOIN + `cloud`.`service_offering_details` AS `min_memory_details` ON `min_memory_details`.`service_offering_id` = `disk_offering`.`id` + AND `min_memory_details`.`name` = 'minmemory' + LEFT JOIN + `cloud`.`service_offering_details` AS `max_memory_details` ON `max_memory_details`.`service_offering_id` = `disk_offering`.`id` + AND `max_memory_details`.`name` = 'maxmemory' + LEFT JOIN + `cloud`.`service_offering_details` AS `vsphere_storage_policy` ON `vsphere_storage_policy`.`service_offering_id` = `disk_offering`.`id` + AND `vsphere_storage_policy`.`name` = 'storagepolicy' + WHERE + `disk_offering`.`state`='Active' + GROUP BY + `service_offering`.`id`; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 74de7652a1f..0d85f5e07cd 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ 
b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -315,7 +315,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { protected Answer cloneVolume(DataObject template, DataObject volume) { CopyCommand cmd = new CopyCommand(template.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(volume.getTO()), 0, VirtualMachineManager.ExecuteInSequence.value()); try { - EndPoint ep = selector.select(volume.getDataStore()); + EndPoint ep = selector.select(volume, anyVolumeRequiresEncryption(volume)); Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -350,14 +350,15 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { if (srcData instanceof VolumeInfo && ((VolumeInfo)srcData).isDirectDownload()) { bypassSecondaryStorage = true; } + boolean encryptionRequired = anyVolumeRequiresEncryption(srcData, destData); if (cacheStore == null) { if (bypassSecondaryStorage) { CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); - EndPoint ep = selector.select(srcData, destData); + EndPoint ep = selector.select(srcData, destData, encryptionRequired); Answer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { @@ -394,9 +395,9 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { objOnImageStore.processEvent(Event.CopyingRequested); CopyCommand cmd = new CopyCommand(objOnImageStore.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); - EndPoint ep = selector.select(objOnImageStore, destData); + EndPoint ep = selector.select(objOnImageStore, destData, encryptionRequired); if (ep == null) { - String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { @@ -426,10 +427,10 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } else { DataObject cacheData = cacheMgr.createCacheObject(srcData, destScope); CopyCommand cmd = new CopyCommand(cacheData.getTO(), destData.getTO(), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); - EndPoint ep = selector.select(cacheData, destData); + EndPoint ep = selector.select(cacheData, destData, encryptionRequired); Answer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { @@ -456,10 +457,12 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { command.setContextParam(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); } + boolean encryptionRequired = anyVolumeRequiresEncryption(srcData, destData); + EndPoint ep = selector.select(srcData, StorageAction.MIGRATEVOLUME); Answer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { @@ -582,6 +585,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } Map options = new HashMap(); options.put("fullSnapshot", fullSnapshot.toString()); + boolean encryptionRequired = anyVolumeRequiresEncryption(srcData, destData); Answer answer = null; try { if (needCacheStorage(srcData, destData)) { @@ -591,7 +595,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setCacheTO(cacheData.getTO()); cmd.setOptions(options); - EndPoint ep = selector.select(srcData, destData); + EndPoint ep = selector.select(srcData, destData, encryptionRequired); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; s_logger.error(errMsg); @@ -603,7 +607,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), 
_backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setOptions(options); - EndPoint ep = selector.select(srcData, destData, StorageAction.BACKUPSNAPSHOT); + EndPoint ep = selector.select(srcData, destData, StorageAction.BACKUPSNAPSHOT, encryptionRequired); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; s_logger.error(errMsg); @@ -634,4 +638,18 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { result.setResult("Unsupported operation requested for copying data."); callback.complete(result); } + + /** + * Does any object require encryption support? + */ + private boolean anyVolumeRequiresEncryption(DataObject ... objects) { + for (DataObject o : objects) { + if (o instanceof VolumeInfo && ((VolumeInfo) o).getPassphraseId() != null) { + return true; + } else if (o instanceof SnapshotInfo && ((SnapshotInfo) o).getBaseVolume().getPassphraseId() != null) { + return true; + } + } + return false; + } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index a62921c9d94..7a34b199cfc 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.secret.dao.PassphraseDao; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; 
@@ -53,6 +54,8 @@ public class DataMotionServiceImpl implements DataMotionService { StorageStrategyFactory storageStrategyFactory; @Inject VolumeDao volDao; + @Inject + PassphraseDao passphraseDao; @Override public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { @@ -98,7 +101,14 @@ public class DataMotionServiceImpl implements DataMotionService { volDao.update(sourceVO.getId(), sourceVO); destinationVO.setState(Volume.State.Expunged); destinationVO.setRemoved(new Date()); + Long passphraseId = destinationVO.getPassphraseId(); + destinationVO.setPassphraseId(null); volDao.update(destinationVO.getId(), destinationVO); + + if (passphraseId != null && volDao.listVolumesByPassphraseId(passphraseId).isEmpty()) { + passphraseDao.remove(passphraseId); + } + } @Override diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index aee1f75c352..0a4858cae73 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -59,6 +59,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.secret.dao.PassphraseDao; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.ResignatureAnswer; @@ -161,6 +162,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { @Inject private HostDao _hostDao; @Inject + private PassphraseDao _passphraseDao; +
@Inject protected PrimaryDataStoreDao _storagePoolDao; @Inject private SnapshotDao _snapshotDao; @@ -1735,9 +1738,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { */ protected MigrationOptions createLinkedCloneMigrationOptions(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, String srcVolumeBackingFile, String srcPoolUuid, Storage.StoragePoolType srcPoolType) { VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(destVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId(), null); - boolean updateBackingFileReference = ref == null; - String backingFile = ref != null ? ref.getInstallPath() : srcVolumeBackingFile; - return new MigrationOptions(srcPoolUuid, srcPoolType, backingFile, updateBackingFileReference); + + // if template exists on destination, use it as the backing file + if (ref != null) { + return new MigrationOptions(destVolumeInfo.getDataStore().getUuid(), destVolumeInfo.getStoragePoolType(), ref.getInstallPath(), false); + } else { + return new MigrationOptions(srcPoolUuid, srcPoolType, srcVolumeBackingFile, true); + } } /** @@ -1874,6 +1881,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath); migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool)); migrateDiskInfoList.add(migrateDiskInfo); + prepareDiskWithSecretConsumerDetail(vmTO, srcVolumeInfo, destVolumeInfo.getPath()); } migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo); @@ -2123,6 +2131,11 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { newVol.setPoolId(storagePoolVO.getId()); newVol.setLastPoolId(lastPoolId); + if (volume.getPassphraseId() != null) { + newVol.setPassphraseId(volume.getPassphraseId()); + newVol.setEncryptFormat(volume.getEncryptFormat()); + } + return _volumeDao.persist(newVol); } @@ -2206,6 +2219,23 @@ public class StorageSystemDataMotionStrategy implements 
DataMotionStrategy { } } + /** + * Include some destination volume info in vmTO, required for some PrepareForMigrationCommand processing + * + */ + protected void prepareDiskWithSecretConsumerDetail(VirtualMachineTO vmTO, VolumeInfo srcVolume, String destPath) { + if (vmTO.getDisks() != null) { + LOGGER.debug(String.format("Preparing VM TO '%s' disks with migration data", vmTO)); + Arrays.stream(vmTO.getDisks()).filter(diskTO -> diskTO.getData().getId() == srcVolume.getId()).forEach( diskTO -> { + Map details = diskTO.getDetails(); + if (diskTO.getDetails() == null) { + diskTO.setDetails(new HashMap<>()); + } + diskTO.getDetails().put(DiskTO.SECRET_CONSUMER_DETAIL, destPath); + }); + } + } + /** * At a high level: The source storage cannot be managed and * the destination storages can be all managed or all not managed, not mixed. diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index af206a7378e..c708ef75e23 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -26,17 +26,14 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.storage.StoragePoolStatus; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import com.cloud.capacity.dao.CapacityDao; @@ -44,10 +41,12 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.exception.StorageUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StorageUtil; import com.cloud.storage.Volume; import com.cloud.storage.dao.VolumeDao; @@ -220,7 +219,6 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { - if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName() + " ,poolId: " + pool.getId()); } @@ -231,6 +229,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } + if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType())); + } + return false; + } + Long clusterId = pool.getClusterId(); if (clusterId != null) { ClusterVO cluster = clusterDao.findById(clusterId); diff --git 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index 6a903e4235a..2a0f7102c50 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -65,6 +65,8 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; +import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION; + @Component public class DefaultEndPointSelector implements EndPointSelector { private static final Logger s_logger = Logger.getLogger(DefaultEndPointSelector.class); @@ -72,11 +74,14 @@ public class DefaultEndPointSelector implements EndPointSelector { private HostDao hostDao; @Inject private DedicatedResourceDao dedicatedResourceDao; + + private static final String volEncryptColumnName = "volume_encryption_support"; private final String findOneHostOnPrimaryStorage = "select t.id from " - + "(select h.id, cd.value " + + "(select h.id, cd.value, hd.value as " + volEncryptColumnName + " " + "from host h join storage_pool_host_ref s on h.id = s.host_id " + "join cluster c on c.id=h.cluster_id " + "left join cluster_details cd on c.id=cd.cluster_id and cd.name='" + CapacityManager.StorageOperationsExcludeCluster.key() + "' " + + "left join host_details hd on h.id=hd.host_id and hd.name='" + HOST_VOLUME_ENCRYPTION + "' " + "where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and s.pool_id = ? "; private String findOneHypervisorHostInScopeByType = "select h.id from host h where h.status = 'Up' and h.hypervisor_type = ? 
"; @@ -118,8 +123,12 @@ public class DefaultEndPointSelector implements EndPointSelector { } } - @DB protected EndPoint findEndPointInScope(Scope scope, String sqlBase, Long poolId) { + return findEndPointInScope(scope, sqlBase, poolId, false); + } + + @DB + protected EndPoint findEndPointInScope(Scope scope, String sqlBase, Long poolId, boolean volumeEncryptionSupportRequired) { StringBuilder sbuilder = new StringBuilder(); sbuilder.append(sqlBase); @@ -142,8 +151,13 @@ public class DefaultEndPointSelector implements EndPointSelector { dedicatedHosts = dedicatedResourceDao.listAllHosts(); } - // TODO: order by rand() is slow if there are lot of hosts sbuilder.append(") t where t.value<>'true' or t.value is null"); //Added for exclude cluster's subquery + + if (volumeEncryptionSupportRequired) { + sbuilder.append(String.format(" and t.%s='true'", volEncryptColumnName)); + } + + // TODO: order by rand() is slow if there are lot of hosts sbuilder.append(" ORDER by "); if (dedicatedHosts.size() > 0) { moveDedicatedHostsToLowerPriority(sbuilder, dedicatedHosts); @@ -208,7 +222,7 @@ public class DefaultEndPointSelector implements EndPointSelector { } } - protected EndPoint findEndPointForImageMove(DataStore srcStore, DataStore destStore) { + protected EndPoint findEndPointForImageMove(DataStore srcStore, DataStore destStore, boolean volumeEncryptionSupportRequired) { // find any xenserver/kvm host in the scope Scope srcScope = srcStore.getScope(); Scope destScope = destStore.getScope(); @@ -233,17 +247,22 @@ public class DefaultEndPointSelector implements EndPointSelector { poolId = destStore.getId(); } } - return findEndPointInScope(selectedScope, findOneHostOnPrimaryStorage, poolId); + return findEndPointInScope(selectedScope, findOneHostOnPrimaryStorage, poolId, volumeEncryptionSupportRequired); } @Override public EndPoint select(DataObject srcData, DataObject destData) { + return select( srcData, destData, false); + } + + @Override + public EndPoint 
select(DataObject srcData, DataObject destData, boolean volumeEncryptionSupportRequired) { DataStore srcStore = srcData.getDataStore(); DataStore destStore = destData.getDataStore(); if (moveBetweenPrimaryImage(srcStore, destStore)) { - return findEndPointForImageMove(srcStore, destStore); + return findEndPointForImageMove(srcStore, destStore, volumeEncryptionSupportRequired); } else if (moveBetweenPrimaryDirectDownload(srcStore, destStore)) { - return findEndPointForImageMove(srcStore, destStore); + return findEndPointForImageMove(srcStore, destStore, volumeEncryptionSupportRequired); } else if (moveBetweenCacheAndImage(srcStore, destStore)) { // pick ssvm based on image cache dc DataStore selectedStore = null; @@ -274,6 +293,11 @@ public class DefaultEndPointSelector implements EndPointSelector { @Override public EndPoint select(DataObject srcData, DataObject destData, StorageAction action) { + return select(srcData, destData, action, false); + } + + @Override + public EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionRequired) { s_logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) { SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; @@ -293,7 +317,7 @@ public class DefaultEndPointSelector implements EndPointSelector { } } } - return select(srcData, destData); + return select(srcData, destData, encryptionRequired); } protected EndPoint findEndpointForPrimaryStorage(DataStore store) { @@ -350,6 +374,15 @@ public class DefaultEndPointSelector implements EndPointSelector { return sc.list(); } + @Override + public EndPoint select(DataObject object, boolean encryptionSupportRequired) { + DataStore store = object.getDataStore(); + if (store.getRole() == DataStoreRole.Primary) { + return findEndPointInScope(store.getScope(), findOneHostOnPrimaryStorage, 
store.getId(), encryptionSupportRequired); + } + throw new CloudRuntimeException(String.format("Storage role %s doesn't support encryption", store.getRole())); + } + @Override public EndPoint select(DataObject object) { DataStore store = object.getDataStore(); @@ -415,6 +448,11 @@ public class DefaultEndPointSelector implements EndPointSelector { @Override public EndPoint select(DataObject object, StorageAction action) { + return select(object, action, false); + } + + @Override + public EndPoint select(DataObject object, StorageAction action, boolean encryptionRequired) { if (action == StorageAction.TAKESNAPSHOT) { SnapshotInfo snapshotInfo = (SnapshotInfo)object; if (snapshotInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) { @@ -446,7 +484,7 @@ public class DefaultEndPointSelector implements EndPointSelector { } } } - return select(object); + return select(object, encryptionRequired); } @Override diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 29405be79be..7f82ed16f08 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -22,6 +22,11 @@ import javax.inject.Inject; import com.cloud.dc.VsphereStoragePolicyVO; import com.cloud.dc.dao.VsphereStoragePolicyDao; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; +import org.apache.cloudstack.secret.dao.PassphraseDao; +import org.apache.cloudstack.secret.PassphraseVO; import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.MigrationOptions; import com.cloud.storage.VMTemplateVO; @@ -101,6 +106,8 @@ public class VolumeObject implements VolumeInfo { DiskOfferingDetailsDao diskOfferingDetailsDao; 
@Inject VsphereStoragePolicyDao vsphereStoragePolicyDao; + @Inject + PassphraseDao passphraseDao; private Object payload; private MigrationOptions migrationOptions; @@ -660,11 +667,13 @@ public class VolumeObject implements VolumeInfo { } protected void updateVolumeInfo(VolumeObjectTO newVolume, VolumeVO volumeVo, boolean setVolumeSize, boolean setFormat) { - String previousValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeVo, "path", "size", "format", "poolId"); + String previousValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeVo, "path", "size", "format", "encryptFormat", "poolId"); volumeVo.setPath(newVolume.getPath()); Long newVolumeSize = newVolume.getSize(); + volumeVo.setEncryptFormat(newVolume.getEncryptFormat()); + if (newVolumeSize != null && setVolumeSize) { volumeVo.setSize(newVolumeSize); } @@ -674,7 +683,7 @@ public class VolumeObject implements VolumeInfo { volumeVo.setPoolId(getDataStore().getId()); volumeDao.update(volumeVo.getId(), volumeVo); - String newValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeVo, "path", "size", "format", "poolId"); + String newValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeVo, "path", "size", "format", "encryptFormat", "poolId"); s_logger.debug(String.format("Updated %s from %s to %s ", volumeVo.getVolumeDescription(), previousValues, newValues)); } @@ -833,4 +842,61 @@ public class VolumeObject implements VolumeInfo { public Class getEntityType() { return Volume.class; } + + @Override + public Long getPassphraseId() { + return volumeVO.getPassphraseId(); + } + + @Override + public void setPassphraseId(Long id) { + volumeVO.setPassphraseId(id); + } + + /** + * Removes passphrase reference from underlying volume. Also removes the associated passphrase entry if it is the last user. 
+ */ + public void deletePassphrase() { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + Long passphraseId = volumeVO.getPassphraseId(); + if (passphraseId != null) { + volumeVO.setPassphraseId(null); + volumeDao.persist(volumeVO); + + s_logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); + List volumes = volumeDao.listVolumesByPassphraseId(passphraseId); + + if (volumes != null && !volumes.isEmpty()) { + s_logger.debug("Other volumes use this passphrase, skipping deletion"); + return; + } + + s_logger.debug(String.format("Deleting passphrase %s", passphraseId)); + passphraseDao.remove(passphraseId); + } + } + }); + } + + /** + * Looks up passphrase from underlying volume. + * @return passphrase as bytes + */ + public byte[] getPassphrase() { + PassphraseVO passphrase = passphraseDao.findById(volumeVO.getPassphraseId()); + if (passphrase != null) { + return passphrase.getPassphrase(); + } + return null; + } + + @Override + public String getEncryptFormat() { return volumeVO.getEncryptFormat(); } + + @Override + public void setEncryptFormat(String encryptFormat) { + volumeVO.setEncryptFormat(encryptFormat); + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 92526846729..9ba159738e9 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -28,6 +28,7 @@ import java.util.Random; import javax.inject.Inject; +import org.apache.cloudstack.secret.dao.PassphraseDao; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import org.apache.cloudstack.annotation.AnnotationService; @@ -194,6 
+195,8 @@ public class VolumeServiceImpl implements VolumeService { private StorageManager _storageMgr; @Inject private AnnotationDao annotationDao; + @Inject + private PassphraseDao passphraseDao; private final static String SNAPSHOT_ID = "SNAPSHOT_ID"; @@ -443,6 +446,11 @@ public class VolumeServiceImpl implements VolumeService { try { if (result.isSuccess()) { vo.processEvent(Event.OperationSuccessed); + + if (vo.getPassphraseId() != null) { + vo.deletePassphrase(); + } + if (canVolumeBeRemoved(vo.getId())) { s_logger.info("Volume " + vo.getId() + " is not referred anywhere, remove it from volumes table"); volDao.remove(vo.getId()); diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 880cffd3f05..86806c3b3d2 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -108,10 +108,53 @@ maven-surefire-plugin - **/Qemu*.java + **/QemuImg*.java + + + + skip.libvirt.tests + + + skip.libvirt.tests + true + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + package + + copy-dependencies + + + ${project.build.directory}/dependencies + runtime + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + **/QemuImg*.java + **/LibvirtComputingResourceTest.java + + + + + + + diff --git a/plugins/hypervisors/kvm/rn2-hyperd-lapp01.rno.apple.com b/plugins/hypervisors/kvm/rn2-hyperd-lapp01.rno.apple.com new file mode 100644 index 00000000000..0af63eeb999 Binary files /dev/null and b/plugins/hypervisors/kvm/rn2-hyperd-lapp01.rno.apple.com differ diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 2e4bafd8864..cbda6baf68f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -24,6 +24,8 @@ import java.io.StringReader; import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -47,10 +49,13 @@ import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import com.cloud.configuration.Config; +import com.cloud.utils.UuidUtils; import org.apache.cloudstack.storage.configdrive.ConfigDrive; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cryptsetup.CryptSetup; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; import org.apache.cloudstack.utils.hypervisor.HypervisorUtils; import org.apache.cloudstack.utils.linux.CPUStat; import org.apache.cloudstack.utils.linux.KVMHostInfo; @@ -59,6 +64,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; @@ -66,6 +72,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.math.NumberUtils; import org.apache.log4j.Logger; +import org.apache.xerces.impl.xpath.regex.Match; import org.joda.time.Duration; import org.libvirt.Connect; import org.libvirt.Domain; @@ -77,6 +84,7 @@ import org.libvirt.DomainSnapshot; import org.libvirt.LibvirtException; import org.libvirt.MemoryStatistic; import 
org.libvirt.Network; +import org.libvirt.Secret; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -189,6 +197,8 @@ import com.google.common.base.Strings; import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.libvirt.VcpuInfo; +import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION; + /** * LibvirtComputingResource execute requests on the computing/routing host using * the libvirt API @@ -676,6 +686,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected String dpdkOvsPath; protected String directDownloadTemporaryDownloadPath; protected String cachePath; + protected String javaTempDir = System.getProperty("java.io.tmpdir"); private String getEndIpFromStartIp(final String startIp, final int numIps) { final String[] tokens = startIp.split("[.]"); @@ -2926,6 +2937,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (volumeObjectTO.getCacheMode() != null) { disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase())); } + + if (volumeObjectTO.getPassphrase() != null && volumeObjectTO.getPassphrase().length > 0) { + String secretUuid = createLibvirtVolumeSecret(conn, volumeObjectTO.getPath(), volumeObjectTO.getPassphrase()); + DiskDef.LibvirtDiskEncryptDetails encryptDetails = new DiskDef.LibvirtDiskEncryptDetails(secretUuid, QemuObject.EncryptFormat.enumValue(volumeObjectTO.getEncryptFormat())); + disk.setLibvirtDiskEncryptDetails(encryptDetails); + } } if (vm.getDevices() == null) { s_logger.error("There is no devices for" + vm); @@ -3360,6 +3377,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv cmd.setCluster(_clusterId); cmd.setGatewayIpAddress(_localGateway); cmd.setIqn(getIqn()); + cmd.getHostDetails().put(HOST_VOLUME_ENCRYPTION, String.valueOf(hostSupportsVolumeEncryption())); if (cmd.getHostDetails().containsKey("Host.OS")) { _hostDistro = 
cmd.getHostDetails().get("Host.OS"); @@ -4634,6 +4652,47 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return true; } + /** + * Test host for volume encryption support + * @return boolean + */ + public boolean hostSupportsVolumeEncryption() { + // test qemu-img + Path testFile = Paths.get(javaTempDir, UUID.randomUUID().toString()).normalize().toAbsolutePath(); + String objectName = "sec0"; + + Map options = new HashMap(); + List passphraseObjects = new ArrayList<>(); + QemuImgFile file = new QemuImgFile(testFile.toString(), 64<<20, PhysicalDiskFormat.QCOW2); + + + try (KeyFile keyFile = new KeyFile(UUID.randomUUID().toString().getBytes())){ + QemuImg qemu = new QemuImg(0); + passphraseObjects.add(QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.QCOW2, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options)); + qemu.create(file, null, options, passphraseObjects); + s_logger.info("Host's qemu install supports encryption"); + } catch (QemuImgException | IOException | LibvirtException ex) { + s_logger.info("Host's qemu install doesn't support encryption", ex); + return false; + } + + // cleanup + try { + Files.deleteIfExists(testFile); + } catch (IOException ex) { + s_logger.warn(String.format("Failed to clean up test file '%s'", testFile.toAbsolutePath()), ex); + } + + // test cryptsetup + CryptSetup crypt = new CryptSetup(); + if (!crypt.isSupported()) { + s_logger.info("Host can't run cryptsetup"); + return false; + } + + return true; + } + public boolean isSecureMode(String bootMode) { if (StringUtils.isNotBlank(bootMode) && "secure".equalsIgnoreCase(bootMode)) { return true; @@ -4672,8 +4731,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public void setBackingFileFormat(String volPath) { final int timeout = 0; QemuImgFile file = new QemuImgFile(volPath); - QemuImg qemu = new QemuImg(timeout); + try{ + QemuImg qemu = new QemuImg(timeout); Map info = qemu.info(file); String 
backingFilePath = info.get(QemuImg.BACKING_FILE); String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT); @@ -4714,4 +4774,70 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv VcpuInfo vcpus[] = dm.getVcpusInfo(); return Arrays.stream(vcpus).filter(vcpu -> vcpu.state.equals(VcpuInfo.VcpuState.VIR_VCPU_RUNNING)).count(); } + + /** + * Set up a libvirt secret for a volume. If Libvirt says that a secret already exists for this volume path, we use its uuid. + * The UUID of the secret needs to be prescriptive such that we can register the same UUID on target host during live migration + * + * @param conn libvirt connection + * @param consumer identifier for volume in secret + * @param data secret contents + * @return uuid of matching secret for volume + * @throws LibvirtException + */ + public String createLibvirtVolumeSecret(Connect conn, String consumer, byte[] data) throws LibvirtException { + String secretUuid = null; + LibvirtSecretDef secretDef = new LibvirtSecretDef(LibvirtSecretDef.Usage.VOLUME, generateSecretUUIDFromString(consumer)); + secretDef.setVolumeVolume(consumer); + secretDef.setPrivate(true); + secretDef.setEphemeral(true); + + try { + Secret secret = conn.secretDefineXML(secretDef.toString()); + secret.setValue(data); + secretUuid = secret.getUUIDString(); + secret.free(); + } catch (LibvirtException ex) { + if (ex.getMessage().contains("already defined for use")) { + Match match = new Match(); + if (UuidUtils.REGEX.matches(ex.getMessage(), match)) { + secretUuid = match.getCapturedText(0); + s_logger.info(String.format("Reusing previously defined secret '%s' for volume '%s'", secretUuid, consumer)); + } else { + throw ex; + } + } else { + throw ex; + } + } + + return secretUuid; + } + + public void removeLibvirtVolumeSecret(Connect conn, String secretUuid) throws LibvirtException { + try { + Secret secret = conn.secretLookupByUUIDString(secretUuid); + secret.undefine(); + } catch (LibvirtException ex) { + 
if (ex.getMessage().contains("Secret not found")) { + s_logger.debug(String.format("Secret uuid %s doesn't exist", secretUuid)); + return; + } + throw ex; + } + s_logger.debug(String.format("Undefined secret %s", secretUuid)); + } + + public void cleanOldSecretsByDiskDef(Connect conn, List disks) throws LibvirtException { + for (DiskDef disk : disks) { + DiskDef.LibvirtDiskEncryptDetails encryptDetails = disk.getLibvirtDiskEncryptDetails(); + if (encryptDetails != null) { + removeLibvirtVolumeSecret(conn, encryptDetails.getPassphraseUuid()); + } + } + } + + public static String generateSecretUUIDFromString(String seed) { + return UUID.nameUUIDFromBytes(seed.getBytes()).toString(); + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java index bb3f7132f22..fdb4ad99433 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java @@ -26,6 +26,7 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.w3c.dom.Document; @@ -194,6 +195,15 @@ public class LibvirtDomainXMLParser { } } + NodeList encryption = disk.getElementsByTagName("encryption"); + if (encryption.getLength() != 0) { + Element encryptionElement = (Element) encryption.item(0); + String passphraseUuid = getAttrValue("secret", "uuid", encryptionElement); + QemuObject.EncryptFormat encryptFormat = QemuObject.EncryptFormat.enumValue(encryptionElement.getAttribute("format")); + DiskDef.LibvirtDiskEncryptDetails encryptDetails = new 
DiskDef.LibvirtDiskEncryptDetails(passphraseUuid, encryptFormat); + def.setLibvirtDiskEncryptDetails(encryptDetails); + } + diskDefs.add(def); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtSecretDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtSecretDef.java index 80c08e9d86d..9596b40dec6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtSecretDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtSecretDef.java @@ -55,10 +55,14 @@ public class LibvirtSecretDef { return _ephemeral; } + public void setEphemeral(boolean ephemeral) { _ephemeral = ephemeral; } + public boolean getPrivate() { return _private; } + public void setPrivate(boolean isPrivate) { _private = isPrivate; } + public String getUuid() { return _uuid; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 7c65f7970ad..09f0e6851e0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; @@ -559,6 +560,19 @@ public class LibvirtVMDef { } public static class DiskDef { + public static class LibvirtDiskEncryptDetails { + String passphraseUuid; + QemuObject.EncryptFormat encryptFormat; + + public LibvirtDiskEncryptDetails(String passphraseUuid, QemuObject.EncryptFormat encryptFormat) { + this.passphraseUuid = passphraseUuid; + this.encryptFormat = encryptFormat; + } + + public String 
getPassphraseUuid() { return this.passphraseUuid; } + public QemuObject.EncryptFormat getEncryptFormat() { return this.encryptFormat; } + } + public enum DeviceType { FLOPPY("floppy"), DISK("disk"), CDROM("cdrom"), LUN("lun"); String _type; @@ -714,6 +728,7 @@ public class LibvirtVMDef { private boolean qemuDriver = true; private DiscardType _discard = DiscardType.IGNORE; private IoDriver ioDriver; + private LibvirtDiskEncryptDetails _encryptDetails; public DiscardType getDiscard() { return _discard; @@ -1026,6 +1041,10 @@ public class LibvirtVMDef { this._serial = serial; } + public void setLibvirtDiskEncryptDetails(LibvirtDiskEncryptDetails details) { this._encryptDetails = details; } + + public LibvirtDiskEncryptDetails getLibvirtDiskEncryptDetails() { return this._encryptDetails; } + @Override public String toString() { StringBuilder diskBuilder = new StringBuilder(); @@ -1093,7 +1112,13 @@ public class LibvirtVMDef { diskBuilder.append("/>\n"); if (_serial != null && !_serial.isEmpty() && _deviceType != DeviceType.LUN) { - diskBuilder.append("" + _serial + ""); + diskBuilder.append("" + _serial + "\n"); + } + + if (_encryptDetails != null) { + diskBuilder.append("\n"); + diskBuilder.append("\n"); + diskBuilder.append("\n"); } if ((_deviceType != DeviceType.CDROM) && diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java index bfa557308e7..bac5551129a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java @@ -59,13 +59,13 @@ public final class LibvirtCreateCommandWrapper extends CommandWrapper tInfo = new HashMap(); - final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, 
storagepool.getCapacity(), storagepool.getAvailable(), tInfo); + final ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, storagepool.getDetails()); return answer; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index 38cd9958d7c..8e87b02568f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -24,6 +24,7 @@ import java.util.HashMap; import java.util.Map; import org.apache.cloudstack.storage.configdrive.ConfigDrive; +import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import org.libvirt.Connect; @@ -88,14 +89,31 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp /* setup disks, e.g for iso */ final DiskTO[] volumes = vm.getDisks(); for (final DiskTO volume : volumes) { + final DataTO data = volume.getData(); + if (volume.getType() == Volume.Type.ISO) { - final DataTO data = volume.getData(); if (data != null && data.getPath() != null && data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR)) { libvirtComputingResource.getVolumePath(conn, volume, vm.isConfigDriveOnHostCache()); } else { libvirtComputingResource.getVolumePath(conn, volume); } } + + if (data instanceof VolumeObjectTO) { + final VolumeObjectTO volumeObjectTO = (VolumeObjectTO)data; + + if (volumeObjectTO.getPassphrase() != null && volumeObjectTO.getPassphrase().length > 0) { + String secretConsumer = volumeObjectTO.getPath(); + if (volume.getDetails() != null && 
volume.getDetails().containsKey(DiskTO.SECRET_CONSUMER_DETAIL)) { + secretConsumer = volume.getDetails().get(DiskTO.SECRET_CONSUMER_DETAIL); + } + String secretUuid = libvirtComputingResource.createLibvirtVolumeSecret(conn, secretConsumer, volumeObjectTO.getPassphrase()); + s_logger.debug(String.format("Created libvirt secret %s for disk %s", secretUuid, volumeObjectTO.getPath())); + volumeObjectTO.clearPassphrase(); + } else { + s_logger.debug(String.format("disk %s has no passphrase or encryption", volumeObjectTO)); + } + } } skipDisconnect = true; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java index 685fea9f1bc..d1f0e1489b1 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java @@ -19,9 +19,22 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; +import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.log4j.Logger; import org.libvirt.Connect; +import org.libvirt.Domain; +import org.libvirt.DomainInfo; import org.libvirt.LibvirtException; import org.libvirt.StorageVol; @@ -39,8 +52,6 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.utils.script.Script; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - /* * Uses a local script now, eventually support for virStorageVolResize() will maybe work on qcow2 and lvm and we can do this in libvirt calls */ @@ -51,7 +62,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper 0 ) { + s_logger.debug("Invoking qemu-img to resize an offline, encrypted volume"); + List passphraseObjects = new ArrayList<>(); + try (KeyFile keyFile = new KeyFile(command.getPassphrase())) { + QemuObject.EncryptFormat encryptFormat = QemuObject.EncryptFormat.enumValue(command.getEncryptFormat()); + passphraseObjects.add( + QemuObject.prepareSecretForQemuImg(vol.getFormat(), encryptFormat, keyFile.toString(), "sec0", null) + ); + QemuImg q = new QemuImg(libvirtComputingResource.getCmdsTimeout()); + QemuImageOptions imgOptions = new QemuImageOptions(vol.getFormat(), path,"sec0"); + q.resize(imgOptions, passphraseObjects, newSize); + } catch (QemuImgException | LibvirtException ex) { + throw new CloudRuntimeException("Failed to run qemu-img for resize", ex); + } catch (IOException ex) { + throw new CloudRuntimeException("Failed to create keyfile for encrypted resize", ex); + } finally { + command.clearPassphrase(); + } + } else { + s_logger.debug("Invoking resize script to handle type " + type); + final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), s_logger); + resizecmd.add("-s", String.valueOf(newSize)); + resizecmd.add("-c", String.valueOf(currentSize)); + resizecmd.add("-p", path); + resizecmd.add("-t", type); + resizecmd.add("-r", String.valueOf(shrinkOk)); + resizecmd.add("-v", vmInstanceName); + final String result = resizecmd.execute(); + + if (result != null) { + if(type.equals(notifyOnlyType)) { + return new ResizeVolumeAnswer(command, true, "Resize succeeded, but need reboot to notify guest"); + } else { + return new ResizeVolumeAnswer(command, 
false, result); + } } } /* fetch new size as seen from libvirt, don't want to assume anything */ pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid()); pool.refresh(); - final long finalSize = pool.getPhysicalDisk(volid).getVirtualSize(); + final long finalSize = pool.getPhysicalDisk(volumeId).getVirtualSize(); s_logger.debug("after resize, size reports as: " + toHumanReadableSize(finalSize) + ", requested: " + toHumanReadableSize(newSize)); return new ResizeVolumeAnswer(command, true, "success", finalSize); } catch (final CloudRuntimeException e) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java index 3807e5ca931..049269be60e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java @@ -20,6 +20,11 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; import org.apache.cloudstack.storage.command.RevertSnapshotCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; @@ -44,7 +49,6 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; @ResourceWrapper(handles = RevertSnapshotCommand.class) public final class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper { @@ -106,14 +110,12 @@ public final class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper 0) { for (final 
DiskDef disk : disks) { libvirtComputingResource.cleanupDisk(disk); + DiskDef.LibvirtDiskEncryptDetails diskEncryptDetails = disk.getLibvirtDiskEncryptDetails(); + if (diskEncryptDetails != null) { + libvirtComputingResource.removeLibvirtVolumeSecret(conn, diskEncryptDetails.getPassphraseUuid()); + } } } else { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 389a2c717b7..ad22f2016e2 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -21,11 +21,11 @@ import java.util.List; import java.util.Map; import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.log4j.Logger; - -import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.libvirt.LibvirtException; import com.cloud.agent.api.to.DiskTO; import com.cloud.storage.Storage; @@ -35,7 +35,6 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.libvirt.LibvirtException; @StorageAdaptorInfo(storagePoolType=StoragePoolType.Iscsi) public class IscsiAdmStorageAdaptor implements StorageAdaptor { @@ -44,7 +43,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { private static final Map MapStorageUuidToStoragePool = new HashMap<>(); @Override - public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) { + public KVMStoragePool createStoragePool(String 
uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType, Map details) { IscsiAdmStoragePool storagePool = new IscsiAdmStoragePool(uuid, host, port, storagePoolType, this); MapStorageUuidToStoragePool.put(uuid, storagePool); @@ -75,7 +74,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { // called from LibvirtComputingResource.execute(CreateCommand) // does not apply for iScsiAdmStorageAdaptor @Override - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { throw new UnsupportedOperationException("Creating a physical disk is not supported."); } @@ -384,7 +383,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, ProvisioningType provisioningType, long size, - KVMStoragePool destPool, int timeout) { + KVMStoragePool destPool, int timeout, byte[] passphrase) { throw new UnsupportedOperationException("Creating a disk from a template is not yet supported for this configuration."); } @@ -394,8 +393,12 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk srcDisk, String destVolumeUuid, KVMStoragePool destPool, int timeout) { - QemuImg q = new QemuImg(timeout); + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk srcDisk, String destVolumeUuid, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, 
byte[] destPassphrase) { QemuImgFile srcFile; @@ -414,6 +417,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); try { + QemuImg q = new QemuImg(timeout); q.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException ex) { String msg = "Failed to copy data from " + srcDisk.getPath() + " to " + @@ -443,7 +447,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java index 8e4af764cd6..7c054f6905e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java @@ -87,7 +87,7 @@ public class IscsiAdmStoragePool implements KVMStoragePool { // from LibvirtComputingResource.createDiskFromTemplate(KVMPhysicalDisk, String, PhysicalDiskFormat, long, KVMStoragePool) // does not apply for iScsiAdmStoragePool @Override - public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { throw new UnsupportedOperationException("Creating a physical disk is not supported."); } @@ -95,7 +95,7 @@ public 
class IscsiAdmStoragePool implements KVMStoragePool { // from KVMStorageProcessor.createVolume(CreateObjectCommand) // does not apply for iScsiAdmStoragePool @Override - public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { throw new UnsupportedOperationException("Creating a physical disk is not supported."); } @@ -170,4 +170,9 @@ public class IscsiAdmStoragePool implements KVMStoragePool { public boolean supportsConfigDriveIso() { return false; } + + @Override + public Map getDetails() { + return null; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java index 5b4a61058d5..5187abf3fb8 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java @@ -17,6 +17,7 @@ package com.cloud.hypervisor.kvm.storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuObject; public class KVMPhysicalDisk { private String path; @@ -49,6 +50,7 @@ public class KVMPhysicalDisk { private PhysicalDiskFormat format; private long size; private long virtualSize; + private QemuObject.EncryptFormat qemuEncryptFormat; public KVMPhysicalDisk(String path, String name, KVMStoragePool pool) { this.path = path; @@ -101,4 +103,12 @@ public class KVMPhysicalDisk { this.path = path; } + public QemuObject.EncryptFormat getQemuEncryptFormat() { + return this.qemuEncryptFormat; + } + + public void setQemuEncryptFormat(QemuObject.EncryptFormat format) { + this.qemuEncryptFormat = format; + } + } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index 46d78e5f6b3..3bff9c9852e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -25,9 +25,9 @@ import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; public interface KVMStoragePool { - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size); + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase); - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size); + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size, byte[] passphrase); public boolean connectPhysicalDisk(String volumeUuid, Map details); @@ -72,4 +72,6 @@ public interface KVMStoragePool { public boolean createFolder(String path); public boolean supportsConfigDriveIso(); + + public Map getDetails(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index d6c49d295e0..9053f0618ce 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -56,8 +56,9 @@ public class KVMStoragePoolManager { String userInfo; boolean type; StoragePoolType poolType; + Map details; - public StoragePoolInformation(String name, String host, int 
port, String path, String userInfo, StoragePoolType poolType, boolean type) { + public StoragePoolInformation(String name, String host, int port, String path, String userInfo, StoragePoolType poolType, Map details, boolean type) { this.name = name; this.host = host; this.port = port; @@ -65,6 +66,7 @@ public class KVMStoragePoolManager { this.userInfo = userInfo; this.type = type; this.poolType = poolType; + this.details = details; } } @@ -270,7 +272,7 @@ public class KVMStoragePoolManager { } catch (Exception e) { StoragePoolInformation info = _storagePools.get(uuid); if (info != null) { - pool = createStoragePool(info.name, info.host, info.port, info.path, info.userInfo, info.poolType, info.type); + pool = createStoragePool(info.name, info.host, info.port, info.path, info.userInfo, info.poolType, info.details, info.type); } else { throw new CloudRuntimeException("Could not fetch storage pool " + uuid + " from libvirt due to " + e.getMessage()); } @@ -300,7 +302,7 @@ public class KVMStoragePoolManager { } // secondary storage registers itself through here - return createStoragePool(uuid, sourceHost, 0, sourcePath, "", protocol, false); + return createStoragePool(uuid, sourceHost, 0, sourcePath, "", protocol, null, false); } public KVMPhysicalDisk getPhysicalDisk(StoragePoolType type, String poolUuid, String volName) { @@ -341,20 +343,27 @@ public class KVMStoragePoolManager { public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type) { // primary storage registers itself through here - return createStoragePool(name, host, port, path, userInfo, type, true); + return createStoragePool(name, host, port, path, userInfo, type, null, true); + } + + /** + * Primary Storage registers itself through here + */ + public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map details) { + return createStoragePool(name, host, port, path, 
userInfo, type, details, true); } //Note: due to bug CLOUDSTACK-4459, createStoragepool can be called in parallel, so need to be synced. - private synchronized KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, boolean primaryStorage) { + private synchronized KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map details, boolean primaryStorage) { StorageAdaptor adaptor = getStorageAdaptor(type); - KVMStoragePool pool = adaptor.createStoragePool(name, host, port, path, userInfo, type); + KVMStoragePool pool = adaptor.createStoragePool(name, host, port, path, userInfo, type, details); // LibvirtStorageAdaptor-specific statement if (type == StoragePoolType.NetworkFilesystem && primaryStorage) { KVMHABase.NfsStoragePool nfspool = new KVMHABase.NfsStoragePool(pool.getUuid(), host, path, pool.getLocalPath(), PoolType.PrimaryStorage); _haMonitor.addStoragePool(nfspool); } - StoragePoolInformation info = new StoragePoolInformation(name, host, port, path, userInfo, type, primaryStorage); + StoragePoolInformation info = new StoragePoolInformation(name, host, port, path, userInfo, type, details, primaryStorage); addStoragePool(pool.getUuid(), info); return pool; } @@ -377,35 +386,35 @@ public class KVMStoragePoolManager { } public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, Storage.ProvisioningType provisioningType, - KVMStoragePool destPool, int timeout) { - return createDiskFromTemplate(template, name, provisioningType, destPool, template.getSize(), timeout); + KVMStoragePool destPool, int timeout, byte[] passphrase) { + return createDiskFromTemplate(template, name, provisioningType, destPool, template.getSize(), timeout, passphrase); } public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, Storage.ProvisioningType provisioningType, - KVMStoragePool destPool, long size, 
int timeout) { + KVMStoragePool destPool, long size, int timeout, byte[] passphrase) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); // LibvirtStorageAdaptor-specific statement if (destPool.getType() == StoragePoolType.RBD) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.RAW, provisioningType, - size, destPool, timeout); + size, destPool, timeout, passphrase); } else if (destPool.getType() == StoragePoolType.CLVM) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.RAW, provisioningType, - size, destPool, timeout); + size, destPool, timeout, passphrase); } else if (template.getFormat() == PhysicalDiskFormat.DIR) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.DIR, provisioningType, - size, destPool, timeout); + size, destPool, timeout, passphrase); } else if (destPool.getType() == StoragePoolType.PowerFlex || destPool.getType() == StoragePoolType.Linstor) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.RAW, provisioningType, - size, destPool, timeout); + size, destPool, timeout, passphrase); } else { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.QCOW2, provisioningType, - size, destPool, timeout); + size, destPool, timeout, passphrase); } } @@ -416,7 +425,12 @@ public class KVMStoragePoolManager { public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); - return adaptor.copyPhysicalDisk(disk, name, destPool, timeout); + return adaptor.copyPhysicalDisk(disk, name, destPool, timeout, null, null); + } + + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] dstPassphrase) { + StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); + return adaptor.copyPhysicalDisk(disk, name, destPool, timeout, 
srcPassphrase, dstPassphrase); } public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) { @@ -425,9 +439,9 @@ public class KVMStoragePoolManager { } public KVMPhysicalDisk createDiskWithTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, - KVMStoragePool destPool, int timeout) { + KVMStoragePool destPool, int timeout, byte[] passphrase) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); - return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout); + return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout, passphrase); } public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 9cd6c5af03d..3d8acddc89c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -29,6 +29,7 @@ import java.net.URISyntaxException; import java.text.DateFormat; import java.text.MessageFormat; import java.text.SimpleDateFormat; +import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -64,10 +65,13 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; 
import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang.StringUtils; @@ -410,7 +414,7 @@ public class KVMStorageProcessor implements StorageProcessor { s_logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } - vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); + vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds(), null, volume.getPassphrase()); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); } else { @@ -420,7 +424,7 @@ public class KVMStorageProcessor implements StorageProcessor { } BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); vol = storagePoolMgr.createDiskFromTemplate(BaseVol, volume.getUuid(), volume.getProvisioningType(), - BaseVol.getPool(), volume.getSize(), cmd.getWaitInMillSeconds()); + BaseVol.getPool(), volume.getSize(), cmd.getWaitInMillSeconds(), volume.getPassphrase()); } if (vol == null) { return new CopyCmdAnswer(" Can't create storage volume on storage pool"); @@ -429,6 +433,9 @@ public class KVMStorageProcessor implements StorageProcessor { final VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(vol.getName()); newVol.setSize(volume.getSize()); + if (vol.getQemuEncryptFormat() != null) { + newVol.setEncryptFormat(vol.getQemuEncryptFormat().toString()); + } if (vol.getFormat() == PhysicalDiskFormat.RAW) { newVol.setFormat(ImageFormat.RAW); @@ -442,6 +449,8 @@ public class 
KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to create volume: ", e); return new CopyCmdAnswer(e.toString()); + } finally { + volume.clearPassphrase(); } } @@ -512,6 +521,7 @@ public class KVMStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(e.toString()); } finally { + srcVol.clearPassphrase(); if (secondaryStoragePool != null) { storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid()); } @@ -558,6 +568,8 @@ public class KVMStorageProcessor implements StorageProcessor { s_logger.debug("Failed to copyVolumeFromPrimaryToSecondary: ", e); return new CopyCmdAnswer(e.toString()); } finally { + srcVol.clearPassphrase(); + destVol.clearPassphrase(); if (secondaryStoragePool != null) { storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid()); } @@ -685,6 +697,7 @@ public class KVMStorageProcessor implements StorageProcessor { s_logger.debug("Failed to createTemplateFromVolume: ", e); return new CopyCmdAnswer(e.toString()); } finally { + volume.clearPassphrase(); if (secondaryStorage != null) { secondaryStorage.delete(); } @@ -930,6 +943,8 @@ public class KVMStorageProcessor implements StorageProcessor { Connect conn = null; KVMPhysicalDisk snapshotDisk = null; KVMStoragePool primaryPool = null; + + final VolumeObjectTO srcVolume = snapshot.getVolume(); try { conn = LibvirtConnection.getConnectionByVmName(vmName); @@ -989,18 +1004,41 @@ public class KVMStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(e.toString()); } } else { - final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger); - command.add("-b", snapshotDisk.getPath()); - command.add(NAME_OPTION, snapshotName); - command.add("-p", snapshotDestPath); - if (isCreatedFromVmSnapshot) { - descName = UUID.randomUUID().toString(); - } - command.add("-t", descName); - final String result = 
command.execute(); - if (result != null) { - s_logger.debug("Failed to backup snaptshot: " + result); - return new CopyCmdAnswer(result); + /* if encrypted qcow2 file, use qemu-img directly. Otherwise call manage snapshot script */ + if (qemuVolumeHasEncryption(srcVolume)) { + List passphraseObjects = new ArrayList<>(); + try (KeyFile keyFile = new KeyFile(srcVolume.getPassphrase())) { + Map options = new HashMap(); + passphraseObjects.add( + QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.QCOW2, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options) + ); + secondaryStoragePool.createFolder(snapshotRelPath); + QemuImg q = new QemuImg(cmd.getWaitInMillSeconds()); + QemuImageOptions imgOptions = new QemuImageOptions(PhysicalDiskFormat.QCOW2, snapshotDisk.getPath(),"sec0"); + QemuImgFile sourceFile = new QemuImgFile(snapshotDisk.getPath(), PhysicalDiskFormat.QCOW2); + QemuImgFile destFile = new QemuImgFile(snapshotDestPath + File.separator + snapshotName, PhysicalDiskFormat.QCOW2); + q.convert(sourceFile, destFile, options, passphraseObjects, imgOptions, snapshotName, false); + } catch (QemuImgException ex) { + throw new CloudRuntimeException("Failed to run qemu-img for snapshot backup", ex); + } catch (IOException ex) { + throw new CloudRuntimeException("Failed to create keyfile for encrypted snapshot backup", ex); + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to query libvirt during snapshot backup", ex); + } + } else { + final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger); + command.add("-b", snapshotDisk.getPath()); + command.add(NAME_OPTION, snapshotName); + command.add("-p", snapshotDestPath); + if (isCreatedFromVmSnapshot) { + descName = UUID.randomUUID().toString(); + } + command.add("-t", descName); + final String result = command.execute(); + if (result != null) { + s_logger.debug("Failed to backup snaptshot: " + result); + return new CopyCmdAnswer(result); + } } final 
File snapFile = new File(snapshotDestPath + "/" + descName); if(snapFile.exists()){ @@ -1012,10 +1050,7 @@ public class KVMStorageProcessor implements StorageProcessor { newSnapshot.setPath(snapshotRelPath + File.separator + descName); newSnapshot.setPhysicalSize(size); return new CopyCmdAnswer(newSnapshot); - } catch (final LibvirtException e) { - s_logger.debug("Failed to backup snapshot: ", e); - return new CopyCmdAnswer(e.toString()); - } catch (final CloudRuntimeException e) { + } catch (final LibvirtException | CloudRuntimeException e) { s_logger.debug("Failed to backup snapshot: ", e); return new CopyCmdAnswer(e.toString()); } finally { @@ -1059,11 +1094,17 @@ public class KVMStorageProcessor implements StorageProcessor { } } else { if (primaryPool.getType() != StoragePoolType.RBD) { - deleteSnapshotViaManageSnapshotScript(snapshotName, snapshotDisk); + if (qemuVolumeHasEncryption(srcVolume)) { + deleteSnapshotViaQemuImg(srcVolume, snapshotDisk.getPath(), snapshotName, cmd.getWaitInMillSeconds()); + } else { + deleteSnapshotViaManageSnapshotScript(snapshotName, snapshotDisk); + } } } } catch (final Exception ex) { s_logger.error("Failed to delete snapshots on primary", ex); + } finally { + srcVolume.clearPassphrase(); } } @@ -1077,6 +1118,30 @@ public class KVMStorageProcessor implements StorageProcessor { } } + private boolean qemuVolumeHasEncryption(VolumeObjectTO volume) { + return volume.getEncryptFormat() != null && QemuObject.EncryptFormat.enumValue(volume.getEncryptFormat()) == QemuObject.EncryptFormat.LUKS && volume.getPassphrase() != null; + } + + private void deleteSnapshotViaQemuImg(final VolumeObjectTO volume, final String path, final String snapshotName, final int timeout) { + List passphraseObjects = new ArrayList<>(); + try (KeyFile keyFile = new KeyFile(volume.getPassphrase())) { + passphraseObjects.add( + QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.QCOW2, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", null) + ); + 
QemuImg q = new QemuImg(timeout); + QemuImageOptions imgOptions = new QemuImageOptions(PhysicalDiskFormat.QCOW2, path,"sec0"); + q.deleteSnapshot(imgOptions, snapshotName, passphraseObjects); + } catch (QemuImgException ex) { + throw new CloudRuntimeException("Failed to run qemu-img for deleting snapshot", ex); + } catch (IOException ex) { + throw new CloudRuntimeException("Failed to create keyfile for deleting encrypted snapshot", ex); + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to call Libvirt during deleting snapshot", ex); + } finally { + volume.clearPassphrase(); + } + } + private void deleteSnapshotViaManageSnapshotScript(final String snapshotName, KVMPhysicalDisk snapshotDisk) { final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger); command.add(MANAGE_SNAPSTHOT_DESTROY_OPTION, snapshotDisk.getPath()); @@ -1232,7 +1297,7 @@ public class KVMStorageProcessor implements StorageProcessor { final Long bytesReadRate, final Long bytesReadRateMax, final Long bytesReadRateMaxLength, final Long bytesWriteRate, final Long bytesWriteRateMax, final Long bytesWriteRateMaxLength, final Long iopsReadRate, final Long iopsReadRateMax, final Long iopsReadRateMaxLength, - final Long iopsWriteRate, final Long iopsWriteRateMax, final Long iopsWriteRateMaxLength, final String cacheMode) throws LibvirtException, InternalErrorException { + final Long iopsWriteRate, final Long iopsWriteRateMax, final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails) throws LibvirtException, InternalErrorException { List disks = null; Domain dm = null; DiskDef diskdef = null; @@ -1306,6 +1371,10 @@ public class KVMStorageProcessor implements StorageProcessor { diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT); } + if (encryptDetails != null) { + diskdef.setLibvirtDiskEncryptDetails(encryptDetails); + } + if ((bytesReadRate != null) && (bytesReadRate > 0)) { 
diskdef.setBytesReadRate(bytesReadRate); } @@ -1363,19 +1432,27 @@ public class KVMStorageProcessor implements StorageProcessor { final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore(); final String vmName = cmd.getVmName(); final String serial = resource.diskUuidToSerial(vol.getUuid()); + try { final Connect conn = LibvirtConnection.getConnectionByVmName(vmName); + DiskDef.LibvirtDiskEncryptDetails encryptDetails = null; + if (vol.getPassphrase() != null && vol.getPassphrase().length > 0) { + String secretUuid = resource.createLibvirtVolumeSecret(conn, vol.getPath(), vol.getPassphrase()); + encryptDetails = new DiskDef.LibvirtDiskEncryptDetails(secretUuid, QemuObject.EncryptFormat.enumValue(vol.getEncryptFormat())); + vol.clearPassphrase(); + } storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath(), disk.getDetails()); final KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); final String volCacheMode = vol.getCacheMode() == null ? 
null : vol.getCacheMode().toString(); + s_logger.debug(String.format("Attaching physical disk %s with format %s", phyDisk.getPath(), phyDisk.getFormat())); attachOrDetachDisk(conn, true, vmName, phyDisk, disk.getDiskSeq().intValue(), serial, vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(), vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(), vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(), - vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode); + vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, encryptDetails); return new AttachAnswer(disk); } catch (final LibvirtException e) { @@ -1388,6 +1465,8 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); return new AttachAnswer(e.toString()); + } finally { + vol.clearPassphrase(); } } @@ -1408,7 +1487,7 @@ public class KVMStorageProcessor implements StorageProcessor { vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(), vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(), vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(), - vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode); + vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, null); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); @@ -1422,6 +1501,8 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); + } finally { + 
vol.clearPassphrase(); } } @@ -1440,7 +1521,7 @@ public class KVMStorageProcessor implements StorageProcessor { destTemplate = primaryPool.getPhysicalDisk(srcBackingFilePath); } return storagePoolMgr.createDiskWithTemplateBacking(destTemplate, volume.getUuid(), format, volume.getSize(), - primaryPool, timeout); + primaryPool, timeout, volume.getPassphrase()); } /** @@ -1448,7 +1529,7 @@ public class KVMStorageProcessor implements StorageProcessor { */ protected KVMPhysicalDisk createFullCloneVolume(MigrationOptions migrationOptions, VolumeObjectTO volume, KVMStoragePool primaryPool, PhysicalDiskFormat format) { s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + toHumanReadableSize(volume.getSize()) + " and format: " + format); - return primaryPool.createPhysicalDisk(volume.getUuid(), format, volume.getProvisioningType(), volume.getSize()); + return primaryPool.createPhysicalDisk(volume.getUuid(), format, volume.getProvisioningType(), volume.getSize(), volume.getPassphrase()); } @Override @@ -1470,25 +1551,25 @@ public class KVMStorageProcessor implements StorageProcessor { } MigrationOptions migrationOptions = volume.getMigrationOptions(); - if (migrationOptions != null) { + if (isLinkedCloneMigration(migrationOptions)) { String srcStoreUuid = migrationOptions.getSrcPoolUuid(); StoragePoolType srcPoolType = migrationOptions.getSrcPoolType(); KVMStoragePool srcPool = storagePoolMgr.getStoragePool(srcPoolType, srcStoreUuid); int timeout = migrationOptions.getTimeout(); - - if (migrationOptions.getType() == MigrationOptions.Type.LinkedClone) { - vol = createLinkedCloneVolume(migrationOptions, srcPool, primaryPool, volume, format, timeout); - } else if (migrationOptions.getType() == MigrationOptions.Type.FullClone) { - vol = createFullCloneVolume(migrationOptions, volume, primaryPool, format); - } + vol = createLinkedCloneVolume(migrationOptions, srcPool, 
primaryPool, volume, format, timeout); + } else if (isFullCloneMigration(migrationOptions)) { + vol = createFullCloneVolume(migrationOptions, volume, primaryPool, format); } else { vol = primaryPool.createPhysicalDisk(volume.getUuid(), format, - volume.getProvisioningType(), disksize); + volume.getProvisioningType(), disksize, volume.getPassphrase()); } final VolumeObjectTO newVol = new VolumeObjectTO(); if(vol != null) { newVol.setPath(vol.getName()); + if (vol.getQemuEncryptFormat() != null) { + newVol.setEncryptFormat(vol.getQemuEncryptFormat().toString()); + } } newVol.setSize(volume.getSize()); newVol.setFormat(ImageFormat.valueOf(format.toString().toUpperCase())); @@ -1497,9 +1578,19 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final Exception e) { s_logger.debug("Failed to create volume: ", e); return new CreateObjectAnswer(e.toString()); + } finally { + volume.clearPassphrase(); } } + protected static boolean isLinkedCloneMigration(MigrationOptions options) { + return options != null && options.getType() == MigrationOptions.Type.LinkedClone; + } + + protected static boolean isFullCloneMigration(MigrationOptions options) { + return options != null && options.getType() == MigrationOptions.Type.FullClone; + } + protected static final MessageFormat SnapshotXML = new MessageFormat(" " + " {0}" + " " + " {1}" + " " + " "); @@ -1523,6 +1614,10 @@ public class KVMStorageProcessor implements StorageProcessor { } } + if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && qemuVolumeHasEncryption(volume)) { + throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported"); + } + final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); final KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath()); @@ -1577,13 +1672,32 @@ public class KVMStorageProcessor implements StorageProcessor 
{ } } else { /* VM is not running, create a snapshot by ourself */ - final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger); - command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath()); - command.add(NAME_OPTION, snapshotName); - final String result = command.execute(); - if (result != null) { - s_logger.debug("Failed to manage snapshot: " + result); - return new CreateObjectAnswer("Failed to manage snapshot: " + result); + /* if we have a Qemu image that is LUKS encrypted, use direct qemu-img call to snapshot. Otherwise call the snapshot script as usual */ + if (qemuVolumeHasEncryption(volume)) { + List passphraseObjects = new ArrayList<>(); + try (KeyFile keyFile = new KeyFile(volume.getPassphrase())) { + passphraseObjects.add( + QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.QCOW2, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", null) + ); + QemuImg q = new QemuImg(cmd.getWait()); + QemuImageOptions imgOptions = new QemuImageOptions(PhysicalDiskFormat.QCOW2, disk.getPath(),"sec0"); + q.snapshot(imgOptions, snapshotName, passphraseObjects); + } catch (QemuImgException ex) { + throw new CloudRuntimeException("Failed to run qemu-img for snapshot", ex); + } catch (IOException ex) { + throw new CloudRuntimeException("Failed to create keyfile for encrypted snapshot", ex); + } finally { + volume.clearPassphrase(); + } + } else { + final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger); + command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath()); + command.add(NAME_OPTION, snapshotName); + final String result = command.execute(); + if (result != null) { + s_logger.debug("Failed to manage snapshot: " + result); + return new CreateObjectAnswer("Failed to manage snapshot: " + result); + } } } } @@ -1595,6 +1709,8 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final LibvirtException e) { s_logger.debug("Failed to manage snapshot: ", e); return new CreateObjectAnswer("Failed to 
manage snapshot: " + e.toString()); + } finally { + volume.clearPassphrase(); } } @@ -1625,18 +1741,20 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to delete volume: ", e); return new Answer(null, false, e.toString()); + } finally { + vol.clearPassphrase(); } } @Override public Answer createVolumeFromSnapshot(final CopyCommand cmd) { + final DataTO srcData = cmd.getSrcTO(); + final SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData; + final VolumeObjectTO volume = snapshot.getVolume(); try { - final DataTO srcData = cmd.getSrcTO(); - final SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData; final DataTO destData = cmd.getDestTO(); final PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore(); final DataStoreTO imageStore = srcData.getDataStore(); - final VolumeObjectTO volume = snapshot.getVolume(); if (!(imageStore instanceof NfsTO || imageStore instanceof PrimaryDataStoreTO)) { return new CopyCmdAnswer("unsupported protocol"); @@ -1665,6 +1783,8 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to createVolumeFromSnapshot: ", e); return new CopyCmdAnswer(e.toString()); + } finally { + volume.clearPassphrase(); } } @@ -1791,10 +1911,10 @@ public class KVMStorageProcessor implements StorageProcessor { @Override public Answer deleteSnapshot(final DeleteCommand cmd) { String snap_full_name = ""; + SnapshotObjectTO snapshotTO = (SnapshotObjectTO) cmd.getData(); + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) snapshotTO.getDataStore(); + VolumeObjectTO volume = snapshotTO.getVolume(); try { - SnapshotObjectTO snapshotTO = (SnapshotObjectTO) cmd.getData(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) snapshotTO.getDataStore(); - VolumeObjectTO volume = snapshotTO.getVolume(); KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), 
primaryStore.getUuid()); KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath()); String snapshotFullPath = snapshotTO.getPath(); @@ -1823,7 +1943,11 @@ public class KVMStorageProcessor implements StorageProcessor { } } else if (primaryPool.getType() == StoragePoolType.NetworkFilesystem || primaryPool.getType() == StoragePoolType.Filesystem) { s_logger.info(String.format("Deleting snapshot (id=%s, name=%s, path=%s, storage type=%s) on primary storage", snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath(), primaryPool.getType())); - deleteSnapshotViaManageSnapshotScript(snapshotName, disk); + if (qemuVolumeHasEncryption(volume)) { + deleteSnapshotViaQemuImg(volume, disk.getPath(), snapshotName, cmd.getWait()); + } else { + deleteSnapshotViaManageSnapshotScript(snapshotName, disk); + } } else { s_logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString()); throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString()); @@ -1840,6 +1964,8 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (Exception e) { s_logger.error("Failed to remove snapshot " + snap_full_name + ", with exception: " + e.toString()); return new Answer(cmd, false, "Failed to remove snapshot " + snap_full_name); + } finally { + volume.clearPassphrase(); } } @@ -2012,6 +2138,9 @@ public class KVMStorageProcessor implements StorageProcessor { } catch (final CloudRuntimeException e) { s_logger.debug("Failed to copyVolumeFromPrimaryToPrimary: ", e); return new CopyCmdAnswer(e.toString()); + } finally { + srcVol.clearPassphrase(); + destVol.clearPassphrase(); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 008cb72c355..4ed5dd1b6a4 
100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -17,6 +17,7 @@ package com.cloud.hypervisor.kvm.storage; import java.io.File; +import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; @@ -24,10 +25,12 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import org.libvirt.Connect; @@ -104,9 +107,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, - KVMStoragePool destPool, int timeout) { - String volumeDesc = String.format("volume [%s], with template backing [%s], in pool [%s] (%s), with size [%s]", name, template.getName(), destPool.getUuid(), - destPool.getType(), size); + KVMStoragePool destPool, int timeout, byte[] passphrase) { + String volumeDesc = String.format("volume [%s], with template backing [%s], in pool [%s] (%s), with size [%s] and encryption is %s", name, template.getName(), destPool.getUuid(), + destPool.getType(), size, passphrase != null && passphrase.length > 0); if (!poolTypesThatEnableCreateDiskFromTemplateBacking.contains(destPool.getType())) { s_logger.info(String.format("Skipping creation of %s due to pool type is none of the following types %s.", volumeDesc, poolTypesThatEnableCreateDiskFromTemplateBacking.stream() @@ -125,12 +128,23 @@ 
public class LibvirtStorageAdaptor implements StorageAdaptor { String destPoolLocalPath = destPool.getLocalPath(); String destPath = String.format("%s%s%s", destPoolLocalPath, destPoolLocalPath.endsWith("/") ? "" : "/", name); - try { + Map options = new HashMap(); + List passphraseObjects = new ArrayList<>(); + try (KeyFile keyFile = new KeyFile(passphrase)) { QemuImgFile destFile = new QemuImgFile(destPath, format); destFile.setSize(size); QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat()); - new QemuImg(timeout).create(destFile, backingFile); - } catch (QemuImgException e) { + + if (keyFile.isSet()) { + passphraseObjects.add(QemuObject.prepareSecretForQemuImg(format, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options)); + } + s_logger.debug(String.format("Passphrase is staged to keyFile: %s", keyFile.isSet())); + + QemuImg qemu = new QemuImg(timeout); + qemu.create(destFile, backingFile, options, passphraseObjects); + Map info = qemu.info(destFile); + } catch (QemuImgException | LibvirtException | IOException e) { + // NOTE(review): the failure is only logged, not rethrown; presumably callers later fail to find the volume and surface the error — confirm, or consider throwing CloudRuntimeException here for fail-fast behavior. 
s_logger.error(String.format("Failed to create %s in [%s] due to [%s].", volumeDesc, destPath, e.getMessage()), e); } @@ -563,7 +577,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } @Override - public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type) { + public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map details) { s_logger.info("Attempting to create storage pool " + name + " (" + type.toString() + ") in libvirt"); StoragePool sp = null; @@ -743,7 +757,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, - PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { s_logger.info("Attempting to create volume " + name + " (" + pool.getType().toString() + ") in pool " + pool.getUuid() + " with size " + toHumanReadableSize(size)); @@ -755,11 +769,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { case Filesystem: switch (format) { case QCOW2: - return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size); case RAW: - return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size); + return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size, passphrase); case DIR: - return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size); case TAR: return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size); default: @@ -803,37 +815,50 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { private KVMPhysicalDisk createPhysicalDiskByQemuImg(String name, KVMStoragePool pool, - PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + 
PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { String volPath = pool.getLocalPath() + "/" + name; String volName = name; long virtualSize = 0; long actualSize = 0; + QemuObject.EncryptFormat encryptFormat = null; + List passphraseObjects = new ArrayList<>(); final int timeout = 0; QemuImgFile destFile = new QemuImgFile(volPath); destFile.setFormat(format); destFile.setSize(size); - QemuImg qemu = new QemuImg(timeout); Map options = new HashMap(); if (pool.getType() == StoragePoolType.NetworkFilesystem){ options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString()); } - try{ - qemu.create(destFile, options); + try (KeyFile keyFile = new KeyFile(passphrase)) { + QemuImg qemu = new QemuImg(timeout); + if (keyFile.isSet()) { + passphraseObjects.add(QemuObject.prepareSecretForQemuImg(format, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options)); + + // make room for encryption header on raw format, use LUKS + if (format == PhysicalDiskFormat.RAW) { + destFile.setSize(destFile.getSize() - (16<<20)); + destFile.setFormat(PhysicalDiskFormat.LUKS); + } + + encryptFormat = QemuObject.EncryptFormat.LUKS; + } + qemu.create(destFile, null, options, passphraseObjects); Map info = qemu.info(destFile); virtualSize = Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE)); actualSize = new File(destFile.getFileName()).length(); - } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to create " + volPath + - " due to a failed executing of qemu-img: " + e.getMessage()); + } catch (QemuImgException | LibvirtException | IOException e) { + throw new CloudRuntimeException(String.format("Failed to create %s due to a failed execution of qemu-img", volPath), e); } KVMPhysicalDisk disk = new KVMPhysicalDisk(volPath, volName, pool); disk.setFormat(format); disk.setSize(actualSize); disk.setVirtualSize(virtualSize); + disk.setQemuEncryptFormat(encryptFormat); return 
disk; } @@ -975,7 +1000,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { */ @Override public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, - String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) { + String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { s_logger.info("Creating volume " + name + " from template " + template.getName() + " in pool " + destPool.getUuid() + " (" + destPool.getType().toString() + ") with size " + toHumanReadableSize(size)); @@ -985,12 +1010,14 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { if (destPool.getType() == StoragePoolType.RBD) { disk = createDiskFromTemplateOnRBD(template, name, format, provisioningType, size, destPool, timeout); } else { - try { + try (KeyFile keyFile = new KeyFile(passphrase)){ String newUuid = name; - disk = destPool.createPhysicalDisk(newUuid, format, provisioningType, template.getVirtualSize()); + List passphraseObjects = new ArrayList<>(); + disk = destPool.createPhysicalDisk(newUuid, format, provisioningType, template.getVirtualSize(), passphrase); if (disk == null) { throw new CloudRuntimeException("Failed to create disk from template " + template.getName()); } + if (template.getFormat() == PhysicalDiskFormat.TAR) { Script.runSimpleBashScript("tar -x -f " + template.getPath() + " -C " + disk.getPath(), timeout); // TO BE FIXED to aware provisioningType } else if (template.getFormat() == PhysicalDiskFormat.DIR) { @@ -1007,32 +1034,45 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } Map options = new HashMap(); options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString()); + + + if (keyFile.isSet()) { + passphraseObjects.add(QemuObject.prepareSecretForQemuImg(format, QemuObject.EncryptFormat.LUKS, 
keyFile.toString(), "sec0", options)); + disk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS); + } switch(provisioningType){ case THIN: QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat()); - qemu.create(destFile, backingFile, options); + qemu.create(destFile, backingFile, options, passphraseObjects); break; case SPARSE: case FAT: QemuImgFile srcFile = new QemuImgFile(template.getPath(), template.getFormat()); - qemu.convert(srcFile, destFile, options, null); + qemu.convert(srcFile, destFile, options, passphraseObjects, null, false); break; } } else if (format == PhysicalDiskFormat.RAW) { + PhysicalDiskFormat destFormat = PhysicalDiskFormat.RAW; + Map options = new HashMap(); + + if (keyFile.isSet()) { + destFormat = PhysicalDiskFormat.LUKS; + disk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS); + passphraseObjects.add(QemuObject.prepareSecretForQemuImg(destFormat, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options)); + } + QemuImgFile sourceFile = new QemuImgFile(template.getPath(), template.getFormat()); - QemuImgFile destFile = new QemuImgFile(disk.getPath(), PhysicalDiskFormat.RAW); + QemuImgFile destFile = new QemuImgFile(disk.getPath(), destFormat); if (size > template.getVirtualSize()) { destFile.setSize(size); } else { destFile.setSize(template.getVirtualSize()); } QemuImg qemu = new QemuImg(timeout); - Map options = new HashMap(); - qemu.convert(sourceFile, destFile, options, null); + qemu.convert(sourceFile, destFile, options, passphraseObjects, null, false); } - } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to create " + disk.getPath() + - " due to a failed executing of qemu-img: " + e.getMessage()); + } catch (QemuImgException | LibvirtException | IOException e) { + throw new CloudRuntimeException(String.format("Failed to create %s due to a failed execution of qemu-img", disk.getPath()), e); } } @@ -1067,7 +1107,6 @@ public class LibvirtStorageAdaptor implements 
StorageAdaptor { } - QemuImg qemu = new QemuImg(timeout); QemuImgFile srcFile; QemuImgFile destFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(), destPool.getSourcePort(), @@ -1076,10 +1115,10 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { disk.getPath())); destFile.setFormat(format); - if (srcPool.getType() != StoragePoolType.RBD) { srcFile = new QemuImgFile(template.getPath(), template.getFormat()); try{ + QemuImg qemu = new QemuImg(timeout); qemu.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException e) { s_logger.error("Failed to create " + disk.getPath() + @@ -1241,6 +1280,11 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null); + } + /** * This copies a volume from Primary Storage to Secondary Storage * @@ -1248,7 +1292,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { * in ManagementServerImpl shows that the destPool is always a Secondary Storage Pool */ @Override - public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] dstPassphrase) { /** With RBD you can't run qemu-img convert with an existing RBD image as destination @@ -1269,9 +1313,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { s_logger.debug("copyPhysicalDisk: disk size:" + toHumanReadableSize(disk.getSize()) + ", virtualsize:" + toHumanReadableSize(disk.getVirtualSize())+" format:"+disk.getFormat()); if (destPool.getType() != StoragePoolType.RBD) { if (disk.getFormat() == PhysicalDiskFormat.TAR) { - newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, 
Storage.ProvisioningType.THIN, disk.getVirtualSize()); + newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, Storage.ProvisioningType.THIN, disk.getVirtualSize(), null); } else { - newDisk = destPool.createPhysicalDisk(name, Storage.ProvisioningType.THIN, disk.getVirtualSize()); + newDisk = destPool.createPhysicalDisk(name, Storage.ProvisioningType.THIN, disk.getVirtualSize(), null); } } else { newDisk = new KVMPhysicalDisk(destPool.getSourceDir() + "/" + name, name, destPool); @@ -1283,7 +1327,13 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { String destPath = newDisk.getPath(); PhysicalDiskFormat destFormat = newDisk.getFormat(); - QemuImg qemu = new QemuImg(timeout); + QemuImg qemu; + + try { + qemu = new QemuImg(timeout); + } catch (QemuImgException | LibvirtException ex ) { + throw new CloudRuntimeException("Failed to create qemu-img command", ex); + } QemuImgFile srcFile = null; QemuImgFile destFile = null; @@ -1462,5 +1512,4 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { private void deleteDirVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtException { Script.runSimpleBashScript("rm -r --interactive=never " + vol.getPath()); } - } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index b2e8decfcb1..33c6f0ef393 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -110,15 +110,15 @@ public class LibvirtStoragePool implements KVMStoragePool { @Override public KVMPhysicalDisk createPhysicalDisk(String name, - PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) 
{ return this._storageAdaptor - .createPhysicalDisk(name, this, format, provisioningType, size); + .createPhysicalDisk(name, this, format, provisioningType, size, passphrase); } @Override - public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { return this._storageAdaptor.createPhysicalDisk(name, this, - this.getDefaultFormat(), provisioningType, size); + this.getDefaultFormat(), provisioningType, size, passphrase); } @Override @@ -279,4 +279,9 @@ public class LibvirtStoragePool implements KVMStoragePool { } return false; } + + @Override + public Map getDetails() { + return null; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java index dc00601674b..9f57083e9a9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java @@ -16,6 +16,26 @@ // under the License. 
package com.cloud.hypervisor.kvm.storage; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.StringJoiner; + +import javax.annotation.Nonnull; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.log4j.Logger; +import org.libvirt.LibvirtException; + +import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; import com.linbit.linstor.api.ApiClient; import com.linbit.linstor.api.ApiException; import com.linbit.linstor.api.Configuration; @@ -33,25 +53,6 @@ import com.linbit.linstor.api.model.ResourceWithVolumes; import com.linbit.linstor.api.model.StoragePool; import com.linbit.linstor.api.model.VolumeDefinition; -import javax.annotation.Nonnull; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.StringJoiner; - -import com.cloud.storage.Storage; -import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.utils.qemu.QemuImg; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; -import org.libvirt.LibvirtException; - @StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.Linstor) public class LinstorStorageAdaptor implements StorageAdaptor { private static final Logger s_logger = Logger.getLogger(LinstorStorageAdaptor.class); @@ -174,7 +175,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor { @Override public KVMStoragePool createStoragePool(String name, String host, int port, String 
path, String userInfo, - Storage.StoragePoolType type) + Storage.StoragePoolType type, Map details) { s_logger.debug(String.format( "Linstor createStoragePool: name: '%s', host: '%s', path: %s, userinfo: %s", name, host, path, userInfo)); @@ -197,7 +198,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, - Storage.ProvisioningType provisioningType, long size) + Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { final String rscName = getLinstorRscName(name); LinstorStoragePool lpool = (LinstorStoragePool) pool; @@ -377,7 +378,8 @@ public class LinstorStorageAdaptor implements StorageAdaptor { Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, - int timeout) + int timeout, + byte[] passphrase) { s_logger.info("Linstor: createDiskFromTemplate"); return copyPhysicalDisk(template, name, destPool, timeout); @@ -401,23 +403,28 @@ public class LinstorStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout) + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout, byte[] srcPassphrase, byte[] destPassphrase) { s_logger.debug("Linstor: copyPhysicalDisk"); final QemuImg.PhysicalDiskFormat sourceFormat = disk.getFormat(); final String sourcePath = disk.getPath(); - final QemuImg qemu = new QemuImg(timeout); final QemuImgFile srcFile = new QemuImgFile(sourcePath, sourceFormat); final KVMPhysicalDisk dstDisk = destPools.createPhysicalDisk( - name, QemuImg.PhysicalDiskFormat.RAW, Storage.ProvisioningType.FAT, 
disk.getVirtualSize()); + name, QemuImg.PhysicalDiskFormat.RAW, Storage.ProvisioningType.FAT, disk.getVirtualSize(), null); final QemuImgFile destFile = new QemuImgFile(dstDisk.getPath()); destFile.setFormat(dstDisk.getFormat()); destFile.setSize(disk.getVirtualSize()); try { + final QemuImg qemu = new QemuImg(timeout); qemu.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException e) { s_logger.error(e); @@ -460,7 +467,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor { QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool, - int timeout) + int timeout, byte[] passphrase) { s_logger.debug("Linstor: createDiskFromTemplateBacking"); return null; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java index 0e8a4ed5bed..5bc60fd2399 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStoragePool.java @@ -19,9 +19,10 @@ package com.cloud.hypervisor.kvm.storage; import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; import org.apache.cloudstack.utils.qemu.QemuImg; +import com.cloud.storage.Storage; + public class LinstorStoragePool implements KVMStoragePool { private final String _uuid; private final String _sourceHost; @@ -42,15 +43,15 @@ public class LinstorStoragePool implements KVMStoragePool { @Override public KVMPhysicalDisk createPhysicalDisk(String name, QemuImg.PhysicalDiskFormat format, - Storage.ProvisioningType provisioningType, long size) + Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { - return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size); + return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size, passphrase); } 
@Override - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size) + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { - return _storageAdaptor.createPhysicalDisk(volumeUuid,this, getDefaultFormat(), provisioningType, size); + return _storageAdaptor.createPhysicalDisk(volumeUuid,this, getDefaultFormat(), provisioningType, size, passphrase); } @Override @@ -185,6 +186,11 @@ public class LinstorStoragePool implements KVMStoragePool { return false; } + @Override + public Map getDetails() { + return null; + } + public String getResourceGroup() { return _resourceGroup; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java index 6db2f82beb4..6af43d50d24 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java @@ -55,7 +55,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor { } @Override - public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) { + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType, Map details) { LibvirtStoragePool storagePool = new LibvirtStoragePool(uuid, path, StoragePoolType.ManagedNFS, this, null); storagePool.setSourceHost(host); @@ -291,6 +291,11 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, 
null); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] destPassphrase) { throw new UnsupportedOperationException("Copying a disk is not supported in this configuration."); } @@ -315,7 +320,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { return null; } @@ -325,7 +330,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, ProvisioningType provisioningType, long size, byte[] passphrase) { return null; } @@ -335,7 +340,7 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java index 59eaab0f2a1..81ec46e0083 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -19,6 +19,8 @@ package com.cloud.hypervisor.kvm.storage; import java.io.File; import java.io.FileFilter; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -26,11 +28,17 @@ import java.util.Map; import java.util.UUID; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.utils.cryptsetup.CryptSetup; +import org.apache.cloudstack.utils.cryptsetup.CryptSetupException; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.io.filefilter.WildcardFileFilter; import org.apache.log4j.Logger; +import org.libvirt.LibvirtException; import com.cloud.storage.Storage; import com.cloud.storage.StorageLayer; @@ -39,7 +47,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import com.google.common.base.Strings; -import org.libvirt.LibvirtException; @StorageAdaptorInfo(storagePoolType= Storage.StoragePoolType.PowerFlex) public class ScaleIOStorageAdaptor implements StorageAdaptor { @@ -103,11 +110,27 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { } KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumePath, pool); - disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + // try to discover format as written to disk, rather than assuming raw. + // We support qcow2 for stored primary templates, disks seen as other should be treated as raw. 
+ QemuImg qemu = new QemuImg(0); + QemuImgFile qemuFile = new QemuImgFile(diskFilePath); + Map details = qemu.info(qemuFile); + String detectedFormat = details.getOrDefault(QemuImg.FILE_FORMAT, "none"); + if (detectedFormat.equalsIgnoreCase(QemuImg.PhysicalDiskFormat.QCOW2.toString())) { + disk.setFormat(QemuImg.PhysicalDiskFormat.QCOW2); + } else { + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + } long diskSize = getPhysicalDiskSize(diskFilePath); disk.setSize(diskSize); - disk.setVirtualSize(diskSize); + + if (details.containsKey(QemuImg.VIRTUAL_SIZE)) { + disk.setVirtualSize(Long.parseLong(details.get(QemuImg.VIRTUAL_SIZE))); + } else { + disk.setVirtualSize(diskSize); + } return disk; } catch (Exception e) { @@ -117,8 +140,8 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { } @Override - public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type) { - ScaleIOStoragePool storagePool = new ScaleIOStoragePool(uuid, host, port, path, type, this); + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map details) { + ScaleIOStoragePool storagePool = new ScaleIOStoragePool(uuid, host, port, path, type, details, this); MapStorageUuidToStoragePool.put(uuid, storagePool); return storagePool; } @@ -128,9 +151,37 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { return MapStorageUuidToStoragePool.remove(uuid) != null; } + /** + * ScaleIO doesn't need to communicate with the hypervisor normally to create a volume. This is used only to prepare a ScaleIO data disk for encryption. 
+ * @param name disk path + * @param pool pool + * @param format disk format + * @param provisioningType provisioning type + * @param size disk size + * @param passphrase passphrase + * @return the disk object + */ @Override - public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { - return null; + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + if (passphrase == null || passphrase.length == 0) { + return null; + } + + if(!connectPhysicalDisk(name, pool, null)) { + throw new CloudRuntimeException(String.format("Failed to ensure disk %s was present", name)); + } + + KVMPhysicalDisk disk = getPhysicalDisk(name, pool); + + try { + CryptSetup crypt = new CryptSetup(); + crypt.luksFormat(passphrase, CryptSetup.LuksType.LUKS, disk.getPath()); + disk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS); + } catch (CryptSetupException ex) { + throw new CloudRuntimeException("Failed to set up encryption for block device " + disk.getPath(), ex); + } + + return disk; } @Override @@ -228,7 +279,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { return null; } @@ -244,6 +295,11 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int 
timeout) { + return copyPhysicalDisk(disk, name, destPool, timeout, null, null); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[]dstPassphrase) { if (Strings.isNullOrEmpty(name) || disk == null || destPool == null) { LOGGER.error("Unable to copy physical disk due to insufficient data"); throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); @@ -261,18 +317,49 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { destDisk.setVirtualSize(disk.getVirtualSize()); destDisk.setSize(disk.getSize()); - QemuImg qemu = new QemuImg(timeout); + QemuImg qemu = null; QemuImgFile srcFile = null; QemuImgFile destFile = null; + String srcKeyName = "sec0"; + String destKeyName = "sec1"; + List qemuObjects = new ArrayList<>(); + Map options = new HashMap(); + CryptSetup cryptSetup = null; - try { - srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); - destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + try (KeyFile srcKey = new KeyFile(srcPassphrase); KeyFile dstKey = new KeyFile(dstPassphrase)){ + qemu = new QemuImg(timeout, true, true); + String srcPath = disk.getPath(); + String destPath = destDisk.getPath(); + QemuImg.PhysicalDiskFormat destFormat = destDisk.getFormat(); + QemuImageOptions qemuImageOpts = new QemuImageOptions(srcPath); - LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); - qemu.convert(srcFile, destFile, true); + if (srcKey.isSet()) { + qemuObjects.add(QemuObject.prepareSecretForQemuImg(disk.getFormat(), null , srcKey.toString(), srcKeyName, options)); + qemuImageOpts = new QemuImageOptions(disk.getFormat(), srcPath, srcKeyName); + } + + if (dstKey.isSet()) { + if (qemu.supportsSkipZeros()) { + // format and open luks device rather than letting qemu do a slow copy of full image + cryptSetup = new 
CryptSetup(); + cryptSetup.luksFormat(dstPassphrase, CryptSetup.LuksType.LUKS, destDisk.getPath()); + cryptSetup.open(dstPassphrase, CryptSetup.LuksType.LUKS, destDisk.getPath(), name); + destPath = String.format("/dev/mapper/%s", name); + } else { + qemuObjects.add(QemuObject.prepareSecretForQemuImg(destDisk.getFormat(), null, dstKey.toString(), destKeyName, options)); + destFormat = QemuImg.PhysicalDiskFormat.LUKS; + } + destDisk.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS); + } + + srcFile = new QemuImgFile(srcPath, disk.getFormat()); + destFile = new QemuImgFile(destPath, destFormat); + + boolean forceSourceFormat = srcFile.getFormat() == QemuImg.PhysicalDiskFormat.RAW; + LOGGER.debug(String.format("Starting copy from source disk %s(%s) to PowerFlex volume %s(%s), forcing source format is %b", srcFile.getFileName(), srcFile.getFormat(), destFile.getFileName(), destFile.getFormat(), forceSourceFormat)); + qemu.convert(srcFile, destFile, options, qemuObjects, qemuImageOpts,null, forceSourceFormat); LOGGER.debug("Succesfully converted source disk image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); - } catch (QemuImgException | LibvirtException e) { + } catch (QemuImgException | LibvirtException | IOException | CryptSetupException e) { try { Map srcInfo = qemu.info(srcFile); LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); @@ -283,6 +370,14 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((Strings.isNullOrEmpty(e.getMessage())) ? 
"an unknown error" : e.getMessage())); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg, e); + } finally { + if (cryptSetup != null) { + try { + cryptSetup.close(name); + } catch (CryptSetupException ex) { + LOGGER.warn("Failed to clean up LUKS disk after copying disk", ex); + } + } } return destDisk; @@ -309,7 +404,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { } @Override - public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { return null; } @@ -346,6 +441,7 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { QemuImgFile srcFile = null; QemuImgFile destFile = null; try { + QemuImg qemu = new QemuImg(timeout, true, true); destDisk = destPool.getPhysicalDisk(destTemplatePath); if (destDisk == null) { LOGGER.error("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); @@ -368,14 +464,21 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { } srcFile = new QemuImgFile(srcTemplateFilePath, srcFileFormat); - destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + qemu.info(srcFile); + /** + * Even though the disk itself is raw, we store templates on ScaleIO the raw volumes in qcow2 format. + * This improves performance by reading/writing less data to volume, saves the unused space for encryption header, and + * nicely encapsulates VM images that might contain LUKS data (as opposed to converting to raw which would look like a LUKS volume). 
+ */ + destFile = new QemuImgFile(destDisk.getPath(), QemuImg.PhysicalDiskFormat.QCOW2); + destFile.setSize(srcFile.getSize()); LOGGER.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); - QemuImg qemu = new QemuImg(timeout); + qemu.create(destFile); qemu.convert(srcFile, destFile); - LOGGER.debug("Succesfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); } catch (QemuImgException | LibvirtException e) { - LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage(), e); + LOGGER.error("Failed to convert. The error was: " + e.getMessage(), e); destDisk = null; } finally { Script.runSimpleBashScript("rm -f " + srcTemplateFilePath); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java index 4ead92d6a0d..cf977f5467b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java @@ -20,6 +20,8 @@ package com.cloud.hypervisor.kvm.storage; import java.util.List; import java.util.Map; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.utils.qemu.QemuImg; import com.cloud.storage.Storage; @@ -34,8 +36,9 @@ public class ScaleIOStoragePool implements KVMStoragePool { private long capacity; private long used; private long available; + private Map details; - public 
ScaleIOStoragePool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, StorageAdaptor adaptor) { + public ScaleIOStoragePool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, Map poolDetails, StorageAdaptor adaptor) { this.uuid = uuid; sourceHost = host; sourcePort = port; @@ -45,15 +48,34 @@ public class ScaleIOStoragePool implements KVMStoragePool { capacity = 0; used = 0; available = 0; + details = poolDetails; + addSDCDetails(); + } + + private void addSDCDetails() { + if (details == null || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) { + return; + } + + String storageSystemId = details.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + String sdcId = ScaleIOUtil.getSdcId(storageSystemId); + if (sdcId != null) { + details.put(ScaleIOGatewayClient.SDC_ID, sdcId); + } else { + String sdcGuId = ScaleIOUtil.getSdcGuid(); + if (sdcGuId != null) { + details.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId); + } + } } @Override - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { - return null; + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + return this.storageAdaptor.createPhysicalDisk(volumeUuid, this, format, provisioningType, size, passphrase); } @Override - public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size) { + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { return null; } @@ -178,4 +200,9 @@ public class ScaleIOStoragePool implements KVMStoragePool { public boolean supportsConfigDriveIso() { return false; } + + @Override + public Map getDetails() { + return this.details; + } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 570c2070c75..ca0f77f4512 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java @@ -35,12 +35,12 @@ public interface StorageAdaptor { // it with info from local disk, and return it public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool); - public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type); + public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map details); public boolean deleteStoragePool(String uuid); public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, - PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size); + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase); // given disk path (per database) and pool, prepare disk on host public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details); @@ -58,13 +58,14 @@ public interface StorageAdaptor { public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, - KVMStoragePool destPool, int timeout); + KVMStoragePool destPool, int timeout, byte[] passphrase); public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool); public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool); public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout); + 
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout, byte[] srcPassphrase, byte[] dstPassphrase); public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout); @@ -80,7 +81,7 @@ public interface StorageAdaptor { */ KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, - KVMStoragePool destPool, int timeout); + KVMStoragePool destPool, int timeout, byte[] passphrase); /** * Create physical disk on Primary Storage from direct download template on the host (in temporary location) diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetup.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetup.java new file mode 100644 index 00000000000..6489ade23fd --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetup.java @@ -0,0 +1,108 @@ +package org.apache.cloudstack.utils.cryptsetup; + +import com.cloud.utils.script.Script; + +import java.io.IOException; + +public class CryptSetup { + protected String _commandPath = "cryptsetup"; + + /** + * LuksType represents the possible types that can be passed to cryptsetup. + * NOTE: Only "luks1" is currently supported with Libvirt, so while + * this utility may be capable of creating various types, care should + * be taken to use types that work for the use case. 
+ */ + public enum LuksType { + LUKS("luks1"), LUKS2("luks2"), PLAIN("plain"), TCRYPT("tcrypt"), BITLK("bitlk"); + + String luksType; + + LuksType(String type) { this.luksType = type; } + + @Override + public String toString() { + return luksType; + } + } + + public CryptSetup(final String commandPath) { + _commandPath = commandPath; + } + + public CryptSetup() {} + + public void open(byte[] passphrase, LuksType luksType, String diskPath, String diskName) throws CryptSetupException { + try(KeyFile key = new KeyFile(passphrase)) { + final Script script = new Script(_commandPath); + script.add("open"); + script.add("--key-file"); + script.add(key.toString()); + script.add("--allow-discards"); + script.add(diskPath); + script.add(diskName); + + final String result = script.execute(); + if (result != null) { + throw new CryptSetupException(result); + } + } catch (IOException ex) { + throw new CryptSetupException(String.format("Failed to open encrypted device at '%s'", diskPath), ex); + } + } + + public void close(String diskName) throws CryptSetupException { + final Script script = new Script(_commandPath); + script.add("close"); + script.add(diskName); + + final String result = script.execute(); + if (result != null) { + throw new CryptSetupException(result); + } + } + + /** + * Formats a file using cryptsetup + * @param passphrase + * @param luksType + * @param diskPath + * @throws CryptSetupException + */ + public void luksFormat(byte[] passphrase, LuksType luksType, String diskPath) throws CryptSetupException { + try(KeyFile key = new KeyFile(passphrase)) { + final Script script = new Script(_commandPath); + script.add("luksFormat"); + script.add("-q"); + script.add("--force-password"); + script.add("--key-file"); + script.add(key.toString()); + script.add("--type"); + script.add(luksType.toString()); + script.add(diskPath); + + final String result = script.execute(); + if (result != null) { + throw new CryptSetupException(result); + } + } catch (IOException ex) { 
+ throw new CryptSetupException(String.format("Failed to format encrypted device at '%s'", diskPath), ex); + } + } + + public boolean isSupported() { + final Script script = new Script(_commandPath); + script.add("--usage"); + final String result = script.execute(); + return result == null; + } + + public boolean isLuks(String filePath) { + final Script script = new Script(_commandPath); + script.add("isLuks"); + script.add(filePath); + + final String result = script.execute(); + return result == null; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupException.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupException.java new file mode 100644 index 00000000000..210413d03cc --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupException.java @@ -0,0 +1,9 @@ +package org.apache.cloudstack.utils.cryptsetup; + +public class CryptSetupException extends Exception { + public CryptSetupException(String message) { + super(message); + } + + public CryptSetupException(String message, Exception ex) { super(message, ex); } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/KeyFile.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/KeyFile.java new file mode 100644 index 00000000000..c11de561cf4 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/cryptsetup/KeyFile.java @@ -0,0 +1,60 @@ +package org.apache.cloudstack.utils.cryptsetup; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Set; + +public class KeyFile implements Closeable { + private Path filePath = null; + + /** + * KeyFile represents a temporary file for storing data + 
* to pass to commands, as an alternative to putting sensitive + * data on the command line. + * @param key byte array of content for the KeyFile + * @throws IOException as the IOException for creating KeyFile + */ + public KeyFile(byte[] key) throws IOException { + if (key != null && key.length > 0) { + Set permissions = PosixFilePermissions.fromString("rw-------"); + filePath = Files.createTempFile("keyfile", ".tmp", PosixFilePermissions.asFileAttribute(permissions)); + Files.write(filePath, key); + } + } + + public Path getPath() { + return filePath; + } + + public boolean isSet() { + return filePath != null; + } + + /** + * Converts the keyfile to the absolute path String where it is located + * @return absolute path as String + */ + @Override + public String toString() { + if (filePath != null) { + return filePath.toAbsolutePath().toString(); + } + return null; + } + + /** + * Deletes the underlying key file + * @throws IOException as the IOException for deleting the underlying key file + */ + @Override + public void close() throws IOException { + if (isSet()) { + Files.delete(filePath); + filePath = null; + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImageOptions.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImageOptions.java new file mode 100644 index 00000000000..1b5e4eff261 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImageOptions.java @@ -0,0 +1,65 @@ +package org.apache.cloudstack.utils.qemu; + +import com.google.common.base.Joiner; + +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; + +public class QemuImageOptions { + private Map params = new HashMap<>(); + private static final String FILENAME_PARAM_KEY = "file.filename"; + private static final String LUKS_KEY_SECRET_PARAM_KEY = "key-secret"; + private static final String QCOW2_KEY_SECRET_PARAM_KEY = "encrypt.key-secret"; + + public 
QemuImageOptions(String filePath) { + params.put(FILENAME_PARAM_KEY, filePath); + } + + /** + * Constructor for self-crafting the full map of parameters + * @param params the map of parameters + */ + public QemuImageOptions(Map params) { + this.params = params; + } + + /** + * Constructor for crafting image options that may contain a secret or format + * @param format optional format, renders as "driver" option + * @param filePath required path of image + * @param secretName optional secret name for image. Secret only applies for QCOW2 or LUKS format + */ + public QemuImageOptions(QemuImg.PhysicalDiskFormat format, String filePath, String secretName) { + params.put(FILENAME_PARAM_KEY, filePath); + if (secretName != null && !secretName.isBlank()) { + switch (format) { + case QCOW2: + params.put(QCOW2_KEY_SECRET_PARAM_KEY, secretName); + break; + case LUKS: + params.put(LUKS_KEY_SECRET_PARAM_KEY, secretName); + break; + } + } + if (format != null) { + params.put("driver", format.toString()); + } + } + + public void setFormat(QemuImg.PhysicalDiskFormat format) { + if (format != null) { + params.put("driver", format.toString()); + } + } + + /** + * Converts QemuObject into the command strings required by qemu-img flags + * @return array of strings representing command flag and value (--object) + */ + public String[] toCommandFlag() { + Map sorted = new TreeMap<>(params); + String paramString = Joiner.on(",").withKeyValueSeparator("=").join(sorted); + return new String[] {"--image-opts", paramString}; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java index 7de09a3a935..385c53af4f7 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -16,17 +16,21 @@ // under the License. 
package org.apache.cloudstack.utils.qemu; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; +import org.apache.commons.lang.NotImplementedException; +import org.apache.commons.lang.StringUtils; +import org.libvirt.LibvirtException; + import com.cloud.hypervisor.kvm.resource.LibvirtConnection; import com.cloud.storage.Storage; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.NotImplementedException; -import org.libvirt.LibvirtException; public class QemuImg { public final static String BACKING_FILE = "backing_file"; @@ -35,11 +39,18 @@ public class QemuImg { public final static String FILE_FORMAT = "file_format"; public final static String IMAGE = "image"; public final static String VIRTUAL_SIZE = "virtual_size"; + public final static String ENCRYPT_FORMAT = "encrypt.format"; + public final static String ENCRYPT_KEY_SECRET = "encrypt.key-secret"; + public final static String TARGET_ZERO_FLAG = "--target-is-zero"; + public final static long QEMU_2_10 = 2010000; /* The qemu-img binary. We expect this to be in $PATH */ public String _qemuImgPath = "qemu-img"; private String cloudQemuImgPath = "cloud-qemu-img"; private int timeout; + private boolean skipZero = false; + private boolean noCache = false; + private long version; private String getQemuImgPathScript = String.format("which %s >& /dev/null; " + "if [ $? -gt 0 ]; then echo \"%s\"; else echo \"%s\"; fi", @@ -47,7 +58,7 @@ public class QemuImg { /* Shouldn't we have KVMPhysicalDisk and LibvirtVMDef read this? 
*/ public static enum PhysicalDiskFormat { - RAW("raw"), QCOW2("qcow2"), VMDK("vmdk"), FILE("file"), RBD("rbd"), SHEEPDOG("sheepdog"), HTTP("http"), HTTPS("https"), TAR("tar"), DIR("dir"); + RAW("raw"), QCOW2("qcow2"), VMDK("vmdk"), FILE("file"), RBD("rbd"), SHEEPDOG("sheepdog"), HTTP("http"), HTTPS("https"), TAR("tar"), DIR("dir"), LUKS("luks"); String format; private PhysicalDiskFormat(final String format) { @@ -90,8 +101,41 @@ public class QemuImg { } } - public QemuImg(final int timeout) { + /** + * Create a QemuImg object that supports skipping target zeroes + * We detect this support via qemu-img help since support can + * be backported rather than found in a specific version. + * + * @param timeout script timeout, default 0 + * @param skipZeroIfSupported Don't write zeroes to target device during convert, if supported by qemu-img + * @param noCache Ensure we flush writes to target disk (useful for block device targets) + */ + public QemuImg(final int timeout, final boolean skipZeroIfSupported, final boolean noCache) throws QemuImgException, LibvirtException { + if (skipZeroIfSupported) { + final Script s = new Script(_qemuImgPath, timeout); + s.add("--help"); + + final OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); + final String result = s.execute(parser); + + // Older Qemu returns output in result due to --help reporting error status + if (result != null) { + if (result.contains(TARGET_ZERO_FLAG)) { + this.skipZero = true; + } + } else { + if (parser.getLines().contains(TARGET_ZERO_FLAG)) { + this.skipZero = true; + } + } + } this.timeout = timeout; + this.noCache = noCache; + this.version = LibvirtConnection.getConnection().getVersion(); + } + + public QemuImg(final int timeout) throws LibvirtException, QemuImgException { + this(timeout, false, false); } public void setTimeout(final int timeout) { @@ -106,7 +150,8 @@ public class QemuImg { * A alternative path to the qemu-img binary * @return void */ - public 
QemuImg(final String qemuImgPath) { + public QemuImg(final String qemuImgPath) throws LibvirtException, QemuImgException { + this(0, false, false); _qemuImgPath = qemuImgPath; } @@ -132,9 +177,35 @@ public class QemuImg { * @return void */ public void create(final QemuImgFile file, final QemuImgFile backingFile, final Map options) throws QemuImgException { + create(file, backingFile, options, null); + } + + /** + * Create a new image + * + * This method calls 'qemu-img create' + * + * @param file + * The file to create + * @param backingFile + * A backing file if used (for example with qcow2) + * @param options + * Options for the create. Takes a Map with key value + * pairs which are passed on to qemu-img without validation. + * @param qemuObjects + * Pass list of qemu Object to create - see objects in qemu man page + * @return void + */ + public void create(final QemuImgFile file, final QemuImgFile backingFile, final Map options, final List qemuObjects) throws QemuImgException { final Script s = new Script(_qemuImgPath, timeout); s.add("create"); + if (this.version >= QEMU_2_10 && qemuObjects != null) { + for (QemuObject o : qemuObjects) { + s.add(o.toCommandFlag()); + } + } + if (options != null && !options.isEmpty()) { s.add("-o"); final StringBuilder optionsStr = new StringBuilder(); @@ -244,6 +315,63 @@ public class QemuImg { */ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map options, final String snapshotName, final boolean forceSourceFormat) throws QemuImgException, LibvirtException { + convert(srcFile, destFile, options, null, snapshotName, forceSourceFormat); + } + + /** + * Convert a image from source to destination + * + * This method calls 'qemu-img convert' and takes five objects + * as an argument. + * + * + * @param srcFile + * The source file + * @param destFile + * The destination file + * @param options + * Options for the convert. 
Takes a Map with key value + * pairs which are passed on to qemu-img without validation. + * @param qemuObjects + * Pass qemu Objects to create - see objects in qemu man page + * @param snapshotName + * If it is provided, convertion uses it as parameter + * @param forceSourceFormat + * If true, specifies the source format in the conversion cmd + * @return void + */ + public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, + final Map options, final List qemuObjects, final String snapshotName, final boolean forceSourceFormat) throws QemuImgException, LibvirtException { + QemuImageOptions imageOpts = new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null); + convert(srcFile, destFile, options, qemuObjects, imageOpts, snapshotName, forceSourceFormat); + } + + /** + * Convert a image from source to destination + * + * This method calls 'qemu-img convert' and takes five objects + * as an argument. + * + * + * @param srcFile + * The source file + * @param destFile + * The destination file + * @param options + * Options for the convert. Takes a Map with key value + * pairs which are passed on to qemu-img without validation. 
+ * @param qemuObjects + * Pass qemu Objects to convert - see objects in qemu man page + * @param srcImageOpts + * pass qemu --image-opts to convert + * @param snapshotName + * If it is provided, convertion uses it as parameter + * @param forceSourceFormat + * If true, specifies the source format in the conversion cmd + * @return void + */ + public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, + final Map options, final List qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat) throws QemuImgException { Script script = new Script(_qemuImgPath, timeout); if (StringUtils.isNotBlank(snapshotName)) { String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript); @@ -251,41 +379,56 @@ public class QemuImg { } script.add("convert"); - Long version = LibvirtConnection.getConnection().getVersion(); - if (version >= 2010000) { - script.add("-U"); - } - // autodetect source format unless specified explicitly - if (forceSourceFormat) { - script.add("-f"); - script.add(srcFile.getFormat().toString()); + if (skipZero && Files.exists(Paths.get(destFile.getFileName()))) { + script.add("-n"); + script.add(TARGET_ZERO_FLAG); + script.add("-W"); + // with target-is-zero we skip zeros in 1M chunks for compatibility + script.add("-S"); + script.add("1M"); } script.add("-O"); script.add(destFile.getFormat().toString()); - if (options != null && !options.isEmpty()) { - script.add("-o"); - final StringBuffer optionsBuffer = new StringBuffer(); - for (final Map.Entry option : options.entrySet()) { - optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(','); - } - String optionsStr = optionsBuffer.toString(); - optionsStr = optionsStr.replaceAll(",$", ""); - script.add(optionsStr); - } + addScriptOptionsFromMap(options, script); if (StringUtils.isNotBlank(snapshotName)) { - if (!forceSourceFormat) { - script.add("-f"); - script.add(srcFile.getFormat().toString()); + if 
(this.version >= QEMU_2_10) { + script.add("-l"); + } else { + script.add("-s"); } - script.add("-s"); script.add(snapshotName); } - script.add(srcFile.getFileName()); + if (noCache) { + script.add("-t"); + script.add("none"); + } + + if (this.version >= QEMU_2_10) { + script.add("-U"); + + if (forceSourceFormat) { + srcImageOpts.setFormat(srcFile.getFormat()); + } + script.add(srcImageOpts.toCommandFlag()); + + if (qemuObjects != null) { + for (QemuObject o : qemuObjects) { + script.add(o.toCommandFlag()); + } + } + } else { + if (forceSourceFormat) { + script.add("-f"); + script.add(srcFile.getFormat().toString()); + } + script.add(srcFile.getFileName()); + } + script.add(destFile.getFileName()); final String result = script.execute(); @@ -407,8 +550,7 @@ public class QemuImg { public Map info(final QemuImgFile file) throws QemuImgException, LibvirtException { final Script s = new Script(_qemuImgPath); s.add("info"); - Long version = LibvirtConnection.getConnection().getVersion(); - if (version >= 2010000) { + if (this.version >= QEMU_2_10) { s.add("-U"); } s.add(file.getFileName()); @@ -436,12 +578,72 @@ public class QemuImg { info.put(key, value); } } + + // set some missing attributes in passed file, if found + if (info.containsKey(VIRTUAL_SIZE) && file.getSize() == 0L) { + file.setSize(Long.parseLong(info.get(VIRTUAL_SIZE))); + } + + if (info.containsKey(FILE_FORMAT) && file.getFormat() == null) { + file.setFormat(PhysicalDiskFormat.valueOf(info.get(FILE_FORMAT).toUpperCase())); + } + return info; } - /* List, apply, create or delete snapshots in image */ - public void snapshot() throws QemuImgException { + /* create snapshots in image */ + public void snapshot(final QemuImageOptions srcImageOpts, final String snapshotName, final List qemuObjects) throws QemuImgException { + final Script s = new Script(_qemuImgPath, timeout); + s.add("snapshot"); + s.add("-c"); + s.add(snapshotName); + for (QemuObject o : qemuObjects) { + s.add(o.toCommandFlag()); + } + + 
s.add(srcImageOpts.toCommandFlag()); + + final String result = s.execute(); + if (result != null) { + throw new QemuImgException(result); + } + } + + /* delete snapshots in image */ + public void deleteSnapshot(final QemuImageOptions srcImageOpts, final String snapshotName, final List qemuObjects) throws QemuImgException { + final Script s = new Script(_qemuImgPath, timeout); + s.add("snapshot"); + s.add("-d"); + s.add(snapshotName); + + for (QemuObject o : qemuObjects) { + s.add(o.toCommandFlag()); + } + + s.add(srcImageOpts.toCommandFlag()); + + final String result = s.execute(); + if (result != null) { + // support idempotent delete calls, if no snapshot exists we are good. + if (result.contains("snapshot not found") || result.contains("Can't find the snapshot")) { + return; + } + throw new QemuImgException(result); + } + } + + private void addScriptOptionsFromMap(Map options, Script s) { + if (options != null && !options.isEmpty()) { + s.add("-o"); + final StringBuffer optionsBuffer = new StringBuffer(); + for (final Map.Entry option : options.entrySet()) { + optionsBuffer.append(option.getKey()).append('=').append(option.getValue()).append(','); + } + String optionsStr = optionsBuffer.toString(); + optionsStr = optionsStr.replaceAll(",$", ""); + s.add(optionsStr); + } } /* Changes the backing file of an image */ @@ -512,6 +714,33 @@ public class QemuImg { s.execute(); } + /** + * Resize an image, new style flags/options + * + * @param imageOptions + * Qemu style image options for the image to resize + * @param qemuObjects + * Qemu style options (e.g. 
for passing secrets) + * @param size + * The absolute final size of the image + */ + public void resize(final QemuImageOptions imageOptions, final List qemuObjects, final long size) throws QemuImgException { + final Script s = new Script(_qemuImgPath); + s.add("resize"); + + for (QemuObject o : qemuObjects) { + s.add(o.toCommandFlag()); + } + + s.add(imageOptions.toCommandFlag()); + s.add(Long.toString(size)); + + final String result = s.execute(); + if (result != null) { + throw new QemuImgException(result); + } + } + /** * Resize an image * @@ -528,4 +757,12 @@ public class QemuImg { public void resize(final QemuImgFile file, final long size) throws QemuImgException { this.resize(file, size, false); } + + /** + * Does qemu-img support --target-is-zero + * @return boolean + */ + public boolean supportsSkipZeros() { + return this.skipZero; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuObject.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuObject.java new file mode 100644 index 00000000000..e2b71f0f269 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuObject.java @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.utils.qemu; + +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.commons.lang3.StringUtils; + +import com.google.common.base.Joiner; + +public class QemuObject { + private final ObjectType type; + private final Map params; + + public enum ObjectParameter { + DATA("data"), + FILE("file"), + FORMAT("format"), + ID("id"), + IV("iv"), + KEYID("keyid"); + + private final String parameter; + + ObjectParameter(String param) { + this.parameter = param; + } + + @Override + public String toString() {return parameter; } + } + + /** + * Supported qemu encryption formats. + * NOTE: Only "luks" is currently supported with Libvirt, so while + * this utility may be capable of creating various formats, care should + * be taken to use types that work for the use case. + */ + public enum EncryptFormat { + LUKS("luks"), + AES("aes"); + + private final String format; + + EncryptFormat(String format) { this.format = format; } + + EncryptFormat(QemuImg.PhysicalDiskFormat format) { + this.format = format.toString(); + } + + @Override + public String toString() { return format;} + + public static EncryptFormat enumValue(String value) { + if (StringUtils.isBlank(value)) { + return LUKS; // default encryption format + } + return EncryptFormat.valueOf(value.toUpperCase()); + } + } + + public enum ObjectType { + SECRET("secret"); + + private final String objectType; + + ObjectType(String objectType) { + this.objectType = objectType; + } + + @Override + public String toString() { + return objectType; + } + } + + public QemuObject(ObjectType type, Map params) { + this.type = type; + this.params = params; + } + + /** + * Converts QemuObject into the command strings required by qemu-img flags + * @return array of strings representing command flag and value (--object) + */ + public String[] toCommandFlag() { + Map 
sorted = new TreeMap<>(params); + String paramString = Joiner.on(",").withKeyValueSeparator("=").join(sorted); + return new String[] {"--object", String.format("%s,%s", type, paramString) }; + } + + /** + * Creates a QemuObject with the correct parameters for passing encryption secret details to qemu-img + * @param format the image format to use + * @param encryptFormat the encryption format to use (luks) + * @param keyFilePath the path to the file containing encryption key + * @param secretName the name to use for the secret + * @param options the options map for qemu-img (-o flag) + * @return the QemuObject containing encryption parameters + */ + public static QemuObject prepareSecretForQemuImg(QemuImg.PhysicalDiskFormat format, EncryptFormat encryptFormat, String keyFilePath, String secretName, Map options) { + Map params = new HashMap<>(); + params.put(QemuObject.ObjectParameter.ID, secretName); + params.put(QemuObject.ObjectParameter.FILE, keyFilePath); + + if (options != null) { + if (format == QemuImg.PhysicalDiskFormat.QCOW2) { + options.put("encrypt.key-secret", secretName); + options.put("encrypt.format", encryptFormat.toString()); + } else if (format == QemuImg.PhysicalDiskFormat.RAW || format == QemuImg.PhysicalDiskFormat.LUKS) { + options.put("key-secret", secretName); + } + } + return new QemuObject(QemuObject.ObjectType.SECRET, params); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index 3632cf299d3..b951f997909 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -56,6 +56,7 @@ import javax.xml.xpath.XPathFactory; import org.apache.cloudstack.storage.command.AttachAnswer; import 
org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.linux.CPUStat; import org.apache.cloudstack.utils.linux.MemStat; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; @@ -76,6 +77,7 @@ import org.libvirt.LibvirtException; import org.libvirt.MemoryStatistic; import org.libvirt.NodeInfo; import org.libvirt.StorageVol; +import org.libvirt.VcpuInfo; import org.libvirt.jna.virDomainMemoryStats; import org.mockito.BDDMockito; import org.mockito.Mock; @@ -207,8 +209,6 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.PowerState; import com.cloud.vm.VirtualMachine.Type; -import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; -import org.libvirt.VcpuInfo; @RunWith(PowerMockRunner.class) @PrepareForTest(value = {MemStat.class}) @@ -2146,7 +2146,7 @@ public class LibvirtComputingResourceTest { when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(poolManager); when(poolManager.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(primary); - when(primary.createPhysicalDisk(diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), diskCharacteristics.getSize())).thenReturn(vol); + when(primary.createPhysicalDisk(diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), diskCharacteristics.getSize(), null)).thenReturn(vol); final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); assertNotNull(wrapper); @@ -2205,7 +2205,7 @@ public class LibvirtComputingResourceTest { when(poolManager.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(primary); when(primary.getPhysicalDisk(command.getTemplateUrl())).thenReturn(baseVol); - when(poolManager.createDiskFromTemplate(baseVol, diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), primary, baseVol.getSize(), 0)).thenReturn(vol); + 
when(poolManager.createDiskFromTemplate(baseVol, diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), primary, baseVol.getSize(), 0,null)).thenReturn(vol); final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); assertNotNull(wrapper); @@ -2765,7 +2765,7 @@ public class LibvirtComputingResourceTest { when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr); when(storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool() - .getUserInfo(), command.getPool().getType())).thenReturn(kvmStoragePool); + .getUserInfo(), command.getPool().getType(), command.getDetails())).thenReturn(kvmStoragePool); final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); @@ -2776,7 +2776,7 @@ public class LibvirtComputingResourceTest { verify(libvirtComputingResource, times(1)).getStoragePoolMgr(); verify(storagePoolMgr, times(1)).createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool() - .getUserInfo(), command.getPool().getType()); + .getUserInfo(), command.getPool().getType(), command.getDetails()); } @Test @@ -2788,7 +2788,7 @@ public class LibvirtComputingResourceTest { when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr); when(storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool() - .getUserInfo(), command.getPool().getType())).thenReturn(null); + .getUserInfo(), command.getPool().getType(), command.getDetails())).thenReturn(null); final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); @@ -2799,7 +2799,7 @@ public class LibvirtComputingResourceTest { verify(libvirtComputingResource, times(1)).getStoragePoolMgr(); verify(storagePoolMgr, 
times(1)).createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool() - .getUserInfo(), command.getPool().getType()); + .getUserInfo(), command.getPool().getType(), command.getDetails()); } @Test @@ -4845,6 +4845,10 @@ public class LibvirtComputingResourceTest { final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class); final Connect conn = Mockito.mock(Connect.class); final StorageVol v = Mockito.mock(StorageVol.class); + final Domain vm = Mockito.mock(Domain.class); + final DomainInfo info = Mockito.mock(DomainInfo.class); + final DomainState state = DomainInfo.DomainState.VIR_DOMAIN_RUNNING; + info.state = state; when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr); when(storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(storagePool); @@ -4858,9 +4862,11 @@ public class LibvirtComputingResourceTest { try { when(libvirtUtilitiesHelper.getConnection()).thenReturn(conn); when(conn.storageVolLookupByPath(path)).thenReturn(v); + when(libvirtUtilitiesHelper.getConnectionByVmName(vmInstance)).thenReturn(conn); + when(conn.domainLookupByName(vmInstance)).thenReturn(vm); + when(vm.getInfo()).thenReturn(info); when(conn.getLibVirVersion()).thenReturn(10010l); - } catch (final LibvirtException e) { fail(e.getMessage()); } @@ -4873,9 +4879,10 @@ public class LibvirtComputingResourceTest { verify(libvirtComputingResource, times(1)).getStoragePoolMgr(); - verify(libvirtComputingResource, times(1)).getLibvirtUtilitiesHelper(); + verify(libvirtComputingResource, times(2)).getLibvirtUtilitiesHelper(); try { verify(libvirtUtilitiesHelper, times(1)).getConnection(); + verify(libvirtUtilitiesHelper, times(1)).getConnectionByVmName(vmInstance); } catch (final LibvirtException e) { fail(e.getMessage()); } @@ -4896,6 +4903,11 @@ public class LibvirtComputingResourceTest { final KVMStoragePool storagePool = 
Mockito.mock(KVMStoragePool.class); final KVMPhysicalDisk vol = Mockito.mock(KVMPhysicalDisk.class); final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class); + final Connect conn = Mockito.mock(Connect.class); + final Domain vm = Mockito.mock(Domain.class); + final DomainInfo info = Mockito.mock(DomainInfo.class); + final DomainState state = DomainInfo.DomainState.VIR_DOMAIN_RUNNING; + info.state = state; when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr); when(storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(storagePool); @@ -4904,6 +4916,15 @@ public class LibvirtComputingResourceTest { when(storagePool.getType()).thenReturn(StoragePoolType.Linstor); when(vol.getFormat()).thenReturn(PhysicalDiskFormat.RAW); + when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper); + try { + when(libvirtUtilitiesHelper.getConnectionByVmName(vmInstance)).thenReturn(conn); + when(conn.domainLookupByName(vmInstance)).thenReturn(vm); + when(vm.getInfo()).thenReturn(info); + } catch (final LibvirtException e) { + fail(e.getMessage()); + } + final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); assertNotNull(wrapper); @@ -4913,9 +4934,10 @@ public class LibvirtComputingResourceTest { verify(libvirtComputingResource, times(1)).getStoragePoolMgr(); verify(libvirtComputingResource, times(0)).getResizeScriptType(storagePool, vol); - verify(libvirtComputingResource, times(0)).getLibvirtUtilitiesHelper(); + verify(libvirtComputingResource, times(1)).getLibvirtUtilitiesHelper(); try { verify(libvirtUtilitiesHelper, times(0)).getConnection(); + verify(libvirtUtilitiesHelper, times(1)).getConnectionByVmName(vmInstance); } catch (final LibvirtException e) { fail(e.getMessage()); } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParserTest.java 
b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParserTest.java index f2ba293436e..ccab4b01c33 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParserTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParserTest.java @@ -29,6 +29,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.RngDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef; import junit.framework.TestCase; +import org.apache.cloudstack.utils.qemu.QemuObject; public class LibvirtDomainXMLParserTest extends TestCase { @@ -51,6 +52,10 @@ public class LibvirtDomainXMLParserTest extends TestCase { String diskLabel ="vda"; String diskPath = "/var/lib/libvirt/images/my-test-image.qcow2"; + String diskLabel2 ="vdb"; + String diskPath2 = "/var/lib/libvirt/images/my-test-image2.qcow2"; + String secretUuid = "5644d664-a238-3a9b-811c-961f609d29f4"; + String xml = "" + "s-2970-VM" + "4d2c1526-865d-4fc9-a1ac-dbd1801a22d0" + @@ -87,6 +92,16 @@ public class LibvirtDomainXMLParserTest extends TestCase { "" + "
" + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "
" + + "" + "" + "" + "" + @@ -200,6 +215,11 @@ public class LibvirtDomainXMLParserTest extends TestCase { assertEquals(deviceType, disks.get(diskId).getDeviceType()); assertEquals(diskFormat, disks.get(diskId).getDiskFormatType()); + DiskDef.LibvirtDiskEncryptDetails encryptDetails = disks.get(1).getLibvirtDiskEncryptDetails(); + assertNotNull(encryptDetails); + assertEquals(QemuObject.EncryptFormat.LUKS, encryptDetails.getEncryptFormat()); + assertEquals(secretUuid, encryptDetails.getPassphraseUuid()); + List channels = parser.getChannels(); for (int i = 0; i < channels.size(); i++) { assertEquals(channelType, channels.get(i).getChannelType()); diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDefTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDefTest.java index b0eaad4f269..594460e8702 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDefTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDefTest.java @@ -23,6 +23,7 @@ import java.io.File; import java.util.Arrays; import java.util.List; import java.util.Scanner; +import java.util.UUID; import junit.framework.TestCase; @@ -30,6 +31,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.ChannelDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SCSIDef; import org.apache.cloudstack.utils.linux.MemStat; +import org.apache.cloudstack.utils.qemu.QemuObject; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -191,6 +193,24 @@ public class LibvirtVMDefTest extends TestCase { assertEquals(xmlDef, expectedXml); } + @Test + public void testDiskDefWithEncryption() { + String passphraseUuid = UUID.randomUUID().toString(); + DiskDef disk = new DiskDef(); + DiskDef.LibvirtDiskEncryptDetails encryptDetails = new 
DiskDef.LibvirtDiskEncryptDetails(passphraseUuid, QemuObject.EncryptFormat.LUKS); + disk.defBlockBasedDisk("disk1", 1, DiskDef.DiskBus.VIRTIO); + disk.setLibvirtDiskEncryptDetails(encryptDetails); + String expectedXML = "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n" + + "\n"; + assertEquals(disk.toString(), expectedXML); + } + @Test public void testDiskDefWithBurst() { String filePath = "/var/lib/libvirt/images/disk.qcow2"; diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java index 23f0ff91128..37c4ec2918f 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java @@ -759,6 +759,41 @@ public class LibvirtMigrateCommandWrapperTest { assertXpath(doc, "/domain/devices/disk/driver/@type", "raw"); } + @Test + public void testReplaceStorageWithSecrets() throws Exception { + Map mapMigrateStorage = new HashMap(); + + final String xmlDesc = + "" + + " " + + " \n" + + " \n" + + " \n" + + " \n" + + " bf8621b3027c497d963b\n" + + " \n" + + "
\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " " + + ""; + + final String volumeFile = "3530f749-82fd-458e-9485-a357e6e541db"; + String newDiskPath = "/mnt/2d0435e1-99e0-4f1d-94c0-bee1f6f8b99e/" + volumeFile; + MigrateDiskInfo diskInfo = new MigrateDiskInfo("123456", DiskType.BLOCK, DriverType.RAW, Source.FILE, newDiskPath); + mapMigrateStorage.put("/mnt/07eb495b-5590-3877-9fb7-23c6e9a40d40/bf8621b3-027c-497d-963b-06319650f048", diskInfo); + final String result = libvirtMigrateCmdWrapper.replaceStorage(xmlDesc, mapMigrateStorage, false); + final String expectedSecretUuid = LibvirtComputingResource.generateSecretUUIDFromString(volumeFile); + + InputStream in = IOUtils.toInputStream(result); + DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document doc = docBuilder.parse(in); + assertXpath(doc, "/domain/devices/disk/encryption/secret/@uuid", expectedSecretUuid); + } + public void testReplaceStorageXmlDiskNotManagedStorage() throws ParserConfigurationException, TransformerException, SAXException, IOException { final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper(); String destDisk1FileName = "XXXXXXXXXXXXXX"; diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java index 4f18c38a164..6c2f560ecff 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java @@ -26,7 +26,10 @@ import static org.mockito.Mockito.when; import java.io.File; import java.io.FileFilter; +import java.util.HashMap; +import java.util.Map; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; 
import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; @@ -42,8 +45,9 @@ import org.powermock.modules.junit4.PowerMockRunner; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageLayer; +import com.cloud.utils.script.Script; -@PrepareForTest(ScaleIOUtil.class) +@PrepareForTest({ScaleIOUtil.class, Script.class}) @RunWith(PowerMockRunner.class) public class ScaleIOStoragePoolTest { @@ -57,10 +61,13 @@ public class ScaleIOStoragePoolTest { @Before public void setUp() throws Exception { final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6"; + final String systemId = "218ce1797566a00f"; final StoragePoolType type = StoragePoolType.PowerFlex; + Map details = new HashMap(); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); adapter = spy(new ScaleIOStorageAdaptor(storageLayer)); - pool = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, adapter); + pool = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, details, adapter); } @After @@ -69,28 +76,64 @@ public class ScaleIOStoragePoolTest { @Test public void testAttributes() { - assertEquals(pool.getCapacity(), 0); - assertEquals(pool.getUsed(), 0); - assertEquals(pool.getAvailable(), 0); - assertEquals(pool.getUuid(), "345fc603-2d7e-47d2-b719-a0110b3732e6"); - assertEquals(pool.getSourceHost(), "192.168.1.19"); - assertEquals(pool.getSourcePort(), 443); - assertEquals(pool.getSourceDir(), "a519be2f00000000"); - assertEquals(pool.getType(), StoragePoolType.PowerFlex); + assertEquals(0, pool.getCapacity()); + assertEquals(0, pool.getUsed()); + assertEquals(0, pool.getAvailable()); + assertEquals("345fc603-2d7e-47d2-b719-a0110b3732e6", pool.getUuid()); + assertEquals("192.168.1.19", pool.getSourceHost()); + assertEquals(443, pool.getSourcePort()); + assertEquals("a519be2f00000000", pool.getSourceDir()); + assertEquals(StoragePoolType.PowerFlex, pool.getType()); 
+ assertEquals("218ce1797566a00f", pool.getDetails().get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)); pool.setCapacity(131072); pool.setUsed(24576); pool.setAvailable(106496); - assertEquals(pool.getCapacity(), 131072); - assertEquals(pool.getUsed(), 24576); - assertEquals(pool.getAvailable(), 106496); + assertEquals(131072, pool.getCapacity()); + assertEquals(24576, pool.getUsed()); + assertEquals(106496, pool.getAvailable()); + } + + @Test + public void testSdcIdAttribute() { + final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6"; + final String systemId = "218ce1797566a00f"; + final String sdcId = "301b852c00000003"; + final StoragePoolType type = StoragePoolType.PowerFlex; + Map details = new HashMap(); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); + + PowerMockito.mockStatic(Script.class); + when(Script.runSimpleBashScript("/opt/emc/scaleio/sdc/bin/drv_cfg --query_mdms|grep 218ce1797566a00f|awk '{print $5}'")).thenReturn(sdcId); + + ScaleIOStoragePool pool1 = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, details, adapter); + assertEquals(systemId, pool1.getDetails().get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)); + assertEquals(sdcId, pool1.getDetails().get(ScaleIOGatewayClient.SDC_ID)); + } + + @Test + public void testSdcGuidAttribute() { + final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6"; + final String systemId = "218ce1797566a00f"; + final String sdcGuid = "B0E3BFB8-C20B-43BF-93C8-13339E85AA50"; + final StoragePoolType type = StoragePoolType.PowerFlex; + Map details = new HashMap(); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); + + PowerMockito.mockStatic(Script.class); + when(Script.runSimpleBashScript("/opt/emc/scaleio/sdc/bin/drv_cfg --query_mdms|grep 218ce1797566a00f|awk '{print $5}'")).thenReturn(null); + when(Script.runSimpleBashScript("/opt/emc/scaleio/sdc/bin/drv_cfg --query_guid")).thenReturn(sdcGuid); + + ScaleIOStoragePool pool1 = new 
ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, details, adapter); + assertEquals(systemId, pool1.getDetails().get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)); + assertEquals(sdcGuid, pool1.getDetails().get(ScaleIOGatewayClient.SDC_GUID)); } @Test public void testDefaults() { - assertEquals(pool.getDefaultFormat(), PhysicalDiskFormat.RAW); - assertEquals(pool.getType(), StoragePoolType.PowerFlex); + assertEquals(PhysicalDiskFormat.RAW, pool.getDefaultFormat()); + assertEquals(StoragePoolType.PowerFlex, pool.getType()); assertNull(pool.getAuthUserName()); assertNull(pool.getAuthSecret()); @@ -145,7 +188,7 @@ public class ScaleIOStoragePoolTest { disk.setSize(8192); disk.setVirtualSize(8192); - assertEquals(disk.getPath(), "/dev/disk/by-id/emc-vol-218ce1797566a00f-6c3362b500000001"); + assertEquals("/dev/disk/by-id/emc-vol-218ce1797566a00f-6c3362b500000001", disk.getPath()); when(adapter.getPhysicalDisk(volumeId, pool)).thenReturn(disk); diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupTest.java new file mode 100644 index 00000000000..e031178a465 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/CryptSetupTest.java @@ -0,0 +1,53 @@ +package org.apache.cloudstack.utils.cryptsetup; + +import org.apache.cloudstack.secret.PassphraseVO; +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Set; + +public class CryptSetupTest { + CryptSetup cryptSetup = new CryptSetup(); + + @Before + public void setup() { + Assume.assumeTrue(cryptSetup.isSupported()); + } + + 
@Test + public void cryptSetupTest() throws IOException, CryptSetupException { + Set permissions = PosixFilePermissions.fromString("rw-------"); + Path path = Files.createTempFile("cryptsetup", ".tmp",PosixFilePermissions.asFileAttribute(permissions)); + + // create a 1MB file to use as a crypt device + RandomAccessFile file = new RandomAccessFile(path.toFile(),"rw"); + file.setLength(10<<20); + file.close(); + + String filePath = path.toAbsolutePath().toString(); + PassphraseVO passphrase = new PassphraseVO(); + + cryptSetup.luksFormat(passphrase.getPassphrase(), CryptSetup.LuksType.LUKS, filePath); + + Assert.assertTrue(cryptSetup.isLuks(filePath)); + + Assert.assertTrue(Files.deleteIfExists(path)); + } + + @Test + public void cryptSetupNonLuksTest() throws IOException { + Set permissions = PosixFilePermissions.fromString("rw-------"); + Path path = Files.createTempFile("cryptsetup", ".tmp",PosixFilePermissions.asFileAttribute(permissions)); + + Assert.assertFalse(cryptSetup.isLuks(path.toAbsolutePath().toString())); + Assert.assertTrue(Files.deleteIfExists(path)); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/KeyFileTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/KeyFileTest.java new file mode 100644 index 00000000000..887aff4369d --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/cryptsetup/KeyFileTest.java @@ -0,0 +1,31 @@ +package org.apache.cloudstack.utils.cryptsetup; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +public class KeyFileTest { + + @Test + public void keyFileTest() throws IOException { + byte[] contents = "the quick brown fox".getBytes(); + KeyFile keyFile = new KeyFile(contents); + System.out.printf("New test KeyFile at %s%n", keyFile); + Path path = keyFile.getPath(); + + Assert.assertTrue(keyFile.isSet()); + + // check 
contents + byte[] fileContents = Files.readAllBytes(path); + Assert.assertArrayEquals(contents, fileContents); + + // delete file on close + keyFile.close(); + + Assert.assertFalse("key file was not cleaned up", Files.exists(path)); + Assert.assertFalse("key file is still set", keyFile.isSet()); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImageOptionsTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImageOptionsTest.java new file mode 100644 index 00000000000..d891cd696c8 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImageOptionsTest.java @@ -0,0 +1,43 @@ +package org.apache.cloudstack.utils.qemu; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class QemuImageOptionsTest { + @Parameterized.Parameters + public static Collection data() { + String imagePath = "/path/to/file"; + String secretName = "secretname"; + return Arrays.asList(new Object[][] { + { null, imagePath, null, new String[]{"--image-opts","file.filename=/path/to/file"} }, + { QemuImg.PhysicalDiskFormat.QCOW2, imagePath, null, new String[]{"--image-opts",String.format("driver=qcow2,file.filename=%s", imagePath)} }, + { QemuImg.PhysicalDiskFormat.RAW, imagePath, secretName, new String[]{"--image-opts",String.format("driver=raw,file.filename=%s", imagePath)} }, + { QemuImg.PhysicalDiskFormat.QCOW2, imagePath, secretName, new String[]{"--image-opts", String.format("driver=qcow2,encrypt.key-secret=%s,file.filename=%s", secretName, imagePath)} }, + { QemuImg.PhysicalDiskFormat.LUKS, imagePath, secretName, new String[]{"--image-opts", String.format("driver=luks,file.filename=%s,key-secret=%s", imagePath, secretName)} } + }); + } + + public QemuImageOptionsTest(QemuImg.PhysicalDiskFormat format, 
String filePath, String secretName, String[] expected) { + this.format = format; + this.filePath = filePath; + this.secretName = secretName; + this.expected = expected; + } + + private final QemuImg.PhysicalDiskFormat format; + private final String filePath; + private final String secretName; + private final String[] expected; + + @Test + public void qemuImageOptionsFileNameTest() { + QemuImageOptions options = new QemuImageOptions(format, filePath, secretName); + Assert.assertEquals(expected, options.toCommandFlag()); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java index 335a5dd9c4a..cb7f6919e36 100644 --- a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java @@ -18,11 +18,17 @@ package org.apache.cloudstack.utils.qemu; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.File; import com.cloud.utils.script.Script; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.UUID; @@ -32,7 +38,6 @@ import org.junit.Test; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.libvirt.LibvirtException; - @Ignore public class QemuImgTest { @@ -94,7 +99,34 @@ public class QemuImgTest { } @Test - public void testCreateSparseVolume() throws QemuImgException { + public void testCreateWithSecretObject() throws QemuImgException, LibvirtException { + Path testFile = Paths.get("/tmp/", UUID.randomUUID().toString()).normalize().toAbsolutePath(); + long size = 1<<30; // 1 Gi + + Map objectParams = new HashMap<>(); + 
objectParams.put(QemuObject.ObjectParameter.ID, "sec0"); + objectParams.put(QemuObject.ObjectParameter.DATA, UUID.randomUUID().toString()); + + Map options = new HashMap(); + + options.put(QemuImg.ENCRYPT_FORMAT, "luks"); + options.put(QemuImg.ENCRYPT_KEY_SECRET, "sec0"); + + List qObjects = new ArrayList<>(); + qObjects.add(new QemuObject(QemuObject.ObjectType.SECRET, objectParams)); + + QemuImgFile file = new QemuImgFile(testFile.toString(), size, PhysicalDiskFormat.QCOW2); + QemuImg qemu = new QemuImg(0); + qemu.create(file, null, options, qObjects); + + Map info = qemu.info(file); + assertEquals("yes", info.get("encrypted")); + + assertTrue(testFile.toFile().delete()); + } + + @Test + public void testCreateSparseVolume() throws QemuImgException, LibvirtException { String filename = "/tmp/" + UUID.randomUUID() + ".qcow2"; /* 10TB virtual_size */ @@ -204,7 +236,7 @@ public class QemuImgTest { } @Test(expected = QemuImgException.class) - public void testCreateAndResizeFail() throws QemuImgException { + public void testCreateAndResizeFail() throws QemuImgException, LibvirtException { String filename = "/tmp/" + UUID.randomUUID() + ".qcow2"; long startSize = 20480; @@ -224,7 +256,7 @@ public class QemuImgTest { } @Test(expected = QemuImgException.class) - public void testCreateAndResizeZero() throws QemuImgException { + public void testCreateAndResizeZero() throws QemuImgException, LibvirtException { String filename = "/tmp/" + UUID.randomUUID() + ".qcow2"; long startSize = 20480; diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuObjectTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuObjectTest.java new file mode 100644 index 00000000000..316da622b84 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuObjectTest.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.utils.qemu; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.HashMap; +import java.util.Map; + +@RunWith(MockitoJUnitRunner.class) +public class QemuObjectTest { + @Test + public void ToStringTest() { + Map params = new HashMap<>(); + params.put(QemuObject.ObjectParameter.ID, "sec0"); + params.put(QemuObject.ObjectParameter.FILE, "/dev/shm/file"); + QemuObject qObject = new QemuObject(QemuObject.ObjectType.SECRET, params); + + String[] flag = qObject.toCommandFlag(); + Assert.assertEquals(2, flag.length); + Assert.assertEquals("--object", flag[0]); + Assert.assertEquals("secret,file=/dev/shm/file,id=sec0", flag[1]); + } +} diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index fff15fd0ca9..362b8d8ca4c 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ 
b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -138,10 +138,11 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } CreateObjectCommand cmd = new CreateObjectCommand(volume.getTO()); - EndPoint ep = epSelector.select(volume); + boolean encryptionRequired = anyVolumeRequiresEncryption(volume); + EndPoint ep = epSelector.select(volume, encryptionRequired); Answer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send CreateObjectCommand, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { @@ -200,9 +201,6 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } else { result.setAnswer(answer); } - } catch (StorageUnavailableException e) { - s_logger.debug("failed to create volume", e); - errMsg = e.toString(); } catch (Exception e) { s_logger.debug("failed to create volume", e); errMsg = e.toString(); @@ -260,6 +258,8 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { + s_logger.debug(String.format("Copying volume %s(%s) to %s(%s)", srcdata.getId(), srcdata.getType(), destData.getId(), destData.getType())); + boolean encryptionRequired = anyVolumeRequiresEncryption(srcdata, destData); DataStore store = destData.getDataStore(); if (store.getRole() == DataStoreRole.Primary) { if ((srcdata.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE)) { @@ -280,13 +280,14 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri DataObject srcData = templateDataFactory.getTemplate(srcdata.getId(), 
imageStore); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, true); - EndPoint ep = epSelector.select(srcData, destData); + EndPoint ep = epSelector.select(srcData, destData, encryptionRequired); Answer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send CopyCommand, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { + s_logger.debug(String.format("Sending copy command to endpoint %s, where encryption support is %s", ep.getHostAddr(), encryptionRequired ? "required" : "not required")); answer = ep.sendMessage(cmd); } CopyCommandResult result = new CopyCommandResult("", answer); @@ -294,10 +295,10 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } else if (srcdata.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { SnapshotObjectTO srcTO = (SnapshotObjectTO) srcdata.getTO(); CopyCommand cmd = new CopyCommand(srcTO, destData.getTO(), StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(), true); - EndPoint ep = epSelector.select(srcdata, destData); + EndPoint ep = epSelector.select(srcdata, destData, encryptionRequired); CopyCmdAnswer answer = null; if (ep == null) { - String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s", encryptionRequired); s_logger.error(errMsg); answer = new CopyCmdAnswer(errMsg); } else { @@ -342,6 +343,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri @Override public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { CreateCmdResult result = null; + s_logger.debug("Taking snapshot of "+ snapshot); try { SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO(); Object payload = snapshot.getPayload(); @@ -350,10 +352,13 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri snapshotTO.setQuiescevm(snapshotPayload.getQuiescevm()); } + boolean encryptionRequired = anyVolumeRequiresEncryption(snapshot); CreateObjectCommand cmd = new CreateObjectCommand(snapshotTO); - EndPoint ep = epSelector.select(snapshot, StorageAction.TAKESNAPSHOT); + EndPoint ep = epSelector.select(snapshot, StorageAction.TAKESNAPSHOT, encryptionRequired); Answer answer = null; + s_logger.debug("Taking snapshot of "+ snapshot + " and encryption required is " + encryptionRequired); + if (ep == null) { String errMsg = "No remote endpoint to send createObjectCommand, check if host or ssvm is down?"; s_logger.error(errMsg); @@ -407,16 +412,22 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri VolumeObject vol = (VolumeObject) data; StoragePool pool = (StoragePool) data.getDataStore(); ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload(); + boolean encryptionRequired = anyVolumeRequiresEncryption(vol); + long [] endpointsToRunResize = resizeParameter.hosts; - ResizeVolumeCommand resizeCmd = - new ResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), resizeParameter.newSize, resizeParameter.shrinkOk, - resizeParameter.instanceName, vol.getChainInfo()); + // if hosts are provided, they are where the VM last ran. We can use that. 
+ if (endpointsToRunResize == null || endpointsToRunResize.length == 0) { + EndPoint ep = epSelector.select(data, encryptionRequired); + endpointsToRunResize = new long[] {ep.getId()}; + } + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), + resizeParameter.newSize, resizeParameter.shrinkOk, resizeParameter.instanceName, vol.getChainInfo(), vol.getPassphrase(), vol.getEncryptFormat()); if (pool.getParent() != 0) { resizeCmd.setContextParam(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); } CreateCmdResult result = new CreateCmdResult(null, null); try { - ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, resizeParameter.hosts, resizeCmd); + ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, endpointsToRunResize, resizeCmd); if (answer != null && answer.getResult()) { long finalSize = answer.getNewSize(); s_logger.debug("Resize: volume started at size: " + toHumanReadableSize(vol.getSize()) + " and ended at size: " + toHumanReadableSize(finalSize)); @@ -435,6 +446,8 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri } catch (Exception e) { s_logger.debug("sending resize command failed", e); result.setResult(e.toString()); + } finally { + resizeCmd.clearPassphrase(); } callback.complete(result); @@ -492,4 +505,18 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri public boolean canHostAccessStoragePool(Host host, StoragePool pool) { return true; } + + /** + * Does any object require encryption support? + */ + private boolean anyVolumeRequiresEncryption(DataObject ... 
objects) { + for (DataObject o : objects) { + if (o instanceof VolumeInfo && ((VolumeInfo) o).getPassphraseId() != null) { + return true; + } else if (o instanceof SnapshotInfo && ((SnapshotInfo) o).getBaseVolume().getPassphraseId() != null) { + return true; + } + } + return false; + } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java index f497b10127d..73b69bdef4f 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java @@ -38,6 +38,8 @@ public interface ScaleIOGatewayClient { String GATEWAY_API_PASSWORD = "powerflex.gw.password"; String STORAGE_POOL_NAME = "powerflex.storagepool.name"; String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; + String SDC_ID = "powerflex.sdc.id"; + String SDC_GUID = "powerflex.sdc.guid"; static ScaleIOGatewayClient getClient(final String url, final String username, final String password, final boolean validateCertificate, final int timeout, final int maxConnections) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { @@ -81,8 +83,10 @@ public interface ScaleIOGatewayClient { // SDC APIs List listSdcs(); Sdc getSdc(String sdcId); + String getSdcIdByGuid(String sdcGuid); Sdc getSdcByIp(String ipAddress); Sdc getConnectedSdcByIp(String ipAddress); - List listConnectedSdcIps(); - boolean isSdcConnected(String ipAddress); + boolean haveConnectedSdcs(); + boolean isSdcConnected(String sdcId); + boolean isSdcConnectedByIP(String ipAddress); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index fa195414b67..5566dbcdb0d 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -1013,6 +1013,24 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { return get("/instances/Sdc::" + sdcId, Sdc.class); } + @Override + public String getSdcIdByGuid(String sdcGuid) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcGuid), "SDC Guid cannot be null"); + + List sdcs = listSdcs(); + if (sdcs == null) { + return null; + } + + for (Sdc sdc : sdcs) { + if (sdcGuid.equalsIgnoreCase(sdc.getSdcGuid())) { + return sdc.getId(); + } + } + + return null; + } + @Override public Sdc getSdcByIp(String ipAddress) { Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); @@ -1035,28 +1053,35 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { } @Override - public List listConnectedSdcIps() { - List sdcIps = new ArrayList<>(); + public boolean haveConnectedSdcs() { List sdcs = listSdcs(); if(sdcs != null) { for (Sdc sdc : sdcs) { if (MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { - sdcIps.add(sdc.getSdcIp()); + return true; } } } - return sdcIps; + return false; } @Override - public boolean isSdcConnected(String ipAddress) { + public boolean isSdcConnected(String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "SDC Id cannot be null"); + + Sdc sdc = getSdc(sdcId); + return (sdc != null && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())); + } + + @Override + public boolean isSdcConnectedByIP(String ipAddress) { Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); List 
sdcs = listSdcs(); - if(sdcs != null) { + if (sdcs != null) { for (Sdc sdc : sdcs) { - if (ipAddress.equalsIgnoreCase(sdc.getSdcIp()) && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + if (sdc != null && ipAddress.equalsIgnoreCase(sdc.getSdcIp()) && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { return true; } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 318e82de04d..8229b21a51a 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -41,7 +41,7 @@ import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; @@ -54,7 +54,10 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.commons.collections.CollectionUtils; +import 
org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -64,6 +67,7 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; import com.cloud.host.Host; +import com.cloud.host.dao.HostDao; import com.cloud.server.ManagementServerImpl; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ResizeVolumePayload; @@ -71,11 +75,13 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; @@ -96,6 +102,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject + private StoragePoolHostDao storagePoolHostDao; + @Inject private VolumeDao volumeDao; @Inject private VolumeDetailsDao volumeDetailsDao; @@ -109,6 +117,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { private AlertManager alertMgr; @Inject private ConfigurationDao configDao; + @Inject + private HostDao hostDao; public ScaleIOPrimaryDataStoreDriver() { @@ -144,38 +154,38 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT; } - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if 
(StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdc.getId(), iopsLimit, bandwidthLimitInKbps); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); throw new CloudRuntimeException("Unable to grant access to template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdc.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; LOGGER.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final 
String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if (StringUtils.isBlank(sdcId)) { alertHostSdcDisconnection(host); throw new CloudRuntimeException("Unable to grant access to snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdc.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } return false; @@ -184,6 +194,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } + private boolean grantAccess(DataObject dataObject, EndPoint ep, DataStore dataStore) { + Host host = hostDao.findById(ep.getId()); + return grantAccess(dataObject, host, dataStore); + } + @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { try { @@ -191,41 +206,64 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { final VolumeVO volume = volumeDao.findById(dataObject.getId()); LOGGER.debug("Revoking access for PowerFlex volume: " + volume.getPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if (StringUtils.isBlank(sdcId)) { throw new CloudRuntimeException("Unable to revoke access for volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdc.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = 
vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); LOGGER.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if (StringUtils.isBlank(sdcId)) { throw new CloudRuntimeException("Unable to revoke access for template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdc.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; LOGGER.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath()); - final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); - final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); - if (sdc == null) { + final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); + if (StringUtils.isBlank(sdcId)) { throw new CloudRuntimeException("Unable to revoke access for snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); } - client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdc.getId()); + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } } catch (Exception e) { LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e); } } + private void revokeAccess(DataObject dataObject, EndPoint ep, DataStore dataStore) { + Host host = 
hostDao.findById(ep.getId()); + revokeAccess(dataObject, host, dataStore); + } + + private String getConnectedSdc(long poolId, long hostId) { + try { + StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(poolId, hostId); + if (poolHostVO == null) { + return null; + } + + final ScaleIOGatewayClient client = getScaleIOClient(poolId); + if (client.isSdcConnected(poolHostVO.getLocalPath())) { + return poolHostVO.getLocalPath(); + } + } catch (Exception e) { + LOGGER.warn("Couldn't check SDC connection for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + } + + return null; + } + @Override public long getUsedBytes(StoragePool storagePool) { long usedSpaceBytes = 0; @@ -393,7 +431,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } - private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { + private CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId) { LOGGER.debug("Creating PowerFlex volume"); StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); @@ -426,7 +464,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { volume.setPoolType(Storage.StoragePoolType.PowerFlex); volume.setFormat(Storage.ImageFormat.RAW); volume.setPoolId(storagePoolId); - volumeDao.update(volume.getId(), volume); + VolumeObject createdObject = VolumeObject.getVolumeObject(volumeInfo.getDataStore(), volume); + createdObject.update(); long capacityBytes = storagePool.getCapacityBytes(); long usedBytes = storagePool.getUsedBytes(); @@ -434,7 +473,35 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes); storagePoolDao.update(storagePoolId, storagePool); - return volumePath; + CreateObjectAnswer answer = new CreateObjectAnswer(createdObject.getTO()); + + // if volume needs to be set up with encryption, do it now. 
+ if (anyVolumeRequiresEncryption(volumeInfo)) { + LOGGER.debug(String.format("Setting up encryption for volume %s", volumeInfo)); + VolumeObjectTO prepVolume = (VolumeObjectTO) createdObject.getTO(); + prepVolume.setPath(volumePath); + prepVolume.setUuid(volumePath); + CreateObjectCommand cmd = new CreateObjectCommand(prepVolume); + EndPoint ep = selector.select(volumeInfo, true); + if (ep == null) { + throw new CloudRuntimeException("No remote endpoint to send PowerFlex volume encryption preparation"); + } else { + try { + grantAccess(createdObject, ep, volumeInfo.getDataStore()); + answer = (CreateObjectAnswer) ep.sendMessage(cmd); + if (!answer.getResult()) { + throw new CloudRuntimeException("Failed to set up encryption on PowerFlex volume: " + answer.getDetails()); + } + } finally { + revokeAccess(createdObject, ep, volumeInfo.getDataStore()); + prepVolume.clearPassphrase(); + } + } + } else { + LOGGER.debug(String.format("No encryption configured for data volume %s", volumeInfo)); + } + + return answer; } catch (Exception e) { String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage(); LOGGER.warn(errMsg); @@ -490,16 +557,21 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { String scaleIOVolumePath = null; String errMsg = null; + Answer answer = new Answer(null, false, "not started"); try { if (dataObject.getType() == DataObjectType.VOLUME) { LOGGER.debug("createAsync - creating volume"); - scaleIOVolumePath = createVolume((VolumeInfo) dataObject, dataStore.getId()); + CreateObjectAnswer createAnswer = createVolume((VolumeInfo) dataObject, dataStore.getId()); + scaleIOVolumePath = createAnswer.getData().getPath(); + answer = createAnswer; } else if (dataObject.getType() == DataObjectType.TEMPLATE) { LOGGER.debug("createAsync - creating template"); scaleIOVolumePath = 
createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); + answer = new Answer(null, true, "created template"); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; LOGGER.error(errMsg); + answer = new Answer(null, false, errMsg); } } catch (Exception ex) { errMsg = ex.getMessage(); @@ -507,10 +579,11 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { if (callback == null) { throw ex; } + answer = new Answer(null, false, errMsg); } if (callback != null) { - CreateCmdResult result = new CreateCmdResult(scaleIOVolumePath, new Answer(null, errMsg == null, errMsg)); + CreateCmdResult result = new CreateCmdResult(scaleIOVolumePath, answer); result.setResult(errMsg); callback.complete(result); } @@ -585,6 +658,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { Answer answer = null; String errMsg = null; + CopyCommandResult result; try { DataStore srcStore = srcData.getDataStore(); @@ -592,51 +666,72 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { if (srcStore.getRole() == DataStoreRole.Primary && (destStore.getRole() == DataStoreRole.Primary && destData.getType() == DataObjectType.VOLUME)) { if (srcData.getType() == DataObjectType.TEMPLATE) { answer = copyTemplateToVolume(srcData, destData, destHost); - if (answer == null) { - errMsg = "No answer for copying template to PowerFlex volume"; - } else if (!answer.getResult()) { - errMsg = answer.getDetails(); - } } else if (srcData.getType() == DataObjectType.VOLUME) { if (isSameScaleIOStorageInstance(srcStore, destStore)) { answer = migrateVolume(srcData, destData); } else { answer = copyVolume(srcData, destData, destHost); } - - if (answer == null) { - errMsg = "No answer for migrate PowerFlex volume"; - } else if (!answer.getResult()) { - errMsg = 
answer.getDetails(); - } } else { errMsg = "Unsupported copy operation from src object: (" + srcData.getType() + ", " + srcData.getDataStore() + "), dest object: (" + destData.getType() + ", " + destData.getDataStore() + ")"; LOGGER.warn(errMsg); + answer = new Answer(null, false, errMsg); } } else { errMsg = "Unsupported copy operation"; + LOGGER.warn(errMsg); + answer = new Answer(null, false, errMsg); } } catch (Exception e) { LOGGER.debug("Failed to copy due to " + e.getMessage(), e); errMsg = e.toString(); + answer = new Answer(null, false, errMsg); } - CopyCommandResult result = new CopyCommandResult(null, answer); - result.setResult(errMsg); + result = new CopyCommandResult(null, answer); callback.complete(result); } + /** + * Responsible for copying template on ScaleIO primary to root disk + * @param srcData dataobject representing the template + * @param destData dataobject representing the target root disk + * @param destHost host to use for copy + * @return answer + */ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Host destHost) { + /* If encryption is requested, since the template object is not encrypted we need to grow the destination disk to accommodate the new headers. + * Data stores of file type happen automatically, but block device types have to handle it. Unfortunately for ScaleIO this means we add a whole 8GB to + * the original size, but only if we are close to an 8GB boundary. + */ + LOGGER.debug(String.format("Copying template %s to volume %s", srcData.getId(), destData.getId())); + VolumeInfo destInfo = (VolumeInfo) destData; + boolean encryptionRequired = anyVolumeRequiresEncryption(destData); + if (encryptionRequired) { + if (needsExpansionForEncryptionHeader(srcData.getSize(), destData.getSize())) { + long newSize = destData.getSize() + (1<<30); + LOGGER.debug(String.format("Destination volume %s(%s) is configured for encryption. 
Resizing to fit headers, new size %s will be rounded up to nearest 8Gi", destInfo.getId(), destData.getSize(), newSize)); + ResizeVolumePayload p = new ResizeVolumePayload(newSize, destInfo.getMinIops(), destInfo.getMaxIops(), + destInfo.getHypervisorSnapshotReserve(), false, destInfo.getAttachedVmName(), null, true); + destInfo.addPayload(p); + resizeVolume(destInfo); + } else { + LOGGER.debug(String.format("Template %s has size %s, ok for volume %s with size %s", srcData.getId(), srcData.getSize(), destData.getId(), destData.getSize())); + } + } else { + LOGGER.debug(String.format("Destination volume is not configured for encryption, skipping encryption prep. Volume: %s", destData.getId())); + } + // Copy PowerFlex/ScaleIO template to volume LOGGER.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; - EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData.getDataStore()); + EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData, encryptionRequired); if (ep == null) { - String errorMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errorMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s", encryptionRequired); LOGGER.error(errorMsg); answer = new Answer(cmd, false, errorMsg); } else { @@ -655,9 +750,10 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), copyVolumeWait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; - EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData.getDataStore()); + boolean encryptionRequired = anyVolumeRequiresEncryption(srcData, destData); + EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData, encryptionRequired); if (ep == null) { - String errorMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + String errorMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); LOGGER.error(errorMsg); answer = new Answer(cmd, false, errorMsg); } else { @@ -930,8 +1026,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } try { + StoragePoolHostVO poolHostVO = storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); + if (poolHostVO == null) { + return false; + } final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); - return client.isSdcConnected(host.getPrivateIpAddress()); + return client.isSdcConnected(poolHostVO.getLocalPath()); } catch (Exception e) { LOGGER.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); return false; @@ -947,4 +1047,27 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC 
disconnected on host: " + host.getUuid(), msg); } + + /** + * Does the destination size fit the source size plus an encryption header? + * @param srcSize size of source + * @param dstSize size of destination + * @return true if resize is required + */ + private boolean needsExpansionForEncryptionHeader(long srcSize, long dstSize) { + int headerSize = 32<<20; // ensure we have 32MiB for encryption header + return srcSize + headerSize > dstSize; + } + + /** + * Does any object require encryption support? + */ + private boolean anyVolumeRequiresEncryption(DataObject ... objects) { + for (DataObject o : objects) { + if (o instanceof VolumeInfo && ((VolumeInfo) o).getPassphraseId() != null) { + return true; + } + } + return false; + } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index edebdac7929..65831e4ec3d 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -258,22 +258,9 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc throw new CloudRuntimeException("Unsupported hypervisor type: " + cluster.getHypervisorType().toString()); } - List connectedSdcIps = null; - try { - ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao); - connectedSdcIps = client.listConnectedSdcIps(); - } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - LOGGER.error("Failed to create storage pool", e); - throw new CloudRuntimeException("Failed to establish connection with PowerFlex 
Gateway to create storage pool"); - } - - if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { - LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); - throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); - } + checkConnectedSdcs(dataStore.getId()); PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; - List hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); if (hostsInCluster.isEmpty()) { @@ -285,8 +272,7 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc List poolHosts = new ArrayList(); for (HostVO host : hostsInCluster) { try { - if (connectedSdcIps.contains(host.getPrivateIpAddress())) { - storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + if (storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId())) { poolHosts.add(host); } } catch (Exception e) { @@ -315,27 +301,14 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); } - List connectedSdcIps = null; - try { - ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao); - connectedSdcIps = client.listConnectedSdcIps(); - } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - LOGGER.error("Failed to create storage pool", e); - throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool"); - } - - if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { - LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); - throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); - } + 
checkConnectedSdcs(dataStore.getId()); LOGGER.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - if (connectedSdcIps.contains(host.getPrivateIpAddress())) { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + if (storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId())) { poolHosts.add(host); } } catch (Exception e) { @@ -352,6 +325,22 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc return true; } + private void checkConnectedSdcs(Long dataStoreId) { + boolean haveConnectedSdcs = false; + try { + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStoreId, storagePoolDetailsDao); + haveConnectedSdcs = client.haveConnectedSdcs(); + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error(String.format("Failed to create storage pool for datastore: %s", dataStoreId), e); + throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to create storage pool for datastore: %s", dataStoreId)); + } + + if (!haveConnectedSdcs) { + LOGGER.debug(String.format("No connected SDCs found for the PowerFlex storage pool of datastore: %s", dataStoreId)); + throw new CloudRuntimeException(String.format("Failed to create storage pool as connected SDCs not found for datastore: %s", dataStoreId)); + } + } + @Override public boolean maintain(DataStore store) { storagePoolAutomation.maintain(store); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index f6722314a5c..475e2b2c6c5 100644 --- 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -21,6 +21,8 @@ package org.apache.cloudstack.storage.datastore.provider; import java.net.URISyntaxException; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.Map; import javax.inject.Inject; @@ -30,6 +32,8 @@ import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -69,7 +73,41 @@ public class ScaleIOHostListener implements HypervisorHostListener { return false; } - if (!isHostSdcConnected(host.getPrivateIpAddress(), poolId)) { + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + + String systemId = _storagePoolDetailsDao.findDetail(poolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + if (systemId == null) { + throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool " + storagePool.getName()); + } + Map details = new HashMap<>(); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId); + + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool, storagePool.getPath(), details); + ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, hostId); + Map poolDetails = answer.getPoolInfo().getDetails(); + if (MapUtils.isEmpty(poolDetails)) { + String msg = "SDC details not found 
on the host: " + hostId + ", (re)install SDC and restart agent"; + s_logger.warn(msg); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC not found on host: " + host.getUuid(), msg); + return false; + } + + String sdcId = null; + if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_ID)) { + sdcId = poolDetails.get(ScaleIOGatewayClient.SDC_ID); + } else if (poolDetails.containsKey(ScaleIOGatewayClient.SDC_GUID)) { + String sdcGuid = poolDetails.get(ScaleIOGatewayClient.SDC_GUID); + sdcId = getHostSdcId(sdcGuid, poolId); + } + + if (StringUtils.isBlank(sdcId)) { + String msg = "Couldn't retrieve SDC details from the host: " + hostId + ", (re)install SDC and restart agent"; + s_logger.warn(msg); + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg); + return false; + } + + if (!isHostSdcConnected(sdcId, poolId)) { s_logger.warn("SDC not connected on the host: " + hostId); String msg = "SDC not connected on the host: " + hostId + ", reconnect the SDC to MDM and restart agent"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg); @@ -78,27 +116,39 @@ public class ScaleIOHostListener implements HypervisorHostListener { StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId); if (storagePoolHost == null) { - storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + storagePoolHost = new StoragePoolHostVO(poolId, hostId, sdcId); _storagePoolHostDao.persist(storagePoolHost); + } else { + storagePoolHost.setLocalPath(sdcId); + _storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); } - StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); - 
sendModifyStoragePoolCommand(cmd, storagePool, hostId); + s_logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId); return true; } - private boolean isHostSdcConnected(String hostIpAddress, long poolId) { + private String getHostSdcId(String sdcGuid, long poolId) { + try { + s_logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); + return client.getSdcIdByGuid(sdcGuid); + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + s_logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); + throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId)); + } + } + + private boolean isHostSdcConnected(String sdcId, long poolId) { try { ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); - return client.isSdcConnected(hostIpAddress); + return client.isSdcConnected(sdcId); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { s_logger.error("Failed to check host sdc connection", e); throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host sdc connection"); } } - private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { + private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { @@ -116,7 +166,7 @@ public class ScaleIOHostListener implements HypervisorHostListener { assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " 
Host = " + hostId; - s_logger.info("Connection established between storage pool " + storagePool + " and host: " + hostId); + return (ModifyStoragePoolAnswer) answer; } @Override diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java index 0180f17cdd7..e9fb01b392a 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.datastore.util; import org.apache.log4j.Logger; +import com.cloud.utils.UuidUtils; import com.cloud.utils.script.Script; import com.google.common.base.Strings; @@ -47,11 +48,31 @@ public class ScaleIOUtil { private static final String SDC_HOME_PATH = getSdcHomePath(); private static final String RESCAN_CMD = "drv_cfg --rescan"; + + /** + * Cmd for querying volumes in SDC + * Sample output for cmd: drv_cfg --query_vols: + * Retrieved 2 volume(s) + * VOL-ID 6c33633100000009 MDM-ID 218ce1797566a00f + * VOL-ID 6c3362a30000000a MDM-ID 218ce1797566a00f + */ private static final String QUERY_VOLUMES_CMD = "drv_cfg --query_vols"; - // Sample output for cmd: drv_cfg --query_vols: - // Retrieved 2 volume(s) - // VOL-ID 6c33633100000009 MDM-ID 218ce1797566a00f - // VOL-ID 6c3362a30000000a MDM-ID 218ce1797566a00f + + /** + * Cmd for querying guid in SDC + * Sample output for cmd: drv_cfg --query_guid: + * B0E3BFB8-C20B-43BF-93C8-13339E85AA50 + */ + private static final String QUERY_GUID_CMD = "drv_cfg --query_guid"; + + /** + * Cmd for querying MDMs in SDC + * Sample output for cmd: drv_cfg --query_mdms: + * Retrieved 2 mdm(s) + * MDM-ID 3ef46cbf2aaf5d0f SDC ID 6b18479c00000003 INSTALLATION ID 68ab55462cbb3ae4 IPs [0]-x.x.x.x [1]-x.x.x.x + * MDM-ID 
2e706b2740ec200f SDC ID 301b852c00000003 INSTALLATION ID 33f8662e7a5c1e6c IPs [0]-x.x.x.x [1]-x.x.x.x + */ + private static final String QUERY_MDMS_CMD = "drv_cfg --query_mdms"; public static String getSdcHomePath() { String sdcHomePath = DEFAULT_SDC_HOME_PATH; @@ -97,6 +118,51 @@ public class ScaleIOUtil { return result; } + public static String getSdcGuid() { + String queryGuidCmd = ScaleIOUtil.SDC_HOME_PATH + "/bin/" + ScaleIOUtil.QUERY_GUID_CMD; + String result = Script.runSimpleBashScript(queryGuidCmd); + if (result == null) { + LOGGER.warn("Failed to get SDC guid"); + return null; + } + + if (result.isEmpty()) { + LOGGER.warn("No SDC guid retrieved"); + return null; + } + + if (!UuidUtils.validateUUID(result)) { + LOGGER.warn("Invalid SDC guid: " + result); + return null; + } + + return result; + } + + public static String getSdcId(String mdmId) { + //query_mdms outputs "MDM-ID SDC ID INSTALLATION ID IPs [0]-x.x.x.x [1]-x.x.x.x" for a MDM with ID: + String queryMdmsCmd = ScaleIOUtil.SDC_HOME_PATH + "/bin/" + ScaleIOUtil.QUERY_MDMS_CMD; + queryMdmsCmd += "|grep " + mdmId + "|awk '{print $5}'"; + String result = Script.runSimpleBashScript(queryMdmsCmd); + if (result == null) { + LOGGER.warn("Failed to get SDC Id, for the MDM: " + mdmId); + return null; + } + + if (result.isEmpty()) { + LOGGER.warn("No SDC Id retrieved, for the MDM: " + mdmId); + return null; + } + + String sdcIdRegEx = "^[0-9a-fA-F]{16}$"; + if (!result.matches(sdcIdRegEx)) { + LOGGER.warn("Invalid SDC Id: " + result + " retrieved, for the MDM: " + mdmId); + return null; + } + + return result; + } + public static final String getVolumePath(String volumePathWithName) { if (Strings.isNullOrEmpty(volumePathWithName)) { return volumePathWithName; diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java 
b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index eed82ff0ed2..6cc7b874557 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -22,7 +22,6 @@ package org.apache.cloudstack.storage.datastore.lifecycle; import static com.google.common.truth.Truth.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -140,11 +139,7 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); when(ScaleIOGatewayClientConnectionPool.getInstance().getClient(1L, storagePoolDetailsDao)).thenReturn(client); - List connectedSdcIps = new ArrayList<>(); - connectedSdcIps.add("192.168.1.1"); - connectedSdcIps.add("192.168.1.2"); - when(client.listConnectedSdcIps()).thenReturn(connectedSdcIps); - when(client.isSdcConnected(anyString())).thenReturn(true); + when(client.haveConnectedSdcs()).thenReturn(true); final ZoneScope scope = new ZoneScope(1L); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index bedfd30e140..1113e873c09 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -2925,6 +2925,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Boolean isRootAdmin = _accountMgr.isRootAdmin(account.getAccountId()); Boolean isRecursive = 
cmd.isRecursive(); Long zoneId = cmd.getZoneId(); + Boolean encrypt = cmd.getEncrypt(); // Keeping this logic consistent with domain specific zones // if a domainId is provided, we just return the disk offering // associated with this domain @@ -2971,6 +2972,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.addAnd("name", SearchCriteria.Op.EQ, name); } + if (encrypt != null) { + sc.addAnd("encrypt", SearchCriteria.Op.EQ, encrypt); + } + if (zoneId != null) { SearchBuilder sb = _diskOfferingJoinDao.createSearchBuilder(); sb.and("zoneId", sb.entity().getZoneId(), Op.FIND_IN_SET); @@ -3055,6 +3060,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Integer cpuNumber = cmd.getCpuNumber(); Integer memory = cmd.getMemory(); Integer cpuSpeed = cmd.getCpuSpeed(); + Boolean encryptRoot = cmd.getEncryptRoot(); SearchCriteria sc = _srvOfferingJoinDao.createSearchCriteria(); if (!_accountMgr.isRootAdmin(caller.getId()) && isSystem) { @@ -3159,6 +3165,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.addAnd("systemUse", SearchCriteria.Op.EQ, isSystem); } + if (encryptRoot != null) { + sc.addAnd("encryptRoot", SearchCriteria.Op.EQ, encryptRoot); + } + if (name != null) { sc.addAnd("name", SearchCriteria.Op.EQ, name); } diff --git a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index 002f6226fd4..7a59c9cbbbd 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -107,6 +107,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase filteredDomainIds = filterChildSubDomains(domainIds); @@ -2955,6 +2955,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati offering.setCustomizedIops(isCustomizedIops); 
offering.setMinIops(minIops); offering.setMaxIops(maxIops); + offering.setEncrypt(encryptRoot); setBytesRate(offering, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength); setIopsRate(offering, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength); @@ -3268,7 +3269,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode, final Map details, final Long storagePolicyID) { + final Integer hypervisorSnapshotReserve, String cacheMode, final Map details, final Long storagePolicyID, final boolean encrypt) { long diskSize = 0;// special case for custom disk offerings long maxVolumeSizeInGb = VolumeOrchestrationService.MaxVolumeSize.value(); if (numGibibytes != null && numGibibytes <= 0) { @@ -3350,6 +3351,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("If provided, Hypervisor Snapshot Reserve must be greater than or equal to 0."); } + newDiskOffering.setEncrypt(encrypt); newDiskOffering.setHypervisorSnapshotReserve(hypervisorSnapshotReserve); CallContext.current().setEventDetails("Disk offering id=" + newDiskOffering.getId()); @@ -3364,6 +3366,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } + if (MapUtils.isNotEmpty(details)) { details.forEach((key, value) -> { boolean displayDetail = !key.equals(Volume.BANDWIDTH_LIMIT_IN_MBPS) && !key.equals(Volume.IOPS_LIMIT); @@ -3459,6 +3462,7 @@ public class ConfigurationManagerImpl 
extends ManagerBase implements Configurati final Long iopsWriteRateMaxLength = cmd.getIopsWriteRateMaxLength(); final Integer hypervisorSnapshotReserve = cmd.getHypervisorSnapshotReserve(); final String cacheMode = cmd.getCacheMode(); + final boolean encrypt = cmd.getEncrypt(); validateMaxRateEqualsOrGreater(iopsReadRate, iopsReadRateMax, IOPS_READ_RATE); validateMaxRateEqualsOrGreater(iopsWriteRate, iopsWriteRateMax, IOPS_WRITE_RATE); @@ -3472,7 +3476,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength, - hypervisorSnapshotReserve, cacheMode, details, storagePolicyId); + hypervisorSnapshotReserve, cacheMode, details, storagePolicyId, encrypt); } /** diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 3dc45b68584..aa685b74d14 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -277,7 +277,7 @@ StateListener, Configurable { long ram_requested = offering.getRamSize() * 1024L * 1024L; VirtualMachine vm = vmProfile.getVirtualMachine(); DataCenter dc = _dcDao.findById(vm.getDataCenterId()); - + boolean volumesRequireEncryption = anyVolumeRequiresEncryption(_volsDao.findByInstance(vm.getId())); if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { checkForNonDedicatedResources(vmProfile, dc, avoids); @@ -299,7 +299,7 @@ StateListener, Configurable { if (plan.getHostId() != null && haVmTag == null) { Long hostIdSpecified = plan.getHostId(); if 
(s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + hostIdSpecified); + s_logger.debug("DeploymentPlan has host_id specified, choosing this host: " + hostIdSpecified); } HostVO host = _hostDao.findById(hostIdSpecified); if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { @@ -340,6 +340,14 @@ StateListener, Configurable { Map> suitableVolumeStoragePools = result.first(); List readyAndReusedVolumes = result.second(); + _hostDao.loadDetails(host); + if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { + s_logger.warn(String.format("VM's volumes require encryption support, and provided host %s can't handle it", host)); + return null; + } else { + s_logger.debug(String.format("Volume encryption requirements are met by provided host %s", host)); + } + // choose the potential pool for this VM for this host if (!suitableVolumeStoragePools.isEmpty()) { List suitableHosts = new ArrayList(); @@ -405,6 +413,8 @@ StateListener, Configurable { s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); HostVO host = _hostDao.findById(vm.getLastHostId()); + _hostDao.loadHostTags(host); + _hostDao.loadDetails(host); ServiceOfferingDetailsVO offeringDetails = null; if (host == null) { s_logger.debug("The last host of this VM cannot be found"); @@ -422,6 +432,8 @@ StateListener, Configurable { if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ s_logger.debug("The last host of this VM does not have required GPU devices available"); } + } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { + s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); } else { if (host.getStatus() 
== Status.Up) { if (checkVmProfileAndHost(vmProfile, host)) { @@ -526,14 +538,12 @@ StateListener, Configurable { resetAvoidSet(plannerAvoidOutput, plannerAvoidInput); - dest = - checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput); + dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput); if (dest != null) { return dest; } // reset the avoid input to the planners resetAvoidSet(avoids, plannerAvoidOutput); - } else { return null; } @@ -543,6 +553,13 @@ StateListener, Configurable { long hostId = dest.getHost().getId(); avoids.addHost(dest.getHost().getId()); + if (volumesRequireEncryption && !Boolean.parseBoolean(_hostDetailsDao.findDetail(hostId, Host.HOST_VOLUME_ENCRYPTION).getValue())) { + s_logger.warn(String.format("VM's volumes require encryption support, and the planner-provided host %s can't handle it", dest.getHost())); + continue; + } else { + s_logger.debug(String.format("VM's volume encryption requirements are met by host %s", dest.getHost())); + } + if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { // found destination return dest; @@ -557,10 +574,18 @@ StateListener, Configurable { } } } - return dest; } + protected boolean anyVolumeRequiresEncryption(List volumes) { + for (Volume volume : volumes) { + if (volume.getPassphraseId() != null) { + return true; + } + } + return false; + } + private boolean isDeployAsIs(VirtualMachine vm) { long templateId = vm.getTemplateId(); VMTemplateVO template = templateDao.findById(templateId); @@ -667,7 +692,7 @@ StateListener, Configurable { return null; } - private boolean checkVmProfileAndHost(final VirtualMachineProfile vmProfile, final HostVO host) { + protected boolean checkVmProfileAndHost(final VirtualMachineProfile vmProfile, final HostVO host) { ServiceOffering offering = 
vmProfile.getServiceOffering(); if (offering.getHostTag() != null) { _hostDao.loadHostTags(host); @@ -880,14 +905,13 @@ StateListener, Configurable { } @DB - private boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { + protected boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerResourceUsage resourceUsageRequired) { // TODO Auto-generated method stub // check if this host has been picked up by some other planner // exclusively // if planner can work with shared host, check if this host has // been marked as 'shared' // else if planner needs dedicated host, - PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); if (reservationEntry != null) { final long id = reservationEntry.getId(); @@ -1225,7 +1249,6 @@ StateListener, Configurable { if (!suitableVolumeStoragePools.isEmpty()) { Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired, readyAndReusedVolumes, plan.getPreferredHosts(), vmProfile.getVirtualMachine()); - if (potentialResources != null) { Host host = _hostDao.findById(potentialResources.first().getId()); Map storageVolMap = potentialResources.second(); @@ -1414,6 +1437,7 @@ StateListener, Configurable { List allVolumes = new ArrayList<>(); allVolumes.addAll(volumesOrderBySizeDesc); + for (StoragePool storagePool : suitablePools) { haveEnoughSpace = false; hostCanAccessPool = false; @@ -1495,12 +1519,22 @@ StateListener, Configurable { } } - if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { + HostVO potentialHostVO = _hostDao.findById(potentialHost.getId()); + _hostDao.loadDetails(potentialHostVO); + + boolean hostHasEncryption = Boolean.parseBoolean(potentialHostVO.getDetail(Host.HOST_VOLUME_ENCRYPTION)); + boolean hostMeetsEncryptionRequirements = !anyVolumeRequiresEncryption(new 
ArrayList<>(volumesOrderBySizeDesc)) || hostHasEncryption; + boolean plannerUsageFits = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); + + if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && plannerUsageFits) { s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); volumeAllocationMap.clear(); return new Pair>(potentialHost, storage); } else { + if (!hostMeetsEncryptionRequirements) { + s_logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); + } avoid.addHost(potentialHost.getId()); } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 400f9534996..3359146d25d 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -45,11 +45,6 @@ import java.util.stream.Collectors; import javax.inject.Inject; -import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; -import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; -import com.cloud.network.router.VirtualNetworkApplianceManager; -import com.cloud.server.StatsCollector; -import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; @@ -124,6 +119,8 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; +import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; import 
com.cloud.agent.api.GetVolumeStatsAnswer; @@ -175,6 +172,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.network.router.VirtualNetworkApplianceManager; import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.org.Grouping; @@ -182,6 +180,7 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceState; import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementServer; +import com.cloud.server.StatsCollector; import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; @@ -199,6 +198,7 @@ import com.cloud.storage.listener.StoragePoolMonitor; import com.cloud.storage.listener.VolumeStateListener; import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate; +import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.ResourceLimitService; @@ -1095,14 +1095,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { + public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); - 
listener.hostConnect(hostId, pool.getId()); + return listener.hostConnect(hostId, pool.getId()); } @Override diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index a9f95e6b6fe..a8c22e8cebc 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -73,6 +73,7 @@ import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.jobs.JobInfo; import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; +import org.apache.cloudstack.secret.dao.PassphraseDao; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.DettachCommand; @@ -120,6 +121,7 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; +import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; @@ -291,6 +293,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic public TaggedResourceService taggedResourceService; @Inject VirtualMachineManager virtualMachineManager; + @Inject + PassphraseDao _passphraseDao; protected Gson _gson; @@ -755,6 +759,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic parentVolume = _volsDao.findByIdIncludingRemoved(snapshotCheck.getVolumeId()); + // Don't support creating templates from encrypted volumes (yet) + if (parentVolume.getPassphraseId() != null) { + throw new UnsupportedOperationException("Cannot create new volumes from encrypted volume snapshots"); + } + if (zoneId == null) { // if zoneId is not provided, we 
default to create volume in the same zone as the snapshot zone. zoneId = snapshotCheck.getDataCenterId(); @@ -854,6 +863,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } volume = _volsDao.persist(volume); + if (cmd.getSnapshotId() == null && displayVolume) { // for volume created from snapshot, create usage event after volume creation UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size, @@ -1059,6 +1069,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException("Requested disk offering has been removed."); } + if (newDiskOffering.getEncrypt() != diskOffering.getEncrypt()) { + throw new InvalidParameterValueException( + String.format("Current disk offering's encryption(%s) does not match target disk offering's encryption(%s)", diskOffering.getEncrypt(), newDiskOffering.getEncrypt()) + ); + } + if (diskOffering.getTags() != null) { if (!com.cloud.utils.StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) { throw new InvalidParameterValueException("The tags on the new and old disk offerings must match."); @@ -1763,6 +1779,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume."); } + HypervisorType rootDiskHyperType = vm.getHypervisorType(); + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId()); + if (diskOffering.getEncrypt() && rootDiskHyperType != HypervisorType.KVM) { + throw new InvalidParameterValueException("Volume's disk offering has encryption enabled, but volume encryption is not supported for hypervisor type " + rootDiskHyperType); + } + // Check that the device ID is valid if (deviceId != null) { // validate ROOT volume type @@ -1794,7 
+1816,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // offering not allowed DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId()); if (!dataCenter.isLocalStorageEnabled()) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId()); if (diskOffering.isUseLocalStorage()) { throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it"); } @@ -1829,7 +1850,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } - HypervisorType rootDiskHyperType = vm.getHypervisorType(); HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId()); StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); @@ -2364,6 +2384,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic vm = _vmInstanceDao.findById(instanceId); } + if (vol.getPassphraseId() != null) { + throw new InvalidParameterValueException("Migration of encrypted volumes is unsupported"); + } + // Check that Vm to which this volume is attached does not have VM Snapshots // OfflineVmwareMigration: consider if this is needed and desirable if (vm != null && _vmSnapshotDao.findByVm(vm.getId()).size() > 0) { @@ -2806,6 +2830,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". 
Cannot take snapshot."); } + if (volume.getEncryptFormat() != null && volume.getAttachedVM() != null && volume.getAttachedVM().getState() != State.Stopped) { + s_logger.debug(String.format("Refusing to take snapshot of encrypted volume (%s) on running VM (%s)", volume, volume.getAttachedVM())); + throw new UnsupportedOperationException("Volume snapshots for encrypted volumes are not supported if VM is running"); + } + CreateSnapshotPayload payload = new CreateSnapshotPayload(); payload.setSnapshotId(snapshotId); @@ -2982,6 +3011,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw ex; } + if (volume.getPassphraseId() != null) { + throw new InvalidParameterValueException("Extraction of encrypted volumes is unsupported"); + } + if (volume.getVolumeType() != Volume.Type.DATADISK) { // Datadisk dont have any template dependence. @@ -3312,6 +3345,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic sendCommand = true; } + if (host != null) { + _hostDao.loadDetails(host); + boolean hostSupportsEncryption = Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION)); + if (volumeToAttach.getPassphraseId() != null) { + if (!hostSupportsEncryption) { + throw new CloudRuntimeException(errorMsg + " because target host " + host + " doesn't support volume encryption"); + } + } + } + if (volumeToAttachStoragePool != null) { verifyManagedStorage(volumeToAttachStoragePool.getId(), hostId); } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 72815d42290..e1c69804984 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -96,6 +96,7 @@ import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.TaggedResourceService; import 
com.cloud.storage.CreateSnapshotPayload; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Type; @@ -110,6 +111,7 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotPolicyDao; import com.cloud.storage.dao.SnapshotScheduleDao; @@ -172,6 +174,8 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement @Inject DomainDao _domainDao; @Inject + DiskOfferingDao _diskOfferingDao; + @Inject StorageManager _storageMgr; @Inject SnapshotScheduler _snapSchedMgr; @@ -836,6 +840,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement throw new InvalidParameterValueException("Failed to create snapshot policy, unable to find a volume with id " + volumeId); } + // For now, volumes with encryption don't support snapshot schedules, because they will fail when VM is running + DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId()); + if (diskOffering == null) { + throw new InvalidParameterValueException(String.format("Failed to find disk offering for the volume [%s]", volume.getUuid())); + } else if(diskOffering.getEncrypt()) { + throw new UnsupportedOperationException(String.format("Encrypted volumes don't support snapshot schedules, cannot create snapshot policy for the volume [%s]", volume.getUuid())); + } + String volumeDescription = volume.getVolumeDescription(); _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index d7d38922917..dc7b262fca2 100755 
--- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -1806,6 +1806,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // check permissions _accountMgr.checkAccess(caller, null, true, volume); + // Don't support creating templates from encrypted volumes (yet) + if (volume.getPassphraseId() != null) { + throw new UnsupportedOperationException("Cannot create templates from encrypted volumes"); + } + // If private template is created from Volume, check that the volume // will not be active when the private template is // created @@ -1829,6 +1834,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // Volume could be removed so find including removed to record source template id. volume = _volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId()); + // Don't support creating templates from encrypted volumes (yet) + if (volume != null && volume.getPassphraseId() != null) { + throw new UnsupportedOperationException("Cannot create templates from snapshots of encrypted volumes"); + } + // check permissions _accountMgr.checkAccess(caller, null, true, snapshot); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 0be2e482b2a..41b47eb5882 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -3826,6 +3826,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } ServiceOfferingVO offering = _serviceOfferingDao.findById(serviceOffering.getId()); + if (offering.getEncrypt() && hypervisorType != HypervisorType.KVM) { + throw new InvalidParameterValueException("Root volume encryption is not supported for hypervisor type " + hypervisorType); + } + if (offering.isDynamic()) { offering.setDynamicFlag(true); 
validateCustomParameters(offering, customParameters); @@ -3844,6 +3848,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (diskOffering == null) { throw new InvalidParameterValueException("Specified disk offering cannot be found"); } + + if (diskOffering.getEncrypt() && hypervisorType != HypervisorType.KVM) { + throw new InvalidParameterValueException("Volume encryption is not supported for hypervisor type " + hypervisorType); + } + if (diskOffering.isCustomized()) { if (diskSize == null) { throw new InvalidParameterValueException("This disk offering requires a custom size specified"); diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 636a1ae4d7a..898f4593e57 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -394,6 +394,12 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme throw new InvalidParameterValueException("All volumes of the VM: " + userVmVo.getUuid() + " should be on the same PowerFlex storage pool"); } } + + // disallow KVM snapshots for VMs if root volume is encrypted (Qemu crash) + if (rootVolume.getPassphraseId() != null && userVmVo.getState() == State.Running && snapshotMemory) { + throw new UnsupportedOperationException("Cannot create VM memory snapshots on KVM from encrypted root volumes"); + } + } // check access diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 97e15de6f84..e58a5c68734 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -23,29 +23,43 @@ import static org.junit.Assert.assertTrue; import 
java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.DataCenter; +import com.cloud.gpu.GPU; import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VirtualMachineProfileImpl; import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.CollectionUtils; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; import org.mockito.InjectMocks; import org.mockito.Matchers; import org.mockito.Mock; @@ -162,12 +176,30 @@ public class DeploymentPlanningManagerImplTest { @Inject HostPodDao hostPodDao; + @Inject + VolumeDao volDao; + + @Inject + HostDao hostDao; + + @Inject + CapacityManager capacityMgr; + + @Inject + ServiceOfferingDetailsDao serviceOfferingDetailsDao; + + @Inject + ClusterDetailsDao clusterDetailsDao; + @Mock Host host; - private static long dataCenterId = 1L; - private static long hostId = 1l; - private static final long ADMIN_ACCOUNT_ROLE_ID = 1l; + private static final long dataCenterId = 1L; + private static final long instanceId = 123L; + 
private static final long hostId = 0L; + private static final long podId = 2L; + private static final long clusterId = 3L; + private static final long ADMIN_ACCOUNT_ROLE_ID = 1L; @BeforeClass public static void setUp() throws ConfigurationException { @@ -179,7 +211,7 @@ public class DeploymentPlanningManagerImplTest { ComponentContext.initComponentsLifeCycle(); - PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared); + PlannerHostReservationVO reservationVO = new PlannerHostReservationVO(hostId, dataCenterId, podId, clusterId, PlannerResourceUsage.Shared); Mockito.when(_plannerHostReserveDao.persist(Matchers.any(PlannerHostReservationVO.class))).thenReturn(reservationVO); Mockito.when(_plannerHostReserveDao.findById(Matchers.anyLong())).thenReturn(reservationVO); Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Matchers.anyLong())).thenReturn(0L); @@ -190,9 +222,12 @@ public class DeploymentPlanningManagerImplTest { VMInstanceVO vm = new VMInstanceVO(); Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); + Mockito.when(vmProfile.getId()).thenReturn(instanceId); Mockito.when(vmDetailsDao.listDetailsKeyPairs(Matchers.anyLong())).thenReturn(null); + Mockito.when(volDao.findByInstance(Matchers.anyLong())).thenReturn(new ArrayList<>()); + Mockito.when(_dcDao.findById(Matchers.anyLong())).thenReturn(dc); Mockito.when(dc.getId()).thenReturn(dataCenterId); @@ -439,6 +474,323 @@ public class DeploymentPlanningManagerImplTest { Assert.assertTrue(avoids.getClustersToAvoid().contains(expectedClusterId)); } + @Test + public void volumesRequireEncryptionTest() { + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + VolumeVO vol2 = new VolumeVO("vol2", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.DATADISK); + VolumeVO vol3 = new VolumeVO("vol3", 
dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.DATADISK); + vol2.setPassphraseId(1L); + + List volumes = List.of(vol1, vol2, vol3); + Assert.assertTrue("Volumes require encryption, but not reporting", _dpm.anyVolumeRequiresEncryption(volumes)); + } + + @Test + public void volumesDoNotRequireEncryptionTest() { + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + VolumeVO vol2 = new VolumeVO("vol2", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.DATADISK); + VolumeVO vol3 = new VolumeVO("vol3", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.DATADISK); + + List volumes = List.of(vol1, vol2, vol3); + Assert.assertFalse("Volumes do not require encryption, but reporting they do", _dpm.anyVolumeRequiresEncryption(volumes)); + } + + /** + * Root requires encryption, chosen host supports it + */ + @Test + public void passEncRootProvidedHostSupportingEncryptionTest() { + HostVO host = new HostVO("host"); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "true"); + }}; + host.setDetails(hostDetails); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + setupMocksForPlanDeploymentHostTests(host, vol1); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, hostId, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertEquals(dest.getHost(), host); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Root requires encryption, chosen host does not support it + */ + @Test + public void failEncRootProvidedHostNotSupportingEncryptionTest() { + HostVO host = new 
HostVO("host"); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "false"); + }}; + host.setDetails(hostDetails); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + setupMocksForPlanDeploymentHostTests(host, vol1); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, hostId, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertNull("Destination should be null since host doesn't support encryption and root requires it", dest); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Root does not require encryption, chosen host does not support it + */ + @Test + public void passNoEncRootProvidedHostNotSupportingEncryptionTest() { + HostVO host = new HostVO("host"); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "false"); + }}; + host.setDetails(hostDetails); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + + setupMocksForPlanDeploymentHostTests(host, vol1); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, hostId, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertEquals(dest.getHost(), host); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Root does not require encryption, chosen host does support it + */ + @Test + public void passNoEncRootProvidedHostSupportingEncryptionTest() { + HostVO host = new HostVO("host"); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "true"); + }}; + host.setDetails(hostDetails); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, 
(long)10<<30, Volume.Type.ROOT); + + setupMocksForPlanDeploymentHostTests(host, vol1); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, hostId, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertEquals(dest.getHost(), host); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Root requires encryption, last host supports it + */ + @Test + public void passEncRootLastHostSupportingEncryptionTest() { + HostVO host = Mockito.spy(new HostVO("host")); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "true"); + }}; + host.setDetails(hostDetails); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + setupMocksForPlanDeploymentHostTests(host, vol1); + + VMInstanceVO vm = (VMInstanceVO) vmProfile.getVirtualMachine(); + vm.setLastHostId(hostId); + + // host id is null here so we pick up last host id + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertEquals(dest.getHost(), host); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Root requires encryption, last host does not support it + */ + @Test + public void failEncRootLastHostNotSupportingEncryptionTest() { + HostVO host = Mockito.spy(new HostVO("host")); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "false"); + }}; + host.setDetails(hostDetails); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + 
setupMocksForPlanDeploymentHostTests(host, vol1); + + VMInstanceVO vm = (VMInstanceVO) vmProfile.getVirtualMachine(); + vm.setLastHostId(hostId); + // host id is null here so we pick up last host id + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); + Assert.assertNull("Destination should be null since last host doesn't support encryption and root requires it", dest); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + @Test + public void passEncRootPlannerHostSupportingEncryptionTest() { + HostVO host = Mockito.spy(new HostVO("host")); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "true"); + }}; + host.setDetails(hostDetails); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + DeploymentClusterPlanner planner = setupMocksForPlanDeploymentHostTests(host, vol1); + + // host id is null here so we pick up last host id + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, planner); + Assert.assertEquals(host, dest.getHost()); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + @Test + public void failEncRootPlannerHostSupportingEncryptionTest() { + HostVO host = Mockito.spy(new HostVO("host")); + Map hostDetails = new HashMap<>() {{ + put(Host.HOST_VOLUME_ENCRYPTION, "false"); + }}; + host.setDetails(hostDetails); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + + VolumeVO vol1 = new VolumeVO("vol1", dataCenterId,podId,1L,1L, instanceId,"folder","path",ProvisioningType.THIN, (long)10<<30, Volume.Type.ROOT); + vol1.setPassphraseId(1L); + + 
DeploymentClusterPlanner planner = setupMocksForPlanDeploymentHostTests(host, vol1); + + // host id is null here so we pick up last host id + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + + try { + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, planner); + Assert.assertNull("Destination should be null since last host doesn't support encryption and root requires it", dest); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + // This is so ugly but everything is so intertwined... + private DeploymentClusterPlanner setupMocksForPlanDeploymentHostTests(HostVO host, VolumeVO vol1) { + long diskOfferingId = 345L; + List volumeVOs = new ArrayList<>(); + List volumes = new ArrayList<>(); + vol1.setDiskOfferingId(diskOfferingId); + volumes.add(vol1); + volumeVOs.add(vol1); + + DiskOfferingVO diskOffering = new DiskOfferingVO(); + diskOffering.setEncrypt(true); + + VMTemplateVO template = new VMTemplateVO(); + template.setFormat(Storage.ImageFormat.QCOW2); + + host.setClusterId(clusterId); + + StoragePool pool = new StoragePoolVO(); + + Map> suitableVolumeStoragePools = new HashMap<>() {{ + put(vol1, List.of(pool)); + }}; + + Pair>, List> suitable = new Pair<>(suitableVolumeStoragePools, volumes); + + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, + false, false, false, "test dpm", ProvisioningType.THIN, false, false, + null, false, VirtualMachine.Type.User, null, "FirstFitPlanner", true); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + Mockito.when(vmProfile.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.doNothing().when(hostDao).loadDetails(host); + Mockito.doReturn(volumeVOs).when(volDao).findByInstance(ArgumentMatchers.anyLong()); + Mockito.doReturn(suitable).when(_dpm).findSuitablePoolsForVolumes( + 
ArgumentMatchers.any(VirtualMachineProfile.class), + ArgumentMatchers.any(DataCenterDeployment.class), + ArgumentMatchers.any(ExcludeList.class), + ArgumentMatchers.anyInt() + ); + + ClusterVO clusterVO = new ClusterVO(); + clusterVO.setHypervisorType(HypervisorType.KVM.toString()); + Mockito.when(_clusterDao.findById(ArgumentMatchers.anyLong())).thenReturn(clusterVO); + + Mockito.doReturn(List.of(host)).when(_dpm).findSuitableHosts( + ArgumentMatchers.any(VirtualMachineProfile.class), + ArgumentMatchers.any(DeploymentPlan.class), + ArgumentMatchers.any(ExcludeList.class), + ArgumentMatchers.anyInt() + ); + + Map suitableVolumeStoragePoolMap = new HashMap<>() {{ + put(vol1, pool); + }}; + Mockito.doReturn(true).when(_dpm).hostCanAccessSPool(ArgumentMatchers.any(Host.class), ArgumentMatchers.any(StoragePool.class)); + + Pair> potentialResources = new Pair<>(host, suitableVolumeStoragePoolMap); + + Mockito.when(capacityMgr.checkIfHostReachMaxGuestLimit(host)).thenReturn(false); + Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); + Mockito.when(capacityMgr.checkIfHostHasCapacity( + ArgumentMatchers.anyLong(), + ArgumentMatchers.anyInt(), + ArgumentMatchers.anyLong(), + ArgumentMatchers.anyBoolean(), + ArgumentMatchers.anyFloat(), + ArgumentMatchers.anyFloat(), + ArgumentMatchers.anyBoolean() + )).thenReturn(true); + Mockito.when(serviceOfferingDetailsDao.findDetail(vmProfile.getServiceOfferingId(), GPU.Keys.vgpuType.toString())).thenReturn(null); + + Mockito.doReturn(true).when(_dpm).checkVmProfileAndHost(vmProfile, host); + Mockito.doReturn(true).when(_dpm).checkIfHostFitsPlannerUsage(ArgumentMatchers.anyLong(), ArgumentMatchers.nullable(PlannerResourceUsage.class)); + Mockito.when(clusterDetailsDao.findDetail(ArgumentMatchers.anyLong(), ArgumentMatchers.anyString())).thenReturn(new ClusterDetailsVO(clusterId, "mock", "1")); + + DeploymentClusterPlanner planner = 
Mockito.spy(new FirstFitPlanner()); + try { + Mockito.doReturn(List.of(clusterId), List.of()).when(planner).orderClusters( + ArgumentMatchers.any(VirtualMachineProfile.class), + ArgumentMatchers.any(DeploymentPlan.class), + ArgumentMatchers.any(ExcludeList.class) + ); + } catch (Exception ex) { + ex.printStackTrace(); + } + + return planner; + } + private DataCenter prepareAvoidDisabledTests() { DataCenter dc = Mockito.mock(DataCenter.class); Mockito.when(dc.getId()).thenReturn(123l); diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index f317ea8acf3..b5f460ee3c4 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -35,9 +35,6 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; -import com.cloud.api.query.dao.ServiceOfferingJoinDao; -import com.cloud.api.query.vo.ServiceOfferingJoinVO; -import com.cloud.storage.dao.VMTemplateDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; @@ -73,6 +70,8 @@ import org.mockito.Spy; import org.mockito.runners.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; +import com.cloud.api.query.dao.ServiceOfferingJoinDao; +import com.cloud.api.query.vo.ServiceOfferingJoinVO; import com.cloud.configuration.Resource; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenterVO; @@ -85,7 +84,9 @@ import com.cloud.org.Grouping; import com.cloud.serializer.GsonHelper; import com.cloud.server.TaggedResourceService; import com.cloud.storage.Volume.Type; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.storage.dao.VMTemplateDao; import 
com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.user.Account; @@ -158,6 +159,8 @@ public class VolumeApiServiceImplTest { private VMTemplateDao templateDao; @Mock private ServiceOfferingJoinDao serviceOfferingJoinDao; + @Mock + private DiskOfferingDao _diskOfferingDao; private DetachVolumeCmd detachCmd = new DetachVolumeCmd(); private Class _detachCmdClass = detachCmd.getClass(); @@ -269,6 +272,7 @@ public class VolumeApiServiceImplTest { VolumeVO correctRootVolumeVO = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); when(volumeDaoMock.findById(6L)).thenReturn(correctRootVolumeVO); + when(volumeDaoMock.getHypervisorType(6L)).thenReturn(HypervisorType.XenServer); // managed root volume VolumeInfo managedVolume = Mockito.mock(VolumeInfo.class); @@ -291,7 +295,7 @@ public class VolumeApiServiceImplTest { when(userVmDaoMock.findById(4L)).thenReturn(vmHavingRootVolume); List vols = new ArrayList(); vols.add(new VolumeVO()); - when(volumeDaoMock.findByInstanceAndDeviceId(4L, 0L)).thenReturn(vols); + lenient().when(volumeDaoMock.findByInstanceAndDeviceId(4L, 0L)).thenReturn(vols); // volume in uploaded state VolumeInfo uploadedVolume = Mockito.mock(VolumeInfo.class); @@ -309,6 +313,27 @@ public class VolumeApiServiceImplTest { upVolume.setState(Volume.State.Uploaded); when(volumeDaoMock.findById(8L)).thenReturn(upVolume); + UserVmVO kvmVm = new UserVmVO(4L, "vm", "vm", 1, HypervisorType.KVM, 1L, false, false, 1L, 1L, 1, 1L, null, "vm", null); + kvmVm.setState(State.Running); + kvmVm.setDataCenterId(1L); + when(userVmDaoMock.findById(4L)).thenReturn(kvmVm); + + VolumeVO volumeOfKvmVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 4L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT); + volumeOfKvmVm.setPoolId(1L); + lenient().when(volumeDaoMock.findById(9L)).thenReturn(volumeOfKvmVm); + 
lenient().when(volumeDaoMock.getHypervisorType(9L)).thenReturn(HypervisorType.KVM); + + VolumeVO dataVolumeVO = new VolumeVO("data", 1L, 1L, 1L, 1L, 2L, "data", "data", Storage.ProvisioningType.THIN, 1, null, null, "data", Type.DATADISK); + lenient().when(volumeDaoMock.findById(10L)).thenReturn(dataVolumeVO); + + VolumeInfo dataVolume = Mockito.mock(VolumeInfo.class); + when(dataVolume.getId()).thenReturn(10L); + when(dataVolume.getDataCenterId()).thenReturn(1L); + when(dataVolume.getVolumeType()).thenReturn(Volume.Type.DATADISK); + when(dataVolume.getInstanceId()).thenReturn(null); + when(dataVolume.getState()).thenReturn(Volume.State.Allocated); + when(volumeDataFactoryMock.getVolume(10L)).thenReturn(dataVolume); + // helper dao methods mock when(_vmSnapshotDao.findByVm(any(Long.class))).thenReturn(new ArrayList()); when(_vmInstanceDao.findById(any(Long.class))).thenReturn(stoppedVm); @@ -322,6 +347,10 @@ public class VolumeApiServiceImplTest { txn.close("runVolumeDaoImplTest"); } + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + when(diskOffering.getEncrypt()).thenReturn(false); + when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); + // helper methods mock lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); doNothing().when(_jobMgr).updateAsyncJobAttachment(any(Long.class), any(String.class), any(Long.class)); @@ -415,6 +444,25 @@ public class VolumeApiServiceImplTest { volumeApiServiceImpl.attachVolumeToVM(2L, 6L, 0L); } + // Negative test - attach data volume, to the vm on non-kvm hypervisor + @Test(expected = InvalidParameterValueException.class) + public void attachDiskWithEncryptEnabledOfferingonNonKVM() throws NoSuchFieldException, IllegalAccessException { + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + when(diskOffering.getEncrypt()).thenReturn(true); + 
when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); + volumeApiServiceImpl.attachVolumeToVM(2L, 10L, 1L); + } + + // Positive test - attach data volume, to the vm on kvm hypervisor + @Test + public void attachDiskWithEncryptEnabledOfferingOnKVM() throws NoSuchFieldException, IllegalAccessException { + thrown.expect(NullPointerException.class); + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + when(diskOffering.getEncrypt()).thenReturn(true); + when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); + volumeApiServiceImpl.attachVolumeToVM(4L, 10L, 1L); + } + // volume not Ready @Test(expected = InvalidParameterValueException.class) public void testTakeSnapshotF1() throws ResourceAllocationException { diff --git a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java index b9bbe068ab6..0f9b999a0fe 100644 --- a/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java +++ b/server/src/test/java/com/cloud/storage/listener/StoragePoolMonitorTest.java @@ -63,7 +63,7 @@ public class StoragePoolMonitorTest { Mockito.when(poolDao.listBy(nullable(Long.class), nullable(Long.class), nullable(Long.class), Mockito.any(ScopeType.class))).thenReturn(Collections.singletonList(pool)); Mockito.when(poolDao.findZoneWideStoragePoolsByTags(Mockito.anyLong(), Mockito.any(String[].class))).thenReturn(Collections.emptyList()); Mockito.when(poolDao.findZoneWideStoragePoolsByHypervisor(Mockito.anyLong(), Mockito.any(Hypervisor.HypervisorType.class))).thenReturn(Collections.emptyList()); - Mockito.doNothing().when(storageManager).connectHostToSharedPool(host.getId(), pool.getId()); + Mockito.doReturn(true).when(storageManager).connectHostToSharedPool(host.getId(), pool.getId()); storagePoolMonitor.processConnect(host, cmd, false); diff --git a/server/src/test/resources/createNetworkOffering.xml 
b/server/src/test/resources/createNetworkOffering.xml index 897d4dac305..ea127d741f1 100644 --- a/server/src/test/resources/createNetworkOffering.xml +++ b/server/src/test/resources/createNetworkOffering.xml @@ -62,4 +62,5 @@ + diff --git a/test/integration/smoke/test_disk_offerings.py b/test/integration/smoke/test_disk_offerings.py index 660dd30024d..dc23a52a026 100644 --- a/test/integration/smoke/test_disk_offerings.py +++ b/test/integration/smoke/test_disk_offerings.py @@ -45,7 +45,7 @@ class TestCreateDiskOffering(cloudstackTestCase): raise Exception("Warning: Exception during cleanup : %s" % e) return - @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke"], required_hardware="false") + @attr(tags=["advanced", "basic", "eip", "sg", "advancedns", "smoke", "diskencrypt"], required_hardware="false") def test_01_create_disk_offering(self): """Test to create disk offering @@ -87,6 +87,11 @@ class TestCreateDiskOffering(cloudstackTestCase): self.services["disk_offering"]["name"], "Check name in createServiceOffering" ) + self.assertEqual( + disk_response.encrypt, + False, + "Ensure disk encryption is false by default" + ) return @attr(hypervisor="kvm") @@ -294,6 +299,49 @@ class TestCreateDiskOffering(cloudstackTestCase): return + @attr(tags = ["advanced", "basic", "eip", "sg", "advancedns", "simulator", "smoke", "diskencrypt"]) + def test_08_create_encrypted_disk_offering(self): + """Test to create an encrypted type disk offering""" + + # Validate the following: + # 1. createDiskOfferings should return valid info for new offering + # 2. 
The Cloud Database contains the valid information + + disk_offering = DiskOffering.create( + self.apiclient, + self.services["disk_offering"], + name="disk-encrypted", + encrypt="true" + ) + self.cleanup.append(disk_offering) + + self.debug("Created Disk offering with ID: %s" % disk_offering.id) + + list_disk_response = list_disk_offering( + self.apiclient, + id=disk_offering.id + ) + + self.assertEqual( + isinstance(list_disk_response, list), + True, + "Check list response returns a valid list" + ) + + self.assertNotEqual( + len(list_disk_response), + 0, + "Check Disk offering is created" + ) + disk_response = list_disk_response[0] + + self.assertEqual( + disk_response.encrypt, + True, + "Check if encrypt is set after createServiceOffering" + ) + return + class TestDiskOfferings(cloudstackTestCase): def setUp(self): diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 3a942a10b62..a7723d575ad 100644 --- a/test/integration/smoke/test_service_offerings.py +++ b/test/integration/smoke/test_service_offerings.py @@ -67,7 +67,7 @@ class TestCreateServiceOffering(cloudstackTestCase): "smoke", "basic", "eip", - "sg"], + "sg", "diskencrypt"], required_hardware="false") def test_01_create_service_offering(self): """Test to create service offering""" @@ -128,6 +128,11 @@ class TestCreateServiceOffering(cloudstackTestCase): self.services["service_offerings"]["tiny"]["name"], "Check name in createServiceOffering" ) + self.assertEqual( + list_service_response[0].encryptroot, + False, + "Ensure encrypt is false by default" + ) return @attr( @@ -301,6 +306,53 @@ class TestCreateServiceOffering(cloudstackTestCase): ) return + @attr( + tags=[ + "advanced", + "advancedns", + "smoke", + "basic", + "eip", + "sg", + "diskencrypt"], + required_hardware="false") + def test_05_create_service_offering_with_root_encryption_type(self): + """Test to create service offering with root encryption""" + + # Validate the following: + 
# 1. createServiceOfferings should return a valid information + # for newly created offering + + service_offering = ServiceOffering.create( + self.apiclient, + self.services["service_offerings"]["tiny"], + name="tiny-encrypted-root", + encryptRoot=True + ) + self.cleanup.append(service_offering) + + self.debug( + "Created service offering with ID: %s" % + service_offering.id) + + list_service_response = list_service_offering( + self.apiclient, + id=service_offering.id + ) + + self.assertNotEqual( + len(list_service_response), + 0, + "Check Service offering is created" + ) + + self.assertEqual( + list_service_response[0].encryptroot, + True, + "Check encrypt root is true" + ) + return + class TestServiceOfferings(cloudstackTestCase): diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py index 13082859682..5bd0dd6e502 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -43,9 +43,12 @@ from marvin.lib.common import (get_domain, find_storage_pool_type, get_pod, list_disk_offering) -from marvin.lib.utils import checkVolumeSize +from marvin.lib.utils import (cleanup_resources, checkVolumeSize) from marvin.lib.utils import (format_volume_to_ext3, wait_until) +from marvin.sshClient import SshClient +import xml.etree.ElementTree as ET +from lxml import etree from nose.plugins.attrib import attr @@ -1005,3 +1008,555 @@ class TestVolumes(cloudstackTestCase): "Offering name did not match with the new one " ) return + + +class TestVolumeEncryption(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.testClient = super(TestVolumeEncryption, cls).getClsTestClient() + cls.apiclient = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + cls._cleanup = [] + + cls.unsupportedHypervisor = False + cls.hypervisor = cls.testClient.getHypervisorInfo() + if cls.hypervisor.lower() not in ['kvm']: + # Volume Encryption currently supported for KVM 
hypervisor + cls.unsupportedHypervisor = True + return + + # Get Zone and Domain + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + + cls.services['mode'] = cls.zone.networktype + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + + # Get template + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"] + + cls.services["template"] = template.id + cls.services["diskname"] = cls.services["volume"]["diskname"] + + cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__ + + # Create Account + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + + # Create Service Offering + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + cls._cleanup.append(cls.service_offering) + + # Create Service Offering with encryptRoot true + cls.service_offering_encrypt = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"], + name="Small Encrypted Instance", + encryptroot=True + ) + cls._cleanup.append(cls.service_offering_encrypt) + + # Create Disk Offering + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"] + ) + cls._cleanup.append(cls.disk_offering) + + # Create Disk Offering with encrypt true + cls.disk_offering_encrypt = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"], + name="Encrypted", + encrypt=True + ) + cls._cleanup.append(cls.disk_offering_encrypt) + + @classmethod + def tearDownClass(cls): + try: + 
cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + if self.unsupportedHypervisor: + self.skipTest("Skipping test as volume encryption is not supported for hypervisor %s" % self.hypervisor) + + if not self.does_host_with_encryption_support_exists(): + self.skipTest("Skipping test as no host exists with volume encryption support") + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "smoke", "diskencrypt"], required_hardware="true") + def test_01_root_volume_encryption(self): + """Test Root Volume Encryption + + # Validate the following + # 1. Create VM using the service offering with encryptroot true + # 2. Verify VM created and Root Volume + # 3. Create Data Volume using the disk offering with encrypt false + # 4. 
Verify Data Volume + """ + + virtual_machine = VirtualMachine.create( + self.apiclient, + self.services, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_encrypt.id, + mode=self.services["mode"] + ) + self.cleanup.append(virtual_machine) + self.debug("Created VM with ID: %s" % virtual_machine.id) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + + vm_response = list_vm_response[0] + self.assertEqual( + vm_response.id, + virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + + self.check_volume_encryption(virtual_machine, 1) + + volume = Volume.create( + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) + self.debug("Created a volume with ID: %s" % volume.id) + + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + self.assertEqual( + isinstance(list_volume_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_volume_response, + None, + "Check if volume exists in ListVolumes" + ) + + self.debug("Attaching volume (ID: %s) to VM (ID: %s)" % (volume.id, virtual_machine.id)) + + virtual_machine.attach_volume( + self.apiclient, + volume + ) + + try: + ssh = virtual_machine.get_ssh_client() + self.debug("Rebooting VM %s" % virtual_machine.id) + ssh.execute("reboot") + except Exception as e: + self.fail("SSH access failed for VM %s - %s" % (virtual_machine.ipaddress, e)) + + # Poll listVM to ensure VM is started properly + timeout = self.services["timeout"] + while 
True: + time.sleep(self.services["sleep"]) + + # Ensure that VM is in running state + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + + if isinstance(list_vm_response, list): + vm = list_vm_response[0] + if vm.state == 'Running': + self.debug("VM state: %s" % vm.state) + break + + if timeout == 0: + raise Exception( + "Failed to start VM (ID: %s) " % vm.id) + timeout = timeout - 1 + + vol_sz = str(list_volume_response[0].size) + ssh = virtual_machine.get_ssh_client( + reconnect=True + ) + + # Get the updated volume information + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + + volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) + self.debug(" Using KVM volume_name: %s" % (volume_name)) + ret = checkVolumeSize(ssh_handle=ssh, volume_name=volume_name, size_to_verify=vol_sz) + self.debug(" Volume Size Expected %s Actual :%s" % (vol_sz, ret[1])) + virtual_machine.detach_volume(self.apiclient, volume) + self.assertEqual(ret[0], SUCCESS, "Check if promised disk size actually available") + time.sleep(self.services["sleep"]) + + @attr(tags=["advanced", "smoke", "diskencrypt"], required_hardware="true") + def test_02_data_volume_encryption(self): + """Test Data Volume Encryption + + # Validate the following + # 1. Create VM using the service offering with encryptroot false + # 2. Verify VM created and Root Volume + # 3. Create Data Volume using the disk offering with encrypt true + # 4. 
Verify Data Volume + """ + + virtual_machine = VirtualMachine.create( + self.apiclient, + self.services, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.services["mode"] + ) + self.cleanup.append(virtual_machine) + self.debug("Created VM with ID: %s" % virtual_machine.id) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + + vm_response = list_vm_response[0] + self.assertEqual( + vm_response.id, + virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + + volume = Volume.create( + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering_encrypt.id + ) + self.debug("Created a volume with ID: %s" % volume.id) + + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + self.assertEqual( + isinstance(list_volume_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_volume_response, + None, + "Check if volume exists in ListVolumes" + ) + + self.debug("Attaching volume (ID: %s) to VM (ID: %s)" % (volume.id, virtual_machine.id)) + + virtual_machine.attach_volume( + self.apiclient, + volume + ) + + try: + ssh = virtual_machine.get_ssh_client() + self.debug("Rebooting VM %s" % virtual_machine.id) + ssh.execute("reboot") + except Exception as e: + self.fail("SSH access failed for VM %s - %s" % (virtual_machine.ipaddress, e)) + + # Poll listVM to ensure VM is started properly + timeout = self.services["timeout"] + while True: + time.sleep(self.services["sleep"]) + + # Ensure 
that VM is in running state + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + + if isinstance(list_vm_response, list): + vm = list_vm_response[0] + if vm.state == 'Running': + self.debug("VM state: %s" % vm.state) + break + + if timeout == 0: + raise Exception( + "Failed to start VM (ID: %s) " % vm.id) + timeout = timeout - 1 + + vol_sz = str(list_volume_response[0].size) + ssh = virtual_machine.get_ssh_client( + reconnect=True + ) + + # Get the updated volume information + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + + volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) + self.debug(" Using KVM volume_name: %s" % (volume_name)) + ret = checkVolumeSize(ssh_handle=ssh, volume_name=volume_name, size_to_verify=vol_sz) + self.debug(" Volume Size Expected %s Actual :%s" % (vol_sz, ret[1])) + + self.check_volume_encryption(virtual_machine, 1) + + virtual_machine.detach_volume(self.apiclient, volume) + self.assertEqual(ret[0], SUCCESS, "Check if promised disk size actually available") + time.sleep(self.services["sleep"]) + + @attr(tags=["advanced", "smoke", "diskencrypt"], required_hardware="true") + def test_03_root_and_data_volume_encryption(self): + """Test Root and Data Volumes Encryption + + # Validate the following + # 1. Create VM using the service offering with encryptroot true + # 2. Verify VM created and Root Volume + # 3. Create Data Volume using the disk offering with encrypt true + # 4. 
Verify Data Volume + """ + + virtual_machine = VirtualMachine.create( + self.apiclient, + self.services, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering_encrypt.id, + diskofferingid=self.disk_offering_encrypt.id, + mode=self.services["mode"] + ) + self.cleanup.append(virtual_machine) + self.debug("Created VM with ID: %s" % virtual_machine.id) + + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + + vm_response = list_vm_response[0] + self.assertEqual( + vm_response.id, + virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + + self.check_volume_encryption(virtual_machine, 2) + + volume = Volume.create( + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering_encrypt.id + ) + self.debug("Created a volume with ID: %s" % volume.id) + + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + self.assertEqual( + isinstance(list_volume_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_volume_response, + None, + "Check if volume exists in ListVolumes" + ) + + self.debug("Attaching volume (ID: %s) to VM (ID: %s)" % (volume.id, virtual_machine.id)) + + virtual_machine.attach_volume( + self.apiclient, + volume + ) + + try: + ssh = virtual_machine.get_ssh_client() + self.debug("Rebooting VM %s" % virtual_machine.id) + ssh.execute("reboot") + except Exception as e: + self.fail("SSH access failed for VM %s - %s" % (virtual_machine.ipaddress, e)) + + # Poll listVM to ensure VM is started 
properly + timeout = self.services["timeout"] + while True: + time.sleep(self.services["sleep"]) + + # Ensure that VM is in running state + list_vm_response = VirtualMachine.list( + self.apiclient, + id=virtual_machine.id + ) + + if isinstance(list_vm_response, list): + vm = list_vm_response[0] + if vm.state == 'Running': + self.debug("VM state: %s" % vm.state) + break + + if timeout == 0: + raise Exception( + "Failed to start VM (ID: %s) " % vm.id) + timeout = timeout - 1 + + vol_sz = str(list_volume_response[0].size) + ssh = virtual_machine.get_ssh_client( + reconnect=True + ) + + # Get the updated volume information + list_volume_response = Volume.list( + self.apiclient, + id=volume.id) + + volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) + self.debug(" Using KVM volume_name: %s" % (volume_name)) + ret = checkVolumeSize(ssh_handle=ssh, volume_name=volume_name, size_to_verify=vol_sz) + self.debug(" Volume Size Expected %s Actual :%s" % (vol_sz, ret[1])) + + self.check_volume_encryption(virtual_machine, 3) + + virtual_machine.detach_volume(self.apiclient, volume) + self.assertEqual(ret[0], SUCCESS, "Check if promised disk size actually available") + time.sleep(self.services["sleep"]) + + def does_host_with_encryption_support_exists(self): + hosts = Host.list( + self.apiclient, + zoneid=self.zone.id, + type='Routing', + hypervisor='KVM', + state='Up') + + for host in hosts: + if host.encryptionsupported: + return True + + return False + + def check_volume_encryption(self, virtual_machine, volumes_count): + hosts = Host.list(self.apiclient, id=virtual_machine.hostid) + if len(hosts) != 1: + assert False, "Could not find host with id " + virtual_machine.hostid + + host = hosts[0] + instance_name = virtual_machine.instancename + + self.assertIsNotNone(host, "Host should not be None") + self.assertIsNotNone(instance_name, "Instance name should not be None") + + ssh_client = SshClient( + host=host.ipaddress, + port=22, + 
user=self.hostConfig['username'], + passwd=self.hostConfig['password']) + + virsh_cmd = 'virsh dumpxml %s' % instance_name + xml_res = ssh_client.execute(virsh_cmd) + xml_as_str = ''.join(xml_res) + parser = etree.XMLParser(remove_blank_text=True) + virshxml_root = ET.fromstring(xml_as_str, parser=parser) + + encryption_format = virshxml_root.findall(".devices/disk/encryption[@format='luks']") + self.assertIsNotNone(encryption_format, "The volume encryption format is not luks") + self.assertEqual( + len(encryption_format), + volumes_count, + "Check the number of volumes encrypted with luks format" + ) + + secret_type = virshxml_root.findall(".devices/disk/encryption/secret[@type='passphrase']") + self.assertIsNotNone(secret_type, "The volume encryption secret type is not passphrase") + self.assertEqual( + len(secret_type), + volumes_count, + "Check the number of encrypted volumes with passphrase secret type" + ) diff --git a/ui/package.json b/ui/package.json index f063a0f8c91..3879ee92a6c 100644 --- a/ui/package.json +++ b/ui/package.json @@ -40,12 +40,12 @@ "@fortawesome/vue-fontawesome": "^2.0.2", "ant-design-vue": "~1.7.3", "antd-theme-webpack-plugin": "^1.3.9", - "axios": "^0.21.4", + "axios": "^0.21.1", "babel-plugin-require-context-hook": "^1.0.0", "core-js": "^3.6.5", "enquire.js": "^2.1.6", "js-cookie": "^2.2.1", - "lodash": "^4.17.21", + "lodash": "^4.17.15", "md5": "^2.2.1", "moment": "^2.26.0", "npm-check-updates": "^6.0.1", diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 4e8d2b42ea6..db6f1e1274e 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -900,6 +900,8 @@ "label.enable.vpn": "Enable Remote Access VPN", "label.enabling.vpn": "Enabling VPN", "label.enabling.vpn.access": "Enabling VPN Access", +"label.encrypt": "Encrypt", +"label.encryptroot": "Encrypt Root Disk", "label.end": "End", "label.end.ip": "End IP", "label.end.reserved.system.ip": "End Reserved system IP", @@ -2394,6 +2396,7 @@ 
"label.volume": "Volume", "label.volume.details": "Volume details", "label.volume.empty": "No data volumes attached to this VM", +"label.volume.encryption.support": "Volume Encryption Supported", "label.volume.ids": "Volume ID's", "label.volume.migrated": "Volume migrated", "label.volume.volumefileupload.description": "Click or drag file to this area to upload", diff --git a/ui/src/config/section/offering.js b/ui/src/config/section/offering.js index 6a09e6f8040..59f99c8a9b7 100644 --- a/ui/src/config/section/offering.js +++ b/ui/src/config/section/offering.js @@ -31,7 +31,7 @@ export default { params: { isrecursive: 'true' }, columns: ['name', 'displaytext', 'cpunumber', 'cpuspeed', 'memory', 'domain', 'zone', 'order'], details: () => { - var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled'] + var fields = ['name', 'id', 'displaytext', 'offerha', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'limitcpuuse', 'cpunumber', 'cpuspeed', 'memory', 'hosttags', 'storagetags', 'domain', 'zone', 'created', 'dynamicscalingenabled', 'encryptroot'] if (store.getters.apis.createServiceOffering && store.getters.apis.createServiceOffering.params.filter(x => x.name === 'storagepolicy').length > 0) { fields.splice(6, 0, 'vspherestoragepolicy') @@ -141,7 +141,7 @@ export default { params: { isrecursive: 'true' }, columns: ['name', 'displaytext', 'disksize', 'domain', 'zone', 'order'], details: () => { - var fields = ['name', 'id', 'displaytext', 'disksize', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'tags', 'domain', 'zone', 'created'] + var fields = ['name', 'id', 'displaytext', 'disksize', 'provisioningtype', 'storagetype', 'iscustomized', 'iscustomizediops', 'tags', 'domain', 'zone', 'created', 'encrypt'] if 
(store.getters.apis.createDiskOffering && store.getters.apis.createDiskOffering.params.filter(x => x.name === 'storagepolicy').length > 0) { fields.splice(6, 0, 'vspherestoragepolicy') diff --git a/ui/src/views/infra/HostInfo.vue b/ui/src/views/infra/HostInfo.vue index 4ab73b51a20..c30a75463f5 100644 --- a/ui/src/views/infra/HostInfo.vue +++ b/ui/src/views/infra/HostInfo.vue @@ -40,6 +40,14 @@ + +
+ {{ $t('label.volume.encryption.support') }} +
+ {{ host.encryptionsupported }} +
+
+
{{ $t('label.hosttags') }} diff --git a/ui/src/views/offering/AddComputeOffering.vue b/ui/src/views/offering/AddComputeOffering.vue index bcc09c1e9d5..8faf7fcec5e 100644 --- a/ui/src/views/offering/AddComputeOffering.vue +++ b/ui/src/views/offering/AddComputeOffering.vue @@ -425,6 +425,10 @@ + + + + @@ -552,6 +556,7 @@ export default { qosType: '', isCustomizedDiskIops: false, isPublic: true, + isEncrypted: false, selectedDomains: [], domains: [], domainLoading: false, @@ -751,7 +756,8 @@ export default { customized: values.offeringtype !== 'fixed', offerha: values.offerha === true, limitcpuuse: values.limitcpuuse === true, - dynamicscalingenabled: values.dynamicscalingenabled + dynamicscalingenabled: values.dynamicscalingenabled, + encryptroot: values.isencrypted } // custom fields (begin) diff --git a/ui/src/views/offering/AddDiskOffering.vue b/ui/src/views/offering/AddDiskOffering.vue index 26cb04bbdb8..54b924b5056 100644 --- a/ui/src/views/offering/AddDiskOffering.vue +++ b/ui/src/views/offering/AddDiskOffering.vue @@ -260,6 +260,10 @@ + + + + @@ -375,6 +379,7 @@ export default { storagePolicies: null, storageTagLoading: false, isPublic: true, + isEncrypted: false, domains: [], domainLoading: false, zones: [], @@ -501,7 +506,8 @@ export default { storageType: values.storagetype, cacheMode: values.writecachetype, provisioningType: values.provisioningtype, - customized: values.customdisksize + customized: values.customdisksize, + encrypt: values.isencrypted } if (values.customdisksize !== true) { params.disksize = values.disksize diff --git a/utils/src/main/java/com/cloud/utils/UuidUtils.java b/utils/src/main/java/com/cloud/utils/UuidUtils.java index e733eff6da3..5b24c28e284 100644 --- a/utils/src/main/java/com/cloud/utils/UuidUtils.java +++ b/utils/src/main/java/com/cloud/utils/UuidUtils.java @@ -24,13 +24,14 @@ import org.apache.xerces.impl.xpath.regex.RegularExpression; public class UuidUtils { + public static RegularExpression REGEX = new 
RegularExpression("[0-9a-fA-F]{8}(?:-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}"); + public final static String first(String uuid) { return uuid.substring(0, uuid.indexOf('-')); } public static boolean validateUUID(String uuid) { - RegularExpression regex = new RegularExpression("[0-9a-fA-F]{8}(?:-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}"); - return regex.matches(uuid); + return REGEX.matches(uuid); } /**