mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-9620: Enhancements for managed storage (#2298)
Allowed zone-wide primary storage based on a custom plug-in to be added via the GUI in a KVM-only environment (previously this only worked for XenServer and VMware) Added support for root disks on managed storage with KVM Added support for volume snapshots with managed storage on KVM Enable creating a template directly from a volume (i.e. without having to go through a volume snapshot) on KVM with managed storage Only allow the resizing of a volume for managed storage on KVM if the volume in question is either not attached to a VM or is attached to a VM in the Stopped state. Included support for Reinstall VM on KVM with managed storage Enabled offline migration on KVM from non-managed storage to managed storage and vice versa Included support for online storage migration on KVM with managed storage (NFS and Ceph to managed storage) Added support to download (extract) a managed-storage volume to a QCOW2 file When uploading a file from outside of CloudStack to CloudStack, set the min and max IOPS, if applicable. Included support for the KVM auto-convergence feature The compression flag was actually added in version 1.0.3 (1000003) as opposed to version 1.3.0 (1003000) (changed this to reflect the correct version) On KVM when using iSCSI-based managed storage, if the user shuts a VM down from the guest OS (as opposed to doing so from CloudStack), we need to pass to the KVM agent a list of applicable iSCSI volumes that need to be disconnected. Added a new Global Setting: kvm.storage.live.migration.wait For XenServer, added a check to enforce that only volumes from zone-wide managed storage can be storage motioned from a host in one cluster to a host in another cluster (cannot do so at the time being with volumes from cluster-scoped managed storage) Don’t allow Storage XenMotion on a VM that has any managed-storage volume with one or more snapshots. 
Enabled for managed storage with VMware: Template caching, create snapshot, delete snapshot, create volume from snapshot, and create template from snapshot Added an SIOC API plug-in to support VMware SIOC When starting a VM that uses managed storage in a cluster other than the one it last was running in, we need to remove the reference to the iSCSI volume from the original cluster. Added the ability to revert a volume to a snapshot Enabled cluster-scoped managed storage Added support for VMware dynamic discovery
This commit is contained in:
parent
823a7891e4
commit
a30a31c9b7
|
|
@ -27,6 +27,7 @@ public class DiskTO {
|
|||
public static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret";
|
||||
public static final String CHAP_TARGET_USERNAME = "chapTargetUsername";
|
||||
public static final String CHAP_TARGET_SECRET = "chapTargetSecret";
|
||||
public static final String SCSI_NAA_DEVICE_ID = "scsiNaaDeviceId";
|
||||
public static final String MANAGED = "managed";
|
||||
public static final String IQN = "iqn";
|
||||
public static final String STORAGE_HOST = "storageHost";
|
||||
|
|
@ -36,6 +37,9 @@ public class DiskTO {
|
|||
public static final String PROTOCOL_TYPE = "protocoltype";
|
||||
public static final String PATH = "path";
|
||||
public static final String UUID = "uuid";
|
||||
public static final String VMDK = "vmdk";
|
||||
public static final String EXPAND_DATASTORE = "expandDatastore";
|
||||
public static final String TEMPLATE_RESIGN = "templateResign";
|
||||
|
||||
private DataTO data;
|
||||
private Long diskSeq;
|
||||
|
|
|
|||
|
|
@ -78,6 +78,14 @@ public class ResizeVolumeCmd extends BaseAsyncCmd {
|
|||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
public ResizeVolumeCmd() {}
|
||||
|
||||
public ResizeVolumeCmd(Long id, Long minIops, Long maxIops) {
|
||||
this.id = id;
|
||||
this.minIops = minIops;
|
||||
this.maxIops = maxIops;
|
||||
}
|
||||
|
||||
//TODO use the method getId() instead of this one.
|
||||
public Long getEntityId() {
|
||||
return id;
|
||||
|
|
|
|||
|
|
@ -1151,6 +1151,21 @@
|
|||
</dependency>
|
||||
</dependencies>
|
||||
</profile>
|
||||
<profile>
|
||||
<id>vmwaresioc</id>
|
||||
<activation>
|
||||
<property>
|
||||
<name>noredist</name>
|
||||
</property>
|
||||
</activation>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-api-vmware-sioc</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</profile>
|
||||
<profile>
|
||||
<id>quickcloud</id>
|
||||
<activation>
|
||||
|
|
|
|||
|
|
@ -19,15 +19,20 @@
|
|||
|
||||
package com.cloud.agent.api;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
||||
public class MigrateCommand extends Command {
|
||||
String vmName;
|
||||
String destIp;
|
||||
String hostGuid;
|
||||
boolean isWindows;
|
||||
VirtualMachineTO vmTO;
|
||||
boolean executeInSequence = false;
|
||||
private String vmName;
|
||||
private String destIp;
|
||||
private Map<String, MigrateDiskInfo> migrateStorage;
|
||||
private boolean autoConvergence;
|
||||
private String hostGuid;
|
||||
private boolean isWindows;
|
||||
private VirtualMachineTO vmTO;
|
||||
private boolean executeInSequence = false;
|
||||
|
||||
protected MigrateCommand() {
|
||||
}
|
||||
|
|
@ -40,6 +45,22 @@ public class MigrateCommand extends Command {
|
|||
this.executeInSequence = executeInSequence;
|
||||
}
|
||||
|
||||
public void setMigrateStorage(Map<String, MigrateDiskInfo> migrateStorage) {
|
||||
this.migrateStorage = migrateStorage;
|
||||
}
|
||||
|
||||
public Map<String, MigrateDiskInfo> getMigrateStorage() {
|
||||
return migrateStorage != null ? new HashMap<>(migrateStorage) : new HashMap<String, MigrateDiskInfo>();
|
||||
}
|
||||
|
||||
public void setAutoConvergence(boolean autoConvergence) {
|
||||
this.autoConvergence = autoConvergence;
|
||||
}
|
||||
|
||||
public boolean isAutoConvergence() {
|
||||
return autoConvergence;
|
||||
}
|
||||
|
||||
public boolean isWindows() {
|
||||
return isWindows;
|
||||
}
|
||||
|
|
@ -68,4 +89,67 @@ public class MigrateCommand extends Command {
|
|||
public boolean executeInSequence() {
|
||||
return executeInSequence;
|
||||
}
|
||||
|
||||
public static class MigrateDiskInfo {
|
||||
public enum DiskType {
|
||||
FILE, BLOCK;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase();
|
||||
}
|
||||
}
|
||||
|
||||
public enum DriverType {
|
||||
QCOW2, RAW;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase();
|
||||
}
|
||||
}
|
||||
|
||||
public enum Source {
|
||||
FILE, DEV;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase();
|
||||
}
|
||||
}
|
||||
|
||||
private final String serialNumber;
|
||||
private final DiskType diskType;
|
||||
private final DriverType driverType;
|
||||
private final Source source;
|
||||
private final String sourceText;
|
||||
|
||||
public MigrateDiskInfo(final String serialNumber, final DiskType diskType, final DriverType driverType, final Source source, final String sourceText) {
|
||||
this.serialNumber = serialNumber;
|
||||
this.diskType = diskType;
|
||||
this.driverType = driverType;
|
||||
this.source = source;
|
||||
this.sourceText = sourceText;
|
||||
}
|
||||
|
||||
public String getSerialNumber() {
|
||||
return serialNumber;
|
||||
}
|
||||
|
||||
public DiskType getDiskType() {
|
||||
return diskType;
|
||||
}
|
||||
|
||||
public DriverType getDriverType() {
|
||||
return driverType;
|
||||
}
|
||||
|
||||
public Source getSource() {
|
||||
return source;
|
||||
}
|
||||
|
||||
public String getSourceText() {
|
||||
return sourceText;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,5 +19,16 @@
|
|||
|
||||
package com.cloud.agent.api;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class ModifyTargetsAnswer extends Answer {
|
||||
private List<String> connectedPaths;
|
||||
|
||||
public void setConnectedPaths(List<String> connectedPaths) {
|
||||
this.connectedPaths = connectedPaths;
|
||||
}
|
||||
|
||||
public List<String> getConnectedPaths() {
|
||||
return connectedPaths;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,7 +23,11 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
public class ModifyTargetsCommand extends Command {
|
||||
public enum TargetTypeToRemove { BOTH, NEITHER, STATIC, DYNAMIC }
|
||||
|
||||
public static final String IQN = "iqn";
|
||||
public static final String STORAGE_TYPE = "storageType";
|
||||
public static final String STORAGE_UUID = "storageUuid";
|
||||
public static final String STORAGE_HOST = "storageHost";
|
||||
public static final String STORAGE_PORT = "storagePort";
|
||||
public static final String CHAP_NAME = "chapName";
|
||||
|
|
@ -32,6 +36,9 @@ public class ModifyTargetsCommand extends Command {
|
|||
public static final String MUTUAL_CHAP_SECRET = "mutualChapSecret";
|
||||
|
||||
private boolean add;
|
||||
private boolean applyToAllHostsInCluster;
|
||||
private TargetTypeToRemove targetTypeToRemove = TargetTypeToRemove.BOTH;
|
||||
private boolean removeAsync;
|
||||
private List<Map<String, String>> targets;
|
||||
|
||||
public void setAdd(boolean add) {
|
||||
|
|
@ -42,6 +49,30 @@ public class ModifyTargetsCommand extends Command {
|
|||
return add;
|
||||
}
|
||||
|
||||
public void setApplyToAllHostsInCluster(boolean applyToAllHostsInCluster) {
|
||||
this.applyToAllHostsInCluster = applyToAllHostsInCluster;
|
||||
}
|
||||
|
||||
public boolean getApplyToAllHostsInCluster() {
|
||||
return applyToAllHostsInCluster;
|
||||
}
|
||||
|
||||
public void setTargetTypeToRemove(TargetTypeToRemove targetTypeToRemove) {
|
||||
this.targetTypeToRemove = targetTypeToRemove;
|
||||
}
|
||||
|
||||
public TargetTypeToRemove getTargetTypeToRemove() {
|
||||
return targetTypeToRemove;
|
||||
}
|
||||
|
||||
public void setRemoveAsync(boolean removeAsync) {
|
||||
this.removeAsync = removeAsync;
|
||||
}
|
||||
|
||||
public boolean isRemoveAsync() {
|
||||
return removeAsync;
|
||||
}
|
||||
|
||||
public void setTargets(List<Map<String, String>> targets) {
|
||||
this.targets = targets;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,8 @@ package com.cloud.agent.api;
|
|||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
||||
public class PrepareForMigrationCommand extends Command {
|
||||
VirtualMachineTO vm;
|
||||
private VirtualMachineTO vm;
|
||||
private boolean rollback;
|
||||
|
||||
protected PrepareForMigrationCommand() {
|
||||
}
|
||||
|
|
@ -35,6 +36,14 @@ public class PrepareForMigrationCommand extends Command {
|
|||
return vm;
|
||||
}
|
||||
|
||||
public void setRollback(boolean rollback) {
|
||||
this.rollback = rollback;
|
||||
}
|
||||
|
||||
public boolean isRollback() {
|
||||
return rollback;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return true;
|
||||
|
|
|
|||
|
|
@ -24,9 +24,16 @@ import java.util.Map;
|
|||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
||||
public class StartAnswer extends Answer {
|
||||
public static final String PATH = "path";
|
||||
public static final String IMAGE_FORMAT = "imageFormat";
|
||||
|
||||
VirtualMachineTO vm;
|
||||
String hostGuid;
|
||||
Map<String, String> _iqnToPath;
|
||||
// key = an applicable IQN (ex. iqn.1998-01.com.vmware.iscsi:name1)
|
||||
// value = a Map with the following data:
|
||||
// key = PATH or IMAGE_FORMAT (defined above)
|
||||
// value = Example if PATH is key: UUID of VDI; Example if IMAGE_FORMAT is key: DiskTO.VHD
|
||||
private Map<String, Map<String, String>> _iqnToData;
|
||||
|
||||
protected StartAnswer() {
|
||||
}
|
||||
|
|
@ -61,11 +68,11 @@ public class StartAnswer extends Answer {
|
|||
return hostGuid;
|
||||
}
|
||||
|
||||
public void setIqnToPath(Map<String, String> iqnToPath) {
|
||||
_iqnToPath = iqnToPath;
|
||||
public void setIqnToData(Map<String, Map<String, String>> iqnToData) {
|
||||
_iqnToData = iqnToData;
|
||||
}
|
||||
|
||||
public Map<String, String> getIqnToPath() {
|
||||
return _iqnToPath;
|
||||
public Map<String, Map<String, String>> getIqnToData() {
|
||||
return _iqnToData;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,10 @@ package com.cloud.agent.api;
|
|||
import com.cloud.agent.api.to.GPUDeviceTO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Map;
|
||||
import java.util.List;
|
||||
|
||||
public class StopCommand extends RebootCommand {
|
||||
private boolean isProxy = false;
|
||||
private String urlPort = null;
|
||||
|
|
@ -30,6 +34,11 @@ public class StopCommand extends RebootCommand {
|
|||
boolean checkBeforeCleanup = false;
|
||||
String controlIp = null;
|
||||
boolean forceStop = false;
|
||||
/**
|
||||
* On KVM when using iSCSI-based managed storage, if the user shuts a VM down from the guest OS (as opposed to doing so from CloudStack),
|
||||
* we need to pass to the KVM agent a list of applicable iSCSI volumes that need to be disconnected.
|
||||
*/
|
||||
private List<Map<String, String>> volumesToDisconnect = new ArrayList<>();
|
||||
|
||||
protected StopCommand() {
|
||||
}
|
||||
|
|
@ -102,4 +111,12 @@ public class StopCommand extends RebootCommand {
|
|||
public boolean isForceStop() {
|
||||
return forceStop;
|
||||
}
|
||||
|
||||
public void setVolumesToDisconnect(List<Map<String, String>> volumesToDisconnect) {
|
||||
this.volumesToDisconnect = volumesToDisconnect;
|
||||
}
|
||||
|
||||
public List<Map<String, String>> getVolumesToDisconnect() {
|
||||
return volumesToDisconnect;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,18 +19,22 @@
|
|||
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.storage.StoragePool;
|
||||
|
||||
public class CopyVolumeCommand extends StorageNfsVersionCommand {
|
||||
|
||||
long volumeId;
|
||||
String volumePath;
|
||||
StorageFilerTO pool;
|
||||
String secondaryStorageURL;
|
||||
boolean toSecondaryStorage;
|
||||
String vmName;
|
||||
boolean executeInSequence = false;
|
||||
private long volumeId;
|
||||
private String volumePath;
|
||||
private StorageFilerTO pool;
|
||||
private String secondaryStorageURL;
|
||||
private boolean toSecondaryStorage;
|
||||
private String vmName;
|
||||
private DataTO srcData;
|
||||
private Map<String, String> srcDetails;
|
||||
private boolean executeInSequence;
|
||||
|
||||
public CopyVolumeCommand() {
|
||||
}
|
||||
|
|
@ -75,4 +79,19 @@ public class CopyVolumeCommand extends StorageNfsVersionCommand {
|
|||
return vmName;
|
||||
}
|
||||
|
||||
public void setSrcData(DataTO srcData) {
|
||||
this.srcData = srcData;
|
||||
}
|
||||
|
||||
public DataTO getSrcData() {
|
||||
return srcData;
|
||||
}
|
||||
|
||||
public void setSrcDetails(Map<String, String> srcDetails) {
|
||||
this.srcDetails = srcDetails;
|
||||
}
|
||||
|
||||
public Map<String, String> getSrcDetails() {
|
||||
return srcDetails;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,19 +19,26 @@
|
|||
|
||||
package com.cloud.agent.api.storage;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.Volume;
|
||||
|
||||
public class MigrateVolumeCommand extends Command {
|
||||
|
||||
long volumeId;
|
||||
String volumePath;
|
||||
StorageFilerTO pool;
|
||||
String attachedVmName;
|
||||
Volume.Type volumeType;
|
||||
|
||||
private DataTO srcData;
|
||||
private DataTO destData;
|
||||
private Map<String, String> srcDetails;
|
||||
private Map<String, String> destDetails;
|
||||
|
||||
public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool, int timeout) {
|
||||
this.volumeId = volumeId;
|
||||
this.volumePath = volumePath;
|
||||
|
|
@ -48,6 +55,15 @@ public class MigrateVolumeCommand extends Command {
|
|||
this.setWait(timeout);
|
||||
}
|
||||
|
||||
public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map<String, String> srcDetails, Map<String, String> destDetails, int timeout) {
|
||||
this.srcData = srcData;
|
||||
this.destData = destData;
|
||||
this.srcDetails = srcDetails;
|
||||
this.destDetails = destDetails;
|
||||
|
||||
setWait(timeout);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return true;
|
||||
|
|
@ -72,4 +88,24 @@ public class MigrateVolumeCommand extends Command {
|
|||
public Volume.Type getVolumeType() {
|
||||
return volumeType;
|
||||
}
|
||||
|
||||
public DataTO getSrcData() {
|
||||
return srcData;
|
||||
}
|
||||
|
||||
public DataTO getDestData() {
|
||||
return destData;
|
||||
}
|
||||
|
||||
public Map<String, String> getSrcDetails() {
|
||||
return srcDetails;
|
||||
}
|
||||
|
||||
public Map<String, String> getDestDetails() {
|
||||
return destDetails;
|
||||
}
|
||||
|
||||
public int getWaitInMillSeconds() {
|
||||
return getWait() * 1000;
|
||||
}
|
||||
}
|
||||
|
|
@ -205,6 +205,7 @@ public class TemplateLocation {
|
|||
}
|
||||
|
||||
_props.setProperty("virtualsize", Long.toString(newInfo.virtualSize));
|
||||
_props.setProperty("size", Long.toString(newInfo.size));
|
||||
_formats.add(newInfo);
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,8 +29,8 @@ public class CopyCommand extends StorageSubSystemCommand {
|
|||
private DataTO destTO;
|
||||
private DataTO cacheTO;
|
||||
private boolean executeInSequence = false;
|
||||
private Map<String, String> options = new HashMap<String, String>();
|
||||
private Map<String, String> options2 = new HashMap<String, String>();
|
||||
private Map<String, String> options = new HashMap<>();
|
||||
private Map<String, String> options2 = new HashMap<>();
|
||||
|
||||
public CopyCommand(final DataTO srcData, final DataTO destData, final int timeout, final boolean executeInSequence) {
|
||||
super();
|
||||
|
|
|
|||
|
|
@ -36,5 +36,9 @@ public enum DataStoreCapabilities {
|
|||
/**
|
||||
* indicates that this driver supports the "cloneOfSnapshot" property of cloud.snapshot_details (for creating a volume from a volume)
|
||||
*/
|
||||
CAN_CREATE_VOLUME_FROM_VOLUME
|
||||
CAN_CREATE_VOLUME_FROM_VOLUME,
|
||||
/**
|
||||
* indicates that this driver supports reverting a volume to a snapshot state
|
||||
*/
|
||||
CAN_REVERT_VOLUME_TO_SNAPSHOT
|
||||
}
|
||||
|
|
|
|||
|
|
@ -44,14 +44,54 @@ import com.cloud.vm.DiskProfile;
|
|||
import com.cloud.vm.VMInstanceVO;
|
||||
|
||||
public interface StorageManager extends StorageService {
|
||||
static final ConfigKey<Integer> StorageCleanupInterval = new ConfigKey<Integer>(Integer.class, "storage.cleanup.interval", "Advanced", "86400",
|
||||
"The interval (in seconds) to wait before running the storage cleanup thread.", false, ConfigKey.Scope.Global, null);
|
||||
static final ConfigKey<Integer> StorageCleanupDelay = new ConfigKey<Integer>(Integer.class, "storage.cleanup.delay", "Advanced", "86400",
|
||||
"Determines how long (in seconds) to wait before actually expunging destroyed volumes. The default value = the default value of storage.cleanup.interval.", false, ConfigKey.Scope.Global, null);
|
||||
static final ConfigKey<Boolean> StorageCleanupEnabled = new ConfigKey<Boolean>(Boolean.class, "storage.cleanup.enabled", "Advanced", "true",
|
||||
"Enables/disables the storage cleanup thread.", false, ConfigKey.Scope.Global, null);
|
||||
static final ConfigKey<Boolean> TemplateCleanupEnabled = new ConfigKey<Boolean>(Boolean.class, "storage.template.cleanup.enabled", "Storage", "true",
|
||||
"Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled", false, ConfigKey.Scope.Global, null);
|
||||
ConfigKey<Integer> StorageCleanupInterval = new ConfigKey<>(Integer.class,
|
||||
"storage.cleanup.interval",
|
||||
"Advanced",
|
||||
"86400",
|
||||
"The interval (in seconds) to wait before running the storage cleanup thread.",
|
||||
false,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
ConfigKey<Integer> StorageCleanupDelay = new ConfigKey<>(Integer.class,
|
||||
"storage.cleanup.delay",
|
||||
"Advanced",
|
||||
"86400",
|
||||
"Determines how long (in seconds) to wait before actually expunging destroyed volumes. The default value = the default value of storage.cleanup.interval.",
|
||||
false,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
ConfigKey<Boolean> StorageCleanupEnabled = new ConfigKey<>(Boolean.class,
|
||||
"storage.cleanup.enabled",
|
||||
"Advanced",
|
||||
"true",
|
||||
"Enables/disables the storage cleanup thread.",
|
||||
false,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
ConfigKey<Boolean> TemplateCleanupEnabled = new ConfigKey<>(Boolean.class,
|
||||
"storage.template.cleanup.enabled",
|
||||
"Storage",
|
||||
"true",
|
||||
"Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled",
|
||||
false,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
ConfigKey<Integer> KvmStorageOfflineMigrationWait = new ConfigKey<>(Integer.class,
|
||||
"kvm.storage.offline.migration.wait",
|
||||
"Storage",
|
||||
"10800",
|
||||
"Timeout in seconds for offline (non-live) storage migration to complete on KVM",
|
||||
true,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
ConfigKey<Integer> KvmStorageOnlineMigrationWait = new ConfigKey<>(Integer.class,
|
||||
"kvm.storage.online.migration.wait",
|
||||
"Storage",
|
||||
"10800",
|
||||
"Timeout in seconds for online (live) storage migration to complete on KVM (migrateVirtualMachineWithVolume)",
|
||||
true,
|
||||
ConfigKey.Scope.Global,
|
||||
null);
|
||||
|
||||
/**
|
||||
* Returns a comma separated list of tags for the specified storage pool
|
||||
|
|
@ -102,6 +142,8 @@ public interface StorageManager extends StorageService {
|
|||
|
||||
Host updateSecondaryStorage(long secStorageId, String newUrl);
|
||||
|
||||
void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool);
|
||||
|
||||
List<Long> getUpHostsInPool(long poolId);
|
||||
|
||||
void cleanupSecondaryStorage(boolean recurring);
|
||||
|
|
|
|||
|
|
@ -58,6 +58,11 @@
|
|||
<artifactId>cloud-utils</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-server</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@ import java.util.concurrent.TimeUnit;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
|
|
@ -85,6 +86,7 @@ import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer;
|
|||
import com.cloud.agent.api.ClusterVMMetaDataSyncCommand;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.MigrateCommand;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.PingRoutingCommand;
|
||||
import com.cloud.agent.api.PlugNicAnswer;
|
||||
import com.cloud.agent.api.PlugNicCommand;
|
||||
|
|
@ -114,6 +116,7 @@ import com.cloud.agent.manager.Commands;
|
|||
import com.cloud.agent.manager.allocator.HostAllocator;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.ClusterDetailsVO;
|
||||
import com.cloud.dc.DataCenter;
|
||||
|
|
@ -536,6 +539,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
|
||||
final Long hostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId();
|
||||
|
||||
List<Map<String, String>> targets = getTargets(hostId, vm.getId());
|
||||
|
||||
if (volumeExpungeCommands != null && volumeExpungeCommands.size() > 0 && hostId != null) {
|
||||
final Commands cmds = new Commands(Command.OnError.Stop);
|
||||
|
||||
|
|
@ -563,6 +568,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
// Clean up volumes based on the vm's instance id
|
||||
volumeMgr.cleanupVolumes(vm.getId());
|
||||
|
||||
if (hostId != null && CollectionUtils.isNotEmpty(targets)) {
|
||||
removeDynamicTargets(hostId, targets);
|
||||
}
|
||||
|
||||
final VirtualMachineGuru guru = getVmGuru(vm);
|
||||
guru.finalizeExpunge(vm);
|
||||
//remove the overcommit detials from the uservm details
|
||||
|
|
@ -599,6 +608,64 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
|
||||
}
|
||||
|
||||
private List<Map<String, String>> getTargets(Long hostId, long vmId) {
|
||||
List<Map<String, String>> targets = new ArrayList<>();
|
||||
|
||||
HostVO hostVO = _hostDao.findById(hostId);
|
||||
|
||||
if (hostVO == null || hostVO.getHypervisorType() != HypervisorType.VMware) {
|
||||
return targets;
|
||||
}
|
||||
|
||||
List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
|
||||
|
||||
if (CollectionUtils.isEmpty(volumes)) {
|
||||
return targets;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : volumes) {
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
|
||||
|
||||
if (storagePoolVO != null && storagePoolVO.isManaged()) {
|
||||
Map<String, String> target = new HashMap<>();
|
||||
|
||||
target.put(ModifyTargetsCommand.STORAGE_HOST, storagePoolVO.getHostAddress());
|
||||
target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
|
||||
target.put(ModifyTargetsCommand.IQN, volume.get_iScsiName());
|
||||
|
||||
targets.add(target);
|
||||
}
|
||||
}
|
||||
|
||||
return targets;
|
||||
}
|
||||
|
||||
private void removeDynamicTargets(long hostId, List<Map<String, String>> targets) {
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(false);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
String msg = "Unable to get an answer to the modify targets command";
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
else if (!answer.getResult()) {
|
||||
String msg = "Unable to modify target on the following host: " + hostId;
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean start() {
|
||||
// TODO, initial delay is hardcoded
|
||||
|
|
@ -1073,8 +1140,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
|
||||
startAnswer = cmds.getAnswer(StartAnswer.class);
|
||||
if (startAnswer != null && startAnswer.getResult()) {
|
||||
handlePath(vmTO.getDisks(), startAnswer.getIqnToPath());
|
||||
handlePath(vmTO.getDisks(), startAnswer.getIqnToData());
|
||||
|
||||
final String host_guid = startAnswer.getHost_guid();
|
||||
|
||||
if (host_guid != null) {
|
||||
final HostVO finalHost = _resourceMgr.findHostByGuid(host_guid);
|
||||
if (finalHost == null) {
|
||||
|
|
@ -1254,8 +1323,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
}
|
||||
|
||||
// for managed storage on XenServer and VMware, need to update the DB with a path if the VDI/VMDK file was newly created
|
||||
private void handlePath(final DiskTO[] disks, final Map<String, String> iqnToPath) {
|
||||
if (disks != null && iqnToPath != null) {
|
||||
private void handlePath(final DiskTO[] disks, final Map<String, Map<String, String>> iqnToData) {
|
||||
if (disks != null && iqnToData != null) {
|
||||
for (final DiskTO disk : disks) {
|
||||
final Map<String, String> details = disk.getDetails();
|
||||
final boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
|
||||
|
|
@ -1264,12 +1333,31 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
final Long volumeId = disk.getData().getId();
|
||||
final VolumeVO volume = _volsDao.findById(volumeId);
|
||||
final String iScsiName = volume.get_iScsiName();
|
||||
final String path = iqnToPath.get(iScsiName);
|
||||
|
||||
if (path != null) {
|
||||
volume.setPath(path);
|
||||
boolean update = false;
|
||||
|
||||
_volsDao.update(volumeId, volume);
|
||||
final Map<String, String> data = iqnToData.get(iScsiName);
|
||||
|
||||
if (data != null) {
|
||||
final String path = data.get(StartAnswer.PATH);
|
||||
|
||||
if (path != null) {
|
||||
volume.setPath(path);
|
||||
|
||||
update = true;
|
||||
}
|
||||
|
||||
final String imageFormat = data.get(StartAnswer.IMAGE_FORMAT);
|
||||
|
||||
if (imageFormat != null) {
|
||||
volume.setFormat(ImageFormat.valueOf(imageFormat));
|
||||
|
||||
update = true;
|
||||
}
|
||||
|
||||
if (update) {
|
||||
_volsDao.update(volumeId, volume);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1331,10 +1419,37 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
}
|
||||
}
|
||||
|
||||
private List<Map<String, String>> getVolumesToDisconnect(VirtualMachine vm) {
|
||||
List<Map<String, String>> volumesToDisconnect = new ArrayList<>();
|
||||
|
||||
List<VolumeVO> volumes = _volsDao.findByInstance(vm.getId());
|
||||
|
||||
if (CollectionUtils.isEmpty(volumes)) {
|
||||
return volumesToDisconnect;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : volumes) {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
|
||||
|
||||
if (storagePool != null && storagePool.isManaged()) {
|
||||
Map<String, String> info = new HashMap<>(3);
|
||||
|
||||
info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress());
|
||||
info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort()));
|
||||
info.put(DiskTO.IQN, volume.get_iScsiName());
|
||||
|
||||
volumesToDisconnect.add(info);
|
||||
}
|
||||
}
|
||||
|
||||
return volumesToDisconnect;
|
||||
}
|
||||
|
||||
protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final boolean force, final boolean checkBeforeCleanup) {
|
||||
final VirtualMachine vm = profile.getVirtualMachine();
|
||||
StopCommand stpCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
|
||||
stpCmd.setControlIp(getControlNicIpForVM(vm));
|
||||
stpCmd.setVolumesToDisconnect(getVolumesToDisconnect(vm));
|
||||
final StopCommand stop = stpCmd;
|
||||
try {
|
||||
Answer answer = null;
|
||||
|
|
@ -2103,6 +2218,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
try {
|
||||
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
|
||||
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
|
||||
|
||||
String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
|
||||
boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
|
||||
|
||||
mc.setAutoConvergence(kvmAutoConvergence);
|
||||
|
||||
mc.setHostGuid(dest.getHost().getGuid());
|
||||
|
||||
try {
|
||||
|
|
@ -2176,32 +2297,48 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
|
||||
|
||||
for (final VolumeVO volume : allVolumes) {
|
||||
final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
|
||||
final Long poolId = volumeToPool.get(volume.getId());
|
||||
final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
|
||||
final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
|
||||
final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
|
||||
|
||||
if (destPool != null) {
|
||||
// Check if pool is accessible from the destination host and disk offering with which the volume was
|
||||
// created is compliant with the pool type.
|
||||
if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
|
||||
// Cannot find a pool for the volume. Throw an exception.
|
||||
throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
|
||||
". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
|
||||
"the given pool.");
|
||||
} else if (destPool.getId() == currentPool.getId()) {
|
||||
// If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
|
||||
} else {
|
||||
volumeToPoolObjectMap.put(volume, destPool);
|
||||
if (currentPool.isManaged()) {
|
||||
if (destPool.getId() == currentPool.getId()) {
|
||||
volumeToPoolObjectMap.put(volume, currentPool);
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Currently, a volume on managed storage can only be 'migrated' to itself.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Check if pool is accessible from the destination host and disk offering with which the volume was
|
||||
// created is compliant with the pool type.
|
||||
if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
|
||||
// Cannot find a pool for the volume. Throw an exception.
|
||||
throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
|
||||
". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
|
||||
"the given pool.");
|
||||
} else if (destPool.getId() == currentPool.getId()) {
|
||||
// If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
|
||||
} else {
|
||||
volumeToPoolObjectMap.put(volume, destPool);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (currentPool.isManaged()) {
|
||||
volumeToPoolObjectMap.put(volume, currentPool);
|
||||
if (currentPool.getScope() == ScopeType.ZONE) {
|
||||
volumeToPoolObjectMap.put(volume, currentPool);
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Currently, you can only 'migrate' a volume on managed storage if its storage pool is zone wide.");
|
||||
}
|
||||
} else {
|
||||
// Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
|
||||
|
||||
final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
|
||||
final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
|
||||
final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(),
|
||||
host.getId(), null, null);
|
||||
|
||||
final List<StoragePool> poolList = new ArrayList<>();
|
||||
final ExcludeList avoid = new ExcludeList();
|
||||
|
|
@ -3588,6 +3725,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
try {
|
||||
final boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
|
||||
final MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to, getExecuteInSequence(vm.getHypervisorType()));
|
||||
|
||||
String autoConvergence = _configDao.getValue(Config.KvmAutoConvergence.toString());
|
||||
boolean kvmAutoConvergence = Boolean.parseBoolean(autoConvergence);
|
||||
|
||||
mc.setAutoConvergence(kvmAutoConvergence);
|
||||
|
||||
mc.setHostGuid(dest.getHost().getGuid());
|
||||
|
||||
try {
|
||||
|
|
|
|||
|
|
@ -610,16 +610,23 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
@Override
|
||||
public boolean volumeInactive(Volume volume) {
|
||||
Long vmId = volume.getInstanceId();
|
||||
if (vmId != null) {
|
||||
UserVm vm = _entityMgr.findById(UserVm.class, vmId);
|
||||
if (vm == null) {
|
||||
return true;
|
||||
}
|
||||
State state = vm.getState();
|
||||
if (state.equals(State.Stopped) || state.equals(State.Destroyed)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (vmId == null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
UserVm vm = _entityMgr.findById(UserVm.class, vmId);
|
||||
|
||||
if (vm == null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
State state = vm.getState();
|
||||
|
||||
if (state.equals(State.Stopped) || state.equals(State.Destroyed)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -1274,8 +1281,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
|
||||
Long templateId = newVol.getTemplateId();
|
||||
for (int i = 0; i < 2; i++) {
|
||||
// retry one more time in case of template reload is required for Vmware case
|
||||
AsyncCallFuture<VolumeApiResult> future = null;
|
||||
// retry one more time in case of template reload is required for VMware case
|
||||
AsyncCallFuture<VolumeApiResult> future;
|
||||
|
||||
if (templateId == null) {
|
||||
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
|
||||
HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
|
||||
|
|
@ -1368,23 +1376,34 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
|
||||
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
|
||||
Volume vol = null;
|
||||
StoragePool pool = null;
|
||||
StoragePool pool;
|
||||
for (VolumeTask task : tasks) {
|
||||
if (task.type == VolumeTaskType.NOP) {
|
||||
pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
|
||||
|
||||
if (task.pool != null && task.pool.isManaged()) {
|
||||
long hostId = vm.getVirtualMachine().getHostId();
|
||||
Host host = _hostDao.findById(hostId);
|
||||
|
||||
volService.grantAccess(volFactory.getVolume(task.volume.getId()), host, (DataStore)pool);
|
||||
}
|
||||
|
||||
vol = task.volume;
|
||||
|
||||
// For a zone-wide managed storage, it is possible that the VM can be started in another
|
||||
// cluster. In that case make sure that the volume in in the right access group cluster.
|
||||
// cluster. In that case, make sure that the volume is in the right access group.
|
||||
if (pool.isManaged()) {
|
||||
long oldHostId = vm.getVirtualMachine().getLastHostId();
|
||||
long hostId = vm.getVirtualMachine().getHostId();
|
||||
|
||||
if (oldHostId != hostId) {
|
||||
Host oldHost = _hostDao.findById(oldHostId);
|
||||
Host host = _hostDao.findById(hostId);
|
||||
DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
|
||||
storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool);
|
||||
|
||||
volService.revokeAccess(volFactory.getVolume(vol.getId()), oldHost, storagePool);
|
||||
volService.grantAccess(volFactory.getVolume(vol.getId()), host, storagePool);
|
||||
}
|
||||
}
|
||||
} else if (task.type == VolumeTaskType.MIGRATE) {
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -17,12 +17,15 @@
|
|||
package org.apache.cloudstack.storage.snapshot;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.event.ActionEvent;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.event.UsageEventUtils;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
|
|
@ -46,10 +49,16 @@ import com.cloud.storage.VolumeDetailVO;
|
|||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import com.cloud.vm.snapshot.VMSnapshot;
|
||||
import com.cloud.vm.snapshot.VMSnapshotService;
|
||||
import com.cloud.vm.snapshot.VMSnapshotVO;
|
||||
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
|
||||
import com.google.common.base.Optional;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
|
||||
|
|
@ -64,17 +73,21 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
|
|||
import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer;
|
||||
import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.UUID;
|
||||
|
||||
@Component
|
||||
public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
||||
|
|
@ -89,14 +102,18 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
@Inject private SnapshotDao snapshotDao;
|
||||
@Inject private SnapshotDataFactory snapshotDataFactory;
|
||||
@Inject private SnapshotDetailsDao snapshotDetailsDao;
|
||||
@Inject SnapshotDataStoreDao snapshotStoreDao;
|
||||
@Inject private VolumeDetailsDao volumeDetailsDao;
|
||||
@Inject private VMInstanceDao vmInstanceDao;
|
||||
@Inject private VMSnapshotDao vmSnapshotDao;
|
||||
@Inject private VMSnapshotService vmSnapshotService;
|
||||
@Inject private VolumeDao volumeDao;
|
||||
@Inject private VolumeService volService;
|
||||
@Inject private VolumeDetailsDao _volumeDetailsDaoImpl;
|
||||
|
||||
@Override
|
||||
public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) {
|
||||
Preconditions.checkArgument(snapshotInfo != null, "backupSnapshot expects a valid snapshot");
|
||||
Preconditions.checkArgument(snapshotInfo != null, "'snapshotInfo' cannot be 'null'.");
|
||||
|
||||
if (snapshotInfo.getLocationType() != Snapshot.LocationType.SECONDARY) {
|
||||
markAsBackedUp((SnapshotObject)snapshotInfo);
|
||||
|
|
@ -107,14 +124,24 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
// At this point, the snapshot is either taken as a native
|
||||
// snapshot on the storage or exists as a volume on the storage (clone).
|
||||
// If archive flag is passed in, we should copy this snapshot to secondary
|
||||
// storage and delete it from the primary storage.
|
||||
// storage and delete it from primary storage.
|
||||
|
||||
HostVO host = getHost(snapshotInfo.getVolumeId());
|
||||
|
||||
boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(snapshotInfo.getBaseVolume().getPoolId());
|
||||
|
||||
if (!canStorageSystemCreateVolumeFromSnapshot) {
|
||||
String msg = "Cannot archive snapshot: 'canStorageSystemCreateVolumeFromSnapshot' was false.";
|
||||
|
||||
s_logger.warn(msg);
|
||||
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(host.getClusterId());
|
||||
|
||||
if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
|
||||
String msg = "Cannot archive snapshot: canStorageSystemCreateVolumeFromSnapshot and/or computeClusterSupportsResign were false.";
|
||||
if (!computeClusterSupportsResign) {
|
||||
String msg = "Cannot archive snapshot: 'computeClusterSupportsResign' was false.";
|
||||
|
||||
s_logger.warn(msg);
|
||||
|
||||
|
|
@ -126,6 +153,8 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
|
||||
@Override
|
||||
public boolean deleteSnapshot(Long snapshotId) {
|
||||
Preconditions.checkArgument(snapshotId != null, "'snapshotId' cannot be 'null'.");
|
||||
|
||||
SnapshotVO snapshotVO = snapshotDao.findById(snapshotId);
|
||||
|
||||
if (Snapshot.State.Destroyed.equals(snapshotVO.getState())) {
|
||||
|
|
@ -139,23 +168,21 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
|
||||
if (!Snapshot.State.BackedUp.equals(snapshotVO.getState())) {
|
||||
throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the following state: " + snapshotVO.getState());
|
||||
throw new InvalidParameterValueException("Unable to delete snapshot '" + snapshotId +
|
||||
"' because it is in the following state: " + snapshotVO.getState());
|
||||
}
|
||||
|
||||
return cleanupSnapshotOnPrimaryStore(snapshotId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleans up a snapshot which was taken on a primary store. This function
|
||||
* removes
|
||||
* This cleans up a snapshot which was taken on a primary store.
|
||||
*
|
||||
* @param snapshotId: ID of snapshot that needs to be removed
|
||||
* @return true if snapshot is removed, false otherwise
|
||||
* @param snapshotId: ID of snapshot to be removed
|
||||
* @return true if snapshot is removed; else, false
|
||||
*/
|
||||
|
||||
@ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_OFF_PRIMARY, eventDescription = "deleting snapshot", async = true)
|
||||
private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) {
|
||||
|
||||
SnapshotObject snapshotObj = (SnapshotObject)snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Primary);
|
||||
|
||||
if (snapshotObj == null) {
|
||||
|
|
@ -167,13 +194,13 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
|
||||
if (ObjectInDataStoreStateMachine.State.Copying.equals(snapshotObj.getStatus())) {
|
||||
throw new InvalidParameterValueException("Unable to delete snapshotshot " + snapshotId + " because it is in the copying state.");
|
||||
throw new InvalidParameterValueException("Unable to delete snapshot '" + snapshotId + "' because it is in the copying state");
|
||||
}
|
||||
|
||||
try {
|
||||
snapshotObj.processEvent(Snapshot.Event.DestroyRequested);
|
||||
List<VolumeDetailVO> volumesFromSnapshot;
|
||||
volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null);
|
||||
|
||||
List<VolumeDetailVO> volumesFromSnapshot = _volumeDetailsDaoImpl.findDetails("SNAPSHOT_ID", String.valueOf(snapshotId), null);
|
||||
|
||||
if (volumesFromSnapshot.size() > 0) {
|
||||
try {
|
||||
|
|
@ -181,6 +208,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
} catch (NoTransitionException e1) {
|
||||
s_logger.debug("Failed to change snapshot state: " + e1.toString());
|
||||
}
|
||||
|
||||
throw new InvalidParameterValueException("Unable to perform delete operation, Snapshot with id: " + snapshotId + " is in use ");
|
||||
}
|
||||
}
|
||||
|
|
@ -194,6 +222,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
snapshotSvr.deleteSnapshot(snapshotObj);
|
||||
|
||||
snapshotObj.processEvent(Snapshot.Event.OperationSucceeded);
|
||||
|
||||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_OFF_PRIMARY, snapshotObj.getAccountId(), snapshotObj.getDataCenterId(), snapshotId,
|
||||
snapshotObj.getName(), null, null, 0L, snapshotObj.getClass().getName(), snapshotObj.getUuid());
|
||||
}
|
||||
|
|
@ -209,12 +238,202 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean isAcceptableRevertFormat(VolumeVO volumeVO) {
|
||||
return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) || ImageFormat.QCOW2.equals(volumeVO.getFormat());
|
||||
}
|
||||
|
||||
private void verifyFormat(VolumeInfo volumeInfo) {
|
||||
ImageFormat imageFormat = volumeInfo.getFormat();
|
||||
|
||||
if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) {
|
||||
throw new CloudRuntimeException("Only the following image types are currently supported: " +
|
||||
ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", and " + ImageFormat.QCOW2);
|
||||
}
|
||||
}
|
||||
|
||||
private void verifyDiskTypeAndHypervisor(VolumeInfo volumeInfo) {
|
||||
ImageFormat imageFormat = volumeInfo.getFormat();
|
||||
Volume.Type volumeType = volumeInfo.getVolumeType();
|
||||
|
||||
if (ImageFormat.OVA.equals(imageFormat) && Volume.Type.ROOT.equals(volumeType)) {
|
||||
throw new CloudRuntimeException("The hypervisor type is VMware and the disk type is ROOT. For this situation, " +
|
||||
"recover the data on the snapshot by creating a new CloudStack volume from the corresponding volume snapshot.");
|
||||
}
|
||||
}
|
||||
|
||||
private void verifySnapshotType(SnapshotInfo snapshotInfo) {
|
||||
if (snapshotInfo.getHypervisorType() == HypervisorType.KVM && snapshotInfo.getDataStore().getRole() != DataStoreRole.Primary) {
|
||||
throw new CloudRuntimeException("For the KVM hypervisor type, you can only revert a volume to a snapshot state if the snapshot " +
|
||||
"resides on primary storage. For other snapshot types, create a volume from the snapshot to recover its data.");
|
||||
}
|
||||
}
|
||||
|
||||
private void verifyLocationType(SnapshotInfo snapshotInfo) {
|
||||
VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
|
||||
if (snapshotInfo.getLocationType() == Snapshot.LocationType.SECONDARY && volumeInfo.getFormat() != ImageFormat.VHD) {
|
||||
throw new CloudRuntimeException("Only the '" + ImageFormat.VHD + "' image type can be used when 'LocationType' is set to 'SECONDARY'.");
|
||||
}
|
||||
}
|
||||
|
||||
private boolean getHypervisorRequiresResignature(VolumeInfo volumeInfo) {
|
||||
return ImageFormat.VHD.equals(volumeInfo.getFormat()) || ImageFormat.OVA.equals(volumeInfo.getFormat());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean revertSnapshot(SnapshotInfo snapshot) {
|
||||
throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead.");
|
||||
public boolean revertSnapshot(SnapshotInfo snapshotInfo) {
|
||||
VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
|
||||
verifyFormat(volumeInfo);
|
||||
|
||||
verifyDiskTypeAndHypervisor(volumeInfo);
|
||||
|
||||
verifySnapshotType(snapshotInfo);
|
||||
|
||||
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshotInfo.getId(), DataStoreRole.Primary);
|
||||
|
||||
if (snapshotStore != null) {
|
||||
long snapshotStoragePoolId = snapshotStore.getDataStoreId();
|
||||
|
||||
if (!volumeInfo.getPoolId().equals(snapshotStoragePoolId)) {
|
||||
String errMsg = "Storage pool mismatch";
|
||||
|
||||
s_logger.error(errMsg);
|
||||
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
|
||||
boolean storageSystemSupportsCapability = storageSystemSupportsCapability(volumeInfo.getPoolId(),
|
||||
DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString());
|
||||
|
||||
if (!storageSystemSupportsCapability) {
|
||||
String errMsg = "Storage pool revert capability not supported";
|
||||
|
||||
s_logger.error(errMsg);
|
||||
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId());
|
||||
|
||||
if (snapshotVO == null) {
|
||||
String errMsg = "Failed to acquire lock on the following snapshot: " + snapshotInfo.getId();
|
||||
|
||||
s_logger.error(errMsg);
|
||||
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
Long hostId = null;
|
||||
boolean success = false;
|
||||
|
||||
try {
|
||||
volumeInfo.stateTransit(Volume.Event.RevertSnapshotRequested);
|
||||
|
||||
if (getHypervisorRequiresResignature(volumeInfo)) {
|
||||
hostId = getHostId(volumeInfo);
|
||||
|
||||
if (hostId != null) {
|
||||
HostVO hostVO = hostDao.findById(hostId);
|
||||
DataStore dataStore = dataStoreMgr.getDataStore(volumeInfo.getPoolId(), DataStoreRole.Primary);
|
||||
|
||||
volService.revokeAccess(volumeInfo, hostVO, dataStore);
|
||||
|
||||
modifyTarget(false, volumeInfo, hostId);
|
||||
}
|
||||
}
|
||||
|
||||
success = snapshotSvr.revertSnapshot(snapshotInfo);
|
||||
|
||||
if (!success) {
|
||||
String errMsg = "Failed to revert a volume to a snapshot state";
|
||||
|
||||
s_logger.error(errMsg);
|
||||
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
if (getHypervisorRequiresResignature(volumeInfo)) {
|
||||
if (hostId != null) {
|
||||
HostVO hostVO = hostDao.findById(hostId);
|
||||
DataStore dataStore = dataStoreMgr.getDataStore(volumeInfo.getPoolId(), DataStoreRole.Primary);
|
||||
|
||||
volService.grantAccess(volumeInfo, hostVO, dataStore);
|
||||
|
||||
modifyTarget(true, volumeInfo, hostId);
|
||||
}
|
||||
}
|
||||
|
||||
if (success) {
|
||||
volumeInfo.stateTransit(Volume.Event.OperationSucceeded);
|
||||
}
|
||||
else {
|
||||
volumeInfo.stateTransit(Volume.Event.OperationFailed);
|
||||
}
|
||||
|
||||
snapshotDao.releaseFromLockTable(snapshotInfo.getId());
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private Long getHostId(VolumeInfo volumeInfo) {
|
||||
VirtualMachine virtualMachine = volumeInfo.getAttachedVM();
|
||||
|
||||
if (virtualMachine == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Long hostId = virtualMachine.getHostId();
|
||||
|
||||
if (hostId == null) {
|
||||
hostId = virtualMachine.getLastHostId();
|
||||
}
|
||||
|
||||
return hostId;
|
||||
}
|
||||
|
||||
private void modifyTarget(boolean add, VolumeInfo volumeInfo, long hostId) {
|
||||
StoragePoolVO storagePoolVO = storagePoolDao.findById(volumeInfo.getPoolId());
|
||||
|
||||
Map<String, String> details = new HashMap<>(3);
|
||||
|
||||
details.put(ModifyTargetsCommand.IQN, volumeInfo.get_iScsiName());
|
||||
details.put(ModifyTargetsCommand.STORAGE_HOST, storagePoolVO.getHostAddress());
|
||||
details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));
|
||||
|
||||
List<Map<String, String>> targets = new ArrayList<>(1);
|
||||
|
||||
targets.add(details);
|
||||
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(add);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.BOTH);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
throw new CloudRuntimeException("Unable to get an answer to the modify targets command");
|
||||
}
|
||||
|
||||
if (!answer.getResult()) {
|
||||
String msg = "Unable to modify targets on the following host: " + hostId;
|
||||
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -222,8 +441,23 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) {
|
||||
VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
|
||||
if (volumeInfo.getFormat() != ImageFormat.VHD) {
|
||||
throw new CloudRuntimeException("Only the " + ImageFormat.VHD.toString() + " image type is currently supported.");
|
||||
verifyFormat(volumeInfo);
|
||||
verifyLocationType(snapshotInfo);
|
||||
|
||||
final boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId());
|
||||
final boolean computeClusterSupportsVolumeClone;
|
||||
|
||||
// only XenServer, VMware and KVM are currently supported
|
||||
if (volumeInfo.getFormat() == ImageFormat.VHD) {
|
||||
HostVO hostVO = getHost(volumeInfo.getId());
|
||||
|
||||
computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId());
|
||||
}
|
||||
else if (volumeInfo.getFormat() == ImageFormat.OVA || volumeInfo.getFormat() == ImageFormat.QCOW2) {
|
||||
computeClusterSupportsVolumeClone = true;
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Unsupported format");
|
||||
}
|
||||
|
||||
SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshotInfo.getId());
|
||||
|
|
@ -232,22 +466,33 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
throw new CloudRuntimeException("Failed to acquire lock on the following snapshot: " + snapshotInfo.getId());
|
||||
}
|
||||
|
||||
VMSnapshot vmSnapshot = null;
|
||||
|
||||
if (ImageFormat.OVA.equals(volumeInfo.getFormat())) {
|
||||
setVmdk(snapshotInfo, volumeInfo);
|
||||
|
||||
try {
|
||||
vmSnapshot = takeHypervisorSnapshot(volumeInfo);
|
||||
}
|
||||
catch (ResourceAllocationException ex) {
|
||||
String errMsg = "Unable to allocate VM snapshot";
|
||||
|
||||
s_logger.error(errMsg, ex);
|
||||
|
||||
throw new CloudRuntimeException(errMsg, ex);
|
||||
}
|
||||
}
|
||||
|
||||
SnapshotResult result = null;
|
||||
SnapshotInfo snapshotOnPrimary = null;
|
||||
|
||||
try {
|
||||
volumeInfo.stateTransit(Volume.Event.SnapshotRequested);
|
||||
|
||||
// only XenServer is currently supported
|
||||
HostVO hostVO = getHost(volumeInfo.getId());
|
||||
|
||||
boolean canStorageSystemCreateVolumeFromSnapshot = canStorageSystemCreateVolumeFromSnapshot(volumeInfo.getPoolId());
|
||||
boolean computeClusterSupportsResign = clusterDao.getSupportsResigning(hostVO.getClusterId());
|
||||
|
||||
// if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign, then take a back-end snapshot or create a back-end clone;
|
||||
// if canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsVolumeClone, then take a back-end snapshot or create a back-end clone;
|
||||
// else, just create a new back-end volume (eventually used to create a new SR on and to copy a VDI to)
|
||||
|
||||
if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsResign) {
|
||||
if (canStorageSystemCreateVolumeFromSnapshot && computeClusterSupportsVolumeClone) {
|
||||
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(),
|
||||
"takeSnapshot",
|
||||
Boolean.TRUE.toString(),
|
||||
|
|
@ -264,7 +509,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
throw new CloudRuntimeException(result.getResult());
|
||||
}
|
||||
|
||||
if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsResign) {
|
||||
if (!canStorageSystemCreateVolumeFromSnapshot || !computeClusterSupportsVolumeClone) {
|
||||
performSnapshotAndCopyOnHostSide(volumeInfo, snapshotInfo);
|
||||
}
|
||||
|
||||
|
|
@ -276,6 +521,12 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
} else {
|
||||
volumeInfo.stateTransit(Volume.Event.OperationFailed);
|
||||
}
|
||||
|
||||
if (ImageFormat.OVA.equals(volumeInfo.getFormat())) {
|
||||
if (vmSnapshot != null) {
|
||||
deleteHypervisorSnapshot(vmSnapshot);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
snapshotDao.releaseFromLockTable(snapshotInfo.getId());
|
||||
|
|
@ -298,6 +549,68 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
|
||||
}
|
||||
|
||||
private VMSnapshot takeHypervisorSnapshot(VolumeInfo volumeInfo) throws ResourceAllocationException {
|
||||
VirtualMachine virtualMachine = volumeInfo.getAttachedVM();
|
||||
|
||||
if (virtualMachine != null && VirtualMachine.State.Running.equals(virtualMachine.getState())) {
|
||||
String vmSnapshotName = UUID.randomUUID().toString().replace("-", "");
|
||||
|
||||
VMSnapshotVO vmSnapshotVO =
|
||||
new VMSnapshotVO(virtualMachine.getAccountId(), virtualMachine.getDomainId(), virtualMachine.getId(), vmSnapshotName, vmSnapshotName,
|
||||
vmSnapshotName, virtualMachine.getServiceOfferingId(), VMSnapshot.Type.Disk, null);
|
||||
|
||||
VMSnapshot vmSnapshot = vmSnapshotDao.persist(vmSnapshotVO);
|
||||
|
||||
if (vmSnapshot == null) {
|
||||
throw new CloudRuntimeException("Unable to allocate a VM snapshot object");
|
||||
}
|
||||
|
||||
vmSnapshot = vmSnapshotService.createVMSnapshot(virtualMachine.getId(), vmSnapshot.getId(), true);
|
||||
|
||||
if (vmSnapshot == null) {
|
||||
throw new CloudRuntimeException("Unable to create a hypervisor-side snapshot");
|
||||
}
|
||||
|
||||
try {
|
||||
Thread.sleep(60000);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
s_logger.warn(ex.getMessage(), ex);
|
||||
}
|
||||
|
||||
return vmSnapshot;
|
||||
}
|
||||
|
||||
// We didn't need to take a hypervisor-side snapshot. Return 'null' to indicate this.
|
||||
return null;
|
||||
}
|
||||
|
||||
private void deleteHypervisorSnapshot(VMSnapshot vmSnapshot) {
|
||||
boolean success = vmSnapshotService.deleteVMSnapshot(vmSnapshot.getId());
|
||||
|
||||
if (!success) {
|
||||
throw new CloudRuntimeException("Unable to delete the hypervisor-side snapshot");
|
||||
}
|
||||
}
|
||||
|
||||
private void setVmdk(SnapshotInfo snapshotInfo, VolumeInfo volumeInfo) {
|
||||
if (!ImageFormat.OVA.equals(volumeInfo.getFormat())) {
|
||||
return;
|
||||
}
|
||||
|
||||
String search = "]";
|
||||
|
||||
String path = volumeInfo.getPath();
|
||||
int startIndex = path.indexOf(search);
|
||||
|
||||
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(snapshotInfo.getId(),
|
||||
DiskTO.VMDK,
|
||||
path.substring(startIndex + search.length()).trim(),
|
||||
false);
|
||||
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
}
|
||||
|
||||
private void updateLocationTypeInDb(SnapshotInfo snapshotInfo) {
|
||||
Object objPayload = snapshotInfo.getPayload();
|
||||
|
||||
|
|
@ -313,19 +626,23 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
|
||||
private boolean canStorageSystemCreateVolumeFromSnapshot(long storagePoolId) {
|
||||
boolean supportsCloningVolumeFromSnapshot = false;
|
||||
return storageSystemSupportsCapability(storagePoolId, DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString());
|
||||
}
|
||||
|
||||
private boolean storageSystemSupportsCapability(long storagePoolId, String capability) {
|
||||
boolean supportsCapability = false;
|
||||
|
||||
DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
|
||||
|
||||
Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();
|
||||
|
||||
if (mapCapabilities != null) {
|
||||
String value = mapCapabilities.get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString());
|
||||
String value = mapCapabilities.get(capability);
|
||||
|
||||
supportsCloningVolumeFromSnapshot = Boolean.valueOf(value);
|
||||
supportsCapability = Boolean.valueOf(value);
|
||||
}
|
||||
|
||||
return supportsCloningVolumeFromSnapshot;
|
||||
return supportsCapability;
|
||||
}
|
||||
|
||||
private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInfo snapshotInfo) {
|
||||
|
|
@ -385,7 +702,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
|
||||
SnapshotAndCopyCommand snapshotAndCopyCommand = new SnapshotAndCopyCommand(volumeInfo.getPath(), sourceDetails, destDetails);
|
||||
|
||||
SnapshotAndCopyAnswer snapshotAndCopyAnswer = null;
|
||||
SnapshotAndCopyAnswer snapshotAndCopyAnswer;
|
||||
|
||||
try {
|
||||
// if sourceDetails != null, we need to connect the host(s) to the volume
|
||||
|
|
@ -589,41 +906,104 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean usingBackendSnapshotFor(long snapshotId) {
|
||||
String property = getProperty(snapshotId, "takeSnapshot");
|
||||
|
||||
return Boolean.parseBoolean(property);
|
||||
}
|
||||
|
||||
@Override
|
||||
public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) {
|
||||
if (SnapshotOperation.REVERT.equals(op)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
Snapshot.LocationType locationType = snapshot.getLocationType();
|
||||
|
||||
// If the snapshot exists on Secondary Storage, we can't delete it.
|
||||
if (SnapshotOperation.DELETE.equals(op)) {
|
||||
if (Snapshot.LocationType.SECONDARY.equals(locationType)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image);
|
||||
|
||||
// If the snapshot exists on Secondary Storage, we can't delete it.
|
||||
if (snapshotStore != null) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);
|
||||
|
||||
if (snapshotStore == null) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
long snapshotStoragePoolId = snapshotStore.getDataStoreId();
|
||||
|
||||
boolean storageSystemSupportsCapability = storageSystemSupportsCapability(snapshotStoragePoolId, DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
|
||||
|
||||
return storageSystemSupportsCapability ? StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
long volumeId = snapshot.getVolumeId();
|
||||
|
||||
VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId);
|
||||
|
||||
long storagePoolId = volumeVO.getPoolId();
|
||||
long volumeStoragePoolId = volumeVO.getPoolId();
|
||||
|
||||
DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
|
||||
if (SnapshotOperation.REVERT.equals(op)) {
|
||||
boolean baseVolumeExists = volumeVO.getRemoved() == null;
|
||||
|
||||
Snapshot.LocationType locationType = snapshot.getLocationType();
|
||||
if (baseVolumeExists) {
|
||||
boolean acceptableFormat = isAcceptableRevertFormat(volumeVO);
|
||||
|
||||
if (acceptableFormat) {
|
||||
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);
|
||||
|
||||
boolean usingBackendSnapshot = usingBackendSnapshotFor(snapshot.getId());
|
||||
|
||||
if (usingBackendSnapshot) {
|
||||
if (snapshotStore != null) {
|
||||
long snapshotStoragePoolId = snapshotStore.getDataStoreId();
|
||||
|
||||
boolean storageSystemSupportsCapability = storageSystemSupportsCapability(snapshotStoragePoolId,
|
||||
DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString());
|
||||
|
||||
if (storageSystemSupportsCapability) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
|
||||
storageSystemSupportsCapability = storageSystemSupportsCapability(volumeStoragePoolId,
|
||||
DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString());
|
||||
|
||||
if (storageSystemSupportsCapability) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (snapshotStore != null) {
|
||||
long snapshotStoragePoolId = snapshotStore.getDataStoreId();
|
||||
|
||||
StoragePoolVO storagePoolVO = storagePoolDao.findById(snapshotStoragePoolId);
|
||||
|
||||
if (storagePoolVO.isManaged()) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
}
|
||||
|
||||
StoragePoolVO storagePoolVO = storagePoolDao.findById(volumeStoragePoolId);
|
||||
|
||||
if (storagePoolVO.isManaged()) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the snapshot exists on Secondary Storage, we can't delete it.
|
||||
if (SnapshotOperation.DELETE.equals(op) && Snapshot.LocationType.SECONDARY.equals(locationType)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
if (dataStore != null) {
|
||||
Map<String, String> mapCapabilities = dataStore.getDriver().getCapabilities();
|
||||
boolean storageSystemSupportsCapability = storageSystemSupportsCapability(volumeStoragePoolId, DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
|
||||
|
||||
if (mapCapabilities != null) {
|
||||
String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
|
||||
Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value);
|
||||
|
||||
if (supportsStorageSystemSnapshots) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
return storageSystemSupportsCapability ? StrategyPriority.HIGHEST : StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -71,7 +71,9 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
|||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.storage.ListVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.ListVolumeCommand;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeCommand;
|
||||
|
|
@ -124,6 +126,8 @@ import com.cloud.storage.dao.VolumeDetailsDao;
|
|||
public class VolumeServiceImpl implements VolumeService {
|
||||
private static final Logger s_logger = Logger.getLogger(VolumeServiceImpl.class);
|
||||
@Inject
|
||||
protected AgentManager agentMgr;
|
||||
@Inject
|
||||
VolumeDao volDao;
|
||||
@Inject
|
||||
PrimaryDataStoreProviderManager dataStoreMgr;
|
||||
|
|
@ -895,12 +899,12 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
copyCaller.setCallback(copyCaller.getTarget().copyManagedTemplateCallback(null, null)).setContext(copyContext);
|
||||
|
||||
// Populate details which will be later read by the storage subsystem.
|
||||
Map<String, String> details = new HashMap<String, String>();
|
||||
Map<String, String> details = new HashMap<>();
|
||||
|
||||
details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString());
|
||||
details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress());
|
||||
details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort()));
|
||||
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, ((TemplateObject)templateOnPrimary).getInstallPath());
|
||||
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, templateOnPrimary.getInstallPath());
|
||||
details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName());
|
||||
details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString());
|
||||
details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize()));
|
||||
|
|
@ -920,7 +924,7 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
|
||||
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
|
||||
|
||||
VolumeApiResult result = null;
|
||||
VolumeApiResult result;
|
||||
|
||||
try {
|
||||
motionSrv.copyAsync(srcTemplateInfo, templateOnPrimary, destHost, copyCaller);
|
||||
|
|
@ -929,6 +933,16 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
}
|
||||
finally {
|
||||
revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore);
|
||||
|
||||
if (HypervisorType.VMware.equals(destHost.getHypervisorType())) {
|
||||
details.put(ModifyTargetsCommand.IQN, templateOnPrimary.getInstallPath());
|
||||
|
||||
List<Map<String, String>> targets = new ArrayList<>();
|
||||
|
||||
targets.add(details);
|
||||
|
||||
removeDynamicTargets(destHost.getId(), targets);
|
||||
}
|
||||
}
|
||||
|
||||
if (result.isFailed()) {
|
||||
|
|
@ -951,6 +965,32 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
}
|
||||
}
|
||||
|
||||
private void removeDynamicTargets(long hostId, List<Map<String, String>> targets) {
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(false);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
String msg = "Unable to get an answer to the modify targets command";
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
else if (!answer.getResult()) {
|
||||
String msg = "Unable to modify target on the following host: " + hostId;
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the template volume on managed storage to the ROOT volume
|
||||
*
|
||||
|
|
@ -1085,12 +1125,12 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
destPrimaryDataStore.getDriver().getCapabilities().get(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString())
|
||||
);
|
||||
|
||||
boolean computeZoneSupportsResign = computeZoneSupportsResign(destHost.getDataCenterId(), destHost.getHypervisorType());
|
||||
boolean computeSupportsVolumeClone = computeSupportsVolumeClone(destHost.getDataCenterId(), destHost.getHypervisorType());
|
||||
|
||||
AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<>();
|
||||
|
||||
if (storageCanCloneVolume && computeZoneSupportsResign) {
|
||||
s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and host cluster can perform UUID resigning.");
|
||||
if (storageCanCloneVolume && computeSupportsVolumeClone) {
|
||||
s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning.");
|
||||
|
||||
TemplateInfo templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId());
|
||||
|
||||
|
|
@ -1118,16 +1158,22 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
|
||||
// We have a template on primary storage. Clone it to new volume.
|
||||
s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId);
|
||||
|
||||
createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future);
|
||||
} else {
|
||||
s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally");
|
||||
|
||||
createManagedVolumeCopyTemplateAsync(volumeInfo, destPrimaryDataStore, srcTemplateInfo, destHost, future);
|
||||
}
|
||||
|
||||
return future;
|
||||
}
|
||||
|
||||
private boolean computeZoneSupportsResign(long zoneId, HypervisorType hypervisorType) {
|
||||
private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) {
|
||||
if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return getHost(zoneId, hypervisorType, true) != null;
|
||||
}
|
||||
|
||||
|
|
@ -1757,7 +1803,17 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
CreateVolumeContext<VolumeApiResult> context = new CreateVolumeContext<VolumeApiResult>(null, volume, future);
|
||||
AsyncCallbackDispatcher<VolumeServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
|
||||
caller.setCallback(caller.getTarget().resizeVolumeCallback(caller, context)).setContext(context);
|
||||
volume.getDataStore().getDriver().resize(volume, caller);
|
||||
|
||||
try {
|
||||
volume.getDataStore().getDriver().resize(volume, caller);
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Failed to change state to resize", e);
|
||||
|
||||
result.setResult(e.toString());
|
||||
|
||||
future.complete(result);
|
||||
}
|
||||
|
||||
return future;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import com.cloud.storage.dao.VolumeDetailsDao;
|
|||
import com.cloud.user.AccountDetailVO;
|
||||
import com.cloud.user.AccountDetailsDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
|
||||
import org.apache.cloudstack.util.solidfire.SolidFireIntegrationTestUtil;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
|
@ -46,9 +47,11 @@ public class SolidFireIntegrationTestManagerImpl implements SolidFireIntegration
|
|||
long storagePoolId = util.getStoragePoolIdForStoragePoolUuid(storagePoolUuid);
|
||||
|
||||
AccountDetailVO accountDetail = accountDetailsDao.findDetail(csAccountId, SolidFireUtil.getAccountKey(storagePoolId));
|
||||
|
||||
if (accountDetail == null){
|
||||
throw new CloudRuntimeException("Unable to find SF account for storage " + storagePoolUuid + " for CS account " + csAccountUuid);
|
||||
}
|
||||
|
||||
String sfAccountId = accountDetail.getValue();
|
||||
|
||||
return Long.parseLong(sfAccountId);
|
||||
|
|
|
|||
|
|
@ -27,14 +27,16 @@ import com.cloud.storage.dao.VolumeDao;
|
|||
import com.cloud.user.Account;
|
||||
import com.cloud.user.dao.AccountDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import org.apache.cloudstack.api.response.solidfire.ApiVolumeSnapshotDetailsResponse;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
public class SolidFireIntegrationTestUtil {
|
||||
@Inject private AccountDao accountDao;
|
||||
@Inject private ClusterDao clusterDao;
|
||||
|
|
@ -47,15 +49,18 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public long getAccountIdForAccountUuid(String accountUuid) {
|
||||
Account account = accountDao.findByUuid(accountUuid);
|
||||
if (account == null){
|
||||
|
||||
if (account == null) {
|
||||
throw new CloudRuntimeException("Unable to find Account for ID: " + accountUuid);
|
||||
}
|
||||
|
||||
return account.getAccountId();
|
||||
}
|
||||
|
||||
public long getAccountIdForVolumeUuid(String volumeUuid) {
|
||||
VolumeVO volume = volumeDao.findByUuid(volumeUuid);
|
||||
if (volume == null){
|
||||
|
||||
if (volume == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid);
|
||||
}
|
||||
|
||||
|
|
@ -64,15 +69,18 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public long getAccountIdForSnapshotUuid(String snapshotUuid) {
|
||||
SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid);
|
||||
if (snapshot == null){
|
||||
|
||||
if (snapshot == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + snapshotUuid);
|
||||
}
|
||||
|
||||
return snapshot.getAccountId();
|
||||
}
|
||||
|
||||
public long getClusterIdForClusterUuid(String clusterUuid) {
|
||||
ClusterVO cluster = clusterDao.findByUuid(clusterUuid);
|
||||
if (cluster == null){
|
||||
|
||||
if (cluster == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + clusterUuid);
|
||||
}
|
||||
|
||||
|
|
@ -81,7 +89,8 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public long getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) {
|
||||
StoragePoolVO storagePool = storagePoolDao.findByUuid(storagePoolUuid);
|
||||
if (storagePool == null){
|
||||
|
||||
if (storagePool == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + storagePoolUuid);
|
||||
}
|
||||
|
||||
|
|
@ -90,7 +99,8 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public String getPathForVolumeUuid(String volumeUuid) {
|
||||
VolumeVO volume = volumeDao.findByUuid(volumeUuid);
|
||||
if (volume == null){
|
||||
|
||||
if (volume == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid);
|
||||
}
|
||||
|
||||
|
|
@ -99,7 +109,8 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public String getVolume_iScsiName(String volumeUuid) {
|
||||
VolumeVO volume = volumeDao.findByUuid(volumeUuid);
|
||||
if (volume == null){
|
||||
|
||||
if (volume == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + volumeUuid);
|
||||
}
|
||||
|
||||
|
|
@ -108,7 +119,8 @@ public class SolidFireIntegrationTestUtil {
|
|||
|
||||
public List<ApiVolumeSnapshotDetailsResponse> getSnapshotDetails(String snapshotUuid) {
|
||||
SnapshotVO snapshot = snapshotDao.findByUuid(snapshotUuid);
|
||||
if (snapshot == null){
|
||||
|
||||
if (snapshot == null) {
|
||||
throw new CloudRuntimeException("Unable to find Volume for ID: " + snapshotUuid);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,47 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>cloud-plugin-api-vmware-sioc</artifactId>
|
||||
<name>Apache CloudStack Plugin - API VMware SIOC</name>
|
||||
<parent>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloudstack-plugins</artifactId>
|
||||
<version>4.11.0.0-SNAPSHOT</version>
|
||||
<relativePath>../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-plugin-hypervisor-vmware</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<argLine>-Xmx1024m</argLine>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
name=vmware-sioc
|
||||
parent=api
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<beans xmlns="http://www.springframework.org/schema/beans"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns:context="http://www.springframework.org/schema/context"
|
||||
xmlns:aop="http://www.springframework.org/schema/aop"
|
||||
xsi:schemaLocation="http://www.springframework.org/schema/beans
|
||||
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
|
||||
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
|
||||
http://www.springframework.org/schema/context
|
||||
http://www.springframework.org/schema/context/spring-context-3.0.xsd"
|
||||
>
|
||||
|
||||
<bean id="apiSiocServiceImpl" class="org.apache.cloudstack.api.sioc.ApiSiocServiceImpl"/>
|
||||
<bean id="siocManagerImpl" class="org.apache.cloudstack.sioc.SiocManagerImpl"/>
|
||||
|
||||
</beans>
|
||||
|
|
@ -0,0 +1,105 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.command.admin.sioc;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseCmd;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
import org.apache.cloudstack.api.response.StoragePoolResponse;
|
||||
import org.apache.cloudstack.api.response.ZoneResponse;
|
||||
import org.apache.cloudstack.api.response.sioc.ApiUpdateSiocInfoResponse;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.sioc.SiocManager;
|
||||
|
||||
import com.cloud.user.Account;
|
||||
|
||||
@APICommand(name = UpdateSiocInfoCmd.APINAME, description = "Update SIOC info", responseObject = ApiUpdateSiocInfoResponse.class,
|
||||
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false,
|
||||
since = "4.11.0",
|
||||
authorized = {RoleType.Admin})
|
||||
public class UpdateSiocInfoCmd extends BaseCmd {
|
||||
private static final Logger s_logger = Logger.getLogger(UpdateSiocInfoCmd.class.getName());
|
||||
|
||||
public static final String APINAME = "updateSiocInfo";
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "Zone ID", required = true)
|
||||
private long zoneId;
|
||||
|
||||
@Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, description = "Storage Pool ID", required = true)
|
||||
private long storagePoolId;
|
||||
|
||||
@Parameter(name = "sharespergb", type = CommandType.INTEGER, description = "Shares per GB", required = true)
|
||||
private int sharesPerGB;
|
||||
|
||||
@Parameter(name = "limitiopspergb", type = CommandType.INTEGER, description = "Limit IOPS per GB", required = true)
|
||||
private int limitIopsPerGB;
|
||||
|
||||
@Parameter(name = "iopsnotifythreshold", type = CommandType.INTEGER, description = "Notify if IOPS above this value", required = true)
|
||||
private int iopsNotifyThreshold;
|
||||
|
||||
@Inject private SiocManager manager;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public String getCommandName() {
|
||||
return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEntityOwnerId() {
|
||||
Account account = CallContext.current().getCallingAccount();
|
||||
|
||||
if (account != null) {
|
||||
return account.getId();
|
||||
}
|
||||
|
||||
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute() {
|
||||
s_logger.info("'UpdateSiocInfoCmd.execute' method invoked");
|
||||
|
||||
String msg = "Success";
|
||||
|
||||
try {
|
||||
manager.updateSiocInfo(zoneId, storagePoolId, sharesPerGB, limitIopsPerGB, iopsNotifyThreshold);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
msg = ex.getMessage();
|
||||
}
|
||||
|
||||
ApiUpdateSiocInfoResponse response = new ApiUpdateSiocInfoResponse(msg);
|
||||
|
||||
response.setResponseName(getCommandName());
|
||||
response.setObjectName("apiupdatesiocinfo");
|
||||
|
||||
setResponseObject(response);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.response.sioc;
|
||||
|
||||
import org.apache.cloudstack.api.BaseResponse;
|
||||
|
||||
import com.cloud.serializer.Param;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
|
||||
public class ApiUpdateSiocInfoResponse extends BaseResponse {
|
||||
@SerializedName("msg")
|
||||
@Param(description = "The return message from the operation ('Success' if successful)")
|
||||
private String _msg;
|
||||
|
||||
public ApiUpdateSiocInfoResponse(String msg) {
|
||||
_msg = msg;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.sioc;
|
||||
|
||||
import com.cloud.utils.component.PluggableService;
|
||||
|
||||
public interface ApiSiocService extends PluggableService {
|
||||
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.api.sioc;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.cloudstack.api.command.admin.sioc.UpdateSiocInfoCmd;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
|
||||
@Component
|
||||
public class ApiSiocServiceImpl extends AdapterBase implements ApiSiocService {
|
||||
@Override
|
||||
public List<Class<?>> getCommands() {
|
||||
List<Class<?>> cmdList = new ArrayList<Class<?>>();
|
||||
|
||||
cmdList.add(UpdateSiocInfoCmd.class);
|
||||
|
||||
return cmdList;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.sioc;
|
||||
|
||||
/**
 * Applies VMware Storage I/O Control (SIOC) settings to the virtual disks that
 * reside on a given storage pool.
 */
public interface SiocManager {
    /**
     * Recomputes and applies per-disk SIOC shares and limit-IOPS values for all
     * volumes on the given storage pool, scaling both by volume size.
     *
     * @param zoneId              zone that owns the storage pool
     * @param storagePoolId       pool whose volumes should be updated
     * @param sharesPerGB         SIOC shares granted per GB of volume size
     * @param limitIopsPerGB      IOPS limit granted per GB of volume size
     * @param iopsNotifyThreshold threshold for the aggregate limit-IOPS total; implementations
     *                            signal the caller (e.g. by throwing) when it is exceeded
     * @throws Exception on invalid input or failure to apply the settings
     */
    void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int limitIopsPerGB, int iopsNotifyThreshold) throws Exception;
}
|
||||
|
|
@ -0,0 +1,463 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.sioc;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.util.LoginInfo;
|
||||
import org.apache.cloudstack.util.vmware.VMwareUtil;
|
||||
import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.dc.DataCenterVO;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.hypervisor.vmware.VmwareDatacenterVO;
|
||||
import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMapVO;
|
||||
import com.cloud.hypervisor.vmware.dao.VmwareDatacenterDao;
|
||||
import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao;
|
||||
import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.SharesInfo;
|
||||
import com.vmware.vim25.SharesLevel;
|
||||
import com.vmware.vim25.StorageIOAllocationInfo;
|
||||
import com.vmware.vim25.VirtualDevice;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpec;
|
||||
import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
|
||||
import com.vmware.vim25.VirtualDisk;
|
||||
import com.vmware.vim25.VirtualDeviceFileBackingInfo;
|
||||
import com.vmware.vim25.VirtualMachineConfigInfo;
|
||||
import com.vmware.vim25.VirtualMachineConfigSpec;
|
||||
|
||||
/**
 * Default {@link SiocManager} implementation: maps CloudStack volumes on a VMFS
 * storage pool to their VMware virtual disks and reconfigures each disk's SIOC
 * shares and limit-IOPS values through the vSphere API.
 */
@Component
public class SiocManagerImpl implements SiocManager {
    private static final Logger LOGGER = Logger.getLogger(SiocManagerImpl.class);

    // How long (in seconds) to wait for the per-zone/pool global lock before failing the request.
    private static final int LOCK_TIME_IN_SECONDS = 3;
    // Decimal gigabyte (10^9 bytes), not a binary GiB (2^30); used for size -> shares/IOPS scaling.
    private static final int ONE_GB_IN_BYTES = 1000000000;
    private static final int LOWEST_SHARES_PER_VIRTUAL_DISK = 2000; // We want this to be greater than 1,000, which is the VMware default value.
    private static final int HIGHEST_SHARES_PER_VIRTUAL_DISK = 4000; // VMware limit
    private static final int LOWEST_LIMIT_IOPS_PER_VIRTUAL_DISK = 16; // VMware limit
    private static final int HIGHEST_LIMIT_IOPS_PER_VIRTUAL_DISK = 2147483647; // VMware limit (== Integer.MAX_VALUE)

    // DAOs for resolving zones, pools, VMs, volumes, and the VMware datacenter mapping.
    @Inject private DataCenterDao zoneDao;
    @Inject private DiskOfferingDao diskOfferingDao;
    @Inject private PrimaryDataStoreDao storagePoolDao;
    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
    @Inject private VMInstanceDao vmInstanceDao;
    @Inject private VmwareDatacenterDao vmwareDcDao;
    @Inject private VmwareDatacenterZoneMapDao vmwareDcZoneMapDao;
    @Inject private VolumeDao volumeDao;
|
||||
|
||||
/**
 * Recomputes and applies SIOC shares and limit-IOPS values for every volume on the
 * given VMFS storage pool.
 *
 * Validates the zone/pool pair, serializes concurrent updates for the same
 * zone+pool via a {@link GlobalLock}, submits reconfigure tasks for all affected
 * VMs, waits for the tasks to finish, and finally compares the aggregate
 * limit-IOPS total against the notify threshold.
 *
 * @param zoneId              zone that owns the storage pool
 * @param storagePoolId       pool to process (must be VMFS and belong to the zone)
 * @param sharesPerGB         SIOC shares granted per GB of volume size
 * @param limitIopsPerGB      IOPS limit granted per GB of volume size
 * @param iopsNotifyThreshold an exception is thrown if the summed limit IOPS exceeds this
 * @throws Exception on invalid input, lock contention, vCenter errors, or threshold breach
 */
@Override
public void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int limitIopsPerGB, int iopsNotifyThreshold) throws Exception {
    LOGGER.info("'SiocManagerImpl.updateSiocInfo(long, long, int, int, int)' method invoked");

    DataCenterVO zone = zoneDao.findById(zoneId);

    if (zone == null) {
        throw new Exception("Error: No zone could be located for the following zone ID: " + zoneId + ".");
    }

    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);

    if (storagePool == null) {
        throw new Exception("Error: No storage pool could be located for the following pool ID: " + storagePoolId + ".");
    }

    if (storagePool.getDataCenterId() != zoneId) {
        throw new Exception("Error: Storage pool '" + storagePool.getName() + "' is not in zone ID " + zoneId + ".");
    }

    // SIOC only applies to VMFS datastores.
    if (!storagePool.getPoolType().equals(StoragePoolType.VMFS)) {
        throw new Exception("Error: Storage pool '" + storagePool.getName() + "' does not represent a VMFS datastore.");
    }

    // One update at a time per zone/pool pair; fail fast rather than queue.
    String lockName = zone.getUuid() + "-" + storagePool.getUuid();
    GlobalLock lock = GlobalLock.getInternLock(lockName);

    if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
        throw new Exception("Busy: The system is already processing this request.");
    }

    VMwareUtil.VMwareConnection connection = null;

    try {
        connection = VMwareUtil.getVMwareConnection(getLoginInfo(zoneId));

        Map<String, ManagedObjectReference> nameToVm = VMwareUtil.getVms(connection);

        List<ManagedObjectReference> allTasks = new ArrayList<>();

        int limitIopsTotal = 0;

        List<VolumeVO> volumes = volumeDao.findByPoolId(storagePoolId, null);

        if (volumes != null && volumes.size() > 0) {
            // Collect the distinct VMs that own volumes on this pool, then update each VM once.
            Set<Long> instanceIds = new HashSet<>();

            for (VolumeVO volume : volumes) {
                Long instanceId = volume.getInstanceId();

                if (instanceId != null) {
                    instanceIds.add(instanceId);
                }
            }

            for (Long instanceId : instanceIds) {
                ResultWrapper resultWrapper = updateSiocInfo(connection, nameToVm, instanceId, storagePool, sharesPerGB, limitIopsPerGB);

                limitIopsTotal += resultWrapper.getLimitIopsTotal();

                allTasks.addAll(resultWrapper.getTasks());
            }
        }
        // NOTE: worker-VM handling is intentionally disabled; kept for reference.
        /*
        Set<String> vmNames = nameToVm.keySet();

        for (String vmName : vmNames) {
            // If the VM's name doesn't start with "i-", then it should be a worker VM (which is not stored in the CloudStack datastore).
            if (!vmName.startsWith("i-")) {
                ResultWrapper resultWrapper = updateSiocInfoForWorkerVM(connection, nameToVm.get(vmName),
                        getDatastoreName(storagePool.getPath()), limitIopsPerGB);

                limitIopsTotal += resultWrapper.getLimitIopsTotal();

                allTasks.addAll(resultWrapper.getTasks());
            }
        }
        */
        // All reconfigure tasks were submitted asynchronously; wait for each to complete.
        for (ManagedObjectReference task : allTasks) {
            VMwareUtil.waitForTask(connection, task);
        }

        // Surface a threshold breach to the caller as an exception after the work is done.
        if (limitIopsTotal > iopsNotifyThreshold) {
            throw new Exception("Warning: Total number of IOPS: " + limitIopsTotal + "; IOPS notify threshold: " + iopsNotifyThreshold);
        }
    }
    finally {
        VMwareUtil.closeVMwareConnection(connection);

        lock.unlock();
        lock.releaseRef();
    }
}
|
||||
|
||||
private ResultWrapper updateSiocInfo(VMwareUtil.VMwareConnection connection, Map<String, ManagedObjectReference> nameToVm, Long instanceId,
|
||||
StoragePoolVO storagePool, int sharesPerGB, int limitIopsPerGB) throws Exception {
|
||||
int limitIopsTotal = 0;
|
||||
List<ManagedObjectReference> tasks = new ArrayList<>();
|
||||
|
||||
VMInstanceVO vmInstance = vmInstanceDao.findById(instanceId);
|
||||
|
||||
if (vmInstance == null) {
|
||||
String errMsg = "Error: The VM with ID " + instanceId + " could not be located.";
|
||||
|
||||
throw new Exception(errMsg);
|
||||
}
|
||||
|
||||
String vmName = vmInstance.getInstanceName();
|
||||
|
||||
ManagedObjectReference morVm = nameToVm.get(vmName);
|
||||
|
||||
if (morVm == null) {
|
||||
String errMsg = "Error: The VM with ID " + instanceId + " could not be located (ManagedObjectReference).";
|
||||
|
||||
throw new Exception(errMsg);
|
||||
}
|
||||
|
||||
VirtualMachineConfigInfo vmci = (VirtualMachineConfigInfo)VMwareUtil.getEntityProps(connection, morVm,
|
||||
new String[] { "config" }).get("config");
|
||||
List<VirtualDevice> devices = vmci.getHardware().getDevice();
|
||||
|
||||
for (VirtualDevice device : devices) {
|
||||
if (device instanceof VirtualDisk) {
|
||||
VirtualDisk disk = (VirtualDisk)device;
|
||||
|
||||
VolumeVO volumeVO = getVolumeFromVirtualDisk(vmInstance, storagePool.getId(), devices, disk);
|
||||
|
||||
if (volumeVO != null) {
|
||||
boolean diskUpdated = false;
|
||||
|
||||
StorageIOAllocationInfo sioai = disk.getStorageIOAllocation();
|
||||
|
||||
SharesInfo sharesInfo = sioai.getShares();
|
||||
|
||||
int currentShares = sharesInfo.getShares();
|
||||
int newShares = getNewSharesBasedOnVolumeSize(volumeVO, sharesPerGB);
|
||||
|
||||
if (currentShares != newShares) {
|
||||
sharesInfo.setLevel(SharesLevel.CUSTOM);
|
||||
sharesInfo.setShares(newShares);
|
||||
|
||||
diskUpdated = true;
|
||||
}
|
||||
|
||||
long currentLimitIops = sioai.getLimit() != null ? sioai.getLimit() : Long.MIN_VALUE;
|
||||
long newLimitIops = getNewLimitIopsBasedOnVolumeSize(volumeVO, limitIopsPerGB);
|
||||
|
||||
limitIopsTotal += newLimitIops;
|
||||
|
||||
if (currentLimitIops != newLimitIops) {
|
||||
sioai.setLimit(newLimitIops);
|
||||
|
||||
diskUpdated = true;
|
||||
}
|
||||
|
||||
if (diskUpdated) {
|
||||
VirtualDeviceConfigSpec vdcs = new VirtualDeviceConfigSpec();
|
||||
|
||||
vdcs.setDevice(disk);
|
||||
vdcs.setOperation(VirtualDeviceConfigSpecOperation.EDIT);
|
||||
|
||||
VirtualMachineConfigSpec vmcs = new VirtualMachineConfigSpec();
|
||||
|
||||
vmcs.getDeviceChange().add(vdcs);
|
||||
|
||||
try {
|
||||
ManagedObjectReference task = VMwareUtil.reconfigureVm(connection, morVm, vmcs);
|
||||
|
||||
tasks.add(task);
|
||||
|
||||
LOGGER.info(getInfoMsg(volumeVO, newShares, newLimitIops));
|
||||
} catch (Exception ex) {
|
||||
throw new Exception("Error: " + ex.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new ResultWrapper(limitIopsTotal, tasks);
|
||||
}
|
||||
|
||||
private String getDatastoreName(String path) throws Exception {
|
||||
String searchString = "/";
|
||||
|
||||
int lastIndexOf = path.lastIndexOf(searchString);
|
||||
|
||||
if (lastIndexOf == -1) {
|
||||
throw new Exception("Error: Invalid datastore path");
|
||||
}
|
||||
|
||||
return path.substring(lastIndexOf + searchString.length());
|
||||
}
|
||||
|
||||
private ResultWrapper updateSiocInfoForWorkerVM(VMwareUtil.VMwareConnection connection, ManagedObjectReference morVm, String datastoreName,
|
||||
int limitIopsPerGB) throws Exception {
|
||||
int limitIopsTotal = 0;
|
||||
List<ManagedObjectReference> tasks = new ArrayList<>();
|
||||
|
||||
VirtualMachineConfigInfo vmci = (VirtualMachineConfigInfo)VMwareUtil.getEntityProps(connection, morVm,
|
||||
new String[] { "config" }).get("config");
|
||||
List<VirtualDevice> devices = vmci.getHardware().getDevice();
|
||||
|
||||
for (VirtualDevice device : devices) {
|
||||
if (device instanceof VirtualDisk) {
|
||||
VirtualDisk disk = (VirtualDisk)device;
|
||||
|
||||
if (disk.getBacking() instanceof VirtualDeviceFileBackingInfo) {
|
||||
VirtualDeviceFileBackingInfo backingInfo = (VirtualDeviceFileBackingInfo)disk.getBacking();
|
||||
|
||||
if (backingInfo.getFileName().contains(datastoreName)) {
|
||||
boolean diskUpdated = false;
|
||||
|
||||
StorageIOAllocationInfo sioai = disk.getStorageIOAllocation();
|
||||
|
||||
long currentLimitIops = sioai.getLimit() != null ? sioai.getLimit() : Long.MIN_VALUE;
|
||||
long newLimitIops = getNewLimitIopsBasedOnVolumeSize(disk.getCapacityInBytes(), limitIopsPerGB);
|
||||
|
||||
limitIopsTotal += newLimitIops;
|
||||
|
||||
if (currentLimitIops != newLimitIops) {
|
||||
sioai.setLimit(newLimitIops);
|
||||
|
||||
diskUpdated = true;
|
||||
}
|
||||
|
||||
if (diskUpdated) {
|
||||
VirtualDeviceConfigSpec vdcs = new VirtualDeviceConfigSpec();
|
||||
|
||||
vdcs.setDevice(disk);
|
||||
vdcs.setOperation(VirtualDeviceConfigSpecOperation.EDIT);
|
||||
|
||||
VirtualMachineConfigSpec vmcs = new VirtualMachineConfigSpec();
|
||||
|
||||
vmcs.getDeviceChange().add(vdcs);
|
||||
|
||||
try {
|
||||
ManagedObjectReference task = VMwareUtil.reconfigureVm(connection, morVm, vmcs);
|
||||
|
||||
tasks.add(task);
|
||||
|
||||
LOGGER.info(getInfoMsgForWorkerVm(newLimitIops));
|
||||
} catch (Exception ex) {
|
||||
throw new Exception("Error: " + ex.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new ResultWrapper(limitIopsTotal, tasks);
|
||||
}
|
||||
|
||||
private String getInfoMsg(Volume volume, Integer newShares, Long newLimitIops) {
|
||||
String msgPrefix = "VMware SIOC: Volume = " + volume.getName();
|
||||
|
||||
String msgNewShares = newShares != null ? "; New Shares = " + newShares : "";
|
||||
|
||||
String msgNewLimitIops = newLimitIops != null ? "; New Limit IOPS = " + newLimitIops : "";
|
||||
|
||||
return msgPrefix + msgNewShares + msgNewLimitIops;
|
||||
}
|
||||
|
||||
private String getInfoMsgForWorkerVm(Long newLimitIops) {
|
||||
return "VMware SIOC: Worker VM's Limit IOPS set to " + newLimitIops;
|
||||
}
|
||||
|
||||
/**
 * Maps a VMware virtual disk back to the CloudStack volume it represents.
 *
 * For each of the VM's volumes on the given pool, the volume's backing file base
 * name and datastore name are looked up in the disk-info builder; a volume matches
 * when its disk's bus name (e.g. "scsi0:1") equals the bus name of {@code disk}.
 *
 * @param vmInstance    VM that owns the candidate volumes
 * @param storagePoolId only volumes on this pool are considered
 * @param allDevices    the VM's full virtual device list (needed to resolve bus names)
 * @param disk          the virtual disk to map
 * @return the matching volume, or {@code null} if no volume on the pool corresponds to the disk
 * @throws Exception if the VM has no volumes at all
 */
private VolumeVO getVolumeFromVirtualDisk(VMInstanceVO vmInstance, long storagePoolId, List<VirtualDevice> allDevices,
        VirtualDisk disk) throws Exception {
    List<VolumeVO> volumes = volumeDao.findByInstance(vmInstance.getId());

    if (volumes == null || volumes.size() == 0) {
        String errMsg = "Error: The VMware virtual disk '" + disk + "' could not be mapped to a CloudStack volume. " +
                "There were no volumes for the VM with the following ID: " + vmInstance.getId() + ".";

        throw new Exception(errMsg);
    }

    VirtualMachineDiskInfoBuilder diskInfoBuilder = VMwareUtil.getDiskInfoBuilder(allDevices);

    for (VolumeVO volume : volumes) {
        Long poolId = volume.getPoolId();

        // poolId is unboxed for the comparison; the null check above guards against an NPE.
        if (poolId != null && poolId == storagePoolId) {
            StoragePoolVO storagePool = storagePoolDao.findById(poolId);
            // The datastore name is the last path segment of the pool's path.
            String path = storagePool.getPath();
            String charToSearchFor = "/";
            int index = path.lastIndexOf(charToSearchFor) + charToSearchFor.length();
            String datastoreName = path.substring(index);
            VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(volume.getPath(), datastoreName);

            if (diskInfo != null) {
                String deviceBusName = VMwareUtil.getDeviceBusName(allDevices, disk);

                if (deviceBusName.equals(diskInfo.getDiskDeviceBusName())) {
                    return volume;
                }
            }
        }
    }

    return null;
}
|
||||
|
||||
private int getNewSharesBasedOnVolumeSize(VolumeVO volumeVO, int sharesPerGB) {
|
||||
long volumeSizeInBytes = getVolumeSizeInBytes(volumeVO);
|
||||
|
||||
double sizeInGB = volumeSizeInBytes / (double)ONE_GB_IN_BYTES;
|
||||
|
||||
int shares = LOWEST_SHARES_PER_VIRTUAL_DISK + ((int)(sharesPerGB * sizeInGB));
|
||||
|
||||
return getAdjustedShares(shares);
|
||||
}
|
||||
|
||||
private int getAdjustedShares(int shares) {
|
||||
shares = Math.max(shares, LOWEST_SHARES_PER_VIRTUAL_DISK);
|
||||
shares = Math.min(shares, HIGHEST_SHARES_PER_VIRTUAL_DISK);
|
||||
|
||||
return shares;
|
||||
}
|
||||
|
||||
private long getNewLimitIopsBasedOnVolumeSize(VolumeVO volumeVO, int limitIopsPerGB) {
|
||||
long volumeSizeInBytes = getVolumeSizeInBytes(volumeVO);
|
||||
|
||||
return getNewLimitIopsBasedOnVolumeSize(volumeSizeInBytes, limitIopsPerGB);
|
||||
}
|
||||
|
||||
private long getNewLimitIopsBasedOnVolumeSize(Long volumeSizeInBytes, int limitIopsPerGB) {
|
||||
if (volumeSizeInBytes == null) {
|
||||
volumeSizeInBytes = (long)ONE_GB_IN_BYTES;
|
||||
}
|
||||
|
||||
double sizeInGB = volumeSizeInBytes / (double)ONE_GB_IN_BYTES;
|
||||
|
||||
long limitIops = (long)(limitIopsPerGB * sizeInGB);
|
||||
|
||||
return getAdjustedLimitIops(limitIops);
|
||||
}
|
||||
|
||||
private long getAdjustedLimitIops(long limitIops) {
|
||||
limitIops = Math.max(limitIops, LOWEST_LIMIT_IOPS_PER_VIRTUAL_DISK);
|
||||
limitIops = Math.min(limitIops, HIGHEST_LIMIT_IOPS_PER_VIRTUAL_DISK);
|
||||
|
||||
return limitIops;
|
||||
}
|
||||
|
||||
private long getVolumeSizeInBytes(VolumeVO volumeVO) {
|
||||
return volumeVO.getSize() != null && volumeVO.getSize() > ONE_GB_IN_BYTES ? volumeVO.getSize() : ONE_GB_IN_BYTES;
|
||||
}
|
||||
|
||||
/**
 * Builds vCenter login credentials for a zone by following the zone -> VMware
 * datacenter mapping.
 *
 * NOTE(review): neither DAO lookup is null-checked; a zone without a VMware DC
 * mapping would raise an NPE here -- presumably callers only pass VMware-backed
 * zones, but confirm.
 *
 * @param zoneId the CloudStack zone to resolve credentials for
 * @return host, username, and password for the zone's associated vCenter
 */
private LoginInfo getLoginInfo(long zoneId) {
    VmwareDatacenterZoneMapVO vmwareDcZoneMap = vmwareDcZoneMapDao.findByZoneId(zoneId);
    Long associatedVmwareDcId = vmwareDcZoneMap.getVmwareDcId();
    VmwareDatacenterVO associatedVmwareDc = vmwareDcDao.findById(associatedVmwareDcId);

    String host = associatedVmwareDc.getVcenterHost();
    String username = associatedVmwareDc.getUser();
    String password = associatedVmwareDc.getPassword();

    return new LoginInfo(host, username, password);
}
|
||||
}
|
||||
|
||||
class ResultWrapper {
|
||||
private int limitIopsTotal;
|
||||
private List<ManagedObjectReference> tasks;
|
||||
|
||||
ResultWrapper(int limitIopsTotal, List<ManagedObjectReference> tasks) {
|
||||
this.limitIopsTotal = limitIopsTotal;
|
||||
this.tasks = tasks;
|
||||
}
|
||||
|
||||
int getLimitIopsTotal() {
|
||||
return limitIopsTotal;
|
||||
}
|
||||
|
||||
List<ManagedObjectReference> getTasks() {
|
||||
return tasks;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.util;
|
||||
|
||||
/**
 * Immutable holder for vCenter connection credentials: host, username, and password.
 */
public class LoginInfo {
    private final String host;
    private final String username;
    private final String password;

    public LoginInfo(String host, String username, String password) {
        this.host = host;
        this.username = username;
        this.password = password;
    }

    /** @return the vCenter host name or IP address */
    public String getHost() {
        return host;
    }

    /** @return the vCenter account name */
    public String getUsername() {
        return username;
    }

    /** @return the vCenter account password */
    public String getPassword() {
        return password;
    }
}
|
||||
|
|
@ -0,0 +1,570 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.util.vmware;
|
||||
|
||||
import java.security.cert.CertificateException;
|
||||
import java.security.cert.X509Certificate;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.net.ssl.HostnameVerifier;
|
||||
import javax.net.ssl.HttpsURLConnection;
|
||||
import javax.net.ssl.SSLContext;
|
||||
import javax.net.ssl.SSLSession;
|
||||
import javax.net.ssl.SSLSessionContext;
|
||||
import javax.net.ssl.TrustManager;
|
||||
import javax.net.ssl.X509TrustManager;
|
||||
import javax.xml.ws.BindingProvider;
|
||||
import javax.xml.ws.WebServiceException;
|
||||
|
||||
import org.apache.cloudstack.util.LoginInfo;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder;
|
||||
import com.vmware.vim25.DynamicProperty;
|
||||
import com.vmware.vim25.InvalidCollectorVersionFaultMsg;
|
||||
import com.vmware.vim25.InvalidPropertyFaultMsg;
|
||||
import com.vmware.vim25.LocalizedMethodFault;
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.ObjectContent;
|
||||
import com.vmware.vim25.ObjectSpec;
|
||||
import com.vmware.vim25.ObjectUpdate;
|
||||
import com.vmware.vim25.ObjectUpdateKind;
|
||||
import com.vmware.vim25.PropertyChange;
|
||||
import com.vmware.vim25.PropertyChangeOp;
|
||||
import com.vmware.vim25.PropertyFilterSpec;
|
||||
import com.vmware.vim25.PropertyFilterUpdate;
|
||||
import com.vmware.vim25.PropertySpec;
|
||||
import com.vmware.vim25.RetrieveOptions;
|
||||
import com.vmware.vim25.RetrieveResult;
|
||||
import com.vmware.vim25.RuntimeFaultFaultMsg;
|
||||
import com.vmware.vim25.SelectionSpec;
|
||||
import com.vmware.vim25.ServiceContent;
|
||||
import com.vmware.vim25.TaskInfoState;
|
||||
import com.vmware.vim25.TraversalSpec;
|
||||
import com.vmware.vim25.UpdateSet;
|
||||
import com.vmware.vim25.VimPortType;
|
||||
import com.vmware.vim25.VimService;
|
||||
import com.vmware.vim25.VirtualDevice;
|
||||
import com.vmware.vim25.VirtualDeviceBackingInfo;
|
||||
import com.vmware.vim25.VirtualDisk;
|
||||
import com.vmware.vim25.VirtualDiskFlatVer2BackingInfo;
|
||||
import com.vmware.vim25.VirtualIDEController;
|
||||
import com.vmware.vim25.VirtualMachineConfigSpec;
|
||||
import com.vmware.vim25.VirtualSCSIController;
|
||||
|
||||
/**
 * Static helpers for talking to vCenter via the vSphere Web Services SDK:
 * session setup/teardown, property retrieval, VM enumeration, and task handling.
 * Stateless utility class; all callers pass an explicit {@link VMwareConnection}.
 */
public class VMwareUtil {
    private static final Logger s_logger = Logger.getLogger(VMwareUtil.class);

    // Utility class: no instances.
    private VMwareUtil() {}
|
||||
|
||||
public static class VMwareConnection {
|
||||
private VimPortType _vimPortType;
|
||||
private ServiceContent _serviceContent;
|
||||
|
||||
VMwareConnection(VimPortType vimPortType, ServiceContent serviceContent) {
|
||||
_vimPortType = vimPortType;
|
||||
_serviceContent = serviceContent;
|
||||
}
|
||||
|
||||
VimPortType getVimPortType() {
|
||||
return _vimPortType;
|
||||
}
|
||||
|
||||
ServiceContent getServiceContent() {
|
||||
return _serviceContent;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Opens and authenticates a vSphere session against {@code https://<host>/sdk}
 * and returns the port/ServiceContent pair wrapped in a {@link VMwareConnection}.
 *
 * NOTE(security): trustAllHttpsCertificates() together with the accept-all
 * HostnameVerifier below disables TLS certificate and hostname validation
 * process-wide (it sets JVM-global defaults). This permits man-in-the-middle
 * attacks on the vCenter link; consider a proper truststore instead.
 *
 * @param loginInfo vCenter host and credentials
 * @return an authenticated connection; callers must release it via closeVMwareConnection
 * @throws Exception on connection or login failure
 */
public static VMwareConnection getVMwareConnection(LoginInfo loginInfo) throws Exception {
    trustAllHttpsCertificates();

    HostnameVerifier hv = new HostnameVerifier() {
        @Override
        public boolean verify(String urlHostName, SSLSession session) {
            // Accepts every host name -- see the security note above.
            return true;
        }
    };

    HttpsURLConnection.setDefaultHostnameVerifier(hv);

    ManagedObjectReference serviceInstanceRef = new ManagedObjectReference();

    // The well-known singleton ServiceInstance uses its type name as its value, too.
    final String serviceInstanceName = "ServiceInstance";

    serviceInstanceRef.setType(serviceInstanceName);
    serviceInstanceRef.setValue(serviceInstanceName);

    VimService vimService = new VimService();

    VimPortType vimPortType = vimService.getVimPort();

    Map<String, Object> ctxt = ((BindingProvider)vimPortType).getRequestContext();

    ctxt.put(BindingProvider.ENDPOINT_ADDRESS_PROPERTY, "https://" + loginInfo.getHost() + "/sdk");
    // Keep the HTTP session cookie so subsequent calls reuse the authenticated session.
    ctxt.put(BindingProvider.SESSION_MAINTAIN_PROPERTY, true);

    ServiceContent serviceContent = vimPortType.retrieveServiceContent(serviceInstanceRef);

    vimPortType.login(serviceContent.getSessionManager(), loginInfo.getUsername(), loginInfo.getPassword(), null);

    return new VMwareConnection(vimPortType, serviceContent);
}
|
||||
|
||||
public static void closeVMwareConnection(VMwareConnection connection) throws Exception {
|
||||
if (connection != null) {
|
||||
connection.getVimPortType().logout(connection.getServiceContent().getSessionManager());
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Enumerates all virtual machines visible from the root folder and returns a map
 * of VM name to its ManagedObjectReference.
 *
 * Uses the PropertyCollector: a traversal spec walks the inventory from the root
 * folder and a property spec fetches only the "name" property of each VirtualMachine.
 *
 * @param connection open vCenter connection
 * @return map from VM name to MOR (VMs with duplicate names overwrite each other)
 * @throws Exception on PropertyCollector failures
 */
public static Map<String, ManagedObjectReference> getVms(VMwareConnection connection) throws Exception {
    Map<String, ManagedObjectReference> nameToVm = new HashMap<>();

    ManagedObjectReference rootFolder = connection.getServiceContent().getRootFolder();

    TraversalSpec tSpec = getVMTraversalSpec();

    // Request only the "name" property of each VirtualMachine.
    PropertySpec propertySpec = new PropertySpec();

    propertySpec.setAll(Boolean.FALSE);
    propertySpec.getPathSet().add("name");
    propertySpec.setType("VirtualMachine");

    // Start traversal at the root folder but skip the folder object itself.
    ObjectSpec objectSpec = new ObjectSpec();

    objectSpec.setObj(rootFolder);
    objectSpec.setSkip(Boolean.TRUE);
    objectSpec.getSelectSet().add(tSpec);

    PropertyFilterSpec propertyFilterSpec = new PropertyFilterSpec();

    propertyFilterSpec.getPropSet().add(propertySpec);
    propertyFilterSpec.getObjectSet().add(objectSpec);

    List<PropertyFilterSpec> lstPfs = new ArrayList<>(1);

    lstPfs.add(propertyFilterSpec);

    VimPortType vimPortType = connection.getVimPortType();
    ManagedObjectReference propertyCollector = connection.getServiceContent().getPropertyCollector();

    List<ObjectContent> lstObjectContent = retrievePropertiesAllObjects(lstPfs, vimPortType, propertyCollector);

    if (lstObjectContent != null) {
        for (ObjectContent oc : lstObjectContent) {
            ManagedObjectReference mor = oc.getObj();
            List<DynamicProperty> dps = oc.getPropSet();
            String vmName = null;

            // Only "name" was requested, so the last (sole) property value is the VM name.
            if (dps != null) {
                for (DynamicProperty dp : dps) {
                    vmName = (String)dp.getVal();
                }
            }

            if (vmName != null) {
                nameToVm.put(vmName, mor);
            }
        }
    }

    return nameToVm;
}
|
||||
|
||||
/**
 * Fetches the requested properties of a single managed entity and returns them as
 * a property-name-to-value map.
 *
 * Uses retrievePropertiesEx and follows the continuation token with
 * continueRetrievePropertiesEx until all result pages have been collected.
 *
 * @param connection open vCenter connection
 * @param entityMor  the managed object to read properties from
 * @param props      property paths to fetch (e.g. "config")
 * @return map from property name to its value; missing properties are simply absent
 */
public static Map<String, Object> getEntityProps(VMwareConnection connection, ManagedObjectReference entityMor, String[] props)
        throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg {
    Map<String, Object> retVal = new HashMap<>();

    PropertySpec propertySpec = new PropertySpec();

    propertySpec.setAll(Boolean.FALSE);
    propertySpec.setType(entityMor.getType());
    propertySpec.getPathSet().addAll(Arrays.asList(props));

    ObjectSpec objectSpec = new ObjectSpec();

    objectSpec.setObj(entityMor);

    // Create PropertyFilterSpec using the PropertySpec and ObjectSpec created above.
    PropertyFilterSpec propertyFilterSpec = new PropertyFilterSpec();

    propertyFilterSpec.getPropSet().add(propertySpec);
    propertyFilterSpec.getObjectSet().add(objectSpec);

    List<PropertyFilterSpec> propertyFilterSpecs = new ArrayList<>();

    propertyFilterSpecs.add(propertyFilterSpec);

    RetrieveResult rslts = connection.getVimPortType().retrievePropertiesEx(connection.getServiceContent().getPropertyCollector(),
            propertyFilterSpecs, new RetrieveOptions());
    List<ObjectContent> listobjcontent = new ArrayList<>();

    if (rslts != null && rslts.getObjects() != null && !rslts.getObjects().isEmpty()) {
        listobjcontent.addAll(rslts.getObjects());
    }

    String token = null;

    if (rslts != null && rslts.getToken() != null) {
        token = rslts.getToken();
    }

    // A non-empty token means the collector has more pages; keep fetching until it is exhausted.
    while (token != null && !token.isEmpty()) {
        rslts = connection.getVimPortType().continueRetrievePropertiesEx(connection.getServiceContent().getPropertyCollector(),
                token);

        token = null;

        if (rslts != null) {
            token = rslts.getToken();

            if (rslts.getObjects() != null && !rslts.getObjects().isEmpty()) {
                listobjcontent.addAll(rslts.getObjects());
            }
        }
    }

    // Flatten all returned property sets into a single name->value map.
    for (ObjectContent oc : listobjcontent) {
        List<DynamicProperty> dps = oc.getPropSet();

        if (dps != null) {
            for (DynamicProperty dp : dps) {
                retVal.put(dp.getName(), dp.getVal());
            }
        }
    }

    return retVal;
}
|
||||
|
||||
/**
 * Submits an asynchronous ReconfigVM_Task for the given VM and returns the task's
 * ManagedObjectReference; callers must await completion (see {@code waitForTask}).
 */
public static ManagedObjectReference reconfigureVm(VMwareConnection connection, ManagedObjectReference morVm,
        VirtualMachineConfigSpec vmcs) throws Exception {
    return connection.getVimPortType().reconfigVMTask(morVm, vmcs);
}
|
||||
|
||||
/**
 * Builds a {@link VirtualMachineDiskInfoBuilder} from a VM's device list: for each
 * flat-file-backed virtual disk it records the disk's bus name (e.g. "scsi0:1")
 * together with every backing file in the disk's parent (snapshot) chain.
 *
 * @param devices the VM's virtual devices; may be null (yields an empty builder)
 * @throws Exception if a disk's controller cannot be resolved to a bus name
 */
public static VirtualMachineDiskInfoBuilder getDiskInfoBuilder(List<VirtualDevice> devices) throws Exception {
    VirtualMachineDiskInfoBuilder builder = new VirtualMachineDiskInfoBuilder();

    if (devices != null) {
        for (VirtualDevice device : devices) {
            if (device instanceof VirtualDisk) {
                VirtualDisk virtualDisk = (VirtualDisk)device;
                VirtualDeviceBackingInfo backingInfo = virtualDisk.getBacking();

                if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) {
                    VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo;

                    String deviceBusName = VMwareUtil.getDeviceBusName(devices, virtualDisk);

                    // Walk the backing chain so snapshot delta files are registered, too.
                    while (diskBackingInfo != null) {
                        builder.addDisk(deviceBusName, diskBackingInfo.getFileName());

                        diskBackingInfo = diskBackingInfo.getParent();
                    }
                }
            }
        }
    }

    return builder;
}
|
||||
|
||||
public static String getDeviceBusName(List<VirtualDevice> allDevices, VirtualDisk disk) throws Exception {
|
||||
for (VirtualDevice device : allDevices) {
|
||||
if (device.getKey() == disk.getControllerKey()) {
|
||||
if (device instanceof VirtualIDEController) {
|
||||
return String.format("ide%d:%d", ((VirtualIDEController)device).getBusNumber(), disk.getUnitNumber());
|
||||
} else if (device instanceof VirtualSCSIController) {
|
||||
return String.format("scsi%d:%d", ((VirtualSCSIController)device).getBusNumber(), disk.getUnitNumber());
|
||||
} else {
|
||||
throw new Exception("The device controller is not supported.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Exception("The device controller could not be located.");
|
||||
}
|
||||
|
||||
public static boolean waitForTask(VMwareConnection connection, ManagedObjectReference task) throws Exception {
|
||||
try {
|
||||
Object[] result = waitForValues(connection, task, new String[] { "info.state", "info.error" }, new String[] { "state" },
|
||||
new Object[][] { new Object[] { TaskInfoState.SUCCESS, TaskInfoState.ERROR } });
|
||||
|
||||
if (result[0].equals(TaskInfoState.SUCCESS)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (result[1] instanceof LocalizedMethodFault) {
|
||||
throw new Exception(((LocalizedMethodFault)result[1]).getLocalizedMessage());
|
||||
}
|
||||
} catch (WebServiceException we) {
|
||||
s_logger.debug("Cancelling vCenter task because the task failed with the following error: " + we.getLocalizedMessage());
|
||||
|
||||
connection.getVimPortType().cancelTask(task);
|
||||
|
||||
throw new Exception("The vCenter task failed due to the following error: " + we.getLocalizedMessage());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private static Object[] waitForValues(VMwareConnection connection, ManagedObjectReference morObj, String[] filterProps,
|
||||
String[] endWaitProps, Object[][] expectedVals) throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg,
|
||||
InvalidCollectorVersionFaultMsg {
|
||||
String version = "";
|
||||
Object[] endVals = new Object[endWaitProps.length];
|
||||
Object[] filterVals = new Object[filterProps.length];
|
||||
|
||||
PropertyFilterSpec spec = new PropertyFilterSpec();
|
||||
|
||||
ObjectSpec oSpec = new ObjectSpec();
|
||||
|
||||
oSpec.setObj(morObj);
|
||||
oSpec.setSkip(Boolean.FALSE);
|
||||
|
||||
spec.getObjectSet().add(oSpec);
|
||||
|
||||
PropertySpec pSpec = new PropertySpec();
|
||||
|
||||
pSpec.getPathSet().addAll(Arrays.asList(filterProps));
|
||||
pSpec.setType(morObj.getType());
|
||||
|
||||
spec.getPropSet().add(pSpec);
|
||||
|
||||
ManagedObjectReference propertyCollector = connection.getServiceContent().getPropertyCollector();
|
||||
ManagedObjectReference filterSpecRef = connection.getVimPortType().createFilter(propertyCollector, spec, true);
|
||||
|
||||
boolean reached = false;
|
||||
|
||||
UpdateSet updateSet;
|
||||
List<PropertyFilterUpdate> lstPropertyFilterUpdates;
|
||||
List<ObjectUpdate> lstObjectUpdates;
|
||||
List<PropertyChange> lstPropertyChanges;
|
||||
|
||||
while (!reached) {
|
||||
updateSet = connection.getVimPortType().waitForUpdates(propertyCollector, version);
|
||||
|
||||
if (updateSet == null || updateSet.getFilterSet() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
version = updateSet.getVersion();
|
||||
|
||||
lstPropertyFilterUpdates = updateSet.getFilterSet();
|
||||
|
||||
for (PropertyFilterUpdate propertyFilterUpdate : lstPropertyFilterUpdates) {
|
||||
lstObjectUpdates = propertyFilterUpdate.getObjectSet();
|
||||
|
||||
for (ObjectUpdate objUpdate : lstObjectUpdates) {
|
||||
if (objUpdate.getKind() == ObjectUpdateKind.MODIFY || objUpdate.getKind() == ObjectUpdateKind.ENTER ||
|
||||
objUpdate.getKind() == ObjectUpdateKind.LEAVE) {
|
||||
lstPropertyChanges = objUpdate.getChangeSet();
|
||||
|
||||
for (PropertyChange propchg : lstPropertyChanges) {
|
||||
updateValues(endWaitProps, endVals, propchg);
|
||||
updateValues(filterProps, filterVals, propchg);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Object expectedValue;
|
||||
|
||||
// Check if the expected values have been reached and exit the loop if done.
|
||||
// Also, exit the WaitForUpdates loop if this is the case.
|
||||
for (int chgi = 0; chgi < endVals.length && !reached; chgi++) {
|
||||
for (int vali = 0; vali < expectedVals[chgi].length && !reached; vali++) {
|
||||
expectedValue = expectedVals[chgi][vali];
|
||||
|
||||
reached = expectedValue.equals(endVals[chgi]) || reached;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy the filter when we are done.
|
||||
connection.getVimPortType().destroyPropertyFilter(filterSpecRef);
|
||||
|
||||
return filterVals;
|
||||
}
|
||||
|
||||
private static void updateValues(String[] props, Object[] vals, PropertyChange propertyChange) {
|
||||
for (int findi = 0; findi < props.length; findi++) {
|
||||
if (propertyChange.getName().lastIndexOf(props[findi]) >= 0) {
|
||||
if (propertyChange.getOp() == PropertyChangeOp.REMOVE) {
|
||||
vals[findi] = "";
|
||||
} else {
|
||||
vals[findi] = propertyChange.getVal();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static List<ObjectContent> retrievePropertiesAllObjects(List<PropertyFilterSpec> lstPfs,
|
||||
VimPortType vimPortType, ManagedObjectReference propCollectorRef) throws Exception {
|
||||
List<ObjectContent> lstObjectContent = new ArrayList<>();
|
||||
|
||||
RetrieveOptions retrieveOptions = new RetrieveOptions();
|
||||
|
||||
RetrieveResult rslts = vimPortType.retrievePropertiesEx(propCollectorRef, lstPfs, retrieveOptions);
|
||||
|
||||
if (rslts != null && rslts.getObjects() != null && rslts.getObjects().size() > 0) {
|
||||
List<ObjectContent> lstOc = new ArrayList<>();
|
||||
|
||||
for (ObjectContent oc : rslts.getObjects()) {
|
||||
lstOc.add(oc);
|
||||
}
|
||||
|
||||
lstObjectContent.addAll(lstOc);
|
||||
}
|
||||
|
||||
String token = null;
|
||||
|
||||
if (rslts != null && rslts.getToken() != null) {
|
||||
token = rslts.getToken();
|
||||
}
|
||||
|
||||
while (token != null && !token.isEmpty()) {
|
||||
rslts = vimPortType.continueRetrievePropertiesEx(propCollectorRef, token);
|
||||
token = null;
|
||||
|
||||
if (rslts != null) {
|
||||
token = rslts.getToken();
|
||||
|
||||
if (rslts.getObjects() != null && rslts.getObjects().size() > 0) {
|
||||
List<ObjectContent> lstOc = new ArrayList<>();
|
||||
|
||||
for (ObjectContent oc : rslts.getObjects()) {
|
||||
lstOc.add(oc);
|
||||
}
|
||||
|
||||
lstObjectContent.addAll(lstOc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lstObjectContent;
|
||||
}
|
||||
|
||||
private static TraversalSpec getVMTraversalSpec() {
|
||||
// Create a TraversalSpec that starts from the 'root' objects
|
||||
// and traverses the inventory tree to get to the VirtualMachines.
|
||||
// Build the traversal specs bottoms up
|
||||
|
||||
// TraversalSpec to get to the VM in a vApp
|
||||
TraversalSpec vAppToVM = new TraversalSpec();
|
||||
|
||||
vAppToVM.setName("vAppToVM");
|
||||
vAppToVM.setType("VirtualApp");
|
||||
vAppToVM.setPath("vm");
|
||||
|
||||
// TraversalSpec for vApp to vApp
|
||||
TraversalSpec vAppToVApp = new TraversalSpec();
|
||||
|
||||
vAppToVApp.setName("vAppToVApp");
|
||||
vAppToVApp.setType("VirtualApp");
|
||||
vAppToVApp.setPath("resourcePool");
|
||||
|
||||
// SelectionSpec for vApp-to-vApp recursion
|
||||
SelectionSpec vAppRecursion = new SelectionSpec();
|
||||
|
||||
vAppRecursion.setName("vAppToVApp");
|
||||
|
||||
// SelectionSpec to get to a VM in the vApp
|
||||
SelectionSpec vmInVApp = new SelectionSpec();
|
||||
|
||||
vmInVApp.setName("vAppToVM");
|
||||
|
||||
// SelectionSpec for both vApp to vApp and vApp to VM
|
||||
List<SelectionSpec> vAppToVMSS = new ArrayList<>();
|
||||
|
||||
vAppToVMSS.add(vAppRecursion);
|
||||
vAppToVMSS.add(vmInVApp);
|
||||
|
||||
vAppToVApp.getSelectSet().addAll(vAppToVMSS);
|
||||
|
||||
// This SelectionSpec is used for recursion for Folder recursion
|
||||
SelectionSpec sSpec = new SelectionSpec();
|
||||
|
||||
sSpec.setName("VisitFolders");
|
||||
|
||||
// Traversal to get to the vmFolder from DataCenter
|
||||
TraversalSpec dataCenterToVMFolder = new TraversalSpec();
|
||||
|
||||
dataCenterToVMFolder.setName("DataCenterToVMFolder");
|
||||
dataCenterToVMFolder.setType("Datacenter");
|
||||
dataCenterToVMFolder.setPath("vmFolder");
|
||||
dataCenterToVMFolder.setSkip(false);
|
||||
|
||||
dataCenterToVMFolder.getSelectSet().add(sSpec);
|
||||
|
||||
// TraversalSpec to get to the DataCenter from rootFolder
|
||||
TraversalSpec traversalSpec = new TraversalSpec();
|
||||
|
||||
traversalSpec.setName("VisitFolders");
|
||||
traversalSpec.setType("Folder");
|
||||
traversalSpec.setPath("childEntity");
|
||||
traversalSpec.setSkip(false);
|
||||
|
||||
List<SelectionSpec> sSpecArr = new ArrayList<>();
|
||||
|
||||
sSpecArr.add(sSpec);
|
||||
sSpecArr.add(dataCenterToVMFolder);
|
||||
sSpecArr.add(vAppToVM);
|
||||
sSpecArr.add(vAppToVApp);
|
||||
|
||||
traversalSpec.getSelectSet().addAll(sSpecArr);
|
||||
|
||||
return traversalSpec;
|
||||
}
|
||||
|
||||
private static void trustAllHttpsCertificates() throws Exception {
|
||||
// Create a trust manager that does not validate certificate chains:
|
||||
TrustManager[] trustAllCerts = new TrustManager[1];
|
||||
|
||||
TrustManager tm = new TrustAllTrustManager();
|
||||
|
||||
trustAllCerts[0] = tm;
|
||||
|
||||
SSLContext sc = SSLContext.getInstance("SSL");
|
||||
|
||||
SSLSessionContext sslsc = sc.getServerSessionContext();
|
||||
|
||||
sslsc.setSessionTimeout(0);
|
||||
|
||||
sc.init(null, trustAllCerts, null);
|
||||
|
||||
HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
|
||||
}
|
||||
|
||||
private static class TrustAllTrustManager implements TrustManager, X509TrustManager {
|
||||
@Override
|
||||
public X509Certificate[] getAcceptedIssuers() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void checkServerTrusted(X509Certificate[] certs, String authType) throws CertificateException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void checkClientTrusted(X509Certificate[] certs, String authType) throws CertificateException {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2367,6 +2367,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
vm.getDevices().addDevice(getVifDriver(nic.getType(), nic.getName()).plug(nic, vm.getPlatformEmulator(), nicAdapter));
|
||||
}
|
||||
|
||||
public boolean cleanupDisk(Map<String, String> volumeToDisconnect) {
|
||||
return _storagePoolMgr.disconnectPhysicalDisk(volumeToDisconnect);
|
||||
}
|
||||
|
||||
public boolean cleanupDisk(final DiskDef disk) {
|
||||
final String path = disk.getDiskPath();
|
||||
|
||||
|
|
|
|||
|
|
@ -33,24 +33,39 @@ public class MigrateKVMAsync implements Callable<Domain> {
|
|||
private String dxml = "";
|
||||
private String vmName = "";
|
||||
private String destIp = "";
|
||||
private boolean migrateStorage;
|
||||
private boolean autoConvergence;
|
||||
|
||||
/**
 * Creates the async migration worker. (This span contained unmarked pre-patch and
 * post-patch constructor signatures from a mangled diff; this is the resolved
 * post-patch form, which adds the migrateStorage and autoConvergence parameters.)
 *
 * @param libvirtComputingResource resource used to look up the configured migrate speed
 * @param dm              the domain (VM) being migrated
 * @param dconn           connection to the destination libvirt daemon
 * @param dxml            destination domain XML
 * @param migrateStorage  true to also copy non-shared disk storage during migration
 * @param autoConvergence true to enable libvirt's auto-convergence feature
 * @param vmName          name of the VM on the destination
 * @param destIp          private IP of the destination hypervisor
 */
public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml,
        final boolean migrateStorage, final boolean autoConvergence, final String vmName, final String destIp) {
    this.libvirtComputingResource = libvirtComputingResource;

    this.dm = dm;
    this.dconn = dconn;
    this.dxml = dxml;
    this.migrateStorage = migrateStorage;
    this.autoConvergence = autoConvergence;
    this.vmName = vmName;
    this.destIp = destIp;
}
|
||||
|
||||
@Override
|
||||
public Domain call() throws LibvirtException {
|
||||
// set compression flag for migration if libvirt version supports it
|
||||
if (dconn.getLibVirVersion() < 1003000) {
|
||||
return dm.migrate(dconn, 1 << 0, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
|
||||
} else {
|
||||
return dm.migrate(dconn, 1 << 0|1 << 11, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
|
||||
long flags = 1 << 0;
|
||||
|
||||
// set compression flag for migration, if libvirt version supports it
|
||||
if (dconn.getLibVirVersion() >= 1000003) {
|
||||
flags |= 1 << 11;
|
||||
}
|
||||
|
||||
if (migrateStorage) {
|
||||
flags |= 1 << 6;
|
||||
}
|
||||
|
||||
if (autoConvergence && dconn.getLibVirVersion() >= 1002003) {
|
||||
flags |= 1 << 13;
|
||||
}
|
||||
|
||||
return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,10 +20,12 @@
|
|||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.CopyVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.CopyVolumeCommand;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
|
||||
|
|
@ -33,8 +35,13 @@ import com.cloud.resource.CommandWrapper;
|
|||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
@ResourceWrapper(handles = CopyVolumeCommand.class)
|
||||
public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper<CopyVolumeCommand, Answer, LibvirtComputingResource> {
|
||||
private static final Logger LOGGER = Logger.getLogger(LibvirtCopyVolumeCommandWrapper.class);
|
||||
|
||||
@Override
|
||||
public Answer execute(final CopyVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
|
|
@ -46,20 +53,30 @@ public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper<CopyVo
|
|||
* ManagementServerImpl shows that it always sets copyToSecondary to
|
||||
* true
|
||||
*/
|
||||
final boolean copyToSecondary = command.toSecondaryStorage();
|
||||
String volumePath = command.getVolumePath();
|
||||
final StorageFilerTO pool = command.getPool();
|
||||
final String secondaryStorageUrl = command.getSecondaryStorageURL();
|
||||
KVMStoragePool secondaryStoragePool = null;
|
||||
KVMStoragePool primaryPool = null;
|
||||
|
||||
Map<String, String> srcDetails = command.getSrcDetails();
|
||||
|
||||
if (srcDetails != null) {
|
||||
return handleCopyDataFromVolumeToSecondaryStorageUsingSrcDetails(command, libvirtComputingResource);
|
||||
}
|
||||
|
||||
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
|
||||
final boolean copyToSecondary = command.toSecondaryStorage();
|
||||
final StorageFilerTO pool = command.getPool();
|
||||
final String secondaryStorageUrl = command.getSecondaryStorageURL();
|
||||
|
||||
KVMStoragePool secondaryStoragePool = null;
|
||||
String volumePath;
|
||||
KVMStoragePool primaryPool;
|
||||
|
||||
try {
|
||||
try {
|
||||
primaryPool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid());
|
||||
} catch (final CloudRuntimeException e) {
|
||||
if (e.getMessage().contains("not found")) {
|
||||
primaryPool = storagePoolMgr.createStoragePool(pool.getUuid(), pool.getHost(), pool.getPort(), pool.getPath(), pool.getUserInfo(), pool.getType());
|
||||
primaryPool = storagePoolMgr.createStoragePool(pool.getUuid(), pool.getHost(), pool.getPort(), pool.getPath(),
|
||||
pool.getUserInfo(), pool.getType());
|
||||
} else {
|
||||
return new CopyVolumeAnswer(command, false, e.getMessage(), null, null);
|
||||
}
|
||||
|
|
@ -85,6 +102,7 @@ public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper<CopyVo
|
|||
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + volumePath);
|
||||
|
||||
final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(command.getVolumePath() + ".qcow2");
|
||||
|
||||
storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, 0);
|
||||
|
||||
return new CopyVolumeAnswer(command, true, null, null, volumeName);
|
||||
|
|
@ -97,4 +115,61 @@ public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper<CopyVo
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Answer handleCopyDataFromVolumeToSecondaryStorageUsingSrcDetails(CopyVolumeCommand command, LibvirtComputingResource libvirtComputingResource) {
|
||||
KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
PrimaryDataStoreTO srcPrimaryDataStore = null;
|
||||
KVMStoragePool secondaryStoragePool = null;
|
||||
|
||||
Map<String, String> srcDetails = command.getSrcDetails();
|
||||
|
||||
String srcPath = srcDetails.get(DiskTO.IQN);
|
||||
|
||||
if (srcPath == null) {
|
||||
return new CopyVolumeAnswer(command, false, "No IQN was specified", null, null);
|
||||
}
|
||||
|
||||
try {
|
||||
LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
|
||||
String destVolumeName = libvirtUtilitiesHelper.generateUUIDName() + ".qcow2";
|
||||
String destVolumePath = command.getVolumePath() + File.separator;
|
||||
|
||||
String secondaryStorageUrl = command.getSecondaryStorageURL();
|
||||
|
||||
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl);
|
||||
|
||||
secondaryStoragePool.createFolder(File.separator + destVolumePath);
|
||||
|
||||
storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid());
|
||||
|
||||
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + destVolumePath);
|
||||
|
||||
VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData();
|
||||
|
||||
srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore();
|
||||
|
||||
storagePoolMgr.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails);
|
||||
|
||||
KVMPhysicalDisk srcPhysicalDisk = storagePoolMgr.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
|
||||
|
||||
storagePoolMgr.copyPhysicalDisk(srcPhysicalDisk, destVolumeName, secondaryStoragePool, command.getWait() * 1000);
|
||||
|
||||
return new CopyVolumeAnswer(command, true, null, null, destVolumePath + destVolumeName);
|
||||
} catch (final CloudRuntimeException e) {
|
||||
return new CopyVolumeAnswer(command, false, e.toString(), null, null);
|
||||
} finally {
|
||||
try {
|
||||
if (srcPrimaryDataStore != null) {
|
||||
storagePoolMgr.disconnectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.warn("Unable to disconnect from the source device.", e);
|
||||
}
|
||||
|
||||
if (secondaryStoragePool != null) {
|
||||
storagePoolMgr.deleteStoragePool(secondaryStoragePool.getType(), secondaryStoragePool.getUuid());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,13 +30,17 @@ import com.cloud.utils.exception.CloudRuntimeException;
|
|||
|
||||
@ResourceWrapper(handles = DeleteStoragePoolCommand.class)
|
||||
public final class LibvirtDeleteStoragePoolCommandWrapper extends CommandWrapper<DeleteStoragePoolCommand, Answer, LibvirtComputingResource> {
|
||||
|
||||
@Override
|
||||
public Answer execute(final DeleteStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
try {
|
||||
final StorageFilerTO pool = command.getPool();
|
||||
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
storagePoolMgr.deleteStoragePool(pool.getType(), pool.getUuid());
|
||||
// if getRemoveDatastore() is true, then we are dealing with managed storage and can skip the delete logic here
|
||||
if (!command.getRemoveDatastore()) {
|
||||
final StorageFilerTO pool = command.getPool();
|
||||
final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
|
||||
storagePoolMgr.deleteStoragePool(pool.getType(), pool.getUuid());
|
||||
}
|
||||
|
||||
return new Answer(command);
|
||||
} catch (final CloudRuntimeException e) {
|
||||
return new Answer(command, false, e.toString());
|
||||
|
|
|
|||
|
|
@ -19,7 +19,12 @@
|
|||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.InputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
|
@ -28,12 +33,32 @@ import java.util.concurrent.Future;
|
|||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.transform.Transformer;
|
||||
import javax.xml.transform.TransformerConfigurationException;
|
||||
import javax.xml.transform.TransformerException;
|
||||
import javax.xml.transform.TransformerFactory;
|
||||
import javax.xml.transform.dom.DOMSource;
|
||||
import javax.xml.transform.stream.StreamResult;
|
||||
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.Domain;
|
||||
import org.libvirt.DomainInfo.DomainState;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
||||
import org.w3c.dom.Document;
|
||||
import org.w3c.dom.Element;
|
||||
import org.w3c.dom.NamedNodeMap;
|
||||
import org.w3c.dom.Node;
|
||||
import org.w3c.dom.NodeList;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.MigrateAnswer;
|
||||
import com.cloud.agent.api.MigrateCommand;
|
||||
|
|
@ -45,6 +70,7 @@ import com.cloud.hypervisor.kvm.resource.VifDriver;
|
|||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@ResourceWrapper(handles = MigrateCommand.class)
|
||||
public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCommand, Answer, LibvirtComputingResource> {
|
||||
|
|
@ -61,7 +87,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
String result = null;
|
||||
|
||||
List<InterfaceDef> ifaces = null;
|
||||
List<DiskDef> disks = null;
|
||||
List<DiskDef> disks;
|
||||
|
||||
Domain dm = null;
|
||||
Connect dconn = null;
|
||||
|
|
@ -69,6 +95,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
Connect conn = null;
|
||||
String xmlDesc = null;
|
||||
List<Ternary<String, Boolean, String>> vmsnapshots = null;
|
||||
|
||||
try {
|
||||
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
|
||||
|
||||
|
|
@ -79,7 +106,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
/*
|
||||
We replace the private IP address with the address of the destination host.
|
||||
This is because the VNC listens on the private IP address of the hypervisor,
|
||||
but that address is ofcourse different on the target host.
|
||||
but that address is of course different on the target host.
|
||||
|
||||
MigrateCommand.getDestinationIp() returns the private IP address of the target
|
||||
hypervisor. So it's safe to use.
|
||||
|
|
@ -104,12 +131,19 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
// delete the metadata of vm snapshots before migration
|
||||
vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
|
||||
|
||||
Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage = command.getMigrateStorage();
|
||||
|
||||
if (MapUtils.isNotEmpty(mapMigrateStorage)) {
|
||||
xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage);
|
||||
}
|
||||
|
||||
dconn = libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system");
|
||||
|
||||
//run migration in thread so we can monitor it
|
||||
s_logger.info("Live migration of instance " + vmName + " initiated");
|
||||
final ExecutorService executor = Executors.newFixedThreadPool(1);
|
||||
final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, vmName, command.getDestinationIp());
|
||||
final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, MapUtils.isNotEmpty(mapMigrateStorage),
|
||||
command.isAutoConvergence(), vmName, command.getDestinationIp());
|
||||
final Future<Domain> migrateThread = executor.submit(worker);
|
||||
executor.shutdown();
|
||||
long sleeptime = 0;
|
||||
|
|
@ -167,6 +201,21 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
} catch (final TimeoutException e) {
|
||||
s_logger.debug("Timed out while migrating domain: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} catch (final IOException e) {
|
||||
s_logger.debug("IOException: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} catch (final ParserConfigurationException e) {
|
||||
s_logger.debug("ParserConfigurationException: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} catch (final SAXException e) {
|
||||
s_logger.debug("SAXException: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} catch (final TransformerConfigurationException e) {
|
||||
s_logger.debug("TransformerConfigurationException: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} catch (final TransformerException e) {
|
||||
s_logger.debug("TransformerException: " + e.getMessage());
|
||||
result = e.getMessage();
|
||||
} finally {
|
||||
try {
|
||||
if (dm != null && result != null) {
|
||||
|
|
@ -230,4 +279,138 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
return xmlDesc;
|
||||
}
|
||||
|
||||
// Pass in a list of the disks to update in the XML (xmlDesc). Each disk passed in needs to have a serial number. If any disk's serial number in the
|
||||
// list does not match a disk in the XML, an exception should be thrown.
|
||||
// In addition to the serial number, each disk in the list needs the following info:
|
||||
// * The value of the 'type' of the disk (ex. file, block)
|
||||
// * The value of the 'type' of the driver of the disk (ex. qcow2, raw)
|
||||
// * The source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value.
|
||||
private String replaceStorage(String xmlDesc, Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage)
|
||||
throws IOException, ParserConfigurationException, SAXException, TransformerException {
|
||||
InputStream in = IOUtils.toInputStream(xmlDesc);
|
||||
|
||||
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
|
||||
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
|
||||
Document doc = docBuilder.parse(in);
|
||||
|
||||
// Get the root element
|
||||
Node domainNode = doc.getFirstChild();
|
||||
|
||||
NodeList domainChildNodes = domainNode.getChildNodes();
|
||||
|
||||
for (int i = 0; i < domainChildNodes.getLength(); i++) {
|
||||
Node domainChildNode = domainChildNodes.item(i);
|
||||
|
||||
if ("devices".equals(domainChildNode.getNodeName())) {
|
||||
NodeList devicesChildNodes = domainChildNode.getChildNodes();
|
||||
|
||||
for (int x = 0; x < devicesChildNodes.getLength(); x++) {
|
||||
Node deviceChildNode = devicesChildNodes.item(x);
|
||||
|
||||
if ("disk".equals(deviceChildNode.getNodeName())) {
|
||||
Node diskNode = deviceChildNode;
|
||||
|
||||
String sourceFileDevText = getSourceFileDevText(diskNode);
|
||||
|
||||
String path = getPathFromSourceFileDevText(migrateStorage.keySet(), sourceFileDevText);
|
||||
|
||||
if (path != null) {
|
||||
MigrateCommand.MigrateDiskInfo migrateDiskInfo = migrateStorage.remove(path);
|
||||
|
||||
NamedNodeMap diskNodeAttributes = diskNode.getAttributes();
|
||||
Node diskNodeAttribute = diskNodeAttributes.getNamedItem("type");
|
||||
|
||||
diskNodeAttribute.setTextContent(migrateDiskInfo.getDiskType().toString());
|
||||
|
||||
NodeList diskChildNodes = diskNode.getChildNodes();
|
||||
|
||||
for (int z = 0; z < diskChildNodes.getLength(); z++) {
|
||||
Node diskChildNode = diskChildNodes.item(z);
|
||||
|
||||
if ("driver".equals(diskChildNode.getNodeName())) {
|
||||
Node driverNode = diskChildNode;
|
||||
|
||||
NamedNodeMap driverNodeAttributes = driverNode.getAttributes();
|
||||
Node driverNodeAttribute = driverNodeAttributes.getNamedItem("type");
|
||||
|
||||
driverNodeAttribute.setTextContent(migrateDiskInfo.getDriverType().toString());
|
||||
} else if ("source".equals(diskChildNode.getNodeName())) {
|
||||
diskNode.removeChild(diskChildNode);
|
||||
|
||||
Element newChildSourceNode = doc.createElement("source");
|
||||
|
||||
newChildSourceNode.setAttribute(migrateDiskInfo.getSource().toString(), migrateDiskInfo.getSourceText());
|
||||
|
||||
diskNode.appendChild(newChildSourceNode);
|
||||
} else if ("auth".equals(diskChildNode.getNodeName())) {
|
||||
diskNode.removeChild(diskChildNode);
|
||||
} else if ("iotune".equals(diskChildNode.getNodeName())) {
|
||||
diskNode.removeChild(diskChildNode);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!migrateStorage.isEmpty()) {
|
||||
throw new CloudRuntimeException("Disk info was passed into LibvirtMigrateCommandWrapper.replaceStorage that was not used.");
|
||||
}
|
||||
|
||||
return getXml(doc);
|
||||
}
|
||||
|
||||
private String getPathFromSourceFileDevText(Set<String> paths, String sourceFileDevText) {
|
||||
if (paths != null && sourceFileDevText != null) {
|
||||
for (String path : paths) {
|
||||
if (sourceFileDevText.contains(path)) {
|
||||
return path;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getSourceFileDevText(Node diskNode) {
|
||||
NodeList diskChildNodes = diskNode.getChildNodes();
|
||||
|
||||
for (int i = 0; i < diskChildNodes.getLength(); i++) {
|
||||
Node diskChildNode = diskChildNodes.item(i);
|
||||
|
||||
if ("source".equals(diskChildNode.getNodeName())) {
|
||||
NamedNodeMap diskNodeAttributes = diskChildNode.getAttributes();
|
||||
|
||||
Node diskNodeAttribute = diskNodeAttributes.getNamedItem("file");
|
||||
|
||||
if (diskNodeAttribute != null) {
|
||||
return diskNodeAttribute.getTextContent();
|
||||
}
|
||||
|
||||
diskNodeAttribute = diskNodeAttributes.getNamedItem("dev");
|
||||
|
||||
if (diskNodeAttribute != null) {
|
||||
return diskNodeAttribute.getTextContent();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private String getXml(Document doc) throws TransformerException {
|
||||
TransformerFactory transformerFactory = TransformerFactory.newInstance();
|
||||
Transformer transformer = transformerFactory.newTransformer();
|
||||
|
||||
DOMSource source = new DOMSource(doc);
|
||||
|
||||
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
|
||||
StreamResult result = new StreamResult(byteArrayOutputStream);
|
||||
|
||||
transformer.transform(source, result);
|
||||
|
||||
return byteArrayOutputStream.toString();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,95 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.MigrateVolumeCommand;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
@ResourceWrapper(handles = MigrateVolumeCommand.class)
|
||||
public final class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper<MigrateVolumeCommand, Answer, LibvirtComputingResource> {
|
||||
private static final Logger LOGGER = Logger.getLogger(LibvirtMigrateVolumeCommandWrapper.class);
|
||||
|
||||
@Override
|
||||
public Answer execute(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
KVMStoragePoolManager storagePoolManager = libvirtComputingResource.getStoragePoolMgr();
|
||||
|
||||
VolumeObjectTO srcVolumeObjectTO = (VolumeObjectTO)command.getSrcData();
|
||||
PrimaryDataStoreTO srcPrimaryDataStore = (PrimaryDataStoreTO)srcVolumeObjectTO.getDataStore();
|
||||
|
||||
Map<String, String> srcDetails = command.getSrcDetails();
|
||||
|
||||
String srcPath = srcDetails != null ? srcDetails.get(DiskTO.IQN) : srcVolumeObjectTO.getPath();
|
||||
|
||||
VolumeObjectTO destVolumeObjectTO = (VolumeObjectTO)command.getDestData();
|
||||
PrimaryDataStoreTO destPrimaryDataStore = (PrimaryDataStoreTO)destVolumeObjectTO.getDataStore();
|
||||
|
||||
Map<String, String> destDetails = command.getDestDetails();
|
||||
|
||||
String destPath = destDetails != null && destDetails.get(DiskTO.IQN) != null ? destDetails.get(DiskTO.IQN) :
|
||||
(destVolumeObjectTO.getPath() != null ? destVolumeObjectTO.getPath() : UUID.randomUUID().toString());
|
||||
|
||||
try {
|
||||
storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails);
|
||||
|
||||
KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
|
||||
|
||||
KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid());
|
||||
|
||||
storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails);
|
||||
|
||||
storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds());
|
||||
}
|
||||
catch (Exception ex) {
|
||||
return new MigrateVolumeAnswer(command, false, ex.getMessage(), null);
|
||||
}
|
||||
finally {
|
||||
try {
|
||||
storagePoolManager.disconnectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.warn("Unable to disconnect from the destination device.", e);
|
||||
}
|
||||
|
||||
try {
|
||||
storagePoolManager.disconnectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOGGER.warn("Unable to disconnect from the source device.", e);
|
||||
}
|
||||
}
|
||||
|
||||
return new MigrateVolumeAnswer(command, true, null, destPath);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ModifyTargetsAnswer;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
|
||||
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
@ResourceWrapper(handles = ModifyTargetsCommand.class)
|
||||
public final class LibvirtModifyTargetsCommandWrapper extends CommandWrapper<ModifyTargetsCommand, Answer, LibvirtComputingResource> {
|
||||
private static final Logger s_logger = Logger.getLogger(LibvirtMigrateCommandWrapper.class);
|
||||
|
||||
@Override
|
||||
public Answer execute(final ModifyTargetsCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
|
||||
List<Map<String, String>> targets = command.getTargets();
|
||||
|
||||
// When attempting to connect to one or more targets, place the successfully connected path into this List.
|
||||
List<String> connectedPaths = new ArrayList<>(targets.size());
|
||||
|
||||
for (Map<String, String> target : targets) {
|
||||
StoragePoolType storagePoolType = StoragePoolType.valueOf(target.get(ModifyTargetsCommand.STORAGE_TYPE));
|
||||
String storageUuid = target.get(ModifyTargetsCommand.STORAGE_UUID);
|
||||
String path = target.get(ModifyTargetsCommand.IQN);
|
||||
|
||||
if (command.getAdd()) {
|
||||
if (storagePoolMgr.connectPhysicalDisk(storagePoolType, storageUuid, path, target)) {
|
||||
KVMPhysicalDisk kvmPhysicalDisk = storagePoolMgr.getPhysicalDisk(storagePoolType, storageUuid, path);
|
||||
|
||||
connectedPaths.add(kvmPhysicalDisk.getPath());
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Unable to connect to the following target: " + path);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (!storagePoolMgr.disconnectPhysicalDisk(storagePoolType, storageUuid, path)) {
|
||||
throw new CloudRuntimeException("Unable to disconnect from the following target: " + path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ModifyTargetsAnswer modifyTargetsAnswer = new ModifyTargetsAnswer();
|
||||
|
||||
modifyTargetsAnswer.setConnectedPaths(connectedPaths);
|
||||
|
||||
return modifyTargetsAnswer;
|
||||
}
|
||||
}
|
||||
|
|
@ -45,6 +45,11 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
|
|||
@Override
|
||||
public Answer execute(final PrepareForMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
final VirtualMachineTO vm = command.getVirtualMachine();
|
||||
|
||||
if (command.isRollback()) {
|
||||
return handleRollback(command, libvirtComputingResource);
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Preparing host for migrating " + vm);
|
||||
}
|
||||
|
|
@ -89,4 +94,15 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Answer handleRollback(PrepareForMigrationCommand command, LibvirtComputingResource libvirtComputingResource) {
|
||||
KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
|
||||
VirtualMachineTO vmTO = command.getVirtualMachine();
|
||||
|
||||
if (!storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vmTO)) {
|
||||
return new PrepareForMigrationAnswer(command, "failed to disconnect physical disks from host");
|
||||
}
|
||||
|
||||
return new PrepareForMigrationAnswer(command);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,8 +19,9 @@
|
|||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.util.List;
|
||||
import java.io.File;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.ssh.SshHelper;
|
||||
|
|
@ -88,9 +89,23 @@ public final class LibvirtStopCommandWrapper extends CommandWrapper<StopCommand,
|
|||
final String result = libvirtComputingResource.stopVM(conn, vmName, command.isForceStop());
|
||||
|
||||
if (result == null) {
|
||||
for (final DiskDef disk : disks) {
|
||||
libvirtComputingResource.cleanupDisk(disk);
|
||||
if (disks != null && disks.size() > 0) {
|
||||
for (final DiskDef disk : disks) {
|
||||
libvirtComputingResource.cleanupDisk(disk);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// When using iSCSI-based managed storage, if the user shuts a VM down from the guest OS (as opposed to doing so from CloudStack),
|
||||
// info needs to be passed to the KVM agent to have it disconnect KVM from the applicable iSCSI volumes.
|
||||
List<Map<String, String>> volumesToDisconnect = command.getVolumesToDisconnect();
|
||||
|
||||
if (volumesToDisconnect != null) {
|
||||
for (Map<String, String> volumeToDisconnect : volumesToDisconnect) {
|
||||
libvirtComputingResource.cleanupDisk(volumeToDisconnect);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (final InterfaceDef iface : ifaces) {
|
||||
// We don't know which "traffic type" is associated with
|
||||
// each interface at this point, so inform all vif drivers
|
||||
|
|
|
|||
|
|
@ -20,12 +20,15 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgFile;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
|
||||
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.ProvisioningType;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.utils.StringUtils;
|
||||
|
|
@ -37,7 +40,7 @@ import com.cloud.utils.script.Script;
|
|||
public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
||||
private static final Logger s_logger = Logger.getLogger(IscsiAdmStorageAdaptor.class);
|
||||
|
||||
private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<String, KVMStoragePool>();
|
||||
private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
|
||||
|
||||
@Override
|
||||
public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) {
|
||||
|
|
@ -115,7 +118,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10 --login
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --login
|
||||
iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger);
|
||||
|
||||
iScsiAdmCmd.add("-m", "node");
|
||||
|
|
@ -165,6 +168,23 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
private void waitForDiskToBecomeUnavailable(String host, int port, String iqn, String lun) {
|
||||
int numberOfTries = 10;
|
||||
int timeBetweenTries = 1000;
|
||||
|
||||
String deviceByPath = getByPath(host, port, "/" + iqn + "/" + lun);
|
||||
|
||||
while (getDeviceSize(deviceByPath) > 0 && numberOfTries > 0) {
|
||||
numberOfTries--;
|
||||
|
||||
try {
|
||||
Thread.sleep(timeBetweenTries);
|
||||
} catch (Exception ex) {
|
||||
// don't do anything
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void executeChapCommand(String path, KVMStoragePool pool, String nParameter, String vParameter, String detail) throws Exception {
|
||||
Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger);
|
||||
|
||||
|
|
@ -193,13 +213,13 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
|
||||
// example by-path: /dev/disk/by-path/ip-192.168.233.10:3260-iscsi-iqn.2012-03.com.solidfire:storagepool2-lun-0
|
||||
private String getByPath(String host, String path) {
|
||||
return "/dev/disk/by-path/ip-" + host + "-iscsi-" + getIqn(path) + "-lun-" + getLun(path);
|
||||
// Builds the udev by-path device node for an iSCSI LUN, e.g.
// /dev/disk/by-path/ip-192.168.233.10:3260-iscsi-iqn.2012-03.com.solidfire:storagepool2-lun-0
// The IQN and LUN are parsed out of the combined "path" string by getIqn/getLun.
private static String getByPath(String host, int port, String path) {
    return "/dev/disk/by-path/ip-" + host + ":" + port + "-iscsi-" + getIqn(path) + "-lun-" + getLun(path);
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
|
||||
String deviceByPath = getByPath(pool.getSourceHost() + ":" + pool.getSourcePort(), volumeUuid);
|
||||
String deviceByPath = getByPath(pool.getSourceHost(), pool.getSourcePort(), volumeUuid);
|
||||
KVMPhysicalDisk physicalDisk = new KVMPhysicalDisk(deviceByPath, volumeUuid, pool);
|
||||
|
||||
physicalDisk.setFormat(PhysicalDiskFormat.RAW);
|
||||
|
|
@ -226,6 +246,9 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
|
||||
return 0;
|
||||
}
|
||||
else {
|
||||
s_logger.info("Successfully retrieved the size of device " + deviceByPath);
|
||||
}
|
||||
|
||||
return Long.parseLong(parser.getLine());
|
||||
}
|
||||
|
|
@ -252,10 +275,10 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
return tmp[index].trim();
|
||||
}
|
||||
|
||||
public boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) {
|
||||
private boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) {
|
||||
// use iscsiadm to log out of the iSCSI target and un-discover it
|
||||
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10 --logout
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --logout
|
||||
Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger);
|
||||
|
||||
iScsiAdmCmd.add("-m", "node");
|
||||
|
|
@ -295,6 +318,8 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
System.out.println("Removed iSCSI target /" + iqn + "/" + lun);
|
||||
}
|
||||
|
||||
waitForDiskToBecomeUnavailable(host, port, iqn, lun);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -303,6 +328,19 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
return disconnectPhysicalDisk(pool.getSourceHost(), pool.getSourcePort(), getIqn(volumeUuid), getLun(volumeUuid));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
|
||||
String host = volumeToDisconnect.get(DiskTO.STORAGE_HOST);
|
||||
String port = volumeToDisconnect.get(DiskTO.STORAGE_PORT);
|
||||
String path = volumeToDisconnect.get(DiskTO.IQN);
|
||||
|
||||
if (host != null && port != null && path != null) {
|
||||
return disconnectPhysicalDisk(host, Integer.parseInt(port), getIqn(path), getLun(path));
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath) {
|
||||
String search1 = "/dev/disk/by-path/ip-";
|
||||
|
|
@ -310,7 +348,7 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
String search3 = "-iscsi-";
|
||||
String search4 = "-lun-";
|
||||
|
||||
if (localPath.indexOf(search3) == -1) {
|
||||
if (!localPath.contains(search3)) {
|
||||
// this volume doesn't belong to this adaptor, so just return true
|
||||
return true;
|
||||
}
|
||||
|
|
@ -356,8 +394,37 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) {
|
||||
throw new UnsupportedOperationException("Copying a disk is not supported in this configuration.");
|
||||
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk srcDisk, String destVolumeUuid, KVMStoragePool destPool, int timeout) {
|
||||
QemuImg q = new QemuImg(timeout);
|
||||
|
||||
QemuImgFile srcFile;
|
||||
|
||||
KVMStoragePool srcPool = srcDisk.getPool();
|
||||
|
||||
if (srcPool.getType() == StoragePoolType.RBD) {
|
||||
srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(), srcPool.getSourcePort(),
|
||||
srcPool.getAuthUserName(), srcPool.getAuthSecret(),
|
||||
srcDisk.getPath()),srcDisk.getFormat());
|
||||
} else {
|
||||
srcFile = new QemuImgFile(srcDisk.getPath(), srcDisk.getFormat());
|
||||
}
|
||||
|
||||
KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(destVolumeUuid);
|
||||
|
||||
QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat());
|
||||
|
||||
try {
|
||||
q.convert(srcFile, destFile);
|
||||
} catch (QemuImgException ex) {
|
||||
String msg = "Failed to copy data from " + srcDisk.getPath() + " to " +
|
||||
destDisk.getPath() + ". The error was the following: " + ex.getMessage();
|
||||
|
||||
s_logger.error(msg);
|
||||
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
return destPool.getPhysicalDisk(destVolumeUuid);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -158,6 +158,18 @@ public class KVMStoragePoolManager {
|
|||
return result;
|
||||
}
|
||||
|
||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
|
||||
for (Map.Entry<String, StorageAdaptor> set : _storageMapper.entrySet()) {
|
||||
StorageAdaptor adaptor = set.getValue();
|
||||
|
||||
if (adaptor.disconnectPhysicalDisk(volumeToDisconnect)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
public boolean disconnectPhysicalDiskByPath(String path) {
|
||||
for (Map.Entry<String, StorageAdaptor> set : _storageMapper.entrySet()) {
|
||||
StorageAdaptor adaptor = set.getValue();
|
||||
|
|
|
|||
|
|
@ -237,8 +237,17 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
} else if (destData instanceof TemplateObjectTO) {
|
||||
final TemplateObjectTO destTempl = (TemplateObjectTO)destData;
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
TemplateObjectTO destTempl = (TemplateObjectTO)destData;
|
||||
|
||||
Map<String, String> details = primaryStore.getDetails();
|
||||
|
||||
String path = details != null ? details.get("managedStoreTarget") : null;
|
||||
|
||||
storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details);
|
||||
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
|
||||
} else {
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
}
|
||||
|
|
@ -422,24 +431,41 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
Map<String, String> details = cmd.getOptions2();
|
||||
|
||||
String path = details != null ? details.get(DiskTO.IQN) : null;
|
||||
|
||||
storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details);
|
||||
|
||||
final String volumeName = UUID.randomUUID().toString();
|
||||
|
||||
final int index = srcVolumePath.lastIndexOf(File.separator);
|
||||
final String volumeDir = srcVolumePath.substring(0, index);
|
||||
String srcVolumeName = srcVolumePath.substring(index + 1);
|
||||
|
||||
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(secondaryStorageUrl + File.separator + volumeDir);
|
||||
|
||||
if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) {
|
||||
srcVolumeName = srcVolumeName + ".qcow2";
|
||||
}
|
||||
|
||||
final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(srcVolumeName);
|
||||
|
||||
volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString()));
|
||||
final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, volumeName, primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, path != null ? path : volumeName, primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
|
||||
|
||||
final VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
|
||||
newVol.setFormat(ImageFormat.valueOf(newDisk.getFormat().toString().toUpperCase()));
|
||||
newVol.setPath(volumeName);
|
||||
newVol.setPath(path != null ? path : volumeName);
|
||||
|
||||
return new CopyCmdAnswer(newVol);
|
||||
} catch (final CloudRuntimeException e) {
|
||||
s_logger.debug("Failed to ccopyVolumeFromImageCacheToPrimary: ", e);
|
||||
s_logger.debug("Failed to copyVolumeFromImageCacheToPrimary: ", e);
|
||||
|
||||
return new CopyCmdAnswer(e.toString());
|
||||
} finally {
|
||||
if (secondaryStoragePool != null) {
|
||||
|
|
@ -496,6 +522,13 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
|
||||
@Override
|
||||
public Answer createTemplateFromVolume(final CopyCommand cmd) {
|
||||
Map<String, String> details = cmd.getOptions();
|
||||
|
||||
if (details != null && details.get(DiskTO.IQN) != null) {
|
||||
// use the managed-storage approach
|
||||
return createTemplateFromVolumeOrSnapshot(cmd);
|
||||
}
|
||||
|
||||
final DataTO srcData = cmd.getSrcTO();
|
||||
final DataTO destData = cmd.getDestTO();
|
||||
final int wait = cmd.getWaitInMillSeconds();
|
||||
|
|
@ -510,7 +543,8 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
final NfsTO nfsImageStore = (NfsTO)imageStore;
|
||||
|
||||
KVMStoragePool secondaryStorage = null;
|
||||
KVMStoragePool primary = null;
|
||||
KVMStoragePool primary;
|
||||
|
||||
try {
|
||||
final String templateFolder = template.getPath();
|
||||
|
||||
|
|
@ -614,8 +648,139 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Answer createTemplateFromSnapshot(final CopyCommand cmd) {
|
||||
return null; //To change body of implemented methods use File | Settings | File Templates.
|
||||
public Answer createTemplateFromSnapshot(CopyCommand cmd) {
|
||||
Map<String, String> details = cmd.getOptions();
|
||||
|
||||
if (details != null && details.get(DiskTO.IQN) != null) {
|
||||
// use the managed-storage approach
|
||||
return createTemplateFromVolumeOrSnapshot(cmd);
|
||||
}
|
||||
|
||||
return new CopyCmdAnswer("operation not supported");
|
||||
}
|
||||
|
||||
private Answer createTemplateFromVolumeOrSnapshot(CopyCommand cmd) {
|
||||
DataTO srcData = cmd.getSrcTO();
|
||||
|
||||
final boolean isVolume;
|
||||
|
||||
if (srcData instanceof VolumeObjectTO) {
|
||||
isVolume = true;
|
||||
}
|
||||
else if (srcData instanceof SnapshotObjectTO) {
|
||||
isVolume = false;
|
||||
}
|
||||
else {
|
||||
return new CopyCmdAnswer("unsupported object type");
|
||||
}
|
||||
|
||||
PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore();
|
||||
|
||||
DataTO destData = cmd.getDestTO();
|
||||
TemplateObjectTO template = (TemplateObjectTO)destData;
|
||||
DataStoreTO imageStore = template.getDataStore();
|
||||
|
||||
if (!(imageStore instanceof NfsTO)) {
|
||||
return new CopyCmdAnswer("unsupported protocol");
|
||||
}
|
||||
|
||||
NfsTO nfsImageStore = (NfsTO)imageStore;
|
||||
|
||||
KVMStoragePool secondaryStorage = null;
|
||||
|
||||
try {
|
||||
Map<String, String> details = cmd.getOptions();
|
||||
|
||||
String path = details != null ? details.get(DiskTO.IQN) : null;
|
||||
|
||||
if (path == null) {
|
||||
new CloudRuntimeException("The 'path' field must be specified.");
|
||||
}
|
||||
|
||||
storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details);
|
||||
|
||||
KVMPhysicalDisk srcDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
|
||||
|
||||
secondaryStorage = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl());
|
||||
|
||||
String templateFolder = template.getPath();
|
||||
String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateFolder;
|
||||
|
||||
storageLayer.mkdirs(tmpltPath);
|
||||
|
||||
String templateName = UUID.randomUUID().toString();
|
||||
|
||||
s_logger.debug("Converting " + srcDisk.getFormat().toString() + " disk " + srcDisk.getPath() + " into template " + templateName);
|
||||
|
||||
String destName = templateFolder + "/" + templateName + ".qcow2";
|
||||
|
||||
storagePoolMgr.copyPhysicalDisk(srcDisk, destName, secondaryStorage, cmd.getWaitInMillSeconds());
|
||||
|
||||
File templateProp = new File(tmpltPath + "/template.properties");
|
||||
|
||||
if (!templateProp.exists()) {
|
||||
templateProp.createNewFile();
|
||||
}
|
||||
|
||||
String templateContent = "filename=" + templateName + ".qcow2" + System.getProperty("line.separator");
|
||||
|
||||
DateFormat dateFormat = new SimpleDateFormat("MM_dd_yyyy");
|
||||
Date date = new Date();
|
||||
|
||||
if (isVolume) {
|
||||
templateContent += "volume.name=" + dateFormat.format(date) + System.getProperty("line.separator");
|
||||
}
|
||||
else {
|
||||
templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator");
|
||||
}
|
||||
|
||||
FileOutputStream templFo = new FileOutputStream(templateProp);
|
||||
|
||||
templFo.write(templateContent.getBytes());
|
||||
templFo.flush();
|
||||
templFo.close();
|
||||
|
||||
Map<String, Object> params = new HashMap<>();
|
||||
|
||||
params.put(StorageLayer.InstanceConfigKey, storageLayer);
|
||||
|
||||
Processor qcow2Processor = new QCOW2Processor();
|
||||
|
||||
qcow2Processor.configure("QCOW2 Processor", params);
|
||||
|
||||
FormatInfo info = qcow2Processor.process(tmpltPath, null, templateName);
|
||||
|
||||
TemplateLocation loc = new TemplateLocation(storageLayer, tmpltPath);
|
||||
|
||||
loc.create(1, true, templateName);
|
||||
loc.addFormat(info);
|
||||
loc.save();
|
||||
|
||||
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path);
|
||||
|
||||
TemplateObjectTO newTemplate = new TemplateObjectTO();
|
||||
|
||||
newTemplate.setPath(templateFolder + File.separator + templateName + ".qcow2");
|
||||
newTemplate.setSize(info.virtualSize);
|
||||
newTemplate.setPhysicalSize(info.size);
|
||||
newTemplate.setFormat(ImageFormat.QCOW2);
|
||||
newTemplate.setName(templateName);
|
||||
|
||||
return new CopyCmdAnswer(newTemplate);
|
||||
} catch (Exception ex) {
|
||||
if (isVolume) {
|
||||
s_logger.debug("Failed to create template from volume: ", ex);
|
||||
}
|
||||
else {
|
||||
s_logger.debug("Failed to create template from snapshot: ", ex);
|
||||
}
|
||||
|
||||
return new CopyCmdAnswer(ex.toString());
|
||||
} finally {
|
||||
if (secondaryStorage != null) {
|
||||
secondaryStorage.delete();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected String copyToS3(final File srcFile, final S3TO destStore, final String destPath) throws InterruptedException {
|
||||
|
|
@ -1327,7 +1492,17 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
final String primaryUuid = pool.getUuid();
|
||||
final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getPoolType(), primaryUuid);
|
||||
final String volUuid = UUID.randomUUID().toString();
|
||||
final KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
Map<String, String> details = cmd.getOptions2();
|
||||
|
||||
String path = details != null ? details.get(DiskTO.IQN) : null;
|
||||
|
||||
storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path, details);
|
||||
|
||||
KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, path != null ? path : volUuid, primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path);
|
||||
|
||||
final VolumeObjectTO newVol = new VolumeObjectTO();
|
||||
newVol.setPath(disk.getName());
|
||||
newVol.setSize(disk.getVirtualSize());
|
||||
|
|
|
|||
|
|
@ -760,6 +760,12 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
return true;
|
||||
}
|
||||
|
||||
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
    // This hook exists for managed storage that needs to clean up per-volume
    // connections after use. This adaptor has no such cleanup to perform, so
    // return false ("not handled") and let another adaptor claim the volume
    // — see KVMStoragePoolManager.disconnectPhysicalDisk(Map), which tries
    // each adaptor until one returns true.
    return false;
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath) {
|
||||
// we've only ever cleaned up ISOs that are NFS mounted
|
||||
|
|
|
|||
|
|
@ -260,6 +260,11 @@ public class ManagedNfsStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
    // No per-volume disconnect logic for this adaptor. Returning false means
    // "not handled here", so KVMStoragePoolManager.disconnectPhysicalDisk(Map)
    // will continue trying the remaining adaptors.
    return false;
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath) {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -48,6 +48,8 @@ public interface StorageAdaptor {
|
|||
// given disk path (per database) and pool, clean up disk on host
|
||||
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool);
|
||||
|
||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect);
|
||||
|
||||
// given local path to file/device (per Libvirt XML), 1) check that device is
|
||||
// handled by your adaptor, return false if not. 2) clean up device, return true
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath);
|
||||
|
|
|
|||
|
|
@ -253,6 +253,7 @@ import com.cloud.hypervisor.vmware.mo.DatastoreMO;
|
|||
import com.cloud.hypervisor.vmware.mo.DiskControllerType;
|
||||
import com.cloud.hypervisor.vmware.mo.FeatureKeyConstants;
|
||||
import com.cloud.hypervisor.vmware.mo.HostMO;
|
||||
import com.cloud.hypervisor.vmware.mo.HostDatastoreSystemMO;
|
||||
import com.cloud.hypervisor.vmware.mo.HostStorageSystemMO;
|
||||
import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper;
|
||||
import com.cloud.hypervisor.vmware.mo.NetworkDetails;
|
||||
|
|
@ -308,6 +309,8 @@ import com.cloud.agent.api.GetVmIpAddressCommand;
|
|||
public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer {
|
||||
private static final Logger s_logger = Logger.getLogger(VmwareResource.class);
|
||||
|
||||
private static final Random RANDOM = new Random(System.nanoTime());
|
||||
|
||||
protected String _name;
|
||||
|
||||
protected final long _opsTimeout = 900000; // 15 minutes time out to time
|
||||
|
|
@ -680,10 +683,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
boolean useWorkerVm = false;
|
||||
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
String poolId = cmd.getPoolUuid();
|
||||
VirtualMachineMO vmMo = null;
|
||||
DatastoreMO dsMo = null;
|
||||
ManagedObjectReference morDS = null;
|
||||
|
||||
String vmdkDataStorePath = null;
|
||||
|
||||
try {
|
||||
|
|
@ -693,43 +694,80 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
} else if (newSize == oldSize) {
|
||||
return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB);
|
||||
}
|
||||
|
||||
if (vmName.equalsIgnoreCase("none")) {
|
||||
// we need to spawn a worker VM to attach the volume to and
|
||||
// resize the volume.
|
||||
// we need to spawn a worker VM to attach the volume to and resize the volume.
|
||||
useWorkerVm = true;
|
||||
vmName = getWorkerName(getServiceContext(), cmd, 0);
|
||||
|
||||
morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
|
||||
dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
|
||||
String poolId = cmd.getPoolUuid();
|
||||
|
||||
ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId);
|
||||
DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
|
||||
|
||||
s_logger.info("Create worker VM " + vmName);
|
||||
|
||||
vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName);
|
||||
|
||||
if (vmMo == null) {
|
||||
throw new Exception("Unable to create a worker VM for volume resize");
|
||||
}
|
||||
|
||||
synchronized (this) {
|
||||
vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk");
|
||||
vmMo.attachDisk(new String[] {vmdkDataStorePath}, morDS);
|
||||
|
||||
vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
|
||||
}
|
||||
}
|
||||
|
||||
// find VM through datacenter (VM is not at the target host yet)
|
||||
vmMo = hyperHost.findVmOnPeerHyperHost(vmName);
|
||||
|
||||
if (vmMo == null) {
|
||||
String msg = "VM " + vmName + " does not exist in VMware datacenter";
|
||||
|
||||
s_logger.error(msg);
|
||||
|
||||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
Pair<VirtualDisk, String> vdisk = vmMo.getDiskDevice(path);
|
||||
|
||||
if (vdisk == null) {
|
||||
if (s_logger.isTraceEnabled())
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("resize volume done (failed)");
|
||||
}
|
||||
|
||||
throw new Exception("No such disk device: " + path);
|
||||
}
|
||||
|
||||
// IDE virtual disk cannot be re-sized if VM is running
|
||||
if (vdisk.second() != null && vdisk.second().contains("ide")) {
|
||||
throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. "
|
||||
+ "Please re-try when virtual disk is attached to a VM using SCSI controller.");
|
||||
throw new Exception("Re-sizing a virtual disk over an IDE controller is not supported in the VMware hypervisor. " +
|
||||
"Please re-try when virtual disk is attached to a VM using a SCSI controller.");
|
||||
}
|
||||
|
||||
if (cmd.isManaged()) {
|
||||
VmwareContext context = getServiceContext();
|
||||
|
||||
ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
|
||||
ClusterMO clusterMO = new ClusterMO(context, morCluster);
|
||||
|
||||
List<Pair<ManagedObjectReference, String>> lstHosts = clusterMO.getClusterHosts();
|
||||
|
||||
Collections.shuffle(lstHosts, RANDOM);
|
||||
|
||||
Pair<ManagedObjectReference, String> host = lstHosts.get(0);
|
||||
|
||||
HostMO hostMO = new HostMO(context, host.first());
|
||||
HostDatastoreSystemMO hostDatastoreSystem = hostMO.getHostDatastoreSystemMO();
|
||||
|
||||
String iScsiName = cmd.get_iScsiName();
|
||||
|
||||
ManagedObjectReference morDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, VmwareResource.getDatastoreName(iScsiName));
|
||||
DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDS);
|
||||
|
||||
_storageProcessor.expandDatastore(hostDatastoreSystem, dsMo);
|
||||
}
|
||||
|
||||
if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi"))
|
||||
|
|
@ -744,17 +782,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
throw new Exception("Resize is not supported because Disk device has Parent "+ ((VirtualDiskFlatVer2BackingInfo)disk.getBacking()).getParent().getUuid());
|
||||
}
|
||||
String vmdkAbsFile = getAbsoluteVmdkFile(disk);
|
||||
|
||||
if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
|
||||
vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
|
||||
}
|
||||
|
||||
disk.setCapacityInKB(newSize);
|
||||
|
||||
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
|
||||
VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec();
|
||||
|
||||
deviceConfigSpec.setDevice(disk);
|
||||
deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT);
|
||||
|
||||
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
|
||||
|
||||
vmConfigSpec.getDeviceChange().add(deviceConfigSpec);
|
||||
|
||||
if (!vmMo.configureVm(vmConfigSpec)) {
|
||||
throw new Exception("Failed to configure VM to resize disk. vmName: " + vmName);
|
||||
}
|
||||
|
|
@ -762,12 +805,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return new ResizeVolumeAnswer(cmd, true, "success", newSize * 1024);
|
||||
} catch (Exception e) {
|
||||
s_logger.error("Unable to resize volume", e);
|
||||
|
||||
String error = "Failed to resize volume: " + e.getMessage();
|
||||
|
||||
return new ResizeVolumeAnswer(cmd, false, error);
|
||||
} finally {
|
||||
try {
|
||||
if (useWorkerVm == true) {
|
||||
if (useWorkerVm) {
|
||||
s_logger.info("Destroy worker VM after volume resize");
|
||||
|
||||
vmMo.detachDisk(vmdkDataStorePath, false);
|
||||
vmMo.destroy();
|
||||
}
|
||||
|
|
@ -2190,9 +2236,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, String.valueOf(nicMask));
|
||||
postNvpConfigBeforeStart(vmMo, vmSpec);
|
||||
|
||||
Map<String, String> iqnToPath = new HashMap<String, String>();
|
||||
Map<String, Map<String, String>> iqnToData = new HashMap<>();
|
||||
|
||||
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToPath, hyperHost, context);
|
||||
postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToData, hyperHost, context);
|
||||
|
||||
//
|
||||
// Power-on VM
|
||||
|
|
@ -2203,7 +2249,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
StartAnswer startAnswer = new StartAnswer(cmd);
|
||||
|
||||
startAnswer.setIqnToPath(iqnToPath);
|
||||
startAnswer.setIqnToData(iqnToData);
|
||||
|
||||
// Since VM was successfully powered-on, if there was an existing VM in a different cluster that was unregistered, delete all the files associated with it.
|
||||
if (existingVmName != null && existingVmFileLayout != null) {
|
||||
|
|
@ -2460,11 +2506,21 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
final String datastoreDiskPath;
|
||||
|
||||
if (isManaged) {
|
||||
String vmdkPath = new DatastoreFile(volumeTO.getPath()).getFileBaseName();
|
||||
|
||||
if (volumeTO.getVolumeType() == Volume.Type.ROOT) {
|
||||
datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getName(), VmwareManager.s_vmwareSearchExcludeFolder.value());
|
||||
if (vmdkPath == null) {
|
||||
vmdkPath = volumeTO.getName();
|
||||
}
|
||||
|
||||
datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, vmdkPath);
|
||||
}
|
||||
else {
|
||||
datastoreDiskPath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
|
||||
if (vmdkPath == null) {
|
||||
vmdkPath = dsMo.getName();
|
||||
}
|
||||
|
||||
datastoreDiskPath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
|
||||
}
|
||||
} else {
|
||||
datastoreDiskPath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, volumeTO.getPath(), VmwareManager.s_vmwareSearchExcludeFolder.value());
|
||||
|
|
@ -2822,8 +2878,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, int scsiControllerKey,
|
||||
Map<String, String> iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
|
||||
private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,
|
||||
int scsiControllerKey, Map<String, Map<String, String>> iqnToData, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
|
||||
VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
|
||||
|
||||
for (DiskTO vol : sortedDisks) {
|
||||
|
|
@ -2862,10 +2918,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
VolumeObjectTO volInSpec = getVolumeInSpec(vmSpec, volumeTO);
|
||||
|
||||
if (volInSpec != null) {
|
||||
if (managed) {
|
||||
Map<String, String> data = new HashMap<>();
|
||||
|
||||
String datastoreVolumePath = diskChain[0];
|
||||
iqnToPath.put(details.get(DiskTO.IQN), datastoreVolumePath);
|
||||
|
||||
data.put(StartAnswer.PATH, datastoreVolumePath);
|
||||
data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.OVA.toString());
|
||||
|
||||
iqnToData.put(details.get(DiskTO.IQN), data);
|
||||
|
||||
vol.setPath(datastoreVolumePath);
|
||||
volumeTO.setPath(datastoreVolumePath);
|
||||
volInSpec.setPath(datastoreVolumePath);
|
||||
|
|
@ -2972,9 +3036,39 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return listForSort.toArray(new DiskTO[0]);
|
||||
}
|
||||
|
||||
private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, DiskTO[] disks,
|
||||
Command cmd) throws Exception {
|
||||
HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> mapIdToMors = new HashMap<String, Pair<ManagedObjectReference, DatastoreMO>>();
|
||||
/**
|
||||
* Only call this for managed storage.
|
||||
* Ex. "[-iqn.2010-01.com.solidfire:4nhe.vol-1.27-0] i-2-18-VM/ROOT-18.vmdk" should return "i-2-18-VM/ROOT-18"
|
||||
*/
|
||||
public String getVmdkPath(String path) {
|
||||
if (!com.cloud.utils.StringUtils.isNotBlank(path)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final String search = "]";
|
||||
|
||||
int startIndex = path.indexOf(search);
|
||||
|
||||
if (startIndex == -1) {
|
||||
return null;
|
||||
}
|
||||
|
||||
path = path.substring(startIndex + search.length());
|
||||
|
||||
final String search2 = ".vmdk";
|
||||
|
||||
int endIndex = path.indexOf(search2);
|
||||
|
||||
if (endIndex == -1) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return path.substring(0, endIndex).trim();
|
||||
}
|
||||
|
||||
private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context,
|
||||
DiskTO[] disks, Command cmd) throws Exception {
|
||||
HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> mapIdToMors = new HashMap<>();
|
||||
|
||||
assert (hyperHost != null) && (context != null);
|
||||
|
||||
|
|
@ -3000,20 +3094,33 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
// if the datastore is not present, we need to discover the iSCSI device that will support it,
|
||||
// create the datastore, and create a VMDK file in the datastore
|
||||
if (morDatastore == null) {
|
||||
morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, details.get(DiskTO.STORAGE_HOST),
|
||||
Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null,
|
||||
details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME),
|
||||
details.get(DiskTO.CHAP_TARGET_SECRET), Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
|
||||
final String vmdkPath = getVmdkPath(volumeTO.getPath());
|
||||
|
||||
morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName,
|
||||
details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)),
|
||||
vmdkPath,
|
||||
details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET),
|
||||
details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET),
|
||||
Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
|
||||
|
||||
DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore);
|
||||
String datastoreVolumePath = dsMo.getDatastorePath((volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : dsMo.getName()) + ".vmdk");
|
||||
|
||||
final String datastoreVolumePath;
|
||||
|
||||
if (vmdkPath != null) {
|
||||
datastoreVolumePath = dsMo.getDatastorePath(vmdkPath + ".vmdk");
|
||||
}
|
||||
else {
|
||||
datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk");
|
||||
}
|
||||
|
||||
volumeTO.setPath(datastoreVolumePath);
|
||||
vol.setPath(datastoreVolumePath);
|
||||
}
|
||||
|
||||
mapIdToMors.put(datastoreName, new Pair<ManagedObjectReference, DatastoreMO>(morDatastore, new DatastoreMO(context, morDatastore)));
|
||||
} else {
|
||||
mapIdToMors.put(datastoreName, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore)));
|
||||
}
|
||||
else {
|
||||
ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid);
|
||||
|
||||
if (morDatastore == null) {
|
||||
|
|
@ -3024,7 +3131,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
throw new Exception(msg);
|
||||
}
|
||||
|
||||
mapIdToMors.put(poolUuid, new Pair<ManagedObjectReference, DatastoreMO>(morDatastore, new DatastoreMO(context, morDatastore)));
|
||||
mapIdToMors.put(poolUuid, new Pair<>(morDatastore, new DatastoreMO(context, morDatastore)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -4100,9 +4207,35 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
protected Answer execute(ModifyTargetsCommand cmd) {
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
|
||||
VmwareContext context = getServiceContext(cmd);
|
||||
VmwareHypervisorHost hyperHost = getHyperHost(context);
|
||||
|
||||
handleTargets(cmd.getAdd(), cmd.getTargets(), (HostMO)hyperHost);
|
||||
List<HostMO> hostMOs = new ArrayList<>();
|
||||
|
||||
if (cmd.getApplyToAllHostsInCluster()) {
|
||||
try {
|
||||
ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
|
||||
ClusterMO clusterMO = new ClusterMO(context, morCluster);
|
||||
|
||||
List<Pair<ManagedObjectReference, String>> hosts = clusterMO.getClusterHosts();
|
||||
|
||||
for (Pair<ManagedObjectReference, String> host : hosts) {
|
||||
HostMO hostMO = new HostMO(context, host.first());
|
||||
|
||||
hostMOs.add(hostMO);
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
s_logger.error(ex.getMessage(), ex);
|
||||
|
||||
throw new CloudRuntimeException(ex.getMessage(), ex);
|
||||
}
|
||||
}
|
||||
else {
|
||||
hostMOs.add((HostMO)hyperHost);
|
||||
}
|
||||
|
||||
handleTargets(cmd.getAdd(), cmd.getTargetTypeToRemove(), cmd.isRemoveAsync(), cmd.getTargets(), hostMOs);
|
||||
|
||||
return new ModifyTargetsAnswer();
|
||||
}
|
||||
|
|
@ -4133,7 +4266,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
long capacity = summary.getCapacity();
|
||||
long available = summary.getFreeSpace();
|
||||
|
||||
Map<String, TemplateProp> tInfo = new HashMap<String, TemplateProp>();
|
||||
Map<String, TemplateProp> tInfo = new HashMap<>();
|
||||
ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, capacity, available, tInfo);
|
||||
|
||||
if (cmd.getAdd() && pool.getType() == StoragePoolType.VMFS) {
|
||||
|
|
@ -4156,11 +4289,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
|
||||
private void handleTargets(boolean add, List<Map<String, String>> targets, HostMO host) {
|
||||
private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove, boolean isRemoveAsync,
|
||||
List<Map<String, String>> targets, List<HostMO> hosts) {
|
||||
if (targets != null && targets.size() > 0) {
|
||||
try {
|
||||
_storageProcessor.handleTargetsForHost(add, targets, host);
|
||||
} catch (Exception ex) {
|
||||
_storageProcessor.handleTargets(add, targetTypeToRemove, isRemoveAsync, targets, hosts);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
s_logger.warn(ex.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
@ -4173,8 +4308,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
try {
|
||||
if (cmd.getRemoveDatastore()) {
|
||||
_storageProcessor.handleDatastoreAndVmdkDetach(cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN),
|
||||
cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
|
||||
_storageProcessor.handleDatastoreAndVmdkDetach(cmd, cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME),
|
||||
cmd.getDetails().get(DeleteStoragePoolCommand.IQN), cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST),
|
||||
Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
|
||||
|
||||
return new Answer(cmd, true, "success");
|
||||
} else {
|
||||
|
|
@ -4204,6 +4340,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return str.replace('/', '-');
|
||||
}
|
||||
|
||||
public static String createDatastoreNameFromIqn(String iqn) {
|
||||
return "-" + iqn + "-0";
|
||||
}
|
||||
|
||||
protected AttachIsoAnswer execute(AttachIsoCommand cmd) {
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Executing resource AttachIsoCommand: " + _gson.toJson(cmd));
|
||||
|
|
@ -5119,7 +5259,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
|
||||
for (HostHostBusAdapter hba : hostStorageSystem.getStorageDeviceInfo().getHostBusAdapter()) {
|
||||
if (hba instanceof HostInternetScsiHba) {
|
||||
return ((HostInternetScsiHba)hba).getIScsiName();
|
||||
HostInternetScsiHba hostInternetScsiHba = (HostInternetScsiHba)hba;
|
||||
|
||||
if (hostInternetScsiHba.isIsSoftwareBased()) {
|
||||
return ((HostInternetScsiHba)hba).getIScsiName();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -5991,7 +6135,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
return dcMo.findVm(vol.getPath());
|
||||
}
|
||||
|
||||
private String getAbsoluteVmdkFile(VirtualDisk disk) {
|
||||
public String getAbsoluteVmdkFile(VirtualDisk disk) {
|
||||
String vmdkAbsFile = null;
|
||||
VirtualDeviceBackingInfo backingInfo = disk.getBacking();
|
||||
if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) {
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -2459,12 +2459,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
return sr;
|
||||
}
|
||||
|
||||
private String resignatureIscsiSr(Connection conn, Host host, Map<String, String> deviceConfig, String srNameLabel, Map<String, String> smConfig) throws XmlRpcException, XenAPIException {
|
||||
|
||||
private String resignatureIscsiSr(Connection conn, Host host, Map<String, String> deviceConfig, String srNameLabel, Map<String, String> smConfig)
|
||||
throws XmlRpcException, XenAPIException {
|
||||
String pooluuid;
|
||||
|
||||
try {
|
||||
SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(),
|
||||
"user", true, smConfig);
|
||||
SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, SRType.RELVMOISCSI.toString(), "user", true, smConfig);
|
||||
|
||||
// The successful outcome of SR.create (right above) is to throw an exception of type XenAPIException (with expected
|
||||
// toString() text) after resigning the metadata (we indicated to perform a resign by passing in SRType.RELVMOISCSI.toString()).
|
||||
|
|
@ -2492,6 +2492,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
|||
throw new CloudRuntimeException("Non-existent or invalid SR UUID");
|
||||
}
|
||||
}
|
||||
|
||||
return pooluuid;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -298,7 +298,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
|
|||
return new AttachAnswer(disk);
|
||||
}
|
||||
|
||||
VDI vdi = null;
|
||||
VDI vdi;
|
||||
|
||||
if (isManaged) {
|
||||
vdi = hypervisorResource.prepareManagedStorage(conn, details, data.getPath(), vdiNameLabel);
|
||||
|
|
@ -320,10 +320,13 @@ public class XenServerStorageProcessor implements StorageProcessor {
|
|||
vbdr.VDI = vdi;
|
||||
vbdr.bootable = false;
|
||||
vbdr.userdevice = "autodetect";
|
||||
|
||||
final Long deviceId = disk.getDiskSeq();
|
||||
|
||||
if (deviceId != null && !hypervisorResource.isDeviceUsed(conn, vm, deviceId)) {
|
||||
vbdr.userdevice = deviceId.toString();
|
||||
}
|
||||
|
||||
vbdr.mode = Types.VbdMode.RW;
|
||||
vbdr.type = Types.VbdType.DISK;
|
||||
vbdr.unpluggable = true;
|
||||
|
|
@ -337,6 +340,7 @@ public class XenServerStorageProcessor implements StorageProcessor {
|
|||
vbd.destroy(conn);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Update the VDI's label to include the VM name
|
||||
vdi.setNameLabel(conn, vdiNameLabel);
|
||||
|
||||
|
|
@ -345,7 +349,9 @@ public class XenServerStorageProcessor implements StorageProcessor {
|
|||
return new AttachAnswer(newDisk);
|
||||
} catch (final Exception e) {
|
||||
final String msg = "Failed to attach volume for uuid: " + data.getPath() + " due to " + e.toString();
|
||||
|
||||
s_logger.warn(msg, e);
|
||||
|
||||
return new AttachAnswer(msg);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1079,7 +1079,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
public Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
|
||||
private Answer createTemplateFromSnapshot2(final CopyCommand cmd) {
|
||||
final Connection conn = hypervisorResource.getConnection();
|
||||
|
||||
final SnapshotObjectTO snapshotObjTO = (SnapshotObjectTO) cmd.getSrcTO();
|
||||
|
|
@ -1089,13 +1089,11 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
|
|||
return null;
|
||||
}
|
||||
|
||||
NfsTO destStore = null;
|
||||
PrimaryDataStoreTO srcStore = null;
|
||||
URI destUri = null;
|
||||
NfsTO destStore;
|
||||
URI destUri;
|
||||
|
||||
try {
|
||||
srcStore = (PrimaryDataStoreTO) snapshotObjTO.getDataStore();
|
||||
destStore = (NfsTO) templateObjTO.getDataStore();
|
||||
destStore = (NfsTO)templateObjTO.getDataStore();
|
||||
destUri = new URI(destStore.getUrl());
|
||||
} catch (final Exception ex) {
|
||||
s_logger.debug("Invalid URI", ex);
|
||||
|
|
@ -1118,7 +1116,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
|
|||
final String storageHost = srcDetails.get(DiskTO.STORAGE_HOST);
|
||||
final String chapInitiatorUsername = srcDetails.get(DiskTO.CHAP_INITIATOR_USERNAME);
|
||||
final String chapInitiatorSecret = srcDetails.get(DiskTO.CHAP_INITIATOR_SECRET);
|
||||
String srType = null;
|
||||
String srType;
|
||||
|
||||
srType = CitrixResourceBase.SRType.LVMOISCSI.toString();
|
||||
|
||||
|
|
@ -1173,13 +1171,6 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor {
|
|||
result = true;
|
||||
|
||||
return new CopyCmdAnswer(newTemplate);
|
||||
// } catch (Exception ex) {
|
||||
// s_logger.error("Failed to create a template from a snapshot",
|
||||
// ex);
|
||||
//
|
||||
// return new
|
||||
// CopyCmdAnswer("Failed to create a template from a snapshot: " +
|
||||
// ex.toString());
|
||||
} catch (final BadServerResponse e) {
|
||||
s_logger.error("Failed to create a template from a snapshot due to incomprehensible server response", e);
|
||||
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@ import com.cloud.network.Networks.BroadcastDomainType;
|
|||
import com.cloud.network.Networks.IsolationType;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.xensource.xenapi.Connection;
|
||||
|
|
@ -62,8 +63,9 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
|
|||
final String vmName = vmSpec.getName();
|
||||
VmPowerState state = VmPowerState.HALTED;
|
||||
VM vm = null;
|
||||
// if a VDI is created, record its UUID to send back to the CS MS
|
||||
final Map<String, String> iqnToPath = new HashMap<String, String>();
|
||||
// if a VDI is created, record its UUID and its type (ex. VHD) to send back to the CS MS
|
||||
final Map<String, Map<String, String>> iqnToData = new HashMap<>();
|
||||
|
||||
try {
|
||||
final Set<VM> vms = VM.getByNameLabel(conn, vmName);
|
||||
if (vms != null) {
|
||||
|
|
@ -112,9 +114,14 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
|
|||
final VDI newVdi = citrixResourceBase.prepareManagedDisk(conn, disk, vmSpec.getId(), vmSpec.getName());
|
||||
|
||||
if (newVdi != null) {
|
||||
final Map<String, String> data = new HashMap<>();
|
||||
|
||||
final String path = newVdi.getUuid(conn);
|
||||
|
||||
iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path);
|
||||
data.put(StartAnswer.PATH, path);
|
||||
data.put(StartAnswer.IMAGE_FORMAT, Storage.ImageFormat.VHD.toString());
|
||||
|
||||
iqnToData.put(disk.getDetails().get(DiskTO.IQN), data);
|
||||
}
|
||||
|
||||
citrixResourceBase.createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), newVdi);
|
||||
|
|
@ -201,7 +208,7 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
|
|||
|
||||
final StartAnswer startAnswer = new StartAnswer(command);
|
||||
|
||||
startAnswer.setIqnToPath(iqnToPath);
|
||||
startAnswer.setIqnToData(iqnToData);
|
||||
|
||||
return startAnswer;
|
||||
} catch (final Exception e) {
|
||||
|
|
@ -210,7 +217,7 @@ public final class CitrixStartCommandWrapper extends CommandWrapper<StartCommand
|
|||
|
||||
final StartAnswer startAnswer = new StartAnswer(command, msg);
|
||||
|
||||
startAnswer.setIqnToPath(iqnToPath);
|
||||
startAnswer.setIqnToData(iqnToData);
|
||||
|
||||
return startAnswer;
|
||||
} finally {
|
||||
|
|
|
|||
|
|
@ -58,7 +58,10 @@ import com.cloud.exception.AgentUnavailableException;
|
|||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.dao.SnapshotDao;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.Snapshot;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
|
|
@ -84,6 +87,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
|
|||
private VolumeDetailsDao volumeDetailsDao;
|
||||
@Inject
|
||||
VMInstanceDao instanceDao;
|
||||
@Inject
|
||||
SnapshotDao snapshotDao;
|
||||
|
||||
@Override
|
||||
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
|
||||
|
|
@ -135,6 +140,21 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
|
|||
return volumeDetail.getValue();
|
||||
}
|
||||
|
||||
private void verifyNoSnapshotsOnManagedStorageVolumes(Map<VolumeInfo, DataStore> volumeToPool) {
|
||||
for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
|
||||
VolumeInfo volumeInfo = entry.getKey();
|
||||
StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());
|
||||
|
||||
if (storagePool.isManaged()) {
|
||||
List<SnapshotVO> snapshots = getNonDestroyedSnapshots(volumeInfo.getId());
|
||||
|
||||
if (snapshots != null && snapshots.size() > 0) {
|
||||
throw new CloudRuntimeException("Cannot perform this action on a volume with one or more snapshots");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tell the underlying storage plug-in to create a new volume, put it in the VAG of the destination cluster, and
|
||||
* send a command to the destination cluster to create an SR and to attach to the SR from all hosts in the cluster.
|
||||
|
|
@ -185,6 +205,24 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
|
|||
return iqn;
|
||||
}
|
||||
|
||||
private List<SnapshotVO> getNonDestroyedSnapshots(long csVolumeId) {
|
||||
List<SnapshotVO> lstSnapshots = snapshotDao.listByVolumeId(csVolumeId);
|
||||
|
||||
if (lstSnapshots == null) {
|
||||
lstSnapshots = new ArrayList<>();
|
||||
}
|
||||
|
||||
List<SnapshotVO> lstSnapshots2 = new ArrayList<>();
|
||||
|
||||
for (SnapshotVO snapshot : lstSnapshots) {
|
||||
if (!Snapshot.State.Destroyed.equals(snapshot.getState())) {
|
||||
lstSnapshots2.add(snapshot);
|
||||
}
|
||||
}
|
||||
|
||||
return lstSnapshots2;
|
||||
}
|
||||
|
||||
private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHost, VolumeObjectTO volumeTO) {
|
||||
final Map<String, String> details = new HashMap<>();
|
||||
|
||||
|
|
@ -273,6 +311,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
|
|||
// Initiate migration of a virtual machine with its volumes.
|
||||
|
||||
try {
|
||||
verifyNoSnapshotsOnManagedStorageVolumes(volumeToPool);
|
||||
|
||||
List<Pair<VolumeTO, String>> volumeToStorageUuid = new ArrayList<>();
|
||||
|
||||
for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
|
||||
|
|
|
|||
|
|
@ -187,6 +187,17 @@
|
|||
<module>network-elements/cisco-vnmc</module>
|
||||
</modules>
|
||||
</profile>
|
||||
<profile>
|
||||
<id>vmware-sioc</id>
|
||||
<activation>
|
||||
<property>
|
||||
<name>noredist</name>
|
||||
</property>
|
||||
</activation>
|
||||
<modules>
|
||||
<module>api/vmware-sioc</module>
|
||||
</modules>
|
||||
</profile>
|
||||
<profile>
|
||||
<id>mysqlha</id>
|
||||
<activation>
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ import com.cloud.dc.dao.ClusterDao;
|
|||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ResizeVolumePayload;
|
||||
import com.cloud.storage.Snapshot.State;
|
||||
|
|
@ -81,6 +82,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
|||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.SolidFireUtil;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
||||
|
|
@ -118,6 +120,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
|
||||
return mapCapabilities;
|
||||
}
|
||||
|
|
@ -332,12 +335,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
float fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops);
|
||||
|
||||
return (long)(maxIops * fClusterDefaultBurstIopsPercentOfMaxIops);
|
||||
return Math.min((long)(maxIops * fClusterDefaultBurstIopsPercentOfMaxIops), SolidFireUtil.MAX_IOPS_PER_VOLUME);
|
||||
}
|
||||
|
||||
private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) {
|
||||
long storagePoolId = dataObject.getDataStore().getId();
|
||||
|
||||
private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection,
|
||||
DataObject dataObject, long storagePoolId, long sfAccountId) {
|
||||
final Long minIops;
|
||||
final Long maxIops;
|
||||
final Long volumeSize;
|
||||
|
|
@ -423,12 +425,12 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
// To be backward compatible with releases prior to 4.5, call updateVolumeDetails here.
|
||||
// That way if SolidFireUtil.VOLUME_SIZE wasn't put in the volume_details table when the
|
||||
// volume was initially created, it can be placed in volume_details here.
|
||||
updateVolumeDetails(volume.getId(), volumeSize);
|
||||
updateVolumeDetails(volume.getId(), volumeSize, sfVolume.getScsiNaaDeviceId());
|
||||
|
||||
usedSpace += volumeSize;
|
||||
}
|
||||
catch (NumberFormatException ex) {
|
||||
// can be ignored (the "folder" column didn't have a valid "long" in it (hasn't been placed there yet))
|
||||
catch (Exception ex) {
|
||||
// can be ignored
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -493,10 +495,15 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
// TemplateInfo sometimes has a size equal to null.
|
||||
long templateSize = templateInfo.getSize() != null ? templateInfo.getSize() : 0;
|
||||
|
||||
volumeSize = (long)(templateSize + templateSize * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f));
|
||||
if (templateInfo.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
|
||||
volumeSize = templateSize;
|
||||
}
|
||||
else {
|
||||
volumeSize = (long)(templateSize + templateSize * (LOWEST_HYPERVISOR_SNAPSHOT_RESERVE / 100f));
|
||||
}
|
||||
}
|
||||
|
||||
return volumeSize;
|
||||
return Math.max(volumeSize, SolidFireUtil.MIN_VOLUME_SIZE);
|
||||
}
|
||||
|
||||
private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, Integer hypervisorSnapshotReserve) {
|
||||
|
|
@ -506,7 +513,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f);
|
||||
}
|
||||
|
||||
return volumeSize;
|
||||
return Math.max(volumeSize, SolidFireUtil.MIN_VOLUME_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -537,7 +544,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
private final long _maxIops;
|
||||
private final long _burstIops;
|
||||
|
||||
public Iops(long minIops, long maxIops, long burstIops) throws IllegalArgumentException {
|
||||
Iops(long minIops, long maxIops, long burstIops) throws IllegalArgumentException {
|
||||
if (minIops <= 0 || maxIops <= 0) {
|
||||
throw new IllegalArgumentException("The 'Min IOPS' and 'Max IOPS' values must be greater than 0.");
|
||||
}
|
||||
|
|
@ -555,15 +562,15 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
_burstIops = burstIops;
|
||||
}
|
||||
|
||||
public long getMinIops() {
|
||||
long getMinIops() {
|
||||
return _minIops;
|
||||
}
|
||||
|
||||
public long getMaxIops() {
|
||||
long getMaxIops() {
|
||||
return _maxIops;
|
||||
}
|
||||
|
||||
public long getBurstIops() {
|
||||
long getBurstIops() {
|
||||
return _burstIops;
|
||||
}
|
||||
}
|
||||
|
|
@ -618,6 +625,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
callback.complete(result);
|
||||
}
|
||||
else {
|
||||
if (errMsg != null) {
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private long getCreateSolidFireAccountId(SolidFireUtil.SolidFireConnection sfConnection, long csAccountId, long storagePoolId) {
|
||||
|
|
@ -741,7 +753,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotId, "takeSnapshot");
|
||||
|
||||
if (snapshotDetails != null && snapshotDetails.getValue() != null) {
|
||||
return new Boolean(snapshotDetails.getValue());
|
||||
return Boolean.parseBoolean(snapshotDetails.getValue());
|
||||
}
|
||||
|
||||
return false;
|
||||
|
|
@ -778,8 +790,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
" and data-object type: " + dataObjectType);
|
||||
}
|
||||
|
||||
final long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfNewVolumeName,
|
||||
getVolumeAttributes(volumeInfo));
|
||||
final long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId,
|
||||
SolidFireUtil.getSolidFireVolumeName(sfNewVolumeName), getVolumeAttributes(volumeInfo));
|
||||
|
||||
final Iops iops = getIops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), storagePoolId);
|
||||
|
||||
|
|
@ -826,7 +838,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
SolidFireUtil.SolidFireSnapshot sfSnapshot = SolidFireUtil.getSnapshot(sfConnection, sfVolumeId, sfSnapshotId);
|
||||
|
||||
long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, sfSnapshot.getName(), null);
|
||||
long newSfVolumeId = SolidFireUtil.createClone(sfConnection, sfVolumeId, sfSnapshotId, sfAccountId, SolidFireUtil.getSolidFireVolumeName(sfSnapshot.getName()), null);
|
||||
|
||||
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.STORAGE_POOL_ID);
|
||||
|
||||
|
|
@ -839,12 +851,17 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
return SolidFireUtil.getVolume(sfConnection, newSfVolumeId);
|
||||
}
|
||||
|
||||
private void updateVolumeDetails(long volumeId, long sfVolumeSize) {
|
||||
private void updateVolumeDetails(long volumeId, long sfVolumeSize, String scsiNaaDeviceId) {
|
||||
volumeDetailsDao.removeDetail(volumeId, SolidFireUtil.VOLUME_SIZE);
|
||||
volumeDetailsDao.removeDetail(volumeId, DiskTO.SCSI_NAA_DEVICE_ID);
|
||||
|
||||
VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, SolidFireUtil.VOLUME_SIZE, String.valueOf(sfVolumeSize), false);
|
||||
|
||||
volumeDetailsDao.persist(volumeDetailVo);
|
||||
|
||||
volumeDetailVo = new VolumeDetailVO(volumeId, DiskTO.SCSI_NAA_DEVICE_ID, scsiNaaDeviceId, false);
|
||||
|
||||
volumeDetailsDao.persist(volumeDetailVo);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -889,7 +906,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
@Override
|
||||
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
CreateCmdResult result = null;
|
||||
CreateCmdResult result;
|
||||
|
||||
try {
|
||||
VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
|
|
@ -928,9 +945,17 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
String sfNewSnapshotName = volumeInfo.getName() + "-" + snapshotInfo.getUuid();
|
||||
|
||||
long sfNewSnapshotId = SolidFireUtil.createSnapshot(sfConnection, sfVolumeId, sfNewSnapshotName, getSnapshotAttributes(snapshotInfo));
|
||||
int maxSnapshotNameLength = 64;
|
||||
int trimRequired = sfNewSnapshotName.length() - maxSnapshotNameLength;
|
||||
|
||||
updateSnapshotDetails(snapshotInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize);
|
||||
if (trimRequired > 0) {
|
||||
sfNewSnapshotName = StringUtils.left(volumeInfo.getName(), (volumeInfo.getName().length() - trimRequired)) + "-" + snapshotInfo.getUuid();
|
||||
}
|
||||
|
||||
long sfNewSnapshotId = SolidFireUtil.createSnapshot(sfConnection, sfVolumeId, SolidFireUtil.getSolidFireVolumeName(sfNewSnapshotName),
|
||||
getSnapshotAttributes(snapshotInfo));
|
||||
|
||||
updateSnapshotDetails(snapshotInfo.getId(), volumeInfo.getId(), sfVolumeId, sfNewSnapshotId, storagePoolId, sfVolumeSize);
|
||||
|
||||
snapshotObjectTo.setPath("SfSnapshotId=" + sfNewSnapshotId);
|
||||
}
|
||||
|
|
@ -941,8 +966,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
final Iops iops = getIops(MIN_IOPS_FOR_SNAPSHOT_VOLUME, MAX_IOPS_FOR_SNAPSHOT_VOLUME, storagePoolId);
|
||||
|
||||
long sfNewVolumeId = SolidFireUtil.createVolume(sfConnection, sfNewVolumeName, sfVolume.getAccountId(), sfVolumeSize,
|
||||
sfVolume.isEnable512e(), getSnapshotAttributes(snapshotInfo), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
|
||||
long sfNewVolumeId = SolidFireUtil.createVolume(sfConnection, SolidFireUtil.getSolidFireVolumeName(sfNewVolumeName), sfVolume.getAccountId(),
|
||||
sfVolumeSize, sfVolume.isEnable512e(), getSnapshotAttributes(snapshotInfo), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops());
|
||||
|
||||
SolidFireUtil.SolidFireVolume sfNewVolume = SolidFireUtil.getVolume(sfConnection, sfNewVolumeId);
|
||||
|
||||
|
|
@ -972,8 +997,15 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
callback.complete(result);
|
||||
}
|
||||
|
||||
private void updateSnapshotDetails(long csSnapshotId, long sfVolumeId, long sfNewSnapshotId, long storagePoolId, long sfNewVolumeSize) {
|
||||
private void updateSnapshotDetails(long csSnapshotId, long csVolumeId, long sfVolumeId, long sfNewSnapshotId, long storagePoolId, long sfNewVolumeSize) {
|
||||
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
SolidFireUtil.ORIG_CS_VOLUME_ID,
|
||||
String.valueOf(csVolumeId),
|
||||
false);
|
||||
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
SolidFireUtil.VOLUME_ID,
|
||||
String.valueOf(sfVolumeId),
|
||||
false);
|
||||
|
|
@ -1057,7 +1089,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
SolidFireUtil.SolidFireVolume sfVolume;
|
||||
|
||||
if (isBasicCreate) {
|
||||
sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId);
|
||||
sfVolume = createSolidFireVolume(sfConnection, volumeInfo, storagePoolId, sfAccountId);
|
||||
|
||||
volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_CREATE);
|
||||
|
||||
|
|
@ -1088,7 +1120,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
}
|
||||
else {
|
||||
sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId);
|
||||
sfVolume = createSolidFireVolume(sfConnection, volumeInfo, storagePoolId, sfAccountId);
|
||||
}
|
||||
|
||||
String iqn = sfVolume.getIqn();
|
||||
|
|
@ -1102,7 +1134,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
volumeDao.update(volume.getId(), volume);
|
||||
|
||||
updateVolumeDetails(volume.getId(), sfVolume.getTotalSize());
|
||||
updateVolumeDetails(volume.getId(), sfVolume.getTotalSize(), sfVolume.getScsiNaaDeviceId());
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
|
||||
|
||||
|
|
@ -1139,6 +1171,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
addTempVolumeId(csSnapshotId, String.valueOf(sfVolume.getId()));
|
||||
|
||||
handleSnapshotDetails(csSnapshotId, DiskTO.IQN, sfVolume.getIqn());
|
||||
handleSnapshotDetails(csSnapshotId, DiskTO.SCSI_NAA_DEVICE_ID, sfVolume.getScsiNaaDeviceId());
|
||||
}
|
||||
else if (snapshotDetails != null && snapshotDetails.getValue() != null && snapshotDetails.getValue().equalsIgnoreCase("delete")) {
|
||||
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.VOLUME_ID);
|
||||
|
|
@ -1150,6 +1183,10 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DiskTO.IQN);
|
||||
|
||||
snapshotDetailsDao.remove(snapshotDetails.getId());
|
||||
|
||||
snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DiskTO.SCSI_NAA_DEVICE_ID);
|
||||
|
||||
snapshotDetailsDao.remove(snapshotDetails.getId());
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Invalid state in 'createTempVolume(SnapshotInfo, long)'");
|
||||
|
|
@ -1163,7 +1200,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
long sfAccountId = getCreateSolidFireAccountId(sfConnection, templateInfo.getAccountId(), storagePoolId);
|
||||
|
||||
SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(sfConnection, templateInfo, sfAccountId);
|
||||
SolidFireUtil.SolidFireVolume sfVolume = createSolidFireVolume(sfConnection, templateInfo, storagePoolId, sfAccountId);
|
||||
|
||||
String iqn = sfVolume.getIqn();
|
||||
|
||||
|
|
@ -1200,7 +1237,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
VolumeVO volumeVO = volumeDao.findById(volumeId);
|
||||
|
||||
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volumeVO.getFolder()));
|
||||
deleteSolidFireVolume(sfConnection, volumeId, Long.parseLong(volumeVO.getFolder()));
|
||||
|
||||
volumeVO.setFolder(String.valueOf(sfVolumeId));
|
||||
volumeVO.set_iScsiName(iqn);
|
||||
|
|
@ -1308,10 +1345,6 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
SolidFireUtil.deleteVolume(sfConnection, sfTemplateVolumeId);
|
||||
|
||||
VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, template.getId());
|
||||
|
||||
tmpltPoolDao.remove(templatePoolRef.getId());
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
|
||||
|
||||
// getUsedBytes(StoragePool) will not include the template to delete because the "template_spool_ref" table has already been updated by this point
|
||||
|
|
@ -1329,8 +1362,40 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {
|
||||
throw new UnsupportedOperationException("Reverting not supported. Create a template or volume based on the snapshot instead.");
|
||||
public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshot2, AsyncCompletionCallback<CommandResult> callback) {
|
||||
VolumeInfo volumeInfo = snapshot.getBaseVolume();
|
||||
|
||||
VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
|
||||
|
||||
if (volumeVO == null || volumeVO.getRemoved() != null) {
|
||||
String errMsg = "The volume that the snapshot belongs to no longer exists.";
|
||||
|
||||
CommandResult commandResult = new CommandResult();
|
||||
|
||||
commandResult.setResult(errMsg);
|
||||
|
||||
callback.complete(commandResult);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(volumeVO.getPoolId(), storagePoolDetailsDao);
|
||||
|
||||
long sfVolumeId = Long.parseLong(volumeInfo.getFolder());
|
||||
|
||||
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshot.getId(), SolidFireUtil.SNAPSHOT_ID);
|
||||
|
||||
long sfSnapshotId = Long.parseLong(snapshotDetails.getValue());
|
||||
|
||||
SolidFireUtil.rollBackVolumeToSnapshot(sfConnection, sfVolumeId, sfSnapshotId);
|
||||
|
||||
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
|
||||
|
||||
updateVolumeDetails(volumeVO.getId(), sfVolume.getTotalSize(), sfVolume.getScsiNaaDeviceId());
|
||||
|
||||
CommandResult commandResult = new CommandResult();
|
||||
|
||||
callback.complete(commandResult);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -1338,65 +1403,80 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
String iqn = null;
|
||||
String errMsg = null;
|
||||
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
VolumeInfo volumeInfo = (VolumeInfo)dataObject;
|
||||
iqn = volumeInfo.get_iScsiName();
|
||||
long storagePoolId = volumeInfo.getPoolId();
|
||||
long sfVolumeId = Long.parseLong(volumeInfo.getFolder());
|
||||
ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
|
||||
try {
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
VolumeInfo volumeInfo = (VolumeInfo)dataObject;
|
||||
|
||||
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
|
||||
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
|
||||
iqn = volumeInfo.get_iScsiName();
|
||||
|
||||
verifySufficientIopsForStoragePool(storagePoolId, volumeInfo.getId(), payload.newMinIops);
|
||||
verifySufficientBytesForStoragePool(storagePoolId, volumeInfo.getId(), payload.newSize, payload.newHypervisorSnapshotReserve);
|
||||
long storagePoolId = volumeInfo.getPoolId();
|
||||
long sfVolumeId = Long.parseLong(volumeInfo.getFolder());
|
||||
|
||||
long sfNewVolumeSize = sfVolume.getTotalSize();
|
||||
ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
|
||||
|
||||
Integer hsr = volumeInfo.getHypervisorSnapshotReserve();
|
||||
SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
|
||||
SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getVolume(sfConnection, sfVolumeId);
|
||||
|
||||
if (payload.newSize != null || payload.newHypervisorSnapshotReserve != null) {
|
||||
if (payload.newHypervisorSnapshotReserve != null) {
|
||||
if (hsr != null) {
|
||||
if (payload.newHypervisorSnapshotReserve > hsr) {
|
||||
long newMinIops = payload.newMinIops != null ? payload.newMinIops : sfVolume.getMinIops();
|
||||
|
||||
verifySufficientIopsForStoragePool(storagePoolId, sfVolume, newMinIops);
|
||||
|
||||
long newSize = payload.newSize != null ? payload.newSize : volumeInfo.getSize();
|
||||
|
||||
verifySufficientBytesForStoragePool(storagePoolId, volumeInfo.getId(), newSize, payload.newHypervisorSnapshotReserve);
|
||||
|
||||
long sfNewVolumeSize = sfVolume.getTotalSize();
|
||||
|
||||
Integer hsr = volumeInfo.getHypervisorSnapshotReserve();
|
||||
|
||||
if (payload.newSize != null || payload.newHypervisorSnapshotReserve != null) {
|
||||
if (payload.newHypervisorSnapshotReserve != null) {
|
||||
if (hsr != null) {
|
||||
if (payload.newHypervisorSnapshotReserve > hsr) {
|
||||
hsr = payload.newHypervisorSnapshotReserve;
|
||||
}
|
||||
} else {
|
||||
hsr = payload.newHypervisorSnapshotReserve;
|
||||
}
|
||||
}
|
||||
else {
|
||||
hsr = payload.newHypervisorSnapshotReserve;
|
||||
}
|
||||
|
||||
sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(newSize, hsr);
|
||||
}
|
||||
|
||||
sfNewVolumeSize = getVolumeSizeIncludingHypervisorSnapshotReserve(payload.newSize, hsr);
|
||||
Map<String, String> mapAttributes = new HashMap<>();
|
||||
|
||||
mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId()));
|
||||
mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize));
|
||||
|
||||
long newMaxIops = payload.newMaxIops != null ? payload.newMaxIops : sfVolume.getMaxIops();
|
||||
|
||||
SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes,
|
||||
newMinIops, newMaxIops, getDefaultBurstIops(storagePoolId, newMaxIops));
|
||||
|
||||
VolumeVO volume = volumeDao.findById(volumeInfo.getId());
|
||||
|
||||
volume.setMinIops(newMinIops);
|
||||
volume.setMaxIops(newMaxIops);
|
||||
volume.setHypervisorSnapshotReserve(hsr);
|
||||
|
||||
volumeDao.update(volume.getId(), volume);
|
||||
|
||||
// SolidFireUtil.VOLUME_SIZE was introduced in 4.5.
|
||||
updateVolumeDetails(volume.getId(), sfNewVolumeSize, sfVolume.getScsiNaaDeviceId());
|
||||
} else {
|
||||
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize";
|
||||
}
|
||||
|
||||
Map<String, String> mapAttributes = new HashMap<>();
|
||||
|
||||
mapAttributes.put(SolidFireUtil.CloudStackVolumeId, String.valueOf(volumeInfo.getId()));
|
||||
mapAttributes.put(SolidFireUtil.CloudStackVolumeSize, NumberFormat.getInstance().format(payload.newSize));
|
||||
|
||||
SolidFireUtil.modifyVolume(sfConnection, sfVolumeId, sfNewVolumeSize, mapAttributes,
|
||||
payload.newMinIops, payload.newMaxIops, getDefaultBurstIops(storagePoolId, payload.newMaxIops));
|
||||
|
||||
VolumeVO volume = volumeDao.findById(volumeInfo.getId());
|
||||
|
||||
volume.setMinIops(payload.newMinIops);
|
||||
volume.setMaxIops(payload.newMaxIops);
|
||||
volume.setHypervisorSnapshotReserve(hsr);
|
||||
|
||||
volumeDao.update(volume.getId(), volume);
|
||||
|
||||
// SolidFireUtil.VOLUME_SIZE was introduced in 4.5.
|
||||
updateVolumeDetails(volume.getId(), sfNewVolumeSize);
|
||||
} else {
|
||||
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize";
|
||||
}
|
||||
catch (Exception ex) {
|
||||
errMsg = ex.getMessage();
|
||||
}
|
||||
finally {
|
||||
CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));
|
||||
|
||||
CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg));
|
||||
result.setResult(errMsg);
|
||||
|
||||
result.setResult(errMsg);
|
||||
|
||||
callback.complete(result);
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
private void verifySufficientBytesForStoragePool(long requestedBytes, long storagePoolId) {
|
||||
|
|
@ -1453,10 +1533,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
}
|
||||
|
||||
private void verifySufficientIopsForStoragePool(long storagePoolId, long volumeId, long newMinIops) {
|
||||
VolumeVO volume = volumeDao.findById(volumeId);
|
||||
|
||||
long currentMinIops = volume.getMinIops();
|
||||
private void verifySufficientIopsForStoragePool(long storagePoolId, SolidFireUtil.SolidFireVolume sfVolume, long newMinIops) {
|
||||
long currentMinIops = sfVolume.getMinIops();
|
||||
long diffInMinIops = newMinIops - currentMinIops;
|
||||
|
||||
// if the desire is for more IOPS
|
||||
|
|
@ -1488,8 +1566,20 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
private void deleteSolidFireSnapshot(SolidFireUtil.SolidFireConnection sfConnection, long csSnapshotId, long sfSnapshotId) {
|
||||
SolidFireUtil.deleteSnapshot(sfConnection, sfSnapshotId);
|
||||
|
||||
final long volumeId;
|
||||
final VolumeVO volume;
|
||||
|
||||
SnapshotVO snapshot = snapshotDao.findById(csSnapshotId);
|
||||
VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
|
||||
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, SolidFireUtil.ORIG_CS_VOLUME_ID);
|
||||
|
||||
if (snapshotDetails != null && snapshotDetails.getValue() != null) {
|
||||
volumeId = Long.valueOf(snapshotDetails.getValue());
|
||||
}
|
||||
else {
|
||||
volumeId = snapshot.getVolumeId();
|
||||
}
|
||||
|
||||
volume = volumeDao.findById(volumeId);
|
||||
|
||||
if (volume == null) { // if the CloudStack volume has been deleted
|
||||
List<SnapshotVO> lstSnapshots = getNonDestroyedSnapshots(snapshot.getVolumeId());
|
||||
|
|
@ -1500,7 +1590,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
// The CloudStack volume snapshot has not yet been set to the DESTROYED state, so check to make
|
||||
// sure snapshotVo.getId() != csSnapshotId when determining if any volume snapshots remain for the given CloudStack volume.
|
||||
if (snapshotVo.getId() != csSnapshotId) {
|
||||
SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(snapshotVo.getId(), SolidFireUtil.SNAPSHOT_ID);
|
||||
snapshotDetails = snapshotDetailsDao.findDetail(snapshotVo.getId(), SolidFireUtil.SNAPSHOT_ID);
|
||||
|
||||
// We are only interested here in volume snapshots that make use of SolidFire snapshots (as opposed to ones
|
||||
// that make use of SolidFire volumes).
|
||||
|
|
@ -1511,9 +1601,9 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
|
||||
if (lstSnapshots2.isEmpty()) {
|
||||
volume = volumeDao.findByIdIncludingRemoved(snapshot.getVolumeId());
|
||||
VolumeVO volumeToDelete = volumeDao.findByIdIncludingRemoved(volumeId);
|
||||
|
||||
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volume.getFolder()));
|
||||
SolidFireUtil.deleteVolume(sfConnection, Long.parseLong(volumeToDelete.getFolder()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ import org.apache.log4j.Logger;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
|
|
@ -41,7 +42,10 @@ import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
|||
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
|
|
@ -57,10 +61,13 @@ import com.cloud.storage.StoragePoolAutomation;
|
|||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle {
|
||||
private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class);
|
||||
|
||||
@Inject private CapacityManager _capacityMgr;
|
||||
@Inject private ClusterDao _clusterDao;
|
||||
@Inject private DataCenterDao _zoneDao;
|
||||
@Inject private PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
|
||||
|
|
@ -77,6 +84,8 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||
String url = (String)dsInfos.get("url");
|
||||
Long zoneId = (Long)dsInfos.get("zoneId");
|
||||
Long podId = (Long)dsInfos.get("podId");
|
||||
Long clusterId = (Long)dsInfos.get("clusterId");
|
||||
String storagePoolName = (String)dsInfos.get("name");
|
||||
String providerName = (String)dsInfos.get("providerName");
|
||||
Long capacityBytes = (Long)dsInfos.get("capacityBytes");
|
||||
|
|
@ -85,6 +94,14 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
@SuppressWarnings("unchecked")
|
||||
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
|
||||
|
||||
if (podId != null && clusterId == null) {
|
||||
throw new CloudRuntimeException("If the Pod ID is specified, the Cluster ID must also be specified.");
|
||||
}
|
||||
|
||||
if (podId == null && clusterId != null) {
|
||||
throw new CloudRuntimeException("If the Pod ID is not specified, the Cluster ID must also not be specified.");
|
||||
}
|
||||
|
||||
String storageVip = SolidFireUtil.getStorageVip(url);
|
||||
int storagePort = SolidFireUtil.getStoragePort(url);
|
||||
|
||||
|
|
@ -104,13 +121,26 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
parameters.setType(StoragePoolType.Iscsi);
|
||||
parameters.setUuid(UUID.randomUUID().toString());
|
||||
parameters.setZoneId(zoneId);
|
||||
parameters.setPodId(podId);
|
||||
parameters.setClusterId(clusterId);
|
||||
parameters.setName(storagePoolName);
|
||||
parameters.setProviderName(providerName);
|
||||
parameters.setManaged(true);
|
||||
parameters.setCapacityBytes(capacityBytes);
|
||||
parameters.setUsedBytes(0);
|
||||
parameters.setCapacityIops(capacityIops);
|
||||
parameters.setHypervisorType(HypervisorType.Any);
|
||||
|
||||
if (clusterId != null) {
|
||||
ClusterVO clusterVO = _clusterDao.findById(clusterId);
|
||||
|
||||
Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
|
||||
|
||||
parameters.setHypervisorType(clusterVO.getHypervisorType());
|
||||
}
|
||||
else {
|
||||
parameters.setHypervisorType(HypervisorType.Any);
|
||||
}
|
||||
|
||||
parameters.setTags(tags);
|
||||
parameters.setDetails(details);
|
||||
|
||||
|
|
@ -166,11 +196,26 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
". Exception: " + ex);
|
||||
}
|
||||
|
||||
if (lClusterDefaultMinIops < SolidFireUtil.MIN_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be greater than or equal to " +
|
||||
SolidFireUtil.MIN_IOPS_PER_VOLUME + ".");
|
||||
}
|
||||
|
||||
if (lClusterDefaultMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to " +
|
||||
SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME + ".");
|
||||
}
|
||||
|
||||
if (lClusterDefaultMinIops > lClusterDefaultMaxIops) {
|
||||
throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" +
|
||||
SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
|
||||
}
|
||||
|
||||
if (lClusterDefaultMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "' must be less than or equal to " +
|
||||
SolidFireUtil.MAX_IOPS_PER_VOLUME + ".");
|
||||
}
|
||||
|
||||
if (Float.compare(fClusterDefaultBurstIopsPercentOfMaxIops, 1.0f) < 0) {
|
||||
throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + "' must be greater than or equal to 1.");
|
||||
}
|
||||
|
|
@ -186,23 +231,35 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
// do not implement this method for SolidFire's plug-in
|
||||
@Override
|
||||
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||
return true; // should be ignored for zone-wide-only plug-ins like SolidFire's
|
||||
return true;
|
||||
}
|
||||
|
||||
// do not implement this method for SolidFire's plug-in
|
||||
@Override
|
||||
public boolean attachCluster(DataStore store, ClusterScope scope) {
|
||||
return true; // should be ignored for zone-wide-only plug-ins like SolidFire's
|
||||
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
|
||||
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
|
||||
|
||||
List<HostVO> hosts =
|
||||
_resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), primarystore.getDataCenterId());
|
||||
|
||||
for (HostVO host : hosts) {
|
||||
try {
|
||||
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
|
||||
}
|
||||
}
|
||||
|
||||
_dataStoreHelper.attachCluster(dataStore);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
|
||||
_dataStoreHelper.attachZone(dataStore);
|
||||
|
||||
List<HostVO> xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId());
|
||||
List<HostVO> vmWareServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.VMware, scope.getScopeId());
|
||||
List<HostVO> kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
|
||||
List<HostVO> hosts = new ArrayList<HostVO>();
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
|
||||
hosts.addAll(xenServerHosts);
|
||||
hosts.addAll(vmWareServerHosts);
|
||||
|
|
@ -216,6 +273,8 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC
|
|||
}
|
||||
}
|
||||
|
||||
_dataStoreHelper.attachZone(dataStore);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -47,6 +47,7 @@ import com.cloud.agent.AgentManager;
|
|||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.CreateStoragePoolCommand;
|
||||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.dc.ClusterDetailsDao;
|
||||
import com.cloud.dc.ClusterDetailsVO;
|
||||
|
|
@ -90,7 +91,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
@Inject private StoragePoolAutomation _storagePoolAutomation;
|
||||
@Inject private StoragePoolDetailsDao _storagePoolDetailsDao;
|
||||
@Inject private StoragePoolHostDao _storagePoolHostDao;
|
||||
@Inject protected TemplateManager _tmpltMgr;
|
||||
@Inject private TemplateManager _tmpltMgr;
|
||||
|
||||
// invoked to add primary storage that is based on the SolidFire plug-in
|
||||
@Override
|
||||
|
|
@ -168,6 +169,10 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
|
||||
details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
|
||||
|
||||
if (capacityBytes < SolidFireUtil.MIN_VOLUME_SIZE) {
|
||||
capacityBytes = SolidFireUtil.MIN_VOLUME_SIZE;
|
||||
}
|
||||
|
||||
long lMinIops = 100;
|
||||
long lMaxIops = 15000;
|
||||
long lBurstIops = 15000;
|
||||
|
|
@ -214,8 +219,16 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'.");
|
||||
}
|
||||
|
||||
if (lMinIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME || lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
|
||||
if (lMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume's Min IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS.");
|
||||
}
|
||||
|
||||
if (lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume's Max IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
|
||||
}
|
||||
|
||||
if (lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume's Burst IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
|
||||
}
|
||||
|
||||
details.put(SolidFireUtil.MIN_IOPS, String.valueOf(lMinIops));
|
||||
|
|
@ -282,7 +295,9 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
|
||||
SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), _accountDetailsDao);
|
||||
} catch (Exception ex) {
|
||||
_primaryDataStoreDao.expunge(dataStore.getId());
|
||||
if (dataStore != null) {
|
||||
_primaryDataStoreDao.expunge(dataStore.getId());
|
||||
}
|
||||
|
||||
throw new CloudRuntimeException(ex.getMessage());
|
||||
}
|
||||
|
|
@ -320,7 +335,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
private final SolidFireUtil.SolidFireVolume _sfVolume;
|
||||
private final SolidFireUtil.SolidFireAccount _sfAccount;
|
||||
|
||||
public SolidFireCreateVolume(SolidFireUtil.SolidFireVolume sfVolume, SolidFireUtil.SolidFireAccount sfAccount) {
|
||||
SolidFireCreateVolume(SolidFireUtil.SolidFireVolume sfVolume, SolidFireUtil.SolidFireAccount sfAccount) {
|
||||
_sfVolume = sfVolume;
|
||||
_sfAccount = sfAccount;
|
||||
}
|
||||
|
|
@ -394,7 +409,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
throw new CloudRuntimeException("Unable to create storage in cluster " + primaryDataStoreInfo.getClusterId());
|
||||
}
|
||||
|
||||
List<HostVO> poolHosts = new ArrayList<HostVO>();
|
||||
List<HostVO> poolHosts = new ArrayList<>();
|
||||
|
||||
for (HostVO host : allHosts) {
|
||||
try {
|
||||
|
|
@ -427,7 +442,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
if (HypervisorType.VMware.equals(hypervisorType)) {
|
||||
cmd.setCreateDatastore(true);
|
||||
|
||||
Map<String, String> details = new HashMap<String, String>();
|
||||
Map<String, String> details = new HashMap<>();
|
||||
|
||||
StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.DATASTORE_NAME);
|
||||
|
||||
|
|
@ -455,7 +470,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
} else {
|
||||
_primaryDataStoreDao.expunge(storagePool.getId());
|
||||
|
||||
String msg = "";
|
||||
final String msg;
|
||||
|
||||
if (answer != null) {
|
||||
msg = "Cannot create storage pool through host '" + hostId + "' due to the following: " + answer.getDetails();
|
||||
|
|
@ -514,6 +529,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
}
|
||||
|
||||
Long clusterId = null;
|
||||
Long hostId = null;
|
||||
|
||||
for (StoragePoolHostVO host : hostPoolRecords) {
|
||||
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(storagePool);
|
||||
|
|
@ -521,7 +537,7 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
if (HypervisorType.VMware.equals(hypervisorType)) {
|
||||
deleteCmd.setRemoveDatastore(true);
|
||||
|
||||
Map<String, String> details = new HashMap<String, String>();
|
||||
Map<String, String> details = new HashMap<>();
|
||||
|
||||
StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.DATASTORE_NAME);
|
||||
|
||||
|
|
@ -551,12 +567,18 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
|
||||
if (hostVO != null) {
|
||||
clusterId = hostVO.getClusterId();
|
||||
hostId = hostVO.getId();
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
else {
|
||||
s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult());
|
||||
if (answer != null) {
|
||||
s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult());
|
||||
}
|
||||
else {
|
||||
s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -582,11 +604,60 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
}
|
||||
}
|
||||
|
||||
if (hostId != null) {
|
||||
handleTargetsForVMware(hostId, storagePool.getId());
|
||||
}
|
||||
|
||||
deleteSolidFireVolume(storagePool.getId());
|
||||
|
||||
return _primaryDataStoreHelper.deletePrimaryDataStore(dataStore);
|
||||
}
|
||||
|
||||
private void handleTargetsForVMware(long hostId, long storagePoolId) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
if (host.getHypervisorType() == HypervisorType.VMware) {
|
||||
String storageAddress = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_VIP).getValue();
|
||||
int storagePort = Integer.parseInt(_storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.STORAGE_PORT).getValue());
|
||||
String iqn = _storagePoolDetailsDao.findDetail(storagePoolId, SolidFireUtil.IQN).getValue();
|
||||
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
List<Map<String, String>> targets = new ArrayList<>();
|
||||
|
||||
Map<String, String> target = new HashMap<>();
|
||||
|
||||
target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress);
|
||||
target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort));
|
||||
target.put(ModifyTargetsCommand.IQN, iqn);
|
||||
|
||||
targets.add(target);
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(false);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
|
||||
cmd.setRemoveAsync(true);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
String msg = "Unable to get an answer to the modify targets command";
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
else if (!answer.getResult()) {
|
||||
String msg = "Unable to modify target on the following host: " + hostId;
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
private void removeVolumeFromVag(long storagePoolId, long clusterId) {
|
||||
long sfVolumeId = getVolumeId(storagePoolId);
|
||||
ClusterDetailsVO clusterDetail = _clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePoolId));
|
||||
|
|
@ -671,8 +742,8 @@ public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStor
|
|||
long burstIops = currentBurstIops;
|
||||
|
||||
if (capacityIops != null) {
|
||||
if (capacityIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
|
||||
if (capacityIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
|
||||
throw new CloudRuntimeException("This volume cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS.");
|
||||
}
|
||||
|
||||
float maxPercentOfMin = currentMaxIops / (float)currentMinIops;
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ public class SolidFireHostListener implements HypervisorHostListener {
|
|||
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.PROVIDER_NAME,
|
||||
_clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
|
||||
|
||||
handleVMware(host, true);
|
||||
handleVMware(host, true, ModifyTargetsCommand.TargetTypeToRemove.NEITHER);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
@ -122,10 +122,6 @@ public class SolidFireHostListener implements HypervisorHostListener {
|
|||
|
||||
@Override
|
||||
public boolean hostAboutToBeRemoved(long hostId) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
handleVMware(host, false);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -134,6 +130,10 @@ public class SolidFireHostListener implements HypervisorHostListener {
|
|||
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.PROVIDER_NAME,
|
||||
_clusterDao, _clusterDetailsDao, _storagePoolDao, _storagePoolDetailsDao, _hostDao);
|
||||
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
handleVMware(host, false, ModifyTargetsCommand.TargetTypeToRemove.BOTH);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -151,7 +151,7 @@ public class SolidFireHostListener implements HypervisorHostListener {
|
|||
}
|
||||
}
|
||||
|
||||
private void handleVMware(HostVO host, boolean add) {
|
||||
private void handleVMware(HostVO host, boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove) {
|
||||
if (HypervisorType.VMware.equals(host.getHypervisorType())) {
|
||||
List<StoragePoolVO> storagePools = _storagePoolDao.findPoolsByProvider(SolidFireUtil.PROVIDER_NAME);
|
||||
|
||||
|
|
@ -166,8 +166,9 @@ public class SolidFireHostListener implements HypervisorHostListener {
|
|||
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
cmd.setAdd(add);
|
||||
cmd.setTargets(targets);
|
||||
cmd.setAdd(add);
|
||||
cmd.setTargetTypeToRemove(targetTypeToRemove);
|
||||
|
||||
sendModifyTargetsCommand(cmd, host.getId());
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
|
|||
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), true, SolidFireUtil.SHARED_PROVIDER_NAME,
|
||||
clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao);
|
||||
|
||||
handleVMware(hostId, true);
|
||||
handleVMware(hostId, true, ModifyTargetsCommand.TargetTypeToRemove.NEITHER);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
@ -121,20 +121,22 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
|
|||
|
||||
@Override
|
||||
public boolean hostAboutToBeRemoved(long hostId) {
|
||||
handleVMware(hostId, false);
|
||||
HostVO host = hostDao.findById(hostId);
|
||||
|
||||
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, host.getClusterId(), false, SolidFireUtil.SHARED_PROVIDER_NAME,
|
||||
clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao);
|
||||
|
||||
handleVMware(hostId, false, ModifyTargetsCommand.TargetTypeToRemove.BOTH);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hostRemoved(long hostId, long clusterId) {
|
||||
SolidFireUtil.hostAddedToOrRemovedFromCluster(hostId, clusterId, false, SolidFireUtil.SHARED_PROVIDER_NAME,
|
||||
clusterDao, clusterDetailsDao, storagePoolDao, storagePoolDetailsDao, hostDao);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private void handleVMware(long hostId, boolean add) {
|
||||
private void handleVMware(long hostId, boolean add, ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove) {
|
||||
HostVO host = hostDao.findById(hostId);
|
||||
|
||||
if (HypervisorType.VMware.equals(host.getHypervisorType())) {
|
||||
|
|
@ -172,8 +174,10 @@ public class SolidFireSharedHostListener implements HypervisorHostListener {
|
|||
if (targets.size() > 0) {
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
cmd.setAdd(add);
|
||||
cmd.setTargets(targets);
|
||||
cmd.setAdd(add);
|
||||
cmd.setTargetTypeToRemove(targetTypeToRemove);
|
||||
cmd.setRemoveAsync(true);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -65,6 +65,7 @@ import com.solidfire.element.api.ListVolumesRequest;
|
|||
import com.solidfire.element.api.ModifyVolumeAccessGroupRequest;
|
||||
import com.solidfire.element.api.ModifyVolumeRequest;
|
||||
import com.solidfire.element.api.QoS;
|
||||
import com.solidfire.element.api.RollbackToSnapshotRequest;
|
||||
import com.solidfire.element.api.Snapshot;
|
||||
import com.solidfire.element.api.SolidFireElement;
|
||||
import com.solidfire.element.api.Volume;
|
||||
|
|
@ -115,6 +116,8 @@ public class SolidFireUtil {
|
|||
public static final String CloudStackTemplateId = "CloudStackTemplateId";
|
||||
public static final String CloudStackTemplateSize = "CloudStackTemplateSize";
|
||||
|
||||
public static final String ORIG_CS_VOLUME_ID = "originalCloudStackVolumeId";
|
||||
|
||||
public static final String VOLUME_SIZE = "sfVolumeSize";
|
||||
|
||||
public static final String STORAGE_POOL_ID = "sfStoragePoolId";
|
||||
|
|
@ -124,6 +127,10 @@ public class SolidFireUtil {
|
|||
public static final String DATASTORE_NAME = "datastoreName";
|
||||
public static final String IQN = "iqn";
|
||||
|
||||
public static final long MIN_VOLUME_SIZE = 1000000000;
|
||||
|
||||
public static final long MIN_IOPS_PER_VOLUME = 100;
|
||||
public static final long MAX_MIN_IOPS_PER_VOLUME = 15000;
|
||||
public static final long MAX_IOPS_PER_VOLUME = 100000;
|
||||
|
||||
private static final int DEFAULT_MANAGEMENT_PORT = 443;
|
||||
|
|
@ -520,7 +527,8 @@ public class SolidFireUtil {
|
|||
Volume volume = getSolidFireElement(sfConnection).listVolumes(request).getVolumes()[0];
|
||||
|
||||
return new SolidFireVolume(volume.getVolumeID(), volume.getName(), volume.getIqn(), volume.getAccountID(), volume.getStatus(),
|
||||
volume.getEnable512e(), volume.getQos().getMinIOPS(), volume.getQos().getMaxIOPS(), volume.getQos().getBurstIOPS(), volume.getTotalSize());
|
||||
volume.getEnable512e(), volume.getQos().getMinIOPS(), volume.getQos().getMaxIOPS(), volume.getQos().getBurstIOPS(),
|
||||
volume.getTotalSize(), volume.getScsiNAADeviceID());
|
||||
}
|
||||
|
||||
public static void deleteVolume(SolidFireConnection sfConnection, long volumeId) {
|
||||
|
|
@ -544,9 +552,10 @@ public class SolidFireUtil {
|
|||
private final long _maxIops;
|
||||
private final long _burstIops;
|
||||
private final long _totalSize;
|
||||
private final String _scsiNaaDeviceId;
|
||||
|
||||
SolidFireVolume(long id, String name, String iqn, long accountId, String status, boolean enable512e,
|
||||
long minIops, long maxIops, long burstIops, long totalSize) {
|
||||
long minIops, long maxIops, long burstIops, long totalSize, String scsiNaaDeviceId) {
|
||||
_id = id;
|
||||
_name = name;
|
||||
_iqn = "/" + iqn + "/0";
|
||||
|
|
@ -557,6 +566,7 @@ public class SolidFireUtil {
|
|||
_maxIops = maxIops;
|
||||
_burstIops = burstIops;
|
||||
_totalSize = totalSize;
|
||||
_scsiNaaDeviceId = scsiNaaDeviceId;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
|
|
@ -599,6 +609,10 @@ public class SolidFireUtil {
|
|||
return _totalSize;
|
||||
}
|
||||
|
||||
public String getScsiNaaDeviceId() {
|
||||
return _scsiNaaDeviceId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return _iqn.hashCode();
|
||||
|
|
@ -644,11 +658,13 @@ public class SolidFireUtil {
|
|||
Snapshot[] snapshots = getSolidFireElement(sfConnection).listSnapshots(request).getSnapshots();
|
||||
|
||||
String snapshotName = null;
|
||||
long totalSize = 0;
|
||||
|
||||
if (snapshots != null) {
|
||||
for (Snapshot snapshot : snapshots) {
|
||||
if (snapshot.getSnapshotID() == snapshotId) {
|
||||
snapshotName = snapshot.getName();
|
||||
totalSize = snapshot.getTotalSize();
|
||||
|
||||
break;
|
||||
}
|
||||
|
|
@ -659,7 +675,7 @@ public class SolidFireUtil {
|
|||
throw new CloudRuntimeException("Could not find SolidFire snapshot ID: " + snapshotId + " for the following SolidFire volume ID: " + volumeId);
|
||||
}
|
||||
|
||||
return new SolidFireSnapshot(snapshotId, snapshotName);
|
||||
return new SolidFireSnapshot(snapshotId, snapshotName, totalSize);
|
||||
}
|
||||
|
||||
public static void deleteSnapshot(SolidFireConnection sfConnection, long snapshotId) {
|
||||
|
|
@ -670,13 +686,24 @@ public class SolidFireUtil {
|
|||
getSolidFireElement(sfConnection).deleteSnapshot(request);
|
||||
}
|
||||
|
||||
public static void rollBackVolumeToSnapshot(SolidFireConnection sfConnection, long volumeId, long snapshotId) {
|
||||
RollbackToSnapshotRequest request = RollbackToSnapshotRequest.builder()
|
||||
.volumeID(volumeId)
|
||||
.snapshotID(snapshotId)
|
||||
.build();
|
||||
|
||||
getSolidFireElement(sfConnection).rollbackToSnapshot(request);
|
||||
}
|
||||
|
||||
public static class SolidFireSnapshot {
|
||||
private final long _id;
|
||||
private final String _name;
|
||||
private final long _totalSize;
|
||||
|
||||
SolidFireSnapshot(long id, String name) {
|
||||
SolidFireSnapshot(long id, String name, long totalSize) {
|
||||
_id = id;
|
||||
_name = name;
|
||||
_totalSize = totalSize;
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
|
|
@ -686,6 +713,10 @@ public class SolidFireUtil {
|
|||
public String getName() {
|
||||
return _name;
|
||||
}
|
||||
|
||||
public long getTotalSize() {
|
||||
return _totalSize;
|
||||
}
|
||||
}
|
||||
|
||||
public static long createClone(SolidFireConnection sfConnection, long volumeId, long snapshotId, long accountId,
|
||||
|
|
|
|||
|
|
@ -293,7 +293,9 @@ public class ViewResponseHelper {
|
|||
vs = ApiDBUtils.getVolumeStatistics(vrData.getPath());
|
||||
}
|
||||
else if (vr.getFormat() == ImageFormat.OVA){
|
||||
vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo());
|
||||
if (vrData.getChainInfo() != null) {
|
||||
vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo());
|
||||
}
|
||||
}
|
||||
if (vs != null){
|
||||
long vsz = vs.getVirtualSize();
|
||||
|
|
|
|||
|
|
@ -899,7 +899,14 @@ public enum Config {
|
|||
"0",
|
||||
"Default disk I/O writerate in bytes per second allowed in User vm's disk.",
|
||||
null),
|
||||
|
||||
KvmAutoConvergence(
|
||||
"Advanced",
|
||||
ManagementServer.class,
|
||||
Boolean.class,
|
||||
"kvm.auto.convergence",
|
||||
"false",
|
||||
"Setting this to 'true' allows KVM to use auto convergence to complete VM migration (libvirt version 1.2.3+ and QEMU version 1.6+)",
|
||||
null),
|
||||
ControlCidr(
|
||||
"Advanced",
|
||||
ManagementServer.class,
|
||||
|
|
|
|||
|
|
@ -1232,16 +1232,29 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
|
||||
if (volClusterId != null) {
|
||||
if (!host.getClusterId().equals(volClusterId) || usesLocal) {
|
||||
if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
|
||||
requiresStorageMotion.put(host, true);
|
||||
} else {
|
||||
if (storagePool.isManaged()) {
|
||||
// At the time being, we do not support storage migration of a volume from managed storage unless the managed storage
|
||||
// is at the zone level and the source and target storage pool is the same.
|
||||
// If the source and target storage pool is the same and it is managed, then we still have to perform a storage migration
|
||||
// because we need to create a new target volume and copy the contents of the source volume into it before deleting the
|
||||
// source volume.
|
||||
iterator.remove();
|
||||
}
|
||||
else {
|
||||
if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
|
||||
requiresStorageMotion.put(host, true);
|
||||
} else {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (storagePool.isManaged()) {
|
||||
if (srcHost.getClusterId() != host.getClusterId()) {
|
||||
// If the volume's storage pool is managed and at the zone level, then we still have to perform a storage migration
|
||||
// because we need to create a new target volume and copy the contents of the source volume into it before deleting
|
||||
// the source volume.
|
||||
requiresStorageMotion.put(host, true);
|
||||
}
|
||||
}
|
||||
|
|
@ -1388,12 +1401,19 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
|
|||
final StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId());
|
||||
// Get all the pools available. Only shared pools are considered because only a volume on a shared pools
|
||||
// can be live migrated while the virtual machine stays on the same host.
|
||||
List<StoragePoolVO> storagePools = null;
|
||||
if (srcVolumePool.getClusterId() == null) {
|
||||
storagePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null);
|
||||
} else {
|
||||
|
||||
List<StoragePoolVO> storagePools;
|
||||
|
||||
if (srcVolumePool.getClusterId() != null) {
|
||||
storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null);
|
||||
}
|
||||
else {
|
||||
storagePools = new ArrayList<>();
|
||||
}
|
||||
|
||||
List<StoragePoolVO> zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(volume.getDataCenterId(), null);
|
||||
|
||||
storagePools.addAll(zoneWideStoragePools);
|
||||
|
||||
storagePools.remove(srcVolumePool);
|
||||
for (final StoragePoolVO pool : storagePools) {
|
||||
|
|
|
|||
|
|
@ -40,8 +40,6 @@ import java.util.concurrent.TimeUnit;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
|
||||
import org.apache.log4j.Logger;
|
||||
|
|
@ -104,6 +102,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
|||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.DeleteStoragePoolCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
|
|
@ -143,6 +142,7 @@ import com.cloud.host.Host;
|
|||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.Status;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
|
|
@ -846,6 +846,32 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool) {
|
||||
final Map<String, String> details = new HashMap<>();
|
||||
|
||||
details.put(DeleteStoragePoolCommand.DATASTORE_NAME, iScsiName);
|
||||
details.put(DeleteStoragePoolCommand.IQN, iScsiName);
|
||||
details.put(DeleteStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress());
|
||||
details.put(DeleteStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
|
||||
|
||||
final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();
|
||||
|
||||
cmd.setDetails(details);
|
||||
cmd.setRemoveDatastore(true);
|
||||
|
||||
final Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null || !answer.getResult()) {
|
||||
String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" +
|
||||
(StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
|
||||
|
||||
s_logger.error(errMsg);
|
||||
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@DB
|
||||
public boolean deletePool(DeletePoolCmd cmd) {
|
||||
|
|
@ -1237,41 +1263,54 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This method only applies for managed storage.
|
||||
*
|
||||
* For XenServer and vSphere, see if we need to remove an SR or a datastore, then remove the underlying volume
|
||||
* from any applicable access control list (before other code attempts to delete the volume that supports it).
|
||||
*
|
||||
* For KVM, just tell the underlying storage plug-in to remove the volume from any applicable access control list
|
||||
* (before other code attempts to delete the volume that supports it).
|
||||
*/
|
||||
private void handleManagedStorage(Volume volume) {
|
||||
Long instanceId = volume.getInstanceId();
|
||||
|
||||
// The idea of this "if" statement is to see if we need to remove an SR/datastore before
|
||||
// deleting the volume that supports it on a SAN. This only applies for managed storage.
|
||||
if (instanceId != null) {
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
|
||||
|
||||
if (storagePool != null && storagePool.isManaged()) {
|
||||
DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
|
||||
DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
|
||||
|
||||
DettachCommand cmd = new DettachCommand(disk, null);
|
||||
|
||||
cmd.setManaged(true);
|
||||
|
||||
cmd.setStorageHost(storagePool.getHostAddress());
|
||||
cmd.setStoragePort(storagePool.getPort());
|
||||
|
||||
cmd.set_iScsiName(volume.get_iScsiName());
|
||||
|
||||
VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId);
|
||||
|
||||
Long lastHostId = vmInstanceVO.getLastHostId();
|
||||
|
||||
if (lastHostId != null) {
|
||||
Answer answer = _agentMgr.easySend(lastHostId, cmd);
|
||||
|
||||
if (answer != null && answer.getResult()) {
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());
|
||||
HostVO host = _hostDao.findById(lastHostId);
|
||||
HostVO host = _hostDao.findById(lastHostId);
|
||||
ClusterVO cluster = _clusterDao.findById(host.getClusterId());
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());
|
||||
|
||||
if (cluster.getHypervisorType() == HypervisorType.KVM) {
|
||||
volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore());
|
||||
} else {
|
||||
s_logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid());
|
||||
}
|
||||
else {
|
||||
DataTO volTO = volFactory.getVolume(volume.getId()).getTO();
|
||||
DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType());
|
||||
|
||||
DettachCommand cmd = new DettachCommand(disk, null);
|
||||
|
||||
cmd.setManaged(true);
|
||||
|
||||
cmd.setStorageHost(storagePool.getHostAddress());
|
||||
cmd.setStoragePort(storagePool.getPort());
|
||||
|
||||
cmd.set_iScsiName(volume.get_iScsiName());
|
||||
|
||||
Answer answer = _agentMgr.easySend(lastHostId, cmd);
|
||||
|
||||
if (answer != null && answer.getResult()) {
|
||||
volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore());
|
||||
} else {
|
||||
s_logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package com.cloud.storage;
|
|||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.api.ApiDBUtils;
|
||||
|
|
@ -475,7 +476,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
}
|
||||
} else {
|
||||
volume.setDiskOfferingId(diskOfferingId);
|
||||
|
||||
DiskOfferingVO diskOfferingVO = _diskOfferingDao.findById(diskOfferingId);
|
||||
|
||||
Boolean isCustomizedIops = diskOfferingVO != null && diskOfferingVO.isCustomizedIops() != null ? diskOfferingVO.isCustomizedIops() : false;
|
||||
|
||||
if (isCustomizedIops == null || !isCustomizedIops) {
|
||||
volume.setMinIops(diskOfferingVO.getMinIops());
|
||||
volume.setMaxIops(diskOfferingVO.getMaxIops());
|
||||
}
|
||||
}
|
||||
|
||||
// volume.setSize(size);
|
||||
volume.setInstanceId(null);
|
||||
volume.setUpdated(new Date());
|
||||
|
|
@ -845,10 +856,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
@DB
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true)
|
||||
public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationException {
|
||||
Long newSize = null;
|
||||
Long newMinIops = null;
|
||||
Long newMaxIops = null;
|
||||
Integer newHypervisorSnapshotReserve = null;
|
||||
Long newSize;
|
||||
Long newMinIops;
|
||||
Long newMaxIops;
|
||||
Integer newHypervisorSnapshotReserve;
|
||||
boolean shrinkOk = cmd.getShrinkOk();
|
||||
|
||||
VolumeVO volume = _volsDao.findById(cmd.getEntityId());
|
||||
|
|
@ -865,13 +876,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
/* Does the caller have authority to act on this volume? */
|
||||
_accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume);
|
||||
|
||||
if(volume.getInstanceId() != null) {
|
||||
// Check that Vm to which this volume is attached does not have VM Snapshots
|
||||
if (_vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) {
|
||||
throw new InvalidParameterValueException("Volume cannot be resized which is attached to VM with VM Snapshots");
|
||||
}
|
||||
}
|
||||
|
||||
DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
|
||||
DiskOfferingVO newDiskOffering = null;
|
||||
|
||||
|
|
@ -916,7 +920,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
newMinIops = cmd.getMinIops();
|
||||
|
||||
if (newMinIops != null) {
|
||||
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
|
||||
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
|
||||
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
|
||||
}
|
||||
}
|
||||
|
|
@ -928,7 +932,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
newMaxIops = cmd.getMaxIops();
|
||||
|
||||
if (newMaxIops != null) {
|
||||
if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) {
|
||||
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
|
||||
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
|
||||
}
|
||||
}
|
||||
|
|
@ -967,9 +971,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
throw new InvalidParameterValueException("The new disk offering requires that a size be specified.");
|
||||
}
|
||||
|
||||
// convert from bytes to GiB
|
||||
// convert from GiB to bytes
|
||||
newSize = newSize << 30;
|
||||
} else {
|
||||
if (cmd.getSize() != null) {
|
||||
throw new InvalidParameterValueException("You cannnot pass in a custom disk size to a non-custom disk offering.");
|
||||
}
|
||||
|
||||
newSize = newDiskOffering.getDiskSize();
|
||||
}
|
||||
|
||||
|
|
@ -996,10 +1004,35 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
// if the caller is looking to change the size of the volume
|
||||
if (currentSize != newSize) {
|
||||
if (volume.getInstanceId() != null) {
|
||||
// Check that VM to which this volume is attached does not have VM snapshots
|
||||
if (_vmSnapshotDao.findByVm(volume.getInstanceId()).size() > 0) {
|
||||
throw new InvalidParameterValueException("A volume that is attached to a VM with any VM snapshots cannot be resized.");
|
||||
}
|
||||
}
|
||||
|
||||
if (!validateVolumeSizeRange(newSize)) {
|
||||
throw new InvalidParameterValueException("Requested size out of range");
|
||||
}
|
||||
|
||||
Long storagePoolId = volume.getPoolId();
|
||||
|
||||
if (storagePoolId != null) {
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
|
||||
|
||||
if (storagePoolVO.isManaged()) {
|
||||
Long instanceId = volume.getInstanceId();
|
||||
|
||||
if (instanceId != null) {
|
||||
VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId);
|
||||
|
||||
if (vmInstanceVO.getHypervisorType() == HypervisorType.KVM && vmInstanceVO.getState() != State.Stopped) {
|
||||
throw new CloudRuntimeException("This kind of KVM disk cannot be resized while it is connected to a VM that's not in the Stopped state.");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Let's make certain they (think they) know what they're doing if they
|
||||
* want to shrink by forcing them to provide the shrinkok parameter.
|
||||
|
|
@ -1161,26 +1194,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
// this call to resize has a different impact depending on whether the
|
||||
// underlying primary storage is managed or not
|
||||
// if managed, this is the chance for the plug-in to change IOPS value, if applicable
|
||||
// if managed, this is the chance for the plug-in to change the size and/or IOPS values
|
||||
// if not managed, this is the chance for the plug-in to talk to the hypervisor layer
|
||||
// to change the size of the disk
|
||||
AsyncCallFuture<VolumeApiResult> future = volService.resize(vol);
|
||||
VolumeApiResult result = future.get();
|
||||
|
||||
// managed storage is designed in such a way that the storage plug-in does not
|
||||
// talk to the hypervisor layer; as such, if the storage is managed and the
|
||||
// current and new sizes are different, then CloudStack (i.e. not a storage plug-in)
|
||||
// needs to tell the hypervisor to resize the disk
|
||||
if (storagePool.isManaged() && currentSize != newSize) {
|
||||
if (hosts != null && hosts.length > 0) {
|
||||
volService.resizeVolumeOnHypervisor(volumeId, newSize, hosts[0], instanceName);
|
||||
}
|
||||
|
||||
volume.setSize(newSize);
|
||||
|
||||
_volsDao.update(volume.getId(), volume);
|
||||
}
|
||||
|
||||
if (result.isFailed()) {
|
||||
s_logger.warn("Failed to resize the volume " + volume);
|
||||
String details = "";
|
||||
|
|
@ -1190,11 +1209,30 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
throw new CloudRuntimeException(details);
|
||||
}
|
||||
|
||||
// managed storage is designed in such a way that the storage plug-in does not
|
||||
// talk to the hypervisor layer; as such, if the storage is managed and the
|
||||
// current and new sizes are different, then CloudStack (i.e. not a storage plug-in)
|
||||
// needs to tell the hypervisor to resize the disk
|
||||
if (storagePool.isManaged() && currentSize != newSize) {
|
||||
if (hosts != null && hosts.length > 0) {
|
||||
HostVO hostVO = _hostDao.findById(hosts[0]);
|
||||
|
||||
if (hostVO.getHypervisorType() != HypervisorType.KVM) {
|
||||
volService.resizeVolumeOnHypervisor(volumeId, newSize, hosts[0], instanceName);
|
||||
}
|
||||
}
|
||||
|
||||
volume.setSize(newSize);
|
||||
|
||||
_volsDao.update(volume.getId(), volume);
|
||||
}
|
||||
|
||||
volume = _volsDao.findById(volume.getId());
|
||||
|
||||
if (newDiskOfferingId != null) {
|
||||
volume.setDiskOfferingId(newDiskOfferingId);
|
||||
}
|
||||
|
||||
if (currentSize != newSize) {
|
||||
volume.setSize(newSize);
|
||||
}
|
||||
|
|
@ -1851,6 +1889,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
volService.revokeAccess(volFactory.getVolume(volume.getId()), host, dataStore);
|
||||
|
||||
handleTargetsForVMware(hostId, volumePool.getHostAddress(), volumePool.getPort(), volume.get_iScsiName());
|
||||
|
||||
return _volsDao.findById(volumeId);
|
||||
} else {
|
||||
|
||||
|
|
@ -1885,6 +1925,46 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
}
|
||||
}
|
||||
|
||||
private void handleTargetsForVMware(long hostId, String storageAddress, int storagePort, String iScsiName) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
if (host.getHypervisorType() == HypervisorType.VMware) {
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
List<Map<String, String>> targets = new ArrayList<>();
|
||||
|
||||
Map<String, String> target = new HashMap<>();
|
||||
|
||||
target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress);
|
||||
target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort));
|
||||
target.put(ModifyTargetsCommand.IQN, iScsiName);
|
||||
|
||||
targets.add(target);
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(false);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
String msg = "Unable to get an answer to the modify targets command";
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
else if (!answer.getResult()) {
|
||||
String msg = "Unable to modify target on the following host: " + hostId;
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
@DB
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_MIGRATE, eventDescription = "migrating volume", async = true)
|
||||
|
|
@ -2088,6 +2168,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
}
|
||||
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
|
||||
|
||||
if (storagePoolVO.isManaged() && locationType == null) {
|
||||
locationType = Snapshot.LocationType.PRIMARY;
|
||||
}
|
||||
|
|
@ -2206,6 +2287,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
}
|
||||
|
||||
StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId());
|
||||
|
||||
if (!storagePoolVO.isManaged() && locationType != null) {
|
||||
throw new InvalidParameterValueException("VolumeId: " + volumeId + " LocationType is supported only for managed storage");
|
||||
}
|
||||
|
|
@ -2295,7 +2377,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
throw new InvalidParameterValueException("Please specify a valid zone.");
|
||||
}
|
||||
if (volume.getPoolId() == null) {
|
||||
throw new InvalidParameterValueException("The volume doesnt belong to a storage pool so cant extract it");
|
||||
throw new InvalidParameterValueException("The volume doesn't belong to a storage pool so can't extract it");
|
||||
}
|
||||
// Extract activity only for detached volumes or for volumes whose
|
||||
// instance is stopped
|
||||
|
|
@ -2419,9 +2501,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
String extractUrl = secStore.createEntityExtractUrl(vol.getPath(), vol.getFormat(), vol);
|
||||
VolumeDataStoreVO volumeStoreRef = _volumeStoreDao.findByVolume(volumeId);
|
||||
|
||||
volumeStoreRef.setExtractUrl(extractUrl);
|
||||
volumeStoreRef.setExtractUrlCreated(DateUtil.now());
|
||||
volumeStoreRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
|
||||
volumeStoreRef.setDownloadPercent(100);
|
||||
volumeStoreRef.setZoneId(zoneId);
|
||||
|
||||
_volumeStoreDao.update(volumeStoreRef.getId(), volumeStoreRef);
|
||||
|
||||
return extractUrl;
|
||||
}
|
||||
|
||||
|
|
@ -2630,6 +2718,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
// Mark the volume as attached
|
||||
if (sendCommand) {
|
||||
DiskTO disk = answer.getDisk();
|
||||
|
||||
_volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), disk.getDiskSeq());
|
||||
|
||||
volumeToAttach = _volsDao.findById(volumeToAttach.getId());
|
||||
|
|
@ -2643,6 +2732,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
deviceId = getDeviceId(vm, deviceId);
|
||||
|
||||
_volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId);
|
||||
|
||||
volumeToAttach = _volsDao.findById(volumeToAttach.getId());
|
||||
|
||||
if (vm.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
|
||||
volumeToAttach.setPath(volumeToAttach.get_iScsiName());
|
||||
|
||||
_volsDao.update(volumeToAttach.getId(), volumeToAttach);
|
||||
}
|
||||
}
|
||||
|
||||
// insert record for disk I/O statistics
|
||||
|
|
|
|||
|
|
@ -291,7 +291,9 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
}
|
||||
}
|
||||
|
||||
SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image);
|
||||
DataStoreRole dataStoreRole = getDataStoreRole(snapshot, _snapshotStoreDao, dataStoreMgr);
|
||||
|
||||
SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotId, dataStoreRole);
|
||||
if (snapshotInfo == null) {
|
||||
throw new CloudRuntimeException("snapshot:" + snapshotId + " not exist in data store");
|
||||
}
|
||||
|
|
@ -548,6 +550,10 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
return false;
|
||||
}
|
||||
|
||||
DataStoreRole dataStoreRole = getDataStoreRole(snapshotCheck, _snapshotStoreDao, dataStoreMgr);
|
||||
|
||||
SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, dataStoreRole);
|
||||
|
||||
try {
|
||||
boolean result = snapshotStrategy.deleteSnapshot(snapshotId);
|
||||
|
||||
|
|
@ -562,8 +568,6 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
}
|
||||
|
||||
if (snapshotCheck.getState() == Snapshot.State.BackedUp) {
|
||||
SnapshotDataStoreVO snapshotStoreRef = _snapshotStoreDao.findBySnapshot(snapshotId, DataStoreRole.Image);
|
||||
|
||||
if (snapshotStoreRef != null) {
|
||||
_resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize()));
|
||||
}
|
||||
|
|
@ -1396,8 +1400,16 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
hypervisorType = storagePool.getHypervisor();
|
||||
|
||||
// at the time being, managed storage only supports XenServer, ESX(i), and KVM (i.e. not Hyper-V), so the VHD file type can be mapped to XenServer
|
||||
if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType) && ImageFormat.VHD.equals(volume.getFormat())) {
|
||||
hypervisorType = HypervisorType.XenServer;
|
||||
if (storagePool.isManaged() && HypervisorType.Any.equals(hypervisorType)) {
|
||||
if (ImageFormat.VHD.equals(volume.getFormat())) {
|
||||
hypervisorType = HypervisorType.XenServer;
|
||||
}
|
||||
else if (ImageFormat.OVA.equals(volume.getFormat())) {
|
||||
hypervisorType = HypervisorType.VMware;
|
||||
}
|
||||
else if (ImageFormat.QCOW2.equals(volume.getFormat())) {
|
||||
hypervisorType = HypervisorType.KVM;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
hypervisorType = volume.getHypervisorType();
|
||||
|
|
|
|||
|
|
@ -70,6 +70,7 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVmNicIpCmd;
|
|||
import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd;
|
||||
import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd;
|
||||
import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd;
|
||||
import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity;
|
||||
import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMNetworkMapDao;
|
||||
|
|
@ -109,6 +110,7 @@ import com.cloud.agent.api.GetVmStatsAnswer;
|
|||
import com.cloud.agent.api.GetVmStatsCommand;
|
||||
import com.cloud.agent.api.GetVolumeStatsAnswer;
|
||||
import com.cloud.agent.api.GetVolumeStatsCommand;
|
||||
import com.cloud.agent.api.ModifyTargetsCommand;
|
||||
import com.cloud.agent.api.PvlanSetupCommand;
|
||||
import com.cloud.agent.api.RestoreVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.RestoreVMSnapshotCommand;
|
||||
|
|
@ -1116,6 +1118,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
// Check that the specified service offering ID is valid
|
||||
_itMgr.checkIfCanUpgrade(vmInstance, newServiceOffering);
|
||||
|
||||
DiskOfferingVO newROOTDiskOffering = _diskOfferingDao.findById(newServiceOffering.getId());
|
||||
|
||||
List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vmInstance.getId());
|
||||
|
||||
for (final VolumeVO rootVolumeOfVm : vols) {
|
||||
rootVolumeOfVm.setDiskOfferingId(newROOTDiskOffering.getId());
|
||||
|
||||
_volsDao.update(rootVolumeOfVm.getId(), rootVolumeOfVm);
|
||||
|
||||
ResizeVolumeCmd resizeVolumeCmd = new ResizeVolumeCmd(rootVolumeOfVm.getId(), newROOTDiskOffering.getMinIops(), newROOTDiskOffering.getMaxIops());
|
||||
|
||||
_volumeService.resizeVolume(resizeVolumeCmd);
|
||||
}
|
||||
|
||||
// Check if the new service offering can be applied to vm instance
|
||||
ServiceOffering newSvcOffering = _offeringDao.findById(svcOffId);
|
||||
Account owner = _accountMgr.getActiveAccountById(vmInstance.getAccountId());
|
||||
|
|
@ -4955,7 +4971,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
|
||||
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
|
||||
if (vols.size() > 1) {
|
||||
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to dettach data disks at first");
|
||||
throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. Need to detach data disks first");
|
||||
}
|
||||
|
||||
// Check that Vm does not have VM Snapshots
|
||||
|
|
@ -4969,7 +4985,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
destPool.getClusterId()).getHypervisorType();
|
||||
}
|
||||
|
||||
if (vm.getHypervisorType() != destHypervisorType) {
|
||||
if (vm.getHypervisorType() != destHypervisorType && destHypervisorType != HypervisorType.Any) {
|
||||
throw new InvalidParameterValueException("hypervisor is not compatible: dest: " + destHypervisorType.toString() + ", vm: " + vm.getHypervisorType().toString());
|
||||
}
|
||||
_itMgr.storageMigration(vm.getUuid(), destPool);
|
||||
|
|
@ -5384,13 +5400,47 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
|
||||
// Check if the source and destination hosts are of the same type and support storage motion.
|
||||
if (!(srcHost.getHypervisorType().equals(destinationHost.getHypervisorType()))) {
|
||||
throw new CloudRuntimeException("The source and destination hosts are not of the same type. " + "Source hypervisor type and version: "
|
||||
+ srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: "
|
||||
+ destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
|
||||
if (!srcHost.getHypervisorType().equals(destinationHost.getHypervisorType())) {
|
||||
throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. Source hypervisor type and version: " +
|
||||
srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " +
|
||||
destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
|
||||
}
|
||||
|
||||
String srcHostVersion = srcHost.getHypervisorVersion();
|
||||
String destinationHostVersion = destinationHost.getHypervisorVersion();
|
||||
|
||||
if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) {
|
||||
if (srcHostVersion == null) {
|
||||
srcHostVersion = "";
|
||||
}
|
||||
|
||||
if (destinationHostVersion == null) {
|
||||
destinationHostVersion = "";
|
||||
}
|
||||
}
|
||||
|
||||
if (!srcHostVersion.equals(destinationHostVersion)) {
|
||||
throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. Source hypervisor type and version: " +
|
||||
srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " +
|
||||
destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
|
||||
}
|
||||
|
||||
HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
|
||||
|
||||
if (capabilities == null && HypervisorType.KVM.equals(srcHost.getHypervisorType())) {
|
||||
List<HypervisorCapabilitiesVO> lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM);
|
||||
|
||||
if (lstHypervisorCapabilities != null) {
|
||||
for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) {
|
||||
if (hypervisorCapabilities.isStorageMotionSupported()) {
|
||||
capabilities = hypervisorCapabilities;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!capabilities.isStorageMotionSupported()) {
|
||||
throw new CloudRuntimeException("Migration with storage isn't supported on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion());
|
||||
}
|
||||
|
|
@ -6168,9 +6218,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
}
|
||||
|
||||
private void handleManagedStorage(UserVmVO vm, VolumeVO root) {
|
||||
if ( Volume.State.Allocated.equals(root.getState()) ){
|
||||
if (Volume.State.Allocated.equals(root.getState())) {
|
||||
return;
|
||||
}
|
||||
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(root.getPoolId());
|
||||
|
||||
if (storagePool != null && storagePool.isManaged()) {
|
||||
|
|
@ -6203,7 +6254,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
Map<String, String> details = primaryDataStore.getDetails();
|
||||
|
||||
if (details == null) {
|
||||
details = new HashMap<String, String>();
|
||||
details = new HashMap<>();
|
||||
|
||||
primaryDataStore.setDetails(details);
|
||||
}
|
||||
|
|
@ -6212,38 +6263,87 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
|
||||
cmd = new DeleteCommand(volumeInfo.getTO());
|
||||
}
|
||||
else if (host.getHypervisorType() == HypervisorType.KVM) {
|
||||
cmd = null;
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("This hypervisor type is not supported on managed storage for this command.");
|
||||
}
|
||||
|
||||
Commands cmds = new Commands(Command.OnError.Stop);
|
||||
if (cmd != null) {
|
||||
Commands cmds = new Commands(Command.OnError.Stop);
|
||||
|
||||
cmds.addCommand(cmd);
|
||||
cmds.addCommand(cmd);
|
||||
|
||||
try {
|
||||
_agentMgr.send(hostId, cmds);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new CloudRuntimeException(ex.getMessage());
|
||||
}
|
||||
try {
|
||||
_agentMgr.send(hostId, cmds);
|
||||
} catch (Exception ex) {
|
||||
throw new CloudRuntimeException(ex.getMessage());
|
||||
}
|
||||
|
||||
if (!cmds.isSuccessful()) {
|
||||
for (Answer answer : cmds.getAnswers()) {
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Failed to reset vm due to: " + answer.getDetails());
|
||||
if (!cmds.isSuccessful()) {
|
||||
for (Answer answer : cmds.getAnswers()) {
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Failed to reset vm due to: " + answer.getDetails());
|
||||
|
||||
throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails());
|
||||
throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// root.getPoolId() should be null if the VM we are detaching the disk from has never been started before
|
||||
DataStore dataStore = root.getPoolId() != null ? _dataStoreMgr.getDataStore(root.getPoolId(), DataStoreRole.Primary) : null;
|
||||
|
||||
volumeMgr.revokeAccess(volFactory.getVolume(root.getId()), host, dataStore);
|
||||
|
||||
if (dataStore != null) {
|
||||
handleTargetsForVMware(host.getId(), storagePool.getHostAddress(), storagePool.getPort(), root.get_iScsiName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void handleTargetsForVMware(long hostId, String storageAddress, int storagePort, String iScsiName) {
|
||||
HostVO host = _hostDao.findById(hostId);
|
||||
|
||||
if (host.getHypervisorType() == HypervisorType.VMware) {
|
||||
ModifyTargetsCommand cmd = new ModifyTargetsCommand();
|
||||
|
||||
List<Map<String, String>> targets = new ArrayList<>();
|
||||
|
||||
Map<String, String> target = new HashMap<>();
|
||||
|
||||
target.put(ModifyTargetsCommand.STORAGE_HOST, storageAddress);
|
||||
target.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePort));
|
||||
target.put(ModifyTargetsCommand.IQN, iScsiName);
|
||||
|
||||
targets.add(target);
|
||||
|
||||
cmd.setTargets(targets);
|
||||
cmd.setApplyToAllHostsInCluster(true);
|
||||
cmd.setAdd(false);
|
||||
cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);
|
||||
|
||||
sendModifyTargetsCommand(cmd, hostId);
|
||||
}
|
||||
}
|
||||
|
||||
private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) {
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer == null) {
|
||||
String msg = "Unable to get an answer to the modify targets command";
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
else if (!answer.getResult()) {
|
||||
String msg = "Unable to modify target on the following host: " + hostId;
|
||||
|
||||
s_logger.warn(msg);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile profile) {
|
||||
UserVmVO vm = _vmDao.findById(profile.getId());
|
||||
|
|
|
|||
|
|
@ -16,12 +16,6 @@
|
|||
// under the License.
|
||||
package com.cloud.storage.snapshot;
|
||||
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyLong;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
|
|
@ -79,6 +73,11 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
|
|||
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService;
|
||||
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyLong;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class SnapshotManagerTest {
|
||||
@Spy
|
||||
|
|
@ -126,7 +125,7 @@ public class SnapshotManagerTest {
|
|||
@Mock
|
||||
DataStore storeMock;
|
||||
@Mock
|
||||
SnapshotDataStoreDao _snapshotStoreDao;
|
||||
SnapshotDataStoreDao snapshotStoreDao;
|
||||
@Mock
|
||||
SnapshotDataStoreVO snapshotStoreMock;
|
||||
@Mock
|
||||
|
|
@ -153,7 +152,7 @@ public class SnapshotManagerTest {
|
|||
_snapshotMgr._storagePoolDao = _storagePoolDao;
|
||||
_snapshotMgr._resourceMgr = _resourceMgr;
|
||||
_snapshotMgr._vmSnapshotDao = _vmSnapshotDao;
|
||||
_snapshotMgr._snapshotStoreDao = _snapshotStoreDao;
|
||||
_snapshotMgr._snapshotStoreDao = snapshotStoreDao;
|
||||
|
||||
when(_snapshotDao.findById(anyLong())).thenReturn(snapshotMock);
|
||||
when(snapshotMock.getVolumeId()).thenReturn(TEST_VOLUME_ID);
|
||||
|
|
@ -260,6 +259,7 @@ public class SnapshotManagerTest {
|
|||
when(snapshotMock.getState()).thenReturn(Snapshot.State.Destroyed);
|
||||
when(snapshotMock.getAccountId()).thenReturn(2L);
|
||||
when(snapshotMock.getDataCenterId()).thenReturn(2L);
|
||||
|
||||
_snapshotMgr.deleteSnapshot(TEST_SNAPSHOT_ID);
|
||||
}
|
||||
|
||||
|
|
@ -311,14 +311,14 @@ public class SnapshotManagerTest {
|
|||
when(_vmDao.findById(anyLong())).thenReturn(vmMock);
|
||||
when(vmMock.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
|
||||
when(_vmSnapshotDao.findById(anyLong())).thenReturn(vmSnapshotMock);
|
||||
when(_snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(null);
|
||||
when(snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(null);
|
||||
when(snapshotFactory.getSnapshot(anyLong(), Mockito.any(DataStore.class))).thenReturn(snapshotInfoMock);
|
||||
when(storeMock.create(snapshotInfoMock)).thenReturn(snapshotInfoMock);
|
||||
when(_snapshotStoreDao.findBySnapshot(anyLong(), any(DataStoreRole.class))).thenReturn(snapshotStoreMock);
|
||||
when(_snapshotStoreDao.update(anyLong(), any(SnapshotDataStoreVO.class))).thenReturn(true);
|
||||
when(snapshotStoreDao.findBySnapshot(anyLong(), any(DataStoreRole.class))).thenReturn(snapshotStoreMock);
|
||||
when(snapshotStoreDao.update(anyLong(), any(SnapshotDataStoreVO.class))).thenReturn(true);
|
||||
when(_snapshotDao.update(anyLong(), any(SnapshotVO.class))).thenReturn(true);
|
||||
when(vmMock.getAccountId()).thenReturn(2L);
|
||||
when(snapshotStrategy.backupSnapshot(any(SnapshotInfo.class))).thenReturn(snapshotInfoMock);;;
|
||||
when(snapshotStrategy.backupSnapshot(any(SnapshotInfo.class))).thenReturn(snapshotInfoMock);
|
||||
|
||||
Snapshot snapshot = _snapshotMgr.backupSnapshotFromVmSnapshot(TEST_SNAPSHOT_ID, TEST_VM_ID, TEST_VOLUME_ID, TEST_VM_SNAPSHOT_ID);
|
||||
Assert.assertNotNull(snapshot);
|
||||
|
|
@ -330,7 +330,7 @@ public class SnapshotManagerTest {
|
|||
when(_vmDao.findById(anyLong())).thenReturn(vmMock);
|
||||
when(vmMock.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
|
||||
when(_vmSnapshotDao.findById(anyLong())).thenReturn(vmSnapshotMock);
|
||||
when(_snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(snapshotStoreMock);
|
||||
when(snapshotStoreDao.findParent(any(DataStoreRole.class), anyLong(), anyLong())).thenReturn(snapshotStoreMock);
|
||||
when(snapshotStoreMock.getInstallPath()).thenReturn("VM_SNAPSHOT_NAME");
|
||||
when(vmSnapshotMock.getName()).thenReturn("VM_SNAPSHOT_NAME");
|
||||
Snapshot snapshot = _snapshotMgr.backupSnapshotFromVmSnapshot(TEST_SNAPSHOT_ID, TEST_VM_ID, TEST_VOLUME_ID, TEST_VM_SNAPSHOT_ID);
|
||||
|
|
|
|||
|
|
@ -51,6 +51,9 @@ from marvin.lib.utils import cleanup_resources
|
|||
# Check that ip_address_of_new_xenserver_host / ip_address_of_new_kvm_host is correct.
|
||||
# If using XenServer, verify the "xen_server_master_hostname" variable is correct.
|
||||
# If using KVM, verify the "kvm_1_ip_address" variable is correct.
|
||||
#
|
||||
# Note:
|
||||
# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
|
||||
|
||||
class TestData:
|
||||
|
|
@ -92,18 +95,18 @@ class TestData:
|
|||
# modify to control which hypervisor type to test
|
||||
hypervisor_type = xenServer
|
||||
xen_server_master_hostname = "XenServer-6.5-1"
|
||||
kvm_1_ip_address = "192.168.129.84"
|
||||
ip_address_of_new_xenserver_host = "192.168.129.243"
|
||||
ip_address_of_new_kvm_host = "192.168.129.3"
|
||||
kvm_1_ip_address = "10.117.40.112"
|
||||
ip_address_of_new_xenserver_host = "10.117.40.107"
|
||||
ip_address_of_new_kvm_host = "10.117.40.116"
|
||||
|
||||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "192.168.139.112",
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://192.168.139.112:443"
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.kvm: {
|
||||
TestData.username: "root",
|
||||
|
|
@ -144,7 +147,7 @@ class TestData:
|
|||
TestData.primaryStorage: {
|
||||
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
|
|
@ -157,7 +160,7 @@ class TestData:
|
|||
TestData.primaryStorage2: {
|
||||
TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
|
||||
TestData.scope: "CLUSTER",
|
||||
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"minIops=5000;maxIops=50000;burstIops=75000",
|
||||
TestData.provider: "SolidFireShared",
|
||||
|
|
@ -191,7 +194,7 @@ class TestData:
|
|||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "192.168.129.50"
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -223,7 +226,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
|
|||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
|
|
@ -753,7 +756,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
|
|||
|
||||
searchFor = "InitiatorName="
|
||||
|
||||
stdin, stdout, stderr = ssh_connection.exec_command("sudo grep " + searchFor + " /etc/iscsi/initiatorname.iscsi")
|
||||
stdout = ssh_connection.exec_command("sudo grep " + searchFor + " /etc/iscsi/initiatorname.iscsi")[1]
|
||||
|
||||
result = stdout.read()
|
||||
|
||||
|
|
|
|||
|
|
@ -93,17 +93,17 @@ class TestData():
|
|||
zoneId = "zoneid"
|
||||
|
||||
# modify to control which hypervisor type to test
|
||||
hypervisor_type = kvm
|
||||
hypervisor_type = xenServer
|
||||
xen_server_hostname = "XenServer-6.5-1"
|
||||
|
||||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "192.168.139.112",
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://192.168.139.112:443"
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.kvm: {
|
||||
TestData.username: "root",
|
||||
|
|
@ -130,7 +130,7 @@ class TestData():
|
|||
TestData.primaryStorage: {
|
||||
TestData.name: TestData.get_name_for_solidfire_storage(),
|
||||
TestData.scope: "ZONE",
|
||||
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
|
|
@ -184,7 +184,7 @@ class TestData():
|
|||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "192.168.129.50"
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -225,7 +225,7 @@ class TestManagedSystemVMs(cloudstackTestCase):
|
|||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = Zone(get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]).__dict__)
|
||||
cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
|
|
|
|||
|
|
@ -19,7 +19,6 @@ import logging
|
|||
import random
|
||||
import SignedAPICall
|
||||
import time
|
||||
import XenAPI
|
||||
|
||||
from solidfire.factory import ElementFactory
|
||||
|
||||
|
|
@ -36,7 +35,7 @@ from nose.plugins.attrib import attr
|
|||
from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume
|
||||
|
||||
# common - commonly used methods for all tests are listed here
|
||||
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes, list_snapshots
|
||||
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_volumes, list_snapshots
|
||||
|
||||
# utils - utility classes for common cleanup, external library wrappers, etc.
|
||||
from marvin.lib.utils import cleanup_resources, wait_until
|
||||
|
|
@ -76,6 +75,7 @@ class TestData():
|
|||
user = "user"
|
||||
username = "username"
|
||||
virtualMachine = "virtualmachine"
|
||||
vmWare = "vmware"
|
||||
volume_1 = "volume_1"
|
||||
volume_2 = "volume_2"
|
||||
xenServer = "xenserver"
|
||||
|
|
@ -87,16 +87,16 @@ class TestData():
|
|||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "192.168.139.112",
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://192.168.139.112:443"
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.primaryStorage: {
|
||||
"name": "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
"url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
"url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
|
|
@ -157,7 +157,7 @@ class TestData():
|
|||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "192.168.129.50"
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -176,6 +176,8 @@ class TestSnapshots(cloudstackTestCase):
|
|||
_should_be_six_volumes_in_list_err_msg = "There should be six volumes in this list."
|
||||
_should_be_seven_volumes_in_list_err_msg = "There should be seven volumes in this list."
|
||||
_should_be_five_items_in_list_err_msg = "There should be five items in this list."
|
||||
_should_be_six_items_in_list_err_msg = "There should be six items in this list."
|
||||
_should_be_seven_items_in_list_err_msg = "There should be seven items in this list."
|
||||
_sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
|
||||
|
||||
@classmethod
|
||||
|
|
@ -197,7 +199,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
|
|
@ -304,7 +306,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -313,11 +315,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg)
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_2)
|
||||
|
||||
|
|
@ -325,9 +327,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
self._delete_and_test_snapshot(vol_snap_3)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
|
|
@ -336,7 +338,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_1)
|
||||
|
||||
|
|
@ -345,7 +347,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_2)
|
||||
|
||||
|
|
@ -381,20 +383,20 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
||||
sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg)
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 3, TestSnapshots._should_be_three_snapshots_in_list_err_msg)
|
||||
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"}
|
||||
|
||||
template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services)
|
||||
|
||||
|
|
@ -429,16 +431,16 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots
|
||||
|
||||
sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume.id, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
|
|
@ -449,9 +451,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
|
||||
|
||||
|
|
@ -469,9 +471,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
volume_created_from_snapshot = virtual_machine_2.attach_volume(
|
||||
self.apiClient,
|
||||
|
|
@ -489,8 +491,8 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
virtual_machine_2.delete(self.apiClient, True)
|
||||
|
||||
|
|
@ -499,7 +501,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
list_volumes_response = list_volumes(
|
||||
self.apiClient,
|
||||
|
|
@ -555,7 +557,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -626,7 +628,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -645,7 +647,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
vol_snap_3 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 3, primary_storage_db_id, sf_volume_size,
|
||||
sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg)
|
||||
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"}
|
||||
|
||||
template = Template.create_from_snapshot(self.apiClient, vol_snap_2, services)
|
||||
|
||||
|
|
@ -680,7 +682,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots
|
||||
|
|
@ -693,7 +695,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
vol_snap_a = self._create_and_test_snapshot_2(vm_2_root_volume.id, sf_volume_id_2, sf_volume_id + 5, primary_storage_db_id, sf_volume_size_2,
|
||||
sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg)
|
||||
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
|
|
@ -704,9 +706,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
|
||||
|
||||
|
|
@ -724,8 +726,8 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
volume_created_from_snapshot = virtual_machine_2.attach_volume(
|
||||
self.apiClient,
|
||||
|
|
@ -794,7 +796,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
virtual_machine.stop(self.apiClient, True)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
sf_volume_id = sf_volume.volume_id
|
||||
sf_volume_size = sf_volume.total_size
|
||||
|
|
@ -807,7 +809,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_1.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
|
|
@ -818,7 +820,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
sf_util.check_list(sf_volume_2.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
|
||||
|
||||
|
|
@ -914,7 +916,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -934,11 +936,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.set_supports_resign(True, self.dbConnection)
|
||||
|
||||
vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_b = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_b = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"}
|
||||
|
||||
template_1 = Template.create_from_snapshot(self.apiClient, vol_snap_1, services)
|
||||
|
||||
|
|
@ -973,14 +975,14 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots
|
||||
|
||||
sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
|
||||
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
|
|
@ -994,7 +996,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
volume_created_from_snapshot_1
|
||||
)
|
||||
|
||||
services = {"displaytext": "Template-A", "name": "Template-A-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
|
||||
services = {"displaytext": "Template-A", "name": "Template-A-name", "ostypeid": self.template.ostypeid, "ispublic": "true"}
|
||||
|
||||
template_a = Template.create_from_snapshot(self.apiClient, vol_snap_a, services)
|
||||
|
||||
|
|
@ -1029,14 +1031,14 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_3_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_3.volume_id).snapshots
|
||||
|
||||
sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
|
||||
|
||||
services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
|
||||
services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
|
|
@ -1147,7 +1149,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -1158,11 +1160,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_3_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
|
||||
|
||||
vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
self._delete_and_test_archive_snapshot(vol_snap_3_archive)
|
||||
|
||||
|
|
@ -1174,7 +1176,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
|
|
@ -1183,7 +1185,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
self._delete_and_test_archive_snapshot(vol_snap_1_archive)
|
||||
|
||||
|
|
@ -1192,7 +1194,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_2)
|
||||
|
||||
|
|
@ -1228,22 +1230,22 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
||||
sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
|
||||
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
vol_snap_4_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
|
||||
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
|
||||
services = {"displaytext": "Template-1", "name": "Template-1-name", "ostypeid": self.template.ostypeid, "ispublic": "true"}
|
||||
|
||||
template = Template.create_from_snapshot(self.apiClient, vol_snap_2_archive, services)
|
||||
|
||||
|
|
@ -1278,7 +1280,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots
|
||||
|
|
@ -1298,9 +1300,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
|
||||
|
||||
|
|
@ -1318,9 +1320,9 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
volume_created_from_snapshot = virtual_machine_2.attach_volume(
|
||||
self.apiClient,
|
||||
|
|
@ -1340,8 +1342,8 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_2_root_volume_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
virtual_machine_2.delete(self.apiClient, True)
|
||||
|
||||
|
|
@ -1350,7 +1352,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
|
||||
sf_volume_3 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
list_volumes_response = list_volumes(
|
||||
self.apiClient,
|
||||
|
|
@ -1370,9 +1372,8 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
|
||||
|
||||
@attr(hypervisor='KVM')
|
||||
def test_05_create_volume_snapshot_using_sf_snapshot_and_revert_volume_to_snapshot(self):
|
||||
if TestData.hypervisor_type != TestData.kvm:
|
||||
if TestData.hypervisor_type != TestData.vmWare and TestData.hypervisor_type != TestData.kvm:
|
||||
return
|
||||
|
||||
virtual_machine = VirtualMachine.create(
|
||||
|
|
@ -1404,7 +1405,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
|
||||
|
||||
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
|
||||
sf_volume = sf_util.get_sf_volume_by_name(self, sf_volumes, vm_1_root_volume_name)
|
||||
|
||||
# Get snapshot information for volume from SolidFire cluster
|
||||
sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots
|
||||
|
|
@ -1413,9 +1414,56 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
virtual_machine.stop(self.apiClient, False)
|
||||
|
||||
if TestData.hypervisor_type == TestData.vmWare:
|
||||
try:
|
||||
Volume.revertToSnapshot(self.apiClient, vol_snap_1.id)
|
||||
|
||||
self.assertTrue(False, "An exception should have been thrown when trying to revert a volume to a snapshot and the volume is a root disk on VMware.")
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
Volume.revertToSnapshot(self.apiClient, vol_snap_1.id)
|
||||
|
||||
virtual_machine.start(self.apiClient)
|
||||
|
||||
try:
|
||||
Volume.revertToSnapshot(self.apiClient, vol_snap_1.id)
|
||||
|
||||
self.assertTrue(False, "An exception should have been thrown when trying to revert a volume to a snapshot and the volume is attached to a running VM.")
|
||||
except:
|
||||
pass
|
||||
|
||||
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
|
||||
|
||||
volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id)
|
||||
|
||||
volume_created_from_snapshot_name = volume_created_from_snapshot.name
|
||||
|
||||
# Get volume information from SolidFire cluster
|
||||
sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
|
||||
|
||||
sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
|
||||
|
||||
sf_volume_2 = sf_util.get_sf_volume_by_name(self, sf_volumes, volume_created_from_snapshot_name)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_2)
|
||||
|
||||
self._delete_and_test_snapshot(vol_snap_1)
|
||||
|
||||
vol_snap_1 = self._create_and_test_snapshot(volume_created_from_snapshot, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
|
||||
|
||||
vol_snap_2 = self._create_and_test_snapshot(volume_created_from_snapshot, sf_volume_2, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
|
||||
|
||||
volume_created_from_snapshot = virtual_machine.attach_volume(
|
||||
self.apiClient,
|
||||
volume_created_from_snapshot
|
||||
)
|
||||
|
||||
virtual_machine.stop(self.apiClient, False)
|
||||
|
||||
|
|
@ -1436,28 +1484,34 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
virtual_machine.delete(self.apiClient, True)
|
||||
|
||||
def _check_list_not_empty(self, in_list):
|
||||
self.assertEqual(
|
||||
isinstance(in_list, list),
|
||||
True,
|
||||
"'in_list' is not a list."
|
||||
)
|
||||
volume_created_from_snapshot = Volume(volume_created_from_snapshot.__dict__)
|
||||
|
||||
self.assertGreater(
|
||||
len(in_list),
|
||||
0,
|
||||
"The size of 'in_list' must be greater than zero."
|
||||
)
|
||||
volume_created_from_snapshot.delete(self.apiClient)
|
||||
|
||||
# used when SolidFire snapshots are being used for CloudStack volume snapshots
|
||||
def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size):
|
||||
sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg)
|
||||
def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, volume, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size):
|
||||
if TestData.hypervisor_type == TestData.vmWare:
|
||||
expected_num_details = 7
|
||||
|
||||
err_msg = TestSnapshots._should_be_seven_items_in_list_err_msg
|
||||
else:
|
||||
expected_num_details = 6
|
||||
|
||||
err_msg = TestSnapshots._should_be_six_items_in_list_err_msg
|
||||
|
||||
volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, volume)
|
||||
|
||||
sf_util.check_list(sf_snapshot_details, expected_num_details, self, err_msg)
|
||||
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "takeSnapshot", "true")
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "snapshotId", sf_snapshot_id)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "originalCloudStackVolumeId", volume_id)
|
||||
|
||||
if TestData.hypervisor_type == TestData.vmWare:
|
||||
self._check_snapshot_detail_with(str.endswith, sf_snapshot_details, cs_snapshot_id, "vmdk", ".vmdk")
|
||||
|
||||
# used when SolidFire volumes are being used for CloudStack volume snapshots
|
||||
def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, storage_pool_id, sf_volume_size):
|
||||
|
|
@ -1466,7 +1520,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id)
|
||||
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfVolumeSize", sf_volume_size)
|
||||
self._check_snapshot_detail_starts_with(sf_snapshot_details, cs_snapshot_id, "iqn", "/iqn.")
|
||||
self._check_snapshot_detail_with(str.startswith, sf_snapshot_details, cs_snapshot_id, "iqn", "/iqn.")
|
||||
self._check_snapshot_detail_size(sf_snapshot_details, cs_snapshot_id, "path", 36)
|
||||
|
||||
def _check_snapshot_detail(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, snapshot_detail_value):
|
||||
|
|
@ -1480,16 +1534,16 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and value '" + str(snapshot_detail_value) + "'.")
|
||||
|
||||
def _check_snapshot_detail_starts_with(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, starts_with):
|
||||
def _check_snapshot_detail_with(self, with_f, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, with_str):
|
||||
for sf_snapshot_detail_dict in sf_snapshot_details_list:
|
||||
if sf_snapshot_detail_dict["volumeSnapshotId"] != cs_snapshot_id:
|
||||
raise Exception("This snapshot detail does not apply to the expected CloudStack volume snapshot.")
|
||||
|
||||
if sf_snapshot_detail_dict["snapshotDetailsName"] == snapshot_detail_key:
|
||||
if sf_snapshot_detail_dict["snapshotDetailsValue"].startswith(starts_with):
|
||||
if with_f(str(sf_snapshot_detail_dict["snapshotDetailsValue"]), with_str):
|
||||
return
|
||||
|
||||
raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with' value '" + starts_with + "'.")
|
||||
raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'starts with/ends with' value '" + with_str + "'.")
|
||||
|
||||
def _check_snapshot_detail_size(self, sf_snapshot_details_list, cs_snapshot_id, snapshot_detail_key, length):
|
||||
for sf_snapshot_detail_dict in sf_snapshot_details_list:
|
||||
|
|
@ -1503,7 +1557,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
raise Exception("There is a problem with the snapshot details key '" + snapshot_detail_key + "' and 'length' value '" + str(length) + "'.")
|
||||
|
||||
def _most_recent_sf_snapshot(self, sf_snapshots):
|
||||
self._check_list_not_empty(sf_snapshots)
|
||||
sf_util.check_list_not_empty(self, sf_snapshots)
|
||||
|
||||
most_recent_id = 0
|
||||
sf_snapshot_to_return = None
|
||||
|
|
@ -1520,40 +1574,13 @@ class TestSnapshots(cloudstackTestCase):
|
|||
return sf_snapshot_to_return
|
||||
|
||||
def _get_cs_volume_snapshot_db_id(self, vol_snap):
|
||||
return self._get_db_id("snapshots", vol_snap)
|
||||
return sf_util.get_db_id(self.dbConnection, "snapshots", vol_snap)
|
||||
|
||||
def _get_cs_storage_pool_db_id(self, storage_pool):
|
||||
return self._get_db_id("storage_pool", storage_pool)
|
||||
|
||||
def _get_db_id(self, table, db_obj):
|
||||
sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'"
|
||||
|
||||
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
||||
sql_result = self.dbConnection.execute(sql_query)
|
||||
|
||||
return sql_result[0][0]
|
||||
|
||||
def _get_sf_volume_by_name(self, sf_volumes, sf_volume_name):
|
||||
self._check_list_not_empty(sf_volumes)
|
||||
|
||||
sf_volume = None
|
||||
|
||||
for volume in sf_volumes:
|
||||
if volume.name == sf_volume_name:
|
||||
sf_volume = volume
|
||||
|
||||
break
|
||||
|
||||
self.assertNotEqual(
|
||||
sf_volume,
|
||||
None,
|
||||
"The SolidFire volume could not be found in the expected account."
|
||||
)
|
||||
|
||||
return sf_volume
|
||||
return sf_util.get_db_id(self.dbConnection, "storage_pool", storage_pool)
|
||||
|
||||
def _get_sf_volume_by_id(self, sf_volumes, sf_volume_id):
|
||||
self._check_list_not_empty(sf_volumes)
|
||||
sf_util.check_list_not_empty(self, sf_volumes)
|
||||
|
||||
sf_volume = None
|
||||
|
||||
|
|
@ -1596,10 +1623,10 @@ class TestSnapshots(cloudstackTestCase):
|
|||
)
|
||||
|
||||
# used when SolidFire snapshots are being used for CloudStack volume snapshots
|
||||
def _create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_storage_db_id, expected_num_snapshots, snapshot_err_msg):
|
||||
def _create_and_test_snapshot(self, volume_for_snapshot, sf_volume, primary_storage_db_id, expected_num_snapshots, snapshot_err_msg):
|
||||
vol_snap = Snapshot.create(
|
||||
self.apiClient,
|
||||
volume_id=volume_id_for_snapshot
|
||||
volume_id=volume_for_snapshot.id
|
||||
)
|
||||
|
||||
self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
|
||||
|
|
@ -1619,7 +1646,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
|
||||
vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap)
|
||||
|
||||
self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size)
|
||||
self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, volume_for_snapshot, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size)
|
||||
|
||||
return vol_snap
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,516 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import random
|
||||
import SignedAPICall
|
||||
import urllib2
|
||||
|
||||
from solidfire.factory import ElementFactory
|
||||
|
||||
from util import sf_util
|
||||
|
||||
# All tests inherit from cloudstackTestCase
|
||||
from marvin.cloudstackTestCase import cloudstackTestCase
|
||||
|
||||
# Import Integration Libraries
|
||||
|
||||
# base - contains all resources as entities and defines create, delete, list operations on them
|
||||
from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume
|
||||
|
||||
# common - commonly used methods for all tests are listed here
|
||||
from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_volumes
|
||||
|
||||
# utils - utility classes for common cleanup, external library wrappers, etc.
|
||||
from marvin.lib.utils import cleanup_resources, wait_until
|
||||
|
||||
# Prerequisites:
|
||||
# Only one zone
|
||||
# Only one pod
|
||||
# Only one cluster
|
||||
|
||||
# Note:
|
||||
# If you do have more than one cluster, you might need to change this line: cls.cluster = list_clusters(cls.apiClient)[0]
|
||||
# Set extract.url.cleanup.interval to 240.
|
||||
# Set extract.url.expiration.interval to 120.
|
||||
|
||||
|
||||
class TestData:
|
||||
account = "account"
|
||||
capacityBytes = "capacitybytes"
|
||||
capacityIops = "capacityiops"
|
||||
clusterId = "clusterId"
|
||||
computeOffering = "computeoffering"
|
||||
diskOffering = "diskoffering"
|
||||
domainId = "domainId"
|
||||
hypervisor = "hypervisor"
|
||||
kvm = "kvm"
|
||||
login = "login"
|
||||
mvip = "mvip"
|
||||
password = "password"
|
||||
port = "port"
|
||||
primaryStorage = "primarystorage"
|
||||
provider = "provider"
|
||||
scope = "scope"
|
||||
solidFire = "solidfire"
|
||||
storageTag = "SolidFire_SAN_1"
|
||||
tags = "tags"
|
||||
url = "url"
|
||||
user = "user"
|
||||
username = "username"
|
||||
virtualMachine = "virtualmachine"
|
||||
volume_1 = "volume_1"
|
||||
xenServer = "xenserver"
|
||||
zoneId = "zoneId"
|
||||
|
||||
# modify to control which hypervisor type to test
|
||||
hypervisor_type = kvm
|
||||
volume_url = "http://10.117.40.114/tiny-centos-63.qcow2"
|
||||
file_type = "QCOW2"
|
||||
properties_file = "volume.properties"
|
||||
install_path_index = 14
|
||||
secondary_storage_server = "10.117.40.114"
|
||||
secondary_storage_server_root = "/export/secondary/"
|
||||
secondary_storage_server_username = "cloudstack"
|
||||
secondary_storage_server_password = "solidfire"
|
||||
# "HTTP_DOWNLOAD" and "FTP_UPLOAD" are valid for download_mode, but they lead to the same behavior
|
||||
download_mode = "HTTP_DOWNLOAD"
|
||||
|
||||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.account: {
|
||||
"email": "test@test.com",
|
||||
"firstname": "John",
|
||||
"lastname": "Doe",
|
||||
TestData.username: "test",
|
||||
TestData.password: "test"
|
||||
},
|
||||
TestData.user: {
|
||||
"email": "user@test.com",
|
||||
"firstname": "Jane",
|
||||
"lastname": "Doe",
|
||||
TestData.username: "testuser",
|
||||
TestData.password: "password"
|
||||
},
|
||||
TestData.primaryStorage: {
|
||||
"name": "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
"url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
TestData.provider: "SolidFire",
|
||||
TestData.tags: TestData.storageTag,
|
||||
TestData.capacityIops: 4500000,
|
||||
TestData.capacityBytes: 2251799813685248,
|
||||
TestData.hypervisor: "Any"
|
||||
},
|
||||
TestData.virtualMachine: {
|
||||
"name": "TestVM",
|
||||
"displayname": "Test VM"
|
||||
},
|
||||
TestData.computeOffering: {
|
||||
"name": "SF_CO_1",
|
||||
"displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100,
|
||||
"memory": 128,
|
||||
"storagetype": "shared",
|
||||
"customizediops": False,
|
||||
"miniops": "10000",
|
||||
"maxiops": "15000",
|
||||
"hypervisorsnapshotreserve": 200,
|
||||
TestData.tags: TestData.storageTag
|
||||
},
|
||||
TestData.diskOffering: {
|
||||
"name": "SF_DO_1",
|
||||
"displaytext": "SF_DO_1 Custom Size",
|
||||
"customizediops": False,
|
||||
"miniops": 5000,
|
||||
"maxiops": 10000,
|
||||
TestData.tags: TestData.storageTag,
|
||||
"storagetype": "shared"
|
||||
},
|
||||
TestData.volume_1: {
|
||||
"diskname": "testvolume",
|
||||
},
|
||||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
class TestUploadDownload(cloudstackTestCase):
|
||||
errorText = "should be either detached or the VM should be in stopped state"
|
||||
assertText = "The length of the response for the 'volume_store_ref' result should be equal to 1."
|
||||
assertText2 = "The length of the response for the 'volume_store_ref' result should be equal to 0."
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
# Set up API client
|
||||
testclient = super(TestUploadDownload, cls).getClsTestClient()
|
||||
|
||||
cls.apiClient = testclient.getApiClient()
|
||||
cls.configData = testclient.getParsedTestDataConfig()
|
||||
cls.dbConnection = testclient.getDbConnection()
|
||||
|
||||
cls.testdata = TestData().testdata
|
||||
|
||||
# Set up SolidFire connection
|
||||
solidfire = cls.testdata[TestData.solidFire]
|
||||
|
||||
cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
|
||||
|
||||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
cls.cluster = list_clusters(cls.apiClient)[1]
|
||||
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
cls.account = Account.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.account],
|
||||
admin=1
|
||||
)
|
||||
|
||||
# Set up connection to make customized API calls
|
||||
user = User.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.user],
|
||||
account=cls.account.name,
|
||||
domainid=cls.domain.id
|
||||
)
|
||||
|
||||
url = cls.testdata[TestData.url]
|
||||
|
||||
api_url = "http://" + url + ":8080/client/api"
|
||||
userkeys = User.registerUserKeys(cls.apiClient, user.id)
|
||||
|
||||
cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
|
||||
|
||||
primarystorage = cls.testdata[TestData.primaryStorage]
|
||||
|
||||
cls.primary_storage = StoragePool.create(
|
||||
cls.apiClient,
|
||||
primarystorage,
|
||||
scope=primarystorage[TestData.scope],
|
||||
zoneid=cls.zone.id,
|
||||
provider=primarystorage[TestData.provider],
|
||||
tags=primarystorage[TestData.tags],
|
||||
capacityiops=primarystorage[TestData.capacityIops],
|
||||
capacitybytes=primarystorage[TestData.capacityBytes],
|
||||
hypervisor=primarystorage[TestData.hypervisor]
|
||||
)
|
||||
|
||||
compute_offering = ServiceOffering.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.computeOffering]
|
||||
)
|
||||
|
||||
cls.disk_offering = DiskOffering.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.diskOffering],
|
||||
custom=True
|
||||
)
|
||||
|
||||
# Create VM and volume for tests
|
||||
cls.virtual_machine = VirtualMachine.create(
|
||||
cls.apiClient,
|
||||
cls.testdata[TestData.virtualMachine],
|
||||
accountid=cls.account.name,
|
||||
zoneid=cls.zone.id,
|
||||
serviceofferingid=compute_offering.id,
|
||||
templateid=cls.template.id,
|
||||
domainid=cls.domain.id,
|
||||
startvm=True
|
||||
)
|
||||
|
||||
cls._cleanup = [
|
||||
compute_offering,
|
||||
cls.disk_offering,
|
||||
user,
|
||||
cls.account
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
cls.virtual_machine.delete(cls.apiClient, True)
|
||||
|
||||
cleanup_resources(cls.apiClient, cls._cleanup)
|
||||
|
||||
cls.primary_storage.delete(cls.apiClient)
|
||||
|
||||
sf_util.purge_solidfire_volumes(cls.sfe)
|
||||
except Exception as e:
|
||||
logging.debug("Exception in tearDownClass(cls): %s" % e)
|
||||
|
||||
def setUp(self):
|
||||
self.cleanup = []
|
||||
|
||||
def tearDown(self):
|
||||
try:
|
||||
cleanup_resources(self.apiClient, self.cleanup)
|
||||
except Exception as e:
|
||||
logging.debug("Exception in tearDown(self): %s" % e)
|
||||
|
||||
def test_01_upload_and_download_snapshot(self):
|
||||
list_volumes_response = list_volumes(
|
||||
self.apiClient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
|
||||
sf_util.check_list(list_volumes_response, 1, self, "There should only be one volume in this list.")
|
||||
|
||||
vm_root_volume = list_volumes_response[0]
|
||||
|
||||
### Perform tests related to uploading a QCOW2 file to secondary storage and then moving it to managed storage
|
||||
|
||||
volume_name = "Volume-A"
|
||||
services = {"format": TestData.file_type, "diskname": volume_name}
|
||||
|
||||
uploaded_volume = Volume.upload(self.apiClient, services, self.zone.id,
|
||||
account=self.account.name, domainid=self.account.domainid,
|
||||
url=TestData.volume_url, diskofferingid=self.disk_offering.id)
|
||||
|
||||
self._wait_for_volume_state(uploaded_volume.id, "Uploaded")
|
||||
|
||||
uploaded_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, uploaded_volume)
|
||||
|
||||
result = self._get_volume_store_ref_row(uploaded_volume_id)
|
||||
|
||||
self.assertEqual(
|
||||
len(result),
|
||||
1,
|
||||
TestUploadDownload.assertText
|
||||
)
|
||||
|
||||
install_path = self._get_install_path(result[0][TestData.install_path_index])
|
||||
|
||||
self._verify_uploaded_volume_present(install_path)
|
||||
|
||||
uploaded_volume = self.virtual_machine.attach_volume(
|
||||
self.apiClient,
|
||||
uploaded_volume
|
||||
)
|
||||
|
||||
uploaded_volume = sf_util.check_and_get_cs_volume(self, uploaded_volume.id, volume_name, self)
|
||||
|
||||
sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, "The SolidFire account ID should be a non-zero integer.")
|
||||
|
||||
sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(sf_volumes),
|
||||
0,
|
||||
"The length of the response for the SolidFire-volume query should not be zero."
|
||||
)
|
||||
|
||||
sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, uploaded_volume.name, self)
|
||||
|
||||
sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, uploaded_volume, self)
|
||||
|
||||
sf_util.check_size_and_iops(sf_volume, uploaded_volume, sf_volume_size, self)
|
||||
|
||||
sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
|
||||
|
||||
sf_util.check_vag(sf_volume, sf_vag_id, self)
|
||||
|
||||
result = self._get_volume_store_ref_row(uploaded_volume_id)
|
||||
|
||||
self.assertEqual(
|
||||
len(result),
|
||||
0,
|
||||
TestUploadDownload.assertText2
|
||||
)
|
||||
|
||||
self._verify_uploaded_volume_not_present(install_path)
|
||||
|
||||
### Perform tests related to extracting the contents of a volume on managed storage to a QCOW2 file
|
||||
### and downloading the file
|
||||
|
||||
try:
|
||||
# for data disk
|
||||
Volume.extract(self.apiClient, uploaded_volume.id, self.zone.id, TestData.download_mode)
|
||||
|
||||
raise Exception("The volume extraction (for the data disk) did not fail (as expected).")
|
||||
except Exception as e:
|
||||
if TestUploadDownload.errorText in str(e):
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
|
||||
vm_root_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection, vm_root_volume)
|
||||
|
||||
try:
|
||||
# for root disk
|
||||
Volume.extract(self.apiClient, vm_root_volume.id, self.zone.id, TestData.download_mode)
|
||||
|
||||
raise Exception("The volume extraction (for the root disk) did not fail (as expected).")
|
||||
except Exception as e:
|
||||
if TestUploadDownload.errorText in str(e):
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
|
||||
self.virtual_machine.stop(self.apiClient)
|
||||
|
||||
self._extract_volume_and_verify(uploaded_volume_id, "Unable to locate the extracted file for the data disk (attached)")
|
||||
|
||||
result = self._get_volume_store_ref_row(vm_root_volume_id)
|
||||
|
||||
self.assertEqual(
|
||||
len(result),
|
||||
0,
|
||||
TestUploadDownload.assertText2
|
||||
)
|
||||
|
||||
self._extract_volume_and_verify(vm_root_volume_id, "Unable to locate the extracted file for the root disk")
|
||||
|
||||
uploaded_volume = self.virtual_machine.detach_volume(
|
||||
self.apiClient,
|
||||
uploaded_volume
|
||||
)
|
||||
|
||||
self._extract_volume_and_verify(uploaded_volume_id, "Unable to locate the extracted file for the data disk (detached)")
|
||||
|
||||
uploaded_volume = Volume(uploaded_volume.__dict__)
|
||||
|
||||
uploaded_volume.delete(self.apiClient)
|
||||
|
||||
# self.virtual_machine.start(self.apiClient)
|
||||
|
||||
def _verify_uploaded_volume_present(self, install_path, verify_properties_file=True):
|
||||
result, result2 = self._get_results(install_path)
|
||||
|
||||
self.assertFalse(result is None or len(result.strip()) == 0, "Unable to find the QCOW2 file")
|
||||
|
||||
if verify_properties_file:
|
||||
self.assertFalse(result2 is None or len(result2.strip()) == 0, "Unable to find the " + TestData.properties_file + " file")
|
||||
|
||||
def _verify_uploaded_volume_not_present(self, install_path):
|
||||
result, result2 = self._get_results(install_path)
|
||||
|
||||
self.assertTrue(result is None or len(result.strip()) == 0, "QCOW2 file present, but should not be")
|
||||
self.assertTrue(result2 is None or len(result2.strip()) == 0, TestData.properties_file + " file present, but should not be")
|
||||
|
||||
def _get_results(self, install_path):
|
||||
ssh_connection = sf_util.get_ssh_connection(TestData.secondary_storage_server,
|
||||
TestData.secondary_storage_server_username,
|
||||
TestData.secondary_storage_server_password)
|
||||
|
||||
stdout = ssh_connection.exec_command("ls -l " + TestData.secondary_storage_server_root +
|
||||
install_path + " | grep qcow2")[1]
|
||||
|
||||
result = stdout.read()
|
||||
|
||||
stdout = ssh_connection.exec_command("ls -l " + TestData.secondary_storage_server_root +
|
||||
install_path + " | grep " + TestData.properties_file)[1]
|
||||
|
||||
result2 = stdout.read()
|
||||
|
||||
ssh_connection.close()
|
||||
|
||||
return result, result2
|
||||
|
||||
def _get_install_path(self, install_path):
|
||||
index = install_path.rfind('/')
|
||||
|
||||
return install_path[:index]
|
||||
|
||||
def _get_volume_store_ref_row(self, volume_id):
|
||||
sql_query = "Select * From volume_store_ref Where volume_id = '" + str(volume_id) + "'"
|
||||
|
||||
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
||||
sql_result = self.dbConnection.execute(sql_query)
|
||||
|
||||
return sql_result
|
||||
|
||||
def _extract_volume_and_verify(self, volume_id, error_msg):
|
||||
extract_result = Volume.extract(self.apiClient, volume_id, self.zone.id, TestData.download_mode)
|
||||
|
||||
result = self._get_volume_store_ref_row(volume_id)
|
||||
|
||||
self.assertEqual(
|
||||
len(result),
|
||||
1,
|
||||
TestUploadDownload.assertText
|
||||
)
|
||||
|
||||
install_path = self._get_install_path(result[0][TestData.install_path_index])
|
||||
|
||||
self._verify_uploaded_volume_present(install_path, False)
|
||||
|
||||
url_response = urllib2.urlopen(extract_result.url)
|
||||
|
||||
if url_response.code != 200:
|
||||
raise Exception(error_msg)
|
||||
|
||||
self._wait_for_removal_of_extracted_volume(volume_id, extract_result.url)
|
||||
|
||||
def _wait_for_removal_of_extracted_volume(self, volume_id, extract_result_url):
|
||||
retry_interval = 60
|
||||
num_tries = 10
|
||||
|
||||
wait_result, return_val = wait_until(retry_interval, num_tries, self._check_removal_of_extracted_volume_state, volume_id, extract_result_url)
|
||||
|
||||
if not wait_result:
|
||||
raise Exception(return_val)
|
||||
|
||||
def _check_removal_of_extracted_volume_state(self, volume_id, extract_result_url):
|
||||
result = self._get_volume_store_ref_row(volume_id)
|
||||
|
||||
if len(result) == 0:
|
||||
try:
|
||||
urllib2.urlopen(extract_result_url)
|
||||
except Exception as e:
|
||||
if "404" in str(e):
|
||||
return True, ""
|
||||
|
||||
return False, "The extracted volume has not been removed."
|
||||
|
||||
def _wait_for_volume_state(self, volume_id, volume_state):
|
||||
retry_interval = 30
|
||||
num_tries = 10
|
||||
|
||||
wait_result, return_val = wait_until(retry_interval, num_tries, TestUploadDownload._check_volume_state, self.apiClient, volume_id, volume_state)
|
||||
|
||||
if not wait_result:
|
||||
raise Exception(return_val)
|
||||
|
||||
@staticmethod
|
||||
def _check_volume_state(api_client, volume_id, volume_state):
|
||||
volume = list_volumes(
|
||||
api_client,
|
||||
id=volume_id,
|
||||
listall=True
|
||||
)[0]
|
||||
|
||||
if str(volume.state).lower() == volume_state.lower():
|
||||
return True, ""
|
||||
|
||||
return False, "The volume is not in the '" + volume_state + "' state. State = " + str(volume.state)
|
||||
|
|
@ -84,11 +84,11 @@ class TestData():
|
|||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "192.168.139.112",
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://192.168.139.112:443"
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.xenServer: {
|
||||
TestData.username: "root",
|
||||
|
|
@ -118,7 +118,7 @@ class TestData():
|
|||
TestData.primaryStorage: {
|
||||
TestData.name: "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
|
|
@ -132,7 +132,7 @@ class TestData():
|
|||
TestData.primaryStorage2: {
|
||||
TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
|
||||
TestData.scope: "CLUSTER",
|
||||
TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
TestData.url: "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"minIops=5000;maxIops=50000;burstIops=75000",
|
||||
TestData.provider: "SolidFireShared",
|
||||
|
|
@ -211,7 +211,7 @@ class TestData():
|
|||
TestData.clusterId1: 1,
|
||||
TestData.clusterId2: 2,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "192.168.129.50"
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
import logging
|
||||
import random
|
||||
import SignedAPICall
|
||||
import time
|
||||
import XenAPI
|
||||
|
||||
from solidfire.factory import ElementFactory
|
||||
|
|
@ -71,14 +72,17 @@ class TestData:
|
|||
xenServer = "xenserver"
|
||||
zoneId = "zoneId"
|
||||
|
||||
# modify to control which hypervisor type to test
|
||||
hypervisor_type = xenServer
|
||||
|
||||
def __init__(self):
|
||||
self.testdata = {
|
||||
TestData.solidFire: {
|
||||
TestData.mvip: "192.168.139.112",
|
||||
TestData.mvip: "10.117.40.120",
|
||||
TestData.username: "admin",
|
||||
TestData.password: "admin",
|
||||
TestData.port: 443,
|
||||
TestData.url: "https://192.168.139.112:443"
|
||||
TestData.url: "https://10.117.40.120:443"
|
||||
},
|
||||
TestData.xenServer: {
|
||||
TestData.username: "root",
|
||||
|
|
@ -101,7 +105,7 @@ class TestData:
|
|||
TestData.primaryStorage: {
|
||||
"name": "SolidFire-%d" % random.randint(0, 100),
|
||||
TestData.scope: "ZONE",
|
||||
"url": "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
|
||||
"url": "MVIP=10.117.40.120;SVIP=10.117.41.120;" +
|
||||
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
|
||||
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
|
||||
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
|
||||
|
|
@ -145,7 +149,7 @@ class TestData:
|
|||
TestData.zoneId: 1,
|
||||
TestData.clusterId: 1,
|
||||
TestData.domainId: 1,
|
||||
TestData.url: "192.168.129.50"
|
||||
TestData.url: "10.117.40.114"
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -190,7 +194,7 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||
|
||||
# Get Resources from Cloud Infrastructure
|
||||
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
|
||||
template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
|
||||
template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
|
||||
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
|
||||
|
||||
# Create test account
|
||||
|
|
@ -252,7 +256,6 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||
)
|
||||
|
||||
cls._cleanup = [
|
||||
cls.virtual_machine,
|
||||
compute_offering,
|
||||
cls.disk_offering,
|
||||
user,
|
||||
|
|
@ -262,6 +265,10 @@ class TestVMSnapshots(cloudstackTestCase):
|
|||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
time.sleep(60)
|
||||
|
||||
cls.virtual_machine.delete(cls.apiClient, True)
|
||||
|
||||
cleanup_resources(cls.apiClient, cls._cleanup)
|
||||
|
||||
cls.primary_storage.delete(cls.apiClient)
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -17,6 +17,8 @@
|
|||
|
||||
import paramiko
|
||||
|
||||
from marvin.lib.common import list_volumes
|
||||
|
||||
def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
|
||||
obj_assert.assertEqual(
|
||||
isinstance(in_list, list),
|
||||
|
|
@ -247,3 +249,83 @@ def get_ssh_connection(ip_address, username, password):
|
|||
|
||||
return ssh_client
|
||||
|
||||
def get_sf_volume_by_name(obj_assert, sf_volumes, sf_volume_name):
|
||||
check_list_not_empty(obj_assert, sf_volumes)
|
||||
|
||||
sf_volume = None
|
||||
|
||||
for volume in sf_volumes:
|
||||
if volume.name == sf_volume_name:
|
||||
sf_volume = volume
|
||||
|
||||
break
|
||||
|
||||
obj_assert.assertNotEqual(
|
||||
sf_volume,
|
||||
None,
|
||||
"The SolidFire volume could not be found in the expected list."
|
||||
)
|
||||
|
||||
return sf_volume
|
||||
|
||||
def check_list_not_empty(obj_assert, in_list):
|
||||
obj_assert.assertEqual(
|
||||
isinstance(in_list, list),
|
||||
True,
|
||||
"'in_list' is not a list."
|
||||
)
|
||||
|
||||
obj_assert.assertGreater(
|
||||
len(in_list),
|
||||
0,
|
||||
"The size of 'in_list' must be greater than zero."
|
||||
)
|
||||
|
||||
def check_and_get_cs_volume(obj_test, volume_id, volume_name, obj_assert):
|
||||
list_volumes_response = list_volumes(
|
||||
obj_test.apiClient,
|
||||
id=volume_id
|
||||
)
|
||||
|
||||
check_list(list_volumes_response, 1, obj_assert, "There should only be one volume in this list.")
|
||||
|
||||
cs_volume = list_volumes_response[0]
|
||||
|
||||
check_volume(obj_test, cs_volume, volume_name, obj_assert)
|
||||
|
||||
return cs_volume
|
||||
|
||||
def check_volume(obj_test, cs_volume, volume_name, obj_assert):
|
||||
obj_assert.assertTrue(
|
||||
cs_volume.name.startswith(volume_name),
|
||||
"The volume name is incorrect."
|
||||
)
|
||||
|
||||
obj_assert.assertEqual(
|
||||
cs_volume.zoneid,
|
||||
obj_test.zone.id,
|
||||
"The zone is incorrect."
|
||||
)
|
||||
|
||||
obj_assert.assertEqual(
|
||||
cs_volume.diskofferingid,
|
||||
obj_test.disk_offering.id,
|
||||
"The disk offering is incorrect."
|
||||
)
|
||||
|
||||
obj_assert.assertEqual(
|
||||
cs_volume.storagetype,
|
||||
obj_test.disk_offering.storagetype,
|
||||
"The storage type is incorrect."
|
||||
)
|
||||
|
||||
def get_cs_volume_db_id(dbConnection, vol):
|
||||
return get_db_id(dbConnection, "volumes", vol)
|
||||
|
||||
def get_db_id(dbConnection, table, db_obj):
|
||||
sql_query = "Select id From " + table + " Where uuid = '" + str(db_obj.id) + "'"
|
||||
|
||||
# make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
|
||||
sql_result = dbConnection.execute(sql_query)
|
||||
|
||||
return sql_result[0][0]
|
||||
|
|
|
|||
|
|
@ -189,7 +189,8 @@ known_categories = {
|
|||
'removeAnnotation' : 'Annotations',
|
||||
'CA': 'Certificate',
|
||||
'listElastistorInterface': 'Misc',
|
||||
'cloudian': 'Cloudian'
|
||||
'cloudian': 'Cloudian',
|
||||
'Sioc' : 'Sioc'
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -981,7 +981,15 @@ class Volume:
|
|||
elif "customdiskofferingid" in services:
|
||||
cmd.diskofferingid = services["customdiskofferingid"]
|
||||
|
||||
cmd.size = services["customdisksize"]
|
||||
if "customdisksize" in services:
|
||||
cmd.size = services["customdisksize"]
|
||||
|
||||
if "customminiops" in services:
|
||||
cmd.miniops = services["customminiops"]
|
||||
|
||||
if "custommaxiops" in services:
|
||||
cmd.maxiops = services["custommaxiops"]
|
||||
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
if account:
|
||||
|
|
@ -1020,6 +1028,12 @@ class Volume:
|
|||
cmd.domainid = services["domainid"]
|
||||
return Volume(apiclient.createVolume(cmd).__dict__)
|
||||
|
||||
@classmethod
|
||||
def revertToSnapshot(cls, apiclient, volumeSnapshotId):
|
||||
cmd = revertSnapshot.revertSnapshotCmd()
|
||||
cmd.id = volumeSnapshotId
|
||||
return apiclient.revertSnapshot(cmd)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Volume"""
|
||||
cmd = deleteVolume.deleteVolumeCmd()
|
||||
|
|
@ -1330,12 +1344,34 @@ class Template:
|
|||
|
||||
return apiclient.extractTemplate(cmd)
|
||||
|
||||
@classmethod
|
||||
def create_from_volume(cls, apiclient, volume, services,
|
||||
random_name=True):
|
||||
"""Create Template from volume"""
|
||||
# Create template from Volume ID
|
||||
cmd = createTemplate.createTemplateCmd()
|
||||
|
||||
Template._set_command(apiclient, cmd, services, random_name)
|
||||
|
||||
cmd.volumeid = volume.id
|
||||
|
||||
return Template(apiclient.createTemplate(cmd).__dict__)
|
||||
|
||||
@classmethod
|
||||
def create_from_snapshot(cls, apiclient, snapshot, services,
|
||||
random_name=True):
|
||||
"""Create Template from snapshot"""
|
||||
# Create template from Virtual machine and Snapshot ID
|
||||
# Create template from Snapshot ID
|
||||
cmd = createTemplate.createTemplateCmd()
|
||||
|
||||
Template._set_command(apiclient, cmd, services, random_name)
|
||||
|
||||
cmd.snapshotid = snapshot.id
|
||||
|
||||
return Template(apiclient.createTemplate(cmd).__dict__)
|
||||
|
||||
@classmethod
|
||||
def _set_command(cls, apiclient, cmd, services, random_name=True):
|
||||
cmd.displaytext = services["displaytext"]
|
||||
cmd.name = "-".join([
|
||||
services["name"],
|
||||
|
|
@ -1362,9 +1398,6 @@ class Template:
|
|||
raise Exception(
|
||||
"Unable to find Ostype is required for creating template")
|
||||
|
||||
cmd.snapshotid = snapshot.id
|
||||
return Template(apiclient.createTemplate(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient, zoneid=None):
|
||||
"""Delete Template"""
|
||||
|
||||
|
|
@ -2188,12 +2221,15 @@ class DiskOffering:
|
|||
|
||||
if "customizediops" in services:
|
||||
cmd.customizediops = services["customizediops"]
|
||||
else:
|
||||
cmd.customizediops = False
|
||||
|
||||
if "maxiops" in services:
|
||||
cmd.maxiops = services["maxiops"]
|
||||
if not cmd.customizediops:
|
||||
if "miniops" in services:
|
||||
cmd.miniops = services["miniops"]
|
||||
|
||||
if "miniops" in services:
|
||||
cmd.miniops = services["miniops"]
|
||||
if "maxiops" in services:
|
||||
cmd.maxiops = services["maxiops"]
|
||||
|
||||
if "hypervisorsnapshotreserve" in services:
|
||||
cmd.hypervisorsnapshotreserve = services["hypervisorsnapshotreserve"]
|
||||
|
|
|
|||
|
|
@ -18016,6 +18016,10 @@
|
|||
id: "gluster",
|
||||
description: "Gluster"
|
||||
});
|
||||
items.push({
|
||||
id: "custom",
|
||||
description: "custom"
|
||||
});
|
||||
args.response.success({
|
||||
data: items
|
||||
});
|
||||
|
|
|
|||
|
|
@ -57,7 +57,7 @@ public class DatastoreMO extends BaseMO {
|
|||
@Override
|
||||
public String getName() throws Exception {
|
||||
if (_name == null)
|
||||
_name = (String)_context.getVimClient().getDynamicProperty(_mor, "name");
|
||||
_name = _context.getVimClient().getDynamicProperty(_mor, "name");
|
||||
|
||||
return _name;
|
||||
}
|
||||
|
|
@ -109,7 +109,7 @@ public class DatastoreMO extends BaseMO {
|
|||
PropertyFilterSpec pfSpec = new PropertyFilterSpec();
|
||||
pfSpec.getPropSet().add(pSpec);
|
||||
pfSpec.getObjectSet().add(oSpec);
|
||||
List<PropertyFilterSpec> pfSpecArr = new ArrayList<PropertyFilterSpec>();
|
||||
List<PropertyFilterSpec> pfSpecArr = new ArrayList<>();
|
||||
pfSpecArr.add(pfSpec);
|
||||
|
||||
List<ObjectContent> ocs = _context.getService().retrieveProperties(_context.getPropertyCollector(), pfSpecArr);
|
||||
|
|
@ -118,10 +118,14 @@ public class DatastoreMO extends BaseMO {
|
|||
assert (ocs.get(0).getObj() != null);
|
||||
assert (ocs.get(0).getPropSet() != null);
|
||||
String dcName = ocs.get(0).getPropSet().get(0).getVal().toString();
|
||||
_ownerDc = new Pair<DatacenterMO, String>(new DatacenterMO(_context, ocs.get(0).getObj()), dcName);
|
||||
_ownerDc = new Pair<>(new DatacenterMO(_context, ocs.get(0).getObj()), dcName);
|
||||
return _ownerDc;
|
||||
}
|
||||
|
||||
public void renameDatastore(String newDatastoreName) throws Exception {
|
||||
_context.getService().renameDatastore(_mor, newDatastoreName);
|
||||
}
|
||||
|
||||
public void makeDirectory(String path, ManagedObjectReference morDc) throws Exception {
|
||||
String datastoreName = getName();
|
||||
ManagedObjectReference morFileManager = _context.getServiceContent().getFileManager();
|
||||
|
|
@ -133,7 +137,7 @@ public class DatastoreMO extends BaseMO {
|
|||
_context.getService().makeDirectory(morFileManager, fullPath, morDc, true);
|
||||
}
|
||||
|
||||
public String getDatastoreRootPath() throws Exception {
|
||||
String getDatastoreRootPath() throws Exception {
|
||||
return String.format("[%s]", getName());
|
||||
}
|
||||
|
||||
|
|
@ -210,7 +214,7 @@ public class DatastoreMO extends BaseMO {
|
|||
return false;
|
||||
}
|
||||
|
||||
public boolean copyDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, ManagedObjectReference morDestDs, String destFilePath,
|
||||
boolean copyDatastoreFile(String srcFilePath, ManagedObjectReference morSrcDc, ManagedObjectReference morDestDs, String destFilePath,
|
||||
ManagedObjectReference morDestDc, boolean forceOverwrite) throws Exception {
|
||||
|
||||
String srcDsName = getName();
|
||||
|
|
@ -269,7 +273,7 @@ public class DatastoreMO extends BaseMO {
|
|||
public String[] getVmdkFileChain(String rootVmdkDatastoreFullPath) throws Exception {
|
||||
Pair<DatacenterMO, String> dcPair = getOwnerDatacenter();
|
||||
|
||||
List<String> files = new ArrayList<String>();
|
||||
List<String> files = new ArrayList<>();
|
||||
files.add(rootVmdkDatastoreFullPath);
|
||||
|
||||
String currentVmdkFullPath = rootVmdkDatastoreFullPath;
|
||||
|
|
@ -399,7 +403,7 @@ public class DatastoreMO extends BaseMO {
|
|||
return rootDirectoryFilePath;
|
||||
}
|
||||
|
||||
String parentFolderPath = null;
|
||||
String parentFolderPath;
|
||||
String absoluteFileName = null;
|
||||
s_logger.info("Searching file " + fileName + " in " + datastorePath);
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,10 @@ import com.vmware.vim25.CustomFieldStringValue;
|
|||
import com.vmware.vim25.DatastoreInfo;
|
||||
import com.vmware.vim25.DynamicProperty;
|
||||
import com.vmware.vim25.HostNasVolumeSpec;
|
||||
import com.vmware.vim25.HostResignatureRescanResult;
|
||||
import com.vmware.vim25.HostScsiDisk;
|
||||
import com.vmware.vim25.HostUnresolvedVmfsResignatureSpec;
|
||||
import com.vmware.vim25.HostUnresolvedVmfsVolume;
|
||||
import com.vmware.vim25.ManagedObjectReference;
|
||||
import com.vmware.vim25.NasDatastoreInfo;
|
||||
import com.vmware.vim25.ObjectContent;
|
||||
|
|
@ -34,6 +37,7 @@ import com.vmware.vim25.PropertyFilterSpec;
|
|||
import com.vmware.vim25.PropertySpec;
|
||||
import com.vmware.vim25.TraversalSpec;
|
||||
import com.vmware.vim25.VmfsDatastoreCreateSpec;
|
||||
import com.vmware.vim25.VmfsDatastoreExpandSpec;
|
||||
import com.vmware.vim25.VmfsDatastoreOption;
|
||||
|
||||
import com.cloud.hypervisor.vmware.util.VmwareContext;
|
||||
|
|
@ -75,6 +79,18 @@ public class HostDatastoreSystemMO extends BaseMO {
|
|||
return null;
|
||||
}
|
||||
|
||||
public List<HostUnresolvedVmfsVolume> queryUnresolvedVmfsVolumes() throws Exception {
|
||||
return _context.getService().queryUnresolvedVmfsVolumes(_mor);
|
||||
}
|
||||
|
||||
public List<VmfsDatastoreOption> queryVmfsDatastoreExpandOptions(DatastoreMO datastoreMO) throws Exception {
|
||||
return _context.getService().queryVmfsDatastoreExpandOptions(_mor, datastoreMO.getMor());
|
||||
}
|
||||
|
||||
public void expandVmfsDatastore(DatastoreMO datastoreMO, VmfsDatastoreExpandSpec vmfsDatastoreExpandSpec) throws Exception {
|
||||
_context.getService().expandVmfsDatastore(_mor, datastoreMO.getMor(), vmfsDatastoreExpandSpec);
|
||||
}
|
||||
|
||||
// storeUrl in nfs://host/exportpath format
|
||||
public ManagedObjectReference findDatastoreByUrl(String storeUrl) throws Exception {
|
||||
assert (storeUrl != null);
|
||||
|
|
@ -195,6 +211,22 @@ public class HostDatastoreSystemMO extends BaseMO {
|
|||
return null;
|
||||
}
|
||||
|
||||
public HostResignatureRescanResult resignatureUnresolvedVmfsVolume(HostUnresolvedVmfsResignatureSpec resolutionSpec) throws Exception {
|
||||
ManagedObjectReference task = _context.getService().resignatureUnresolvedVmfsVolumeTask(_mor, resolutionSpec);
|
||||
|
||||
boolean result = _context.getVimClient().waitForTask(task);
|
||||
|
||||
if (result) {
|
||||
_context.waitForTaskProgressDone(task);
|
||||
|
||||
TaskMO taskMO = new TaskMO(_context, task);
|
||||
|
||||
return (HostResignatureRescanResult)taskMO.getTaskInfo().getResult();
|
||||
} else {
|
||||
throw new Exception("Unable to register vm due to " + TaskMO.getTaskFailureInfo(_context, task));
|
||||
}
|
||||
}
|
||||
|
||||
public List<ObjectContent> getDatastorePropertiesOnHostDatastoreSystem(String[] propertyPaths) throws Exception {
|
||||
|
||||
PropertySpec pSpec = new PropertySpec();
|
||||
|
|
|
|||
|
|
@ -52,4 +52,12 @@ public class HostStorageSystemMO extends BaseMO {
|
|||
public void rescanVmfs() throws Exception {
|
||||
_context.getService().rescanVmfs(_mor);
|
||||
}
|
||||
|
||||
public void mountVmfsVolume(String datastoreUuid) throws Exception {
|
||||
_context.getService().mountVmfsVolume(_mor, datastoreUuid);
|
||||
}
|
||||
|
||||
public void unmountVmfsVolume(String datastoreUuid) throws Exception {
|
||||
_context.getService().unmountVmfsVolume(_mor, datastoreUuid);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -89,8 +89,9 @@ public class VirtualMachineDiskInfoBuilder {
|
|||
private static boolean chainContains(List<String> chain, String diskBackingFileBaseName, String dataStoreName) {
|
||||
for (String backing : chain) {
|
||||
DatastoreFile file = new DatastoreFile(backing);
|
||||
|
||||
// Ensure matching disk exists in the right datastore
|
||||
if (file.getFileBaseName().equals(diskBackingFileBaseName) && backing.contains(dataStoreName))
|
||||
if (file.getFileBaseName().equals(diskBackingFileBaseName) && file.getDatastoreName().equals(dataStoreName))
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -468,19 +468,26 @@ public class VirtualMachineMO extends BaseMO {
|
|||
}
|
||||
|
||||
public boolean createSnapshot(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception {
|
||||
return createSnapshotGetReference(snapshotName, snapshotDescription, dumpMemory, quiesce) != null;
|
||||
}
|
||||
|
||||
public ManagedObjectReference createSnapshotGetReference(String snapshotName, String snapshotDescription, boolean dumpMemory, boolean quiesce) throws Exception {
|
||||
long apiTimeout = _context.getVimClient().getVcenterSessionTimeout();
|
||||
ManagedObjectReference morTask = _context.getService().createSnapshotTask(_mor, snapshotName, snapshotDescription, dumpMemory, quiesce);
|
||||
|
||||
boolean result = _context.getVimClient().waitForTask(morTask);
|
||||
|
||||
if (result) {
|
||||
_context.waitForTaskProgressDone(morTask);
|
||||
|
||||
ManagedObjectReference morSnapshot = null;
|
||||
|
||||
// We still need to wait until the object appear in vCenter
|
||||
long startTick = System.currentTimeMillis();
|
||||
|
||||
while (System.currentTimeMillis() - startTick < apiTimeout) {
|
||||
morSnapshot = getSnapshotMor(snapshotName);
|
||||
|
||||
if (morSnapshot != null) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -493,16 +500,20 @@ public class VirtualMachineMO extends BaseMO {
|
|||
}
|
||||
|
||||
if (morSnapshot == null) {
|
||||
s_logger.error("We've been waiting for over " + apiTimeout + " milli seconds for snapshot MOR to be appearing in vCenter after CreateSnapshot task is done, but it is still not there?!");
|
||||
return false;
|
||||
s_logger.error("We've been waiting for over " + apiTimeout + " milli seconds for snapshot MOR to be appearing in vCenter after CreateSnapshot task is done, " +
|
||||
"but it is still not there?!");
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
s_logger.debug("Waited for " + (System.currentTimeMillis() - startTick) + " seconds for snapshot object [" + snapshotName + "] to appear in vCenter.");
|
||||
return true;
|
||||
|
||||
return morSnapshot;
|
||||
} else {
|
||||
s_logger.error("VMware createSnapshot_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask));
|
||||
}
|
||||
|
||||
return false;
|
||||
return null;
|
||||
}
|
||||
|
||||
public boolean removeSnapshot(String snapshotName, boolean removeChildren) throws Exception {
|
||||
|
|
@ -542,6 +553,21 @@ public class VirtualMachineMO extends BaseMO {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes all of the snapshots of a VM.
|
||||
*/
|
||||
public void consolidateAllSnapshots() throws Exception {
|
||||
ManagedObjectReference task = _context.getService().removeAllSnapshotsTask(_mor, true);
|
||||
|
||||
boolean result = _context.getVimClient().waitForTask(task);
|
||||
|
||||
if (result) {
|
||||
_context.waitForTaskProgressDone(task);
|
||||
} else {
|
||||
throw new Exception("Unable to register VM due to the following issue: " + TaskMO.getTaskFailureInfo(_context, task));
|
||||
}
|
||||
}
|
||||
|
||||
public boolean removeAllSnapshots() throws Exception {
|
||||
VirtualMachineSnapshotInfo snapshotInfo = getSnapshotInfo();
|
||||
|
||||
|
|
@ -2339,12 +2365,16 @@ public class VirtualMachineMO extends BaseMO {
|
|||
|
||||
// return pair of VirtualDisk and disk device bus name(ide0:0, etc)
|
||||
public Pair<VirtualDisk, String> getDiskDevice(String vmdkDatastorePath) throws Exception {
|
||||
final String zeroLengthString = "";
|
||||
|
||||
List<VirtualDevice> devices = _context.getVimClient().getDynamicProperty(_mor, "config.hardware.device");
|
||||
ArrayList<Pair<VirtualDisk, String>> partialMatchingDiskDevices = new ArrayList<Pair<VirtualDisk, String>>();
|
||||
ArrayList<Pair<VirtualDisk, String>> partialMatchingDiskDevices = new ArrayList<>();
|
||||
|
||||
DatastoreFile dsSrcFile = new DatastoreFile(vmdkDatastorePath);
|
||||
|
||||
String srcBaseName = dsSrcFile.getFileBaseName();
|
||||
String trimmedSrcBaseName = VmwareHelper.trimSnapshotDeltaPostfix(srcBaseName);
|
||||
String srcDatastoreName = dsSrcFile.getDatastoreName() != null ? dsSrcFile.getDatastoreName() : zeroLengthString;
|
||||
|
||||
s_logger.info("Look for disk device info for volume : " + vmdkDatastorePath + " with base name: " + srcBaseName);
|
||||
|
||||
|
|
@ -2353,22 +2383,38 @@ public class VirtualMachineMO extends BaseMO {
|
|||
if (device instanceof VirtualDisk) {
|
||||
s_logger.info("Test against disk device, controller key: " + device.getControllerKey() + ", unit number: " + device.getUnitNumber());
|
||||
|
||||
VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking();
|
||||
VirtualDeviceBackingInfo backingInfo = device.getBacking();
|
||||
|
||||
if (backingInfo instanceof VirtualDiskFlatVer2BackingInfo) {
|
||||
VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo;
|
||||
|
||||
do {
|
||||
s_logger.info("Test against disk backing : " + diskBackingInfo.getFileName());
|
||||
|
||||
DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName());
|
||||
String backingBaseName = dsBackingFile.getFileBaseName();
|
||||
if (backingBaseName.equalsIgnoreCase(srcBaseName)) {
|
||||
String deviceNumbering = getDeviceBusName(devices, device);
|
||||
s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
|
||||
return new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering);
|
||||
|
||||
String backingDatastoreName = dsBackingFile.getDatastoreName() != null ? dsBackingFile.getDatastoreName() : zeroLengthString;
|
||||
|
||||
if (srcDatastoreName.equals(zeroLengthString)) {
|
||||
backingDatastoreName = zeroLengthString;
|
||||
}
|
||||
if (backingBaseName.contains(trimmedSrcBaseName)) {
|
||||
String deviceNumbering = getDeviceBusName(devices, device);
|
||||
partialMatchingDiskDevices.add(new Pair<VirtualDisk, String>((VirtualDisk)device, deviceNumbering));
|
||||
|
||||
if (srcDatastoreName.equalsIgnoreCase(backingDatastoreName)) {
|
||||
String backingBaseName = dsBackingFile.getFileBaseName();
|
||||
|
||||
if (backingBaseName.equalsIgnoreCase(srcBaseName)) {
|
||||
String deviceNumbering = getDeviceBusName(devices, device);
|
||||
|
||||
s_logger.info("Disk backing : " + diskBackingInfo.getFileName() + " matches ==> " + deviceNumbering);
|
||||
|
||||
return new Pair<>((VirtualDisk)device, deviceNumbering);
|
||||
}
|
||||
|
||||
if (backingBaseName.contains(trimmedSrcBaseName)) {
|
||||
String deviceNumbering = getDeviceBusName(devices, device);
|
||||
|
||||
partialMatchingDiskDevices.add(new Pair<>((VirtualDisk)device, deviceNumbering));
|
||||
}
|
||||
}
|
||||
|
||||
diskBackingInfo = diskBackingInfo.getParent();
|
||||
|
|
@ -2380,19 +2426,24 @@ public class VirtualMachineMO extends BaseMO {
|
|||
|
||||
// No disk device was found with an exact match for the volume path, hence look for disk device that matches the trimmed name.
|
||||
s_logger.info("No disk device with an exact match found for volume : " + vmdkDatastorePath + ". Look for disk device info against trimmed base name: " + srcBaseName);
|
||||
|
||||
if (partialMatchingDiskDevices != null) {
|
||||
if (partialMatchingDiskDevices.size() == 1) {
|
||||
VirtualDiskFlatVer2BackingInfo matchingDiskBackingInfo = (VirtualDiskFlatVer2BackingInfo)partialMatchingDiskDevices.get(0).first().getBacking();
|
||||
|
||||
s_logger.info("Disk backing : " + matchingDiskBackingInfo.getFileName() + " matches ==> " + partialMatchingDiskDevices.get(0).second());
|
||||
|
||||
return partialMatchingDiskDevices.get(0);
|
||||
} else if (partialMatchingDiskDevices.size() > 1) {
|
||||
s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as multiple disk devices were found to match" +
|
||||
" volume's trimmed base name: " + trimmedSrcBaseName);
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
s_logger.warn("Disk device info lookup for volume: " + vmdkDatastorePath + " failed as no matching disk device found");
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue