[CLOUDSTACK-10323] Allow changing disk offering during volume migration (#2486)

* [CLOUDSTACK-10323] Allow changing disk offering during volume migration

This is a continuation of work developed on PR #2425 (CLOUDSTACK-10240), which provided root admins an override mechanism to move volumes between storage systems types (local/shared) even when the disk offering would not allow such operation. To complete the work, we will now provide a way for administrators to enter a new disk offering that can reflect the new placement of the volume. We will add an extra parameter to allow the root admin to inform a new disk offering for the volume. Therefore, when the volume is being migrated, it will be possible to replace the disk offering to reflect the new placement of the volume.

The API method will have the following parameters:

* storageid (required)
* volumeid (required)
* livemigrate (optional)
* newdiskofferingid (optional) – this is the new parameter

The expected behavior is the following:

* If “newdiskofferingid” is not provided, the current behavior is maintained. Override mechanism will also keep working as we have seen so far.
* If the “newdiskofferingid” is provided by the admin, we will execute the following checks:
** new disk offering mode (local/shared) must match the target storage mode. If it does not match, an exception will be thrown and the operator will receive a message indicating the problem.
** we will check if the new disk offering tags match the target storage tags. If it does not match, an exception will be thrown and the operator will receive a message indicating the problem.
** check if the target storage has the capacity for the new volume. If it does not have enough space, then an exception is thrown and the operator will receive a message indicating the problem.
** check if the size of the volume is the same as the size of the new disk offering. If it is not the same, we will ALLOW the change of the disk offering, and a warning message will be logged.

We execute the change of the Disk offering as soon as the migration of the volume finishes. Therefore, if an error happens during the migration and the volume remains in the original storage system, the disk offering will keep reflecting this situation.

* Code formatting

* Adding a test to cover migration with new disk offering (#4)

* Adding a test to cover migration with new disk offering

* Update test_volumes.py

* Update test_volumes.py

* fix test_11_migrate_volume_and_change_offering

* Fix typo in Java doc
This commit is contained in:
Rafael Weingärtner 2018-04-26 20:05:55 -03:00 committed by GitHub
parent 46bd94c6a2
commit d6cbd774b7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 788 additions and 492 deletions

View File

@ -86,6 +86,7 @@ public class ApiConstants {
public static final String DEVICE_ID = "deviceid"; public static final String DEVICE_ID = "deviceid";
public static final String DIRECT_DOWNLOAD = "directdownload"; public static final String DIRECT_DOWNLOAD = "directdownload";
public static final String DISK_OFFERING_ID = "diskofferingid"; public static final String DISK_OFFERING_ID = "diskofferingid";
public static final String NEW_DISK_OFFERING_ID = "newdiskofferingid";
public static final String DISK_SIZE = "disksize"; public static final String DISK_SIZE = "disksize";
public static final String UTILIZATION = "utilization"; public static final String UTILIZATION = "utilization";
public static final String DRIVER = "driver"; public static final String DRIVER = "driver";

View File

@ -25,17 +25,13 @@ import org.apache.cloudstack.api.response.VolumeResponse;
import com.cloud.storage.Volume; import com.cloud.storage.Volume;
@APICommand(name = "migrateVolume", description = "Migrate volume", responseObject = VolumeResponse.class, since = "3.0.0", responseView = ResponseView.Full, entityType = {
@APICommand(name = "migrateVolume", description = "Migrate volume", responseObject = VolumeResponse.class, since = "3.0.0", responseView = ResponseView.Full, entityType = {Volume.class}, Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class MigrateVolumeCmdByAdmin extends MigrateVolumeCmd { public class MigrateVolumeCmdByAdmin extends MigrateVolumeCmd {
@Override @Override
public void execute(){ public void execute() {
Volume result; Volume result = _volumeService.migrateVolume(this);
result = _volumeService.migrateVolume(this);
if (result != null) { if (result != null) {
VolumeResponse response = _responseGenerator.createVolumeResponse(ResponseView.Full, result); VolumeResponse response = _responseGenerator.createVolumeResponse(ResponseView.Full, result);
response.setResponseName(getCommandName()); response.setResponseName(getCommandName());

View File

@ -30,31 +30,27 @@ import com.cloud.event.EventTypes;
import com.cloud.storage.Volume; import com.cloud.storage.Volume;
import com.cloud.user.Account; import com.cloud.user.Account;
@APICommand(name = "migrateVolume", description = "Migrate volume", responseObject = VolumeResponse.class, since = "3.0.0", responseView = ResponseView.Restricted, entityType = {Volume.class}, @APICommand(name = "migrateVolume", description = "Migrate volume", responseObject = VolumeResponse.class, since = "3.0.0", responseView = ResponseView.Restricted, entityType = {
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class MigrateVolumeCmd extends BaseAsyncCmd { public class MigrateVolumeCmd extends BaseAsyncCmd {
private static final String s_name = "migratevolumeresponse"; private static final String s_name = "migratevolumeresponse";
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
//////////////// API parameters ///////////////////// //////////////// API parameters /////////////////////
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
@Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.UUID, entityType = VolumeResponse.class, required = true, description = "the ID of the volume") @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.UUID, entityType = VolumeResponse.class, required = true, description = "the ID of the volume")
private Long volumeId; private Long volumeId;
@Parameter(name = ApiConstants.STORAGE_ID, @Parameter(name = ApiConstants.STORAGE_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "destination storage pool ID to migrate the volume to")
type = CommandType.UUID,
entityType = StoragePoolResponse.class,
required = true,
description = "destination storage pool ID to migrate the volume to")
private Long storageId; private Long storageId;
@Parameter(name = ApiConstants.LIVE_MIGRATE, @Parameter(name = ApiConstants.LIVE_MIGRATE, type = CommandType.BOOLEAN, required = false, description = "if the volume should be live migrated when it is attached to a running vm")
type = CommandType.BOOLEAN,
required = false,
description = "if the volume should be live migrated when it is attached to a running vm")
private Boolean liveMigrate; private Boolean liveMigrate;
@Parameter(name = ApiConstants.NEW_DISK_OFFERING_ID, type = CommandType.STRING, description = "The new disk offering ID that replaces the current one used by the volume. This new disk offering is used to better reflect the new storage where the volume is going to be migrated to.")
private String newDiskOfferingUuid;
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
/////////////////// Accessors /////////////////////// /////////////////// Accessors ///////////////////////
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
@ -87,12 +83,12 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
@Override @Override
public long getEntityOwnerId() { public long getEntityOwnerId() {
Volume volume = _entityMgr.findById(Volume.class, getVolumeId()); Volume volume = _entityMgr.findById(Volume.class, getVolumeId());
if (volume != null) { if (volume != null) {
return volume.getAccountId(); return volume.getAccountId();
} }
return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
} }
@Override @Override
@ -105,6 +101,10 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
return "Attempting to migrate volume Id: " + getVolumeId() + " to storage pool Id: " + getStoragePoolId(); return "Attempting to migrate volume Id: " + getVolumeId() + " to storage pool Id: " + getStoragePoolId();
} }
public String getNewDiskOfferingUuid() {
return newDiskOfferingUuid;
}
@Override @Override
public void execute() { public void execute() {
Volume result; Volume result;

View File

@ -22,12 +22,14 @@ public class VmWorkMigrateVolume extends VmWork {
private long volumeId; private long volumeId;
private long destPoolId; private long destPoolId;
private boolean liveMigrate; private boolean liveMigrate;
private Long newDiskOfferingId;
public VmWorkMigrateVolume(long userId, long accountId, long vmId, String handlerName, long volumeId, long destPoolId, boolean liveMigrate) { public VmWorkMigrateVolume(long userId, long accountId, long vmId, String handlerName, long volumeId, long destPoolId, boolean liveMigrate, Long newDiskOfferingId) {
super(userId, accountId, vmId, handlerName); super(userId, accountId, vmId, handlerName);
this.volumeId = volumeId; this.volumeId = volumeId;
this.destPoolId = destPoolId; this.destPoolId = destPoolId;
this.liveMigrate = liveMigrate; this.liveMigrate = liveMigrate;
this.newDiskOfferingId = newDiskOfferingId;
} }
public long getVolumeId() { public long getVolumeId() {
@ -41,4 +43,8 @@ public class VmWorkMigrateVolume extends VmWork {
public boolean isLiveMigrate() { public boolean isLiveMigrate() {
return liveMigrate; return liveMigrate;
} }
public Long getNewDiskOfferingId() {
return newDiskOfferingId;
}
} }

View File

@ -30,8 +30,6 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject; import javax.inject.Inject;
import javax.naming.ConfigurationException; import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -67,6 +65,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.log4j.Logger;
import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.DiskTO;
@ -225,8 +224,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
// Find a destination storage pool with the specified criteria // Find a destination storage pool with the specified criteria
DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId()); DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(),
diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
dskCh.setHyperType(dataDiskHyperType); dskCh.setHyperType(dataDiskHyperType);
storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering); storageMgr.setDiskProfileThrottling(dskCh, null, diskOffering);
@ -249,17 +248,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, Long templateId) { public VolumeVO allocateDuplicateVolumeVO(Volume oldVol, Long templateId) {
VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(),
oldVol.getName(), oldVol.getProvisioningType(), oldVol.getSize(), oldVol.getMinIops(), oldVol.getMaxIops(), oldVol.get_iScsiName());
oldVol.getDataCenterId(),
oldVol.getDomainId(),
oldVol.getAccountId(),
oldVol.getDiskOfferingId(),
oldVol.getProvisioningType(),
oldVol.getSize(),
oldVol.getMinIops(),
oldVol.getMaxIops(),
oldVol.get_iScsiName());
if (templateId != null) { if (templateId != null) {
newVol.setTemplateId(templateId); newVol.setTemplateId(templateId);
} else { } else {
@ -398,8 +388,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
DataStoreRole dataStoreRole = getDataStoreRole(snapshot); DataStoreRole dataStoreRole = getDataStoreRole(snapshot);
SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), dataStoreRole); SnapshotInfo snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), dataStoreRole);
if (snapInfo == null && dataStoreRole == DataStoreRole.Image) {
if(snapInfo == null && dataStoreRole == DataStoreRole.Image) {
// snapshot is not backed up to secondary, let's do that now. // snapshot is not backed up to secondary, let's do that now.
snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Primary); snapInfo = snapshotFactory.getSnapshot(snapshot.getId(), DataStoreRole.Primary);
@ -480,8 +469,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + dc.getId()); throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + dc.getId());
} }
return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(),
diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? template.getId() : null); diskOffering.isRecreatable(), Storage.ImageFormat.ISO != template.getFormat() ? template.getId() : null);
} else { } else {
return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(),
diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null);
@ -489,8 +478,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
@DB @DB
public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering,
ServiceOffering offering, DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) throws NoTransitionException { DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) throws NoTransitionException {
final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids); final HashSet<StoragePool> avoidPools = new HashSet<StoragePool>(avoids);
DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering);
@ -522,8 +511,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
@DB @DB
public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering, public VolumeInfo createVolume(VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate template, DataCenter dc, Pod pod, Long clusterId, ServiceOffering offering, DiskOffering diskOffering,
DiskOffering diskOffering, List<StoragePool> avoids, long size, HypervisorType hyperType) { List<StoragePool> avoids, long size, HypervisorType hyperType) {
// update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage) // update the volume's hv_ss_reserve (hypervisor snapshot reserve) from a disk offering (used for managed storage)
volume = volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType); volume = volService.updateHypervisorSnapshotReserveForVolume(diskOffering, volume.getId(), hyperType);
@ -659,12 +648,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
protected DiskProfile toDiskProfile(Volume vol, DiskOffering offering) { protected DiskProfile toDiskProfile(Volume vol, DiskOffering offering) {
return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), offering.getUseLocalStorage(), return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), offering.getUseLocalStorage(), offering.isRecreatable(),
offering.isRecreatable(), vol.getTemplateId()); vol.getTemplateId());
} }
@Override @Override
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId) { public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner,
Long deviceId) {
if (size == null) { if (size == null) {
size = offering.getDiskSize(); size = offering.getDiskSize();
} else { } else {
@ -674,17 +664,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
minIops = minIops != null ? minIops : offering.getMinIops(); minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops(); maxIops = maxIops != null ? maxIops : offering.getMaxIops();
VolumeVO vol = new VolumeVO(type, VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), size, minIops, maxIops, null);
name,
vm.getDataCenterId(),
owner.getDomainId(),
owner.getId(),
offering.getId(),
offering.getProvisioningType(),
size,
minIops,
maxIops,
null);
if (vm != null) { if (vm != null) {
vol.setInstanceId(vm.getId()); vol.setInstanceId(vm.getId());
} }
@ -722,11 +702,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
@Override @Override
public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm, Account owner) { public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm,
Account owner) {
assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really...."; assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really....";
Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId()); Long size = _tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());
if (rootDisksize != null ) { if (rootDisksize != null) {
rootDisksize = rootDisksize * 1024 * 1024 * 1024; rootDisksize = rootDisksize * 1024 * 1024 * 1024;
if (rootDisksize > size) { if (rootDisksize > size) {
s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name); s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
@ -739,17 +720,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
minIops = minIops != null ? minIops : offering.getMinIops(); minIops = minIops != null ? minIops : offering.getMinIops();
maxIops = maxIops != null ? maxIops : offering.getMaxIops(); maxIops = maxIops != null ? maxIops : offering.getMaxIops();
VolumeVO vol = new VolumeVO(type, VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), offering.getProvisioningType(), size, minIops, maxIops, null);
name,
vm.getDataCenterId(),
owner.getDomainId(),
owner.getId(),
offering.getId(),
offering.getProvisioningType(),
size,
minIops,
maxIops,
null);
vol.setFormat(getSupportedImageFormatForCluster(template.getHypervisorType())); vol.setFormat(getSupportedImageFormatForCluster(template.getHypervisorType()));
if (vm != null) { if (vm != null) {
vol.setInstanceId(vm.getId()); vol.setInstanceId(vm.getId());
@ -770,7 +741,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
vol.setDisplayVolume(userVm.isDisplayVm()); vol.setDisplayVolume(userVm.isDisplayVm());
} }
vol = _volsDao.persist(vol); vol = _volsDao.persist(vol);
// Create event and update resource count for volumes if vm is a user vm // Create event and update resource count for volumes if vm is a user vm
@ -820,16 +790,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
} }
private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod, private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volume, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod, DiskOffering diskVO,
DiskOffering diskVO, ServiceOffering svo, HypervisorType rootDiskHyperType) throws NoTransitionException { ServiceOffering svo, HypervisorType rootDiskHyperType) throws NoTransitionException {
if (!isSupportedImageFormatForCluster(volume, rootDiskHyperType)) { if (!isSupportedImageFormatForCluster(volume, rootDiskHyperType)) {
throw new InvalidParameterValueException("Failed to attach volume to VM since volumes format " + volume.getFormat().getFileExtension() throw new InvalidParameterValueException("Failed to attach volume to VM since volumes format " + volume.getFormat().getFileExtension() + " is not compatible with the vm hypervisor type");
+ " is not compatible with the vm hypervisor type");
} }
VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume, vm, rootDiskTmplt, dcVO, pod, rootDiskPool.getClusterId(), svo, diskVO, new ArrayList<StoragePool>(), VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume, vm, rootDiskTmplt, dcVO, pod, rootDiskPool.getClusterId(), svo, diskVO, new ArrayList<StoragePool>(), volume.getSize(),
volume.getSize(), rootDiskHyperType); rootDiskHyperType);
return volumeOnPrimary; return volumeOnPrimary;
} }
@ -874,8 +843,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
long vmTemplateId = vm.getTemplateId(); long vmTemplateId = vm.getTemplateId();
if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
if (s_logger.isDebugEnabled()) { if (s_logger.isDebugEnabled()) {
s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + ", updating templateId in the new Volume");
+ ", updating templateId in the new Volume");
} }
templateIdToUse = vmTemplateId; templateIdToUse = vmTemplateId;
} }
@ -989,7 +957,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@DB @DB
public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException { public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
VolumeInfo vol = volFactory.getVolume(volume.getId()); VolumeInfo vol = volFactory.getVolume(volume.getId());
AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, (DataStore)destPool);
DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
AsyncCallFuture<VolumeApiResult> future = volService.copyVolume(vol, dataStoreTarget);
try { try {
VolumeApiResult result = future.get(); VolumeApiResult result = future.get();
if (result.isFailed()) { if (result.isFailed()) {
@ -1111,8 +1081,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
for (VolumeVO vol : vols) { for (VolumeVO vol : vols) {
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO(); DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore)); disk.setDetails(getDetails(volumeInfo, dataStore));
@ -1220,7 +1189,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} else { } else {
storageMigrationEnabled = StorageMigrationEnabled.value(); storageMigrationEnabled = StorageMigrationEnabled.value();
} }
if(storageMigrationEnabled){ if (storageMigrationEnabled) {
if (s_logger.isDebugEnabled()) { if (s_logger.isDebugEnabled()) {
s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
} }
@ -1240,8 +1209,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
} else { } else {
if (vol.getPoolId() == null) { if (vol.getPoolId() == null) {
throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId());
Volume.class, vol.getId());
} }
if (s_logger.isDebugEnabled()) { if (s_logger.isDebugEnabled()) {
s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM"); s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
@ -1322,8 +1290,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
long hostId = vm.getVirtualMachine().getHostId(); long hostId = vm.getVirtualMachine().getHostId();
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId); future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
} } else {
else {
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ); future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
} }
} }
@ -1401,7 +1368,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
if (oldHostId != hostId) { if (oldHostId != hostId) {
Host oldHost = _hostDao.findById(oldHostId); Host oldHost = _hostDao.findById(oldHostId);
Host host = _hostDao.findById(hostId);
DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); DataStore storagePool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool); storageMgr.removeStoragePoolFromCluster(oldHostId, vol.get_iScsiName(), pool);
@ -1420,8 +1386,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
DataTO volTO = volumeInfo.getTO(); DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
disk.setDetails(getDetails(volumeInfo, dataStore)); disk.setDetails(getDetails(volumeInfo, dataStore));
@ -1437,13 +1402,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
cloneType = UserVmCloneType.full; cloneType = UserVmCloneType.full;
} }
UserVmCloneSettingVO cloneSettingVO = _vmCloneSettingDao.findByVmId(vm.getId()); UserVmCloneSettingVO cloneSettingVO = _vmCloneSettingDao.findByVmId(vm.getId());
if (cloneSettingVO != null){ if (cloneSettingVO != null) {
if (! cloneSettingVO.getCloneType().equals(cloneType.toString())){ if (!cloneSettingVO.getCloneType().equals(cloneType.toString())) {
cloneSettingVO.setCloneType(cloneType.toString()); cloneSettingVO.setCloneType(cloneType.toString());
_vmCloneSettingDao.update(cloneSettingVO.getVmId(), cloneSettingVO); _vmCloneSettingDao.update(cloneSettingVO.getVmId(), cloneSettingVO);
} }
} } else {
else {
UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(vm.getId(), cloneType.toString()); UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(vm.getId(), cloneType.toString());
_vmCloneSettingDao.persist(vmCloneSettingVO); _vmCloneSettingDao.persist(vmCloneSettingVO);
} }
@ -1468,8 +1432,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return true; return true;
} }
public static final ConfigKey<Long> MaxVolumeSize = new ConfigKey<Long>(Long.class, "storage.max.volume.size", "Storage", "2000", "The maximum size for a volume (in GB).", public static final ConfigKey<Long> MaxVolumeSize = new ConfigKey<Long>(Long.class, "storage.max.volume.size", "Storage", "2000", "The maximum size for a volume (in GB).", true);
true);
public static final ConfigKey<Boolean> RecreatableSystemVmEnabled = new ConfigKey<Boolean>(Boolean.class, "recreate.systemvm.enabled", "Advanced", "false", public static final ConfigKey<Boolean> RecreatableSystemVmEnabled = new ConfigKey<Boolean>(Boolean.class, "recreate.systemvm.enabled", "Advanced", "false",
"If true, will recreate system vm root disk whenever starting system vm", true); "If true, will recreate system vm root disk whenever starting system vm", true);
@ -1481,8 +1444,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
"Enable/disable storage migration across primary storage", true); "Enable/disable storage migration across primary storage", true);
static final ConfigKey<Boolean> VolumeUrlCheck = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.url.check", "true", static final ConfigKey<Boolean> VolumeUrlCheck = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.url.check", "true",
"Check the url for a volume before downloading it from the management server. Set to flase when you managment has no internet access.", "Check the url for a volume before downloading it from the management server. Set to flase when you managment has no internet access.", true);
true);
@Override @Override
public ConfigKey<?>[] getConfigKeys() { public ConfigKey<?>[] getConfigKeys() {
@ -1510,9 +1472,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
_volsDao.remove(volume.getId()); _volsDao.remove(volume.getId());
} }
if(volume.getState().equals(Volume.State.Attaching)) { if (volume.getState().equals(Volume.State.Attaching)) {
s_logger.warn("Vol: " + volume.getName() + " failed to attach to VM: " + _userVmDao.findById(vmId).getHostName() + s_logger.warn("Vol: " + volume.getName() + " failed to attach to VM: " + _userVmDao.findById(vmId).getHostName() + " on last mgt server stop, changing state back to Ready");
" on last mgt server stop, changing state back to Ready");
volume.setState(Volume.State.Ready); volume.setState(Volume.State.Ready);
_volsDao.update(volumeId, volume); _volsDao.update(volumeId, volume);
} }
@ -1552,8 +1513,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override @Override
public void cleanupStorageJobs() { public void cleanupStorageJobs() {
//clean up failure jobs related to volume //clean up failure jobs related to volume
List<AsyncJobVO> jobs = _jobMgr.findFailureAsyncJobs(VmWorkAttachVolume.class.getName(), List<AsyncJobVO> jobs = _jobMgr.findFailureAsyncJobs(VmWorkAttachVolume.class.getName(), VmWorkMigrateVolume.class.getName(), VmWorkTakeVolumeSnapshot.class.getName());
VmWorkMigrateVolume.class.getName(), VmWorkTakeVolumeSnapshot.class.getName());
for (AsyncJobVO job : jobs) { for (AsyncJobVO job : jobs) {
try { try {
@ -1595,8 +1555,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
} }
// FIXME - All this is boiler plate code and should be done as part of state transition. This shouldn't be part of orchestrator. // FIXME - All this is boiler plate code and should be done as part of state transition. This shouldn't be part of orchestrator.
// publish usage event for the volume // publish usage event for the volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), Volume.class.getName(),
Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume()); volume.getUuid(), volume.isDisplayVolume());
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay()); _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay());
_resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, volume.isDisplay(), new Long(volume.getSize())); _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, volume.isDisplay(), new Long(volume.getSize()));
} catch (Exception e) { } catch (Exception e) {
@ -1622,13 +1582,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
VolumeVO vol = _volsDao.findById(volumeId); VolumeVO vol = _volsDao.findById(volumeId);
boolean needUpdate = false; boolean needUpdate = false;
// Volume path is not getting updated in the DB, need to find reason and fix the issue. // Volume path is not getting updated in the DB, need to find reason and fix the issue.
if (vol.getPath() == null) if (vol.getPath() == null) {
return; return;
if (!vol.getPath().equalsIgnoreCase(path)) }
if (!vol.getPath().equalsIgnoreCase(path)) {
needUpdate = true; needUpdate = true;
}
if (chainInfo != null && (vol.getChainInfo() == null || !chainInfo.equalsIgnoreCase(vol.getChainInfo()))) if (chainInfo != null && (vol.getChainInfo() == null || !chainInfo.equalsIgnoreCase(vol.getChainInfo()))) {
needUpdate = true; needUpdate = true;
}
if (needUpdate) { if (needUpdate) {
s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo); s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);

View File

@ -121,10 +121,10 @@ public class DiskOfferingVO implements DiskOffering {
@Enumerated(value = EnumType.STRING) @Enumerated(value = EnumType.STRING)
private DiskCacheMode cacheMode; private DiskCacheMode cacheMode;
@Column(name="provisioning_type") @Column(name = "provisioning_type")
Storage.ProvisioningType provisioningType; Storage.ProvisioningType provisioningType;
@Column(name="display_offering") @Column(name = "display_offering")
boolean displayOffering = true; boolean displayOffering = true;
@Enumerated(EnumType.STRING) @Enumerated(EnumType.STRING)
@ -138,8 +138,8 @@ public class DiskOfferingVO implements DiskOffering {
uuid = UUID.randomUUID().toString(); uuid = UUID.randomUUID().toString();
} }
public DiskOfferingVO(Long domainId, String name, String displayText, Storage.ProvisioningType provisioningType, long diskSize, String tags, boolean isCustomized, public DiskOfferingVO(Long domainId, String name, String displayText, Storage.ProvisioningType provisioningType, long diskSize, String tags, boolean isCustomized, Boolean isCustomizedIops,
Boolean isCustomizedIops, Long minIops, Long maxIops, DiskCacheMode cacheMode) { Long minIops, Long maxIops, DiskCacheMode cacheMode) {
this.domainId = domainId; this.domainId = domainId;
this.name = name; this.name = name;
this.displayText = displayText; this.displayText = displayText;
@ -157,8 +157,8 @@ public class DiskOfferingVO implements DiskOffering {
this.cacheMode = cacheMode; this.cacheMode = cacheMode;
} }
public DiskOfferingVO(Long domainId, String name, String displayText, Storage.ProvisioningType provisioningType, long diskSize, String tags, boolean isCustomized, public DiskOfferingVO(Long domainId, String name, String displayText, Storage.ProvisioningType provisioningType, long diskSize, String tags, boolean isCustomized, Boolean isCustomizedIops,
Boolean isCustomizedIops, Long minIops, Long maxIops) { Long minIops, Long maxIops) {
this.domainId = domainId; this.domainId = domainId;
this.name = name; this.name = name;
this.displayText = displayText; this.displayText = displayText;
@ -176,8 +176,8 @@ public class DiskOfferingVO implements DiskOffering {
state = State.Active; state = State.Active;
} }
public DiskOfferingVO(String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, public DiskOfferingVO(String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, boolean useLocalStorage, boolean systemUse,
boolean useLocalStorage, boolean systemUse, boolean customized) { boolean customized) {
domainId = null; domainId = null;
type = Type.Service; type = Type.Service;
this.name = name; this.name = name;
@ -194,8 +194,8 @@ public class DiskOfferingVO implements DiskOffering {
// domain specific offerings constructor (null domainId implies public // domain specific offerings constructor (null domainId implies public
// offering) // offering)
public DiskOfferingVO(String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, public DiskOfferingVO(String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, boolean useLocalStorage, boolean systemUse,
boolean useLocalStorage, boolean systemUse, boolean customized, Long domainId) { boolean customized, Long domainId) {
type = Type.Service; type = Type.Service;
this.name = name; this.name = name;
this.displayText = displayText; this.displayText = displayText;
@ -210,8 +210,8 @@ public class DiskOfferingVO implements DiskOffering {
state = State.Active; state = State.Active;
} }
public DiskOfferingVO(long id, String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, public DiskOfferingVO(long id, String name, String displayText, Storage.ProvisioningType provisioningType, boolean mirrored, String tags, boolean recreatable, boolean useLocalStorage,
boolean useLocalStorage, boolean systemUse, boolean customized, boolean customizedIops, Long domainId, Long minIops, Long maxIops) { boolean systemUse, boolean customized, boolean customizedIops, Long domainId, Long minIops, Long maxIops) {
this.id = id; this.id = id;
type = Type.Service; type = Type.Service;
this.name = name; this.name = name;
@ -308,6 +308,7 @@ public class DiskOfferingVO implements DiskOffering {
return domainId; return domainId;
} }
@Override
public Type getType() { public Type getType() {
return type; return type;
} }
@ -349,7 +350,7 @@ public class DiskOfferingVO implements DiskOffering {
} }
@Override @Override
public Storage.ProvisioningType getProvisioningType(){ public Storage.ProvisioningType getProvisioningType() {
return provisioningType; return provisioningType;
} }
@ -514,4 +515,8 @@ public class DiskOfferingVO implements DiskOffering {
public Integer getHypervisorSnapshotReserve() { public Integer getHypervisorSnapshotReserve() {
return hypervisorSnapshotReserve; return hypervisorSnapshotReserve;
} }
/**
 * Returns {@code true} when this disk offering targets shared primary storage,
 * i.e. when {@code useLocalStorage} is {@code false}. Used by the volume
 * migration checks to match an offering's storage mode against the target pool.
 */
public boolean isShared() {
return !useLocalStorage;
}
} }

View File

@ -93,33 +93,30 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
/** /**
* Gets the Total Primary Storage space allocated for an account * Gets the Total Primary Storage space allocated for an account
* *
* @param account
* @param list of ids of virtual router VMs under this account * @param list of ids of virtual router VMs under this account
* @return total Primary Storage space (in bytes) used * @return total Primary Storage space (in bytes) used
*/ */
long primaryStorageUsedForAccount(long accountId, List<Long> virtualRouters); long primaryStorageUsedForAccount(long accountId, List<Long> virtualRouters);
/** /**
* Gets the Total Secondary Storage space used by volumes allocated for an * Gets the Total Secondary Storage space used by volumes allocated for an account
* account
*
* @param account
* @return total Secondary Storage space (in bytes) used * @return total Secondary Storage space (in bytes) used
*/ */
long secondaryStorageUsedForAccount(long accountId); long secondaryStorageUsedForAccount(long accountId);
/*** /***
*
* @param volumeId
* @return the scope of the storage pool where the volume is present (ZONE/CLUSTER) * @return the scope of the storage pool where the volume is present (ZONE/CLUSTER)
*/ */
ScopeType getVolumeStoragePoolScope(long volumeId); ScopeType getVolumeStoragePoolScope(long volumeId);
/*** /***
* Updates the destVol uuid with srcVol uuid and sets the srcVol uuid as null. * Updates the destVol uuid with srcVol uuid and sets the srcVol uuid as null.
* @param srcVolId
* @param destVolId
* @return returns true if transaction is successful. * @return returns true if transaction is successful.
*/ */
boolean updateUuid(long srcVolId, long destVolId); boolean updateUuid(long srcVolId, long destVolId);
/**
 * Updates the disk offering for the given volume.
 *
 * @param volumeId       id of the volume whose {@code disk_offering_id} is replaced
 * @param diskOfferingId id of the new disk offering to associate with the volume
 */
void updateDiskOffering(long volumeId, long diskOfferingId);
} }

View File

@ -72,16 +72,13 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
// need to account for zone-wide primary storage where storage_pool has // need to account for zone-wide primary storage where storage_pool has
// null-value pod and cluster, where hypervisor information is stored in // null-value pod and cluster, where hypervisor information is stored in
// storage_pool // storage_pool
protected static final String SELECT_HYPERTYPE_FROM_CLUSTER_VOLUME = protected static final String SELECT_HYPERTYPE_FROM_CLUSTER_VOLUME = "SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?";
"SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?";
protected static final String SELECT_HYPERTYPE_FROM_ZONE_VOLUME = "SELECT s.hypervisor from volumes v, storage_pool s where v.pool_id = s.id and v.id = ?"; protected static final String SELECT_HYPERTYPE_FROM_ZONE_VOLUME = "SELECT s.hypervisor from volumes v, storage_pool s where v.pool_id = s.id and v.id = ?";
protected static final String SELECT_POOLSCOPE = "SELECT s.scope from storage_pool s, volumes v where s.id = v.pool_id and v.id = ?"; protected static final String SELECT_POOLSCOPE = "SELECT s.scope from storage_pool s, volumes v where s.id = v.pool_id and v.id = ?";
private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
"SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
+ " AND pool.pod_id = ? AND pool.cluster_id = ? " + " GROUP BY pool.id ORDER BY 2 ASC "; + " AND pool.pod_id = ? AND pool.cluster_id = ? " + " GROUP BY pool.id ORDER BY 2 ASC ";
private static final String ORDER_ZONE_WIDE_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = private static final String ORDER_ZONE_WIDE_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
"SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? "
+ " AND pool.scope = 'ZONE' AND pool.status='Up' " + " GROUP BY pool.id ORDER BY 2 ASC "; + " AND pool.scope = 'ZONE' AND pool.status='Up' " + " GROUP BY pool.id ORDER BY 2 ASC ";
@Override @Override
@ -276,12 +273,13 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
try { try {
ScopeType scope = getVolumeStoragePoolScope(volumeId); ScopeType scope = getVolumeStoragePoolScope(volumeId);
if (scope != null) { if (scope != null) {
if (scope == ScopeType.CLUSTER || scope == ScopeType.HOST) if (scope == ScopeType.CLUSTER || scope == ScopeType.HOST) {
sql = SELECT_HYPERTYPE_FROM_CLUSTER_VOLUME; sql = SELECT_HYPERTYPE_FROM_CLUSTER_VOLUME;
else if (scope == ScopeType.ZONE) } else if (scope == ScopeType.ZONE) {
sql = SELECT_HYPERTYPE_FROM_ZONE_VOLUME; sql = SELECT_HYPERTYPE_FROM_ZONE_VOLUME;
else } else {
s_logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId); s_logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId);
}
pstmt = txn.prepareAutoCloseStatement(sql); pstmt = txn.prepareAutoCloseStatement(sql);
pstmt.setLong(1, volumeId); pstmt.setLong(1, volumeId);
@ -511,34 +509,12 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
VolumeVO dbVol = findByIdIncludingRemoved(vo.getId()); VolumeVO dbVol = findByIdIncludingRemoved(vo.getId());
if (dbVol != null) { if (dbVol != null) {
StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString());
str.append(": DB Data={id=") str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=")
.append(dbVol.getId()) .append(dbVol.getUpdated());
.append("; state=") str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount())
.append(dbVol.getState()) .append("; updatedTime=").append(vo.getUpdated());
.append("; updatecount=") str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated)
.append(dbVol.getUpdatedCount()) .append("; updatedTime=").append(oldUpdatedTime);
.append(";updatedTime=")
.append(dbVol.getUpdated());
str.append(": New Data={id=")
.append(vo.getId())
.append("; state=")
.append(nextState)
.append("; event=")
.append(event)
.append("; updatecount=")
.append(vo.getUpdatedCount())
.append("; updatedTime=")
.append(vo.getUpdated());
str.append(": stale Data={id=")
.append(vo.getId())
.append("; state=")
.append(currentState)
.append("; event=")
.append(event)
.append("; updatecount=")
.append(oldUpdated)
.append("; updatedTime=")
.append(oldUpdatedTime);
} else { } else {
s_logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore"); s_logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore");
} }
@ -685,4 +661,19 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
} }
return null; return null;
} }
/**
 * Raw update of a volume's disk offering reference. Kept as a prepared
 * statement with positional parameters (never string-concatenated) so the
 * ids cannot be injected into the SQL text.
 */
private static final String SQL_UPDATE_DISK_OFFERING = "UPDATE volumes SET disk_offering_id = ? where id =?";

/**
 * Updates the disk offering of the given volume directly in the
 * {@code volumes} table.
 *
 * @param volumeId       id of the volume to update
 * @param diskOfferingId id of the disk offering to set on the volume
 * @throws CloudRuntimeException wrapping any {@link SQLException} raised
 *         while preparing or executing the update
 */
@Override
public void updateDiskOffering(long volumeId, long diskOfferingId) {
    // NOTE(review): try-with-resources will close the *current* transaction
    // when this method returns — confirm no caller relies on it staying open.
    try (TransactionLegacy txn = TransactionLegacy.currentTxn();
            PreparedStatement pstmt = txn.prepareAutoCloseStatement(SQL_UPDATE_DISK_OFFERING)) {
        pstmt.setLong(1, diskOfferingId);
        pstmt.setLong(2, volumeId);
        pstmt.executeUpdate();
        txn.commit();
    } catch (SQLException e) {
        throw new CloudRuntimeException(e);
    }
}
} }

View File

@ -69,6 +69,8 @@ import org.apache.cloudstack.storage.command.AttachCommand;
import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.DettachCommand;
import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
@ -76,6 +78,7 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo;
import org.apache.commons.collections.CollectionUtils;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.joda.time.DateTime; import org.joda.time.DateTime;
import org.joda.time.DateTimeZone; import org.joda.time.DateTimeZone;
@ -247,6 +250,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
private ClusterDetailsDao _clusterDetailsDao; private ClusterDetailsDao _clusterDetailsDao;
@Inject @Inject
private StorageManager storageMgr; private StorageManager storageMgr;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
protected Gson _gson; protected Gson _gson;
@ -254,12 +259,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.job.check.interval", "3000", static final ConfigKey<Long> VmJobCheckInterval = new ConfigKey<Long>("Advanced", Long.class, "vm.job.check.interval", "3000", "Interval in milliseconds to check if the job is complete", false);
"Interval in milliseconds to check if the job is complete", false);
static final ConfigKey<Boolean> VolumeUrlCheck = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.url.check", "true", static final ConfigKey<Boolean> VolumeUrlCheck = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.url.check", "true",
"Check the url for a volume before downloading it from the management server. Set to false when you managment has no internet access.", "Check the url for a volume before downloading it from the management server. Set to false when you managment has no internet access.", true);
true);
private long _maxVolumeSizeInGb; private long _maxVolumeSizeInGb;
private final StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine; private final StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine;
@ -330,7 +333,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
EndPoint ep = pair.first(); EndPoint ep = pair.first();
DataObject dataObject = pair.second(); DataObject dataObject = pair.second();
GetUploadParamsResponse response = new GetUploadParamsResponse(); GetUploadParamsResponse response = new GetUploadParamsResponse();
String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key()); String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key());
@ -355,10 +357,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
/* /*
* encoded metadata using the post upload config key * encoded metadata using the post upload config key
*/ */
TemplateOrVolumePostUploadCommand command = TemplateOrVolumePostUploadCommand command = new TemplateOrVolumePostUploadCommand(vol.getId(), vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(),
new TemplateOrVolumePostUploadCommand(vol.getId(), vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(), vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(), dataObject.getDataStore().getRole().toString());
vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(),
dataObject.getDataStore().getRole().toString());
command.setLocalPath(volumeStore.getLocalDownloadPath()); command.setLocalPath(volumeStore.getLocalDownloadPath());
//using the existing max upload size configuration //using the existing max upload size configuration
command.setProcessTimeout(NumbersUtil.parseLong(_configDao.getValue("vmware.package.ova.timeout"), 3600)); command.setProcessTimeout(NumbersUtil.parseLong(_configDao.getValue("vmware.package.ova.timeout"), 3600));
@ -378,8 +378,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}); });
} }
private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format, Long diskOfferingId) throws ResourceAllocationException {
String format, Long diskOfferingId) throws ResourceAllocationException {
// permission check // permission check
Account volumeOwner = _accountMgr.getActiveAccountById(ownerId); Account volumeOwner = _accountMgr.getActiveAccountById(ownerId);
@ -400,8 +399,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
//validating the url only when url is not null. url can be null incase of form based post upload //validating the url only when url is not null. url can be null incase of form based post upload
if (url != null ) { if (url != null) {
if( url.toLowerCase().contains("file://")) { if (url.toLowerCase().contains("file://")) {
throw new InvalidParameterValueException("File:// type urls are currently unsupported"); throw new InvalidParameterValueException("File:// type urls are currently unsupported");
} }
UriUtils.validateUrl(format, url); UriUtils.validateUrl(format, url);
@ -425,8 +424,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// Check that the the disk offering specified is valid // Check that the the disk offering specified is valid
if (diskOfferingId != null) { if (diskOfferingId != null) {
DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);
if ((diskOffering == null) || diskOffering.getRemoved() != null if ((diskOffering == null) || diskOffering.getRemoved() != null || !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) {
|| !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) {
throw new InvalidParameterValueException("Please specify a valid disk offering."); throw new InvalidParameterValueException("Please specify a valid disk offering.");
} }
if (!diskOffering.isCustomized()) { if (!diskOffering.isCustomized()) {
@ -448,8 +446,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
@DB @DB
protected VolumeVO persistVolume(final Account owner, final Long zoneId, final String volumeName, final String url, protected VolumeVO persistVolume(final Account owner, final Long zoneId, final String volumeName, final String url, final String format, final Long diskOfferingId, final Volume.State state) {
final String format, final Long diskOfferingId, final Volume.State state) {
return Transaction.execute(new TransactionCallback<VolumeVO>() { return Transaction.execute(new TransactionCallback<VolumeVO>() {
@Override @Override
public VolumeVO doInTransaction(TransactionStatus status) { public VolumeVO doInTransaction(TransactionStatus status) {
@ -531,7 +528,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@DB @DB
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true)
public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException { public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException {
// FIXME: some of the scheduled event stuff might be missing here...
Account caller = CallContext.current().getCallingAccount(); Account caller = CallContext.current().getCallingAccount();
long ownerId = cmd.getEntityOwnerId(); long ownerId = cmd.getEntityOwnerId();
@ -590,12 +586,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (size == null) { if (size == null) {
throw new InvalidParameterValueException("This disk offering requires a custom size specified"); throw new InvalidParameterValueException("This disk offering requires a custom size specified");
} }
Long customDiskOfferingMaxSize = _volumeMgr.CustomDiskOfferingMaxSize.value(); Long customDiskOfferingMaxSize = VolumeOrchestrationService.CustomDiskOfferingMaxSize.value();
Long customDiskOfferingMinSize = _volumeMgr.CustomDiskOfferingMinSize.value(); Long customDiskOfferingMinSize = VolumeOrchestrationService.CustomDiskOfferingMinSize.value();
if ((sizeInGB < customDiskOfferingMinSize) || (sizeInGB > customDiskOfferingMaxSize)) { if ((sizeInGB < customDiskOfferingMinSize) || (sizeInGB > customDiskOfferingMaxSize)) {
throw new InvalidParameterValueException("Volume size: " + sizeInGB + "GB is out of allowed range. Max: " + customDiskOfferingMaxSize + " Min:" throw new InvalidParameterValueException("Volume size: " + sizeInGB + "GB is out of allowed range. Max: " + customDiskOfferingMaxSize + " Min:" + customDiskOfferingMinSize);
+ customDiskOfferingMinSize);
} }
} }
@ -719,15 +714,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
String userSpecifiedName = getVolumeNameFromCommand(cmd); String userSpecifiedName = getVolumeNameFromCommand(cmd);
VolumeVO volume = commitVolume(cmd, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, VolumeVO volume = commitVolume(cmd, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName,
minIops, maxIops, parentVolume, userSpecifiedName, _uuidMgr.generateUuid(Volume.class, cmd.getCustomId())); _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()));
return volume; return volume;
} }
private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final Account owner, final Boolean displayVolume, private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final Account owner, final Boolean displayVolume, final Long zoneId, final Long diskOfferingId,
final Long zoneId, final Long diskOfferingId, final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid) {
final String userSpecifiedName, final String uuid) {
return Transaction.execute(new TransactionCallback<VolumeVO>() { return Transaction.execute(new TransactionCallback<VolumeVO>() {
@Override @Override
public VolumeVO doInTransaction(TransactionStatus status) { public VolumeVO doInTransaction(TransactionStatus status) {
@ -755,8 +749,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
volume = _volsDao.persist(volume); volume = _volsDao.persist(volume);
if (cmd.getSnapshotId() == null && displayVolume) { if (cmd.getSnapshotId() == null && displayVolume) {
// for volume created from snapshot, create usage event after volume creation // for volume created from snapshot, create usage event after volume creation
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size,
diskOfferingId, null, size, Volume.class.getName(), volume.getUuid(), displayVolume); Volume.class.getName(), volume.getUuid(), displayVolume);
} }
CallContext.current().setEventDetails("Volume Id: " + volume.getId()); CallContext.current().setEventDetails("Volume Id: " + volume.getId());
@ -841,8 +835,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
createdVolume = _volumeMgr.createVolumeFromSnapshot(volume, snapshot, vm); createdVolume = _volumeMgr.createVolumeFromSnapshot(volume, snapshot, vm);
VolumeVO volumeVo = _volsDao.findById(createdVolume.getId()); VolumeVO volumeVo = _volsDao.findById(createdVolume.getId());
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(),
createdVolume.getName(), createdVolume.getDiskOfferingId(), null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid(), volumeVo.isDisplayVolume()); createdVolume.getDiskOfferingId(), null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid(), volumeVo.isDisplayVolume());
return volumeVo; return volumeVo;
} }
@ -882,14 +876,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId());
if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any
hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Any && hypervisorType != HypervisorType.None ) { && hypervisorType != HypervisorType.None) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override"); throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
} }
if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) { if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) {
throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. Volume " + volume.getUuid() + " is in state " + volume.getState() + ".");
volume.getUuid() + " is in state " + volume.getState() + ".");
} }
// if we are to use the existing disk offering // if we are to use the existing disk offering
@ -900,14 +893,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// if the caller is looking to change the size of the volume // if the caller is looking to change the size of the volume
if (newSize != null) { if (newSize != null) {
if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(Volume.Type.ROOT)) { if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(Volume.Type.ROOT)) {
throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be " + throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be "
"customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering)."); + "customizable or it must be a root volume (if providing a disk offering, make sure it is different from the current disk offering).");
} }
// convert from bytes to GiB // convert from bytes to GiB
newSize = newSize << 30; newSize = newSize << 30;
} } else {
else {
// no parameter provided; just use the original size of the volume // no parameter provided; just use the original size of the volume
newSize = volume.getSize(); newSize = volume.getSize();
} }
@ -918,8 +910,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) { if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter."); throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter.");
} }
} } else {
else {
// no parameter provided; just use the original min IOPS of the volume // no parameter provided; just use the original min IOPS of the volume
newMinIops = volume.getMinIops(); newMinIops = volume.getMinIops();
} }
@ -930,8 +921,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) { if (!volume.getVolumeType().equals(Volume.Type.ROOT) && (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops())) {
throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter."); throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter.");
} }
} } else {
else {
// no parameter provided; just use the original max IOPS of the volume // no parameter provided; just use the original max IOPS of the volume
newMaxIops = volume.getMaxIops(); newMaxIops = volume.getMaxIops();
} }
@ -985,8 +975,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops(); newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops();
validateIops(newMinIops, newMaxIops); validateIops(newMinIops, newMaxIops);
} } else {
else {
newMinIops = newDiskOffering.getMinIops(); newMinIops = newDiskOffering.getMinIops();
newMaxIops = newDiskOffering.getMaxIops(); newMaxIops = newDiskOffering.getMaxIops();
} }
@ -1035,8 +1024,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
* the actual disk size. * the actual disk size.
*/ */
if (currentSize > newSize && !shrinkOk) { if (currentSize > newSize && !shrinkOk) {
throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume." + throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume."
"Need to sign off by supplying the shrinkok parameter with value of true."); + "Need to sign off by supplying the shrinkok parameter with value of true.");
} }
if (newSize > currentSize) { if (newSize > currentSize) {
@ -1070,14 +1059,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
if (userVm != null) { if (userVm != null) {
if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState()!= VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware){ if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState() != VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware) {
s_logger.error(" For ROOT volume resize VM should be in Power Off state."); s_logger.error(" For ROOT volume resize VM should be in Power Off state.");
throw new InvalidParameterValueException("VM current state is : "+userVm.getPowerState()+ ". But VM should be in "+VirtualMachine.PowerState.PowerOff+" state."); throw new InvalidParameterValueException("VM current state is : " + userVm.getPowerState() + ". But VM should be in " + VirtualMachine.PowerState.PowerOff + " state.");
} }
// serialize VM operation // serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
if ( jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
// avoid re-entrance // avoid re-entrance
VmWorkJobVO placeHolder = null; VmWorkJobVO placeHolder = null;
@ -1085,14 +1074,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
placeHolder = createPlaceHolderWork(userVm.getId()); placeHolder = createPlaceHolderWork(userVm.getId());
try { try {
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve,
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
} finally { } finally {
_workJobDao.expunge(placeHolder.getId()); _workJobDao.expunge(placeHolder.getId());
} }
} else { } else {
Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, Outcome<Volume> outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve,
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk);
try { try {
outcome.get(); outcome.get();
@ -1107,17 +1096,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (jobResult != null) { if (jobResult != null) {
if (jobResult instanceof ConcurrentOperationException) { if (jobResult instanceof ConcurrentOperationException) {
throw (ConcurrentOperationException)jobResult; throw (ConcurrentOperationException)jobResult;
} } else if (jobResult instanceof ResourceAllocationException) {
else if (jobResult instanceof ResourceAllocationException) {
throw (ResourceAllocationException)jobResult; throw (ResourceAllocationException)jobResult;
} } else if (jobResult instanceof RuntimeException) {
else if (jobResult instanceof RuntimeException) {
throw (RuntimeException)jobResult; throw (RuntimeException)jobResult;
} } else if (jobResult instanceof Throwable) {
else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable)jobResult); throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
} } else if (jobResult instanceof Long) {
else if (jobResult instanceof Long) {
return _volsDao.findById((Long)jobResult); return _volsDao.findById((Long)jobResult);
} }
} }
@ -1126,8 +1111,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
} }
return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null,
newHypervisorSnapshotReserve, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); shrinkOk);
} }
private void validateIops(Long minIops, Long maxIops) { private void validateIops(Long minIops, Long maxIops) {
@ -1142,8 +1127,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
} }
private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, Long newDiskOfferingId,
Integer newHypervisorSnapshotReserve, Long newDiskOfferingId, boolean shrinkOk) { boolean shrinkOk) {
VolumeVO volume = _volsDao.findById(volumeId); VolumeVO volume = _volsDao.findById(volumeId);
UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); UserVmVO userVm = _userVmDao.findById(volume.getInstanceId());
StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());
@ -1180,8 +1165,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
} }
ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
shrinkOk, instanceName, hosts, isManaged);
try { try {
VolumeInfo vol = volFactory.getVolume(volume.getId()); VolumeInfo vol = volFactory.getVolume(volume.getId());
@ -1404,14 +1388,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (moveVolumeNeeded) { if (moveVolumeNeeded) {
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore(); PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
if (primaryStore.isLocal()) { if (primaryStore.isLocal()) {
throw new CloudRuntimeException("Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() throw new CloudRuntimeException(
+ " as migration of local data volume is not allowed"); "Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() + " as migration of local data volume is not allowed");
} }
StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId()); StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(exstingVolumeOfVm.getPoolId());
try { try {
newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(),
vmRootVolumePool.getClusterId(), volumeToAttachHyperType); volumeToAttachHyperType);
} catch (ConcurrentOperationException e) { } catch (ConcurrentOperationException e) {
s_logger.debug("move volume failed", e); s_logger.debug("move volume failed", e);
throw new CloudRuntimeException("move volume failed", e); throw new CloudRuntimeException("move volume failed", e);
@ -1490,7 +1474,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK); List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm); int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm);
if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) { if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) {
throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM."); throw new InvalidParameterValueException(
"The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM.");
} }
} }
@ -1513,14 +1498,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// permission check // permission check
_accountMgr.checkAccess(caller, null, true, volumeToAttach, vm); _accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach.getState()))) {
.getState()))) {
throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state"); throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
} }
Account owner = _accountDao.findById(volumeToAttach.getAccountId()); Account owner = _accountDao.findById(volumeToAttach.getAccountId());
if(!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)){ if (!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)) {
try { try {
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize()); _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
} catch (ResourceAllocationException e) { } catch (ResourceAllocationException e) {
@ -1602,7 +1586,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
VolumeVO volume = _volsDao.findById(volumeId); VolumeVO volume = _volsDao.findById(volumeId);
if(volume == null) { if (volume == null) {
throw new InvalidParameterValueException("The volume id doesn't exist"); throw new InvalidParameterValueException("The volume id doesn't exist");
} }
@ -1610,7 +1594,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
volume.setPath(path); volume.setPath(path);
} }
if(chainInfo != null){ if (chainInfo != null) {
volume.setChainInfo(chainInfo); volume.setChainInfo(chainInfo);
} }
@ -1642,9 +1626,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return volume; return volume;
} }
@Override @Override
public void updateDisplay(Volume volume, Boolean displayVolume){ public void updateDisplay(Volume volume, Boolean displayVolume) {
// 1. Resource limit changes // 1. Resource limit changes
updateResourceCount(volume, displayVolume); updateResourceCount(volume, displayVolume);
@ -1652,40 +1635,40 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
saveUsageEvent(volume, displayVolume); saveUsageEvent(volume, displayVolume);
// 3. Set the flag // 3. Set the flag
if (displayVolume != null && displayVolume != volume.isDisplayVolume()){ if (displayVolume != null && displayVolume != volume.isDisplayVolume()) {
// FIXME - Confused - typecast for now. // FIXME - Confused - typecast for now.
((VolumeVO)volume).setDisplayVolume(displayVolume); ((VolumeVO)volume).setDisplayVolume(displayVolume);
_volsDao.update(volume.getId(), (VolumeVO) volume); _volsDao.update(volume.getId(), (VolumeVO)volume);
} }
} }
private void updateResourceCount(Volume volume, Boolean displayVolume){ private void updateResourceCount(Volume volume, Boolean displayVolume) {
// Update only when the flag has changed. // Update only when the flag has changed.
if (displayVolume != null && displayVolume != volume.isDisplayVolume()){ if (displayVolume != null && displayVolume != volume.isDisplayVolume()) {
_resourceLimitMgr.changeResourceCount(volume.getAccountId(), ResourceType.volume, displayVolume); _resourceLimitMgr.changeResourceCount(volume.getAccountId(), ResourceType.volume, displayVolume);
_resourceLimitMgr.changeResourceCount(volume.getAccountId(), ResourceType.primary_storage, displayVolume, new Long(volume.getSize())); _resourceLimitMgr.changeResourceCount(volume.getAccountId(), ResourceType.primary_storage, displayVolume, new Long(volume.getSize()));
} }
} }
private void saveUsageEvent(Volume volume, Boolean displayVolume){ private void saveUsageEvent(Volume volume, Boolean displayVolume) {
// Update only when the flag has changed && only when volume in a non-destroyed state. // Update only when the flag has changed && only when volume in a non-destroyed state.
if ((displayVolume != null && displayVolume != volume.isDisplayVolume()) && !isVolumeDestroyed(volume)){ if ((displayVolume != null && displayVolume != volume.isDisplayVolume()) && !isVolumeDestroyed(volume)) {
if (displayVolume){ if (displayVolume) {
// flag turned 1 equivalent to freshly created volume // flag turned 1 equivalent to freshly created volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(),
volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid()); volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid());
}else { } else {
// flag turned 0 equivalent to deleting a volume // flag turned 0 equivalent to deleting a volume
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), Volume.class.getName(),
Volume.class.getName(), volume.getUuid()); volume.getUuid());
} }
} }
} }
private boolean isVolumeDestroyed(Volume volume){ private boolean isVolumeDestroyed(Volume volume) {
if(volume.getState() == Volume.State.Destroy || volume.getState() == Volume.State.Expunging && volume.getState() == Volume.State.Expunged) { if (volume.getState() == Volume.State.Destroy || volume.getState() == Volume.State.Expunging && volume.getState() == Volume.State.Expunged) {
return true; return true;
} }
return false; return false;
@ -1695,8 +1678,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "detaching volume", async = true) @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "detaching volume", async = true)
public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) {
Account caller = CallContext.current().getCallingAccount(); Account caller = CallContext.current().getCallingAccount();
if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd.getVirtualMachineId() == null) if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd.getVirtualMachineId() == null) || (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd.getVirtualMachineId() != null))
|| (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd.getVirtualMachineId() != null))
|| (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd.getVirtualMachineId() == null))) { || (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd.getVirtualMachineId() == null))) {
throw new InvalidParameterValueException("Please provide either a volume id, or a tuple(device id, instance id)"); throw new InvalidParameterValueException("Please provide either a volume id, or a tuple(device id, instance id)");
} }
@ -1795,7 +1777,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} else if (jobResult instanceof Throwable) { } else if (jobResult instanceof Throwable) {
throw new RuntimeException("Unexpected exception", (Throwable)jobResult); throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
} else if (jobResult instanceof Long) { } else if (jobResult instanceof Long) {
vol = _volsDao.findById((Long) jobResult); vol = _volsDao.findById((Long)jobResult);
} }
} }
return vol; return vol;
@ -1803,8 +1785,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
private void validateRootVolumeDetachAttach(VolumeVO volume, UserVmVO vm) { private void validateRootVolumeDetachAttach(VolumeVO volume, UserVmVO vm) {
if (!(vm.getHypervisorType() == HypervisorType.XenServer || vm.getHypervisorType() == HypervisorType.VMware || vm.getHypervisorType() == HypervisorType.KVM || vm.getHypervisorType() == HypervisorType.Simulator)) { if (!(vm.getHypervisorType() == HypervisorType.XenServer || vm.getHypervisorType() == HypervisorType.VMware || vm.getHypervisorType() == HypervisorType.KVM
throw new InvalidParameterValueException("Root volume detach is not supported for hypervisor type " + vm.getHypervisorType() ); || vm.getHypervisorType() == HypervisorType.Simulator)) {
throw new InvalidParameterValueException("Root volume detach is not supported for hypervisor type " + vm.getHypervisorType());
} }
if (!(vm.getState() == State.Stopped) || (vm.getState() == State.Destroyed)) { if (!(vm.getState() == State.Stopped) || (vm.getState() == State.Destroyed)) {
throw new InvalidParameterValueException("Root volume detach can happen only when vm is in states: " + State.Stopped.toString() + " or " + State.Destroyed.toString()); throw new InvalidParameterValueException("Root volume detach can happen only when vm is in states: " + State.Stopped.toString() + " or " + State.Destroyed.toString());
@ -1954,8 +1937,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
String msg = "Unable to get an answer to the modify targets command"; String msg = "Unable to get an answer to the modify targets command";
s_logger.warn(msg); s_logger.warn(msg);
} } else if (!answer.getResult()) {
else if (!answer.getResult()) {
String msg = "Unable to modify target on the following host: " + hostId; String msg = "Unable to modify target on the following host: " + hostId;
s_logger.warn(msg); s_logger.warn(msg);
@ -1993,7 +1975,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (vm != null && vm.getState() == State.Running) { if (vm != null && vm.getState() == State.Running) {
// Check if the VM is GPU enabled. // Check if the VM is GPU enabled.
if(_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported");
} }
// Check if the underlying hypervisor supports storage motion. // Check if the underlying hypervisor supports storage motion.
@ -2025,8 +2007,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (destPool == null) { if (destPool == null) {
throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId); throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId);
} else if (destPool.isInMaintenance()) { } else if (destPool.isInMaintenance()) {
throw new InvalidParameterValueException("Cannot migrate volume " + vol + "to the destination storage pool " + destPool.getName() + throw new InvalidParameterValueException("Cannot migrate volume " + vol + "to the destination storage pool " + destPool.getName() + " as the storage pool is in maintenance mode.");
" as the storage pool is in maintenance mode.");
} }
if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) { if (liveMigrateVolume && destPool.getClusterId() != null && srcClusterId != null) {
@ -2052,7 +2033,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
updateMissingRootDiskController(vm, vol.getChainInfo()); updateMissingRootDiskController(vm, vol.getChainInfo());
} }
} }
DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd);
validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool);
if (vm != null) { if (vm != null) {
// serialize VM operation // serialize VM operation
AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
@ -2062,13 +2044,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
VmWorkJobVO placeHolder = null; VmWorkJobVO placeHolder = null;
placeHolder = createPlaceHolderWork(vm.getId()); placeHolder = createPlaceHolderWork(vm.getId());
try { try {
return orchestrateMigrateVolume(vol.getId(), destPool.getId(), liveMigrateVolume); return orchestrateMigrateVolume(vol, destPool, liveMigrateVolume, newDiskOffering);
} finally { } finally {
_workJobDao.expunge(placeHolder.getId()); _workJobDao.expunge(placeHolder.getId());
} }
} else { } else {
Outcome<Volume> outcome = migrateVolumeThroughJobQueue(vm.getId(), vol.getId(), destPool.getId(), liveMigrateVolume); Outcome<Volume> outcome = migrateVolumeThroughJobQueue(vm, vol, destPool, liveMigrateVolume, newDiskOffering);
try { try {
outcome.get(); outcome.get();
@ -2097,26 +2079,102 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
} }
return orchestrateMigrateVolume(vol.getId(), destPool.getId(), liveMigrateVolume); return orchestrateMigrateVolume(vol, destPool, liveMigrateVolume, newDiskOffering);
} }
private Volume orchestrateMigrateVolume(long volumeId, long destPoolId, boolean liveMigrateVolume) { /**
VolumeVO vol = _volsDao.findById(volumeId); * Retrieves the new disk offering UUID that might be sent to replace the current one in the volume being migrated.
assert (vol != null); * If no disk offering UUID is provided we return null. Otherwise, we perform the following checks.
StoragePool destPool = (StoragePool)dataStoreMgr.getDataStore(destPoolId, DataStoreRole.Primary); * <ul>
assert (destPool != null); * <li>Is the disk offering UUID entered valid? If not, an {@link InvalidParameterValueException} is thrown;
 * <li>If the disk offering was already removed, an {@link InvalidParameterValueException} is thrown;
* <li>We then check if the user executing the operation has access to the given disk offering.
* </ul>
*
* If all checks pass, we move forward returning the disk offering object.
*/
private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) {
    String requestedOfferingUuid = cmd.getNewDiskOfferingUuid();
    if (org.apache.commons.lang.StringUtils.isBlank(requestedOfferingUuid)) {
        // No replacement offering was requested; the volume keeps its current disk offering.
        return null;
    }
    DiskOfferingVO offering = _diskOfferingDao.findByUuid(requestedOfferingUuid);
    if (offering == null) {
        throw new InvalidParameterValueException(String.format("The disk offering informed is not valid [id=%s].", requestedOfferingUuid));
    }
    if (offering.getRemoved() != null) {
        throw new InvalidParameterValueException(String.format("We cannot assign a removed disk offering [id=%s] to a volume. ", offering.getUuid()));
    }
    // The account executing the migration must be allowed to use the requested disk offering.
    _accountMgr.checkAccess(CallContext.current().getCallingAccount(), offering);
    return offering;
}
/**
* Performs the validations required for replacing the disk offering while migrating the volume of storage. If no new disk offering is provided, we do not execute any validation.
* If a disk offering is informed, we then proceed with the following checks.
* <ul>
* <li>We check if the given volume is of ROOT type. We cannot change the disk offering of a ROOT volume. Therefore, we thrown an {@link InvalidParameterValueException}.
* <li>We the disk is being migrated to shared storage and the new disk offering is for local storage (or vice versa), we throw an {@link InvalidParameterValueException}. Bear in mind that we are validating only the new disk offering. If none is provided we can override the current disk offering. This means, placing a volume with shared disk offering in local storage and vice versa.
* <li>We then proceed checking if the tags of the new disk offerings match the tags of the target storage. If they do not match an {@link InvalidParameterValueException} is thrown.
* </ul>
*
* If all of the above validations pass, we check if the size of the new disk offering is different from the volume. If it is, we log a warning message.
*/
protected void validateConditionsToReplaceDiskOfferingOfVolume(VolumeVO volume, DiskOfferingVO newDiskOffering, StoragePool destPool) {
    // No new disk offering informed: nothing to validate, the current offering is kept.
    if (newDiskOffering == null) {
        return;
    }
    // ROOT volumes must keep the disk offering they were created with.
    if (Volume.Type.ROOT.equals(volume.getVolumeType())) {
        throw new InvalidParameterValueException(String.format("Cannot change the disk offering of a ROOT volume [id=%s].", volume.getUuid()));
    }
    // The offering's storage scope (local/shared) must match the target pool's scope.
    // Parentheses added around both disjuncts for clarity; typo "assing" -> "assign"
    // fixed in the operator-facing message.
    if ((destPool.isShared() && newDiskOffering.getUseLocalStorage()) || (destPool.isLocal() && newDiskOffering.isShared())) {
        throw new InvalidParameterValueException("You cannot move the volume to a shared storage and assign a disk offering for local storage and vice versa.");
    }
    // The new offering's tags must match the tags of the target storage pool.
    String storageTags = getStoragePoolTags(destPool);
    if (!StringUtils.areTagsEqual(storageTags, newDiskOffering.getTags())) {
        throw new InvalidParameterValueException(String.format("Target Storage [id=%s] tags [%s] does not match new disk offering [id=%s] tags [%s].", destPool.getUuid(), storageTags,
                newDiskOffering.getUuid(), newDiskOffering.getTags()));
    }
    // A size mismatch between the volume and the new offering is allowed, but we warn
    // the operator so the discrepancy is visible in the logs.
    if (volume.getSize() != newDiskOffering.getDiskSize()) {
        DiskOfferingVO oldDiskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId());
        s_logger.warn(String.format(
                "You are migrating a volume [id=%s] and changing the disk offering[from id=%s to id=%s] to reflect this migration. However, the sizes of the volume and the new disk offering are different.",
                volume.getUuid(), oldDiskOffering.getUuid(), newDiskOffering.getUuid()));
    }
}
/**
* Retrieves the storage pool tags as a {@link String}. If the storage pool does not have tags we return a null value.
*/
protected String getStoragePoolTags(StoragePool destPool) {
    List<StoragePoolDetailVO> storagePoolDetails = storagePoolDetailsDao.listDetails(destPool.getId());
    // No details means the pool is untagged; callers treat null as "no tags".
    if (CollectionUtils.isEmpty(storagePoolDetails)) {
        return null;
    }
    // Join the detail names with a StringBuilder instead of repeated String
    // concatenation (O(n^2)) followed by a trailing-separator substring() hack.
    StringBuilder storageTags = new StringBuilder();
    for (StoragePoolDetailVO storagePoolDetailVO : storagePoolDetails) {
        if (storageTags.length() > 0) {
            storageTags.append(',');
        }
        storageTags.append(storagePoolDetailVO.getName());
    }
    return storageTags.toString();
}
private Volume orchestrateMigrateVolume(VolumeVO volume, StoragePool destPool, boolean liveMigrateVolume, DiskOfferingVO newDiskOffering) {
Volume newVol = null; Volume newVol = null;
try { try {
if (liveMigrateVolume) { if (liveMigrateVolume) {
newVol = liveMigrateVolume(vol, destPool); newVol = liveMigrateVolume(volume, destPool);
} else { } else {
newVol = _volumeMgr.migrateVolume(vol, destPool); newVol = _volumeMgr.migrateVolume(volume, destPool);
}
if (newDiskOffering != null) {
_volsDao.updateDiskOffering(newVol.getId(), newDiskOffering.getId());
} }
} catch (StorageUnavailableException e) { } catch (StorageUnavailableException e) {
s_logger.debug("Failed to migrate volume", e); s_logger.debug("Failed to migrate volume", e);
throw new CloudRuntimeException(e.getMessage()); throw new CloudRuntimeException(e.getMessage());
} catch (Exception e) { } catch (Exception e) {
s_logger.debug("Failed to migrate volume", e); s_logger.debug("Failed to migrate volume", e);
throw new CloudRuntimeException(e.getMessage()); throw new CloudRuntimeException(e.getMessage());
} }
@ -2126,7 +2184,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
@DB @DB
protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException { protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
VolumeInfo vol = volFactory.getVolume(volume.getId()); VolumeInfo vol = volFactory.getVolume(volume.getId());
AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, (DataStore)destPool);
DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, dataStoreTarget);
try { try {
VolumeApiResult result = future.get(); VolumeApiResult result = future.get();
if (result.isFailed()) { if (result.isFailed()) {
@ -2217,9 +2277,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
} }
private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup)
boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup) throws ResourceAllocationException {
throws ResourceAllocationException {
VolumeInfo volume = volFactory.getVolume(volumeId); VolumeInfo volume = volFactory.getVolume(volumeId);
@ -2266,7 +2325,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
} }
if (ImageFormat.DIR.equals(volume.getFormat())){ if (ImageFormat.DIR.equals(volume.getFormat())) {
throw new InvalidParameterValueException("Snapshot not supported for volume:" + volumeId); throw new InvalidParameterValueException("Snapshot not supported for volume:" + volumeId);
} }
@ -2327,9 +2386,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot."); throw new InvalidParameterValueException("VolumeId: " + volumeId + " is not in " + Volume.State.Ready + " state but " + volume.getState() + ". Cannot take snapshot.");
} }
if ( volume.getTemplateId() != null ) { if (volume.getTemplateId() != null) {
VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
if( template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM ) { if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) {
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported");
} }
} }
@ -2374,8 +2433,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
// instance is stopped // instance is stopped
if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) {
s_logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); s_logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state.");
PermissionDeniedException ex = new PermissionDeniedException( PermissionDeniedException ex = new PermissionDeniedException("Invalid state of the volume with specified ID. It should be either detached or the VM should be in stopped state.");
"Invalid state of the volume with specified ID. It should be either detached or the VM should be in stopped state.");
ex.addProxyObject(volume.getUuid(), "volumeId"); ex.addProxyObject(volume.getUuid(), "volumeId");
throw ex; throw ex;
} }
@ -2625,8 +2683,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (host != null) { if (host != null) {
try { try {
volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
} } catch (Exception e) {
catch (Exception e) {
volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
throw new CloudRuntimeException(e.getMessage()); throw new CloudRuntimeException(e.getMessage());
@ -2634,9 +2691,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} }
if (sendCommand) { if (sendCommand) {
if (host != null && host.getHypervisorType() == HypervisorType.KVM && if (host != null && host.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
volumeToAttachStoragePool.isManaged() &&
volumeToAttach.getPath() == null) {
volumeToAttach.setPath(volumeToAttach.get_iScsiName()); volumeToAttach.setPath(volumeToAttach.get_iScsiName());
_volsDao.update(volumeToAttach.getId(), volumeToAttach); _volsDao.update(volumeToAttach.getId(), volumeToAttach);
@ -2646,8 +2701,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
deviceId = getDeviceId(vm, deviceId); deviceId = getDeviceId(vm, deviceId);
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, volumeToAttach.getVolumeType(), deviceId, volumeToAttach.getPath(), DiskTO disk = storageMgr.getDiskWithThrottling(volTO, volumeToAttach.getVolumeType(), deviceId, volumeToAttach.getPath(), vm.getServiceOfferingId(),
vm.getServiceOfferingId(), volumeToAttach.getDiskOfferingId()); volumeToAttach.getDiskOfferingId());
AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName()); AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName());
@ -2676,12 +2731,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER)); controllerInfo.put(VmDetailConstants.ROOT_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER));
controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER)); controllerInfo.put(VmDetailConstants.DATA_DISK_CONTROLLER, vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER));
cmd.setControllerInfo(controllerInfo); cmd.setControllerInfo(controllerInfo);
s_logger.debug("Attach volume id:" + volumeToAttach.getId() + " on VM id:" + vm.getId() + " has controller info:" + controllerInfo); s_logger.debug("Attach volume id:" + volumeToAttach.getId() + " on VM id:" + vm.getId() + " has controller info:" + controllerInfo);
try { try {
answer = (AttachAnswer)_agentMgr.send(hostId, cmd); answer = (AttachAnswer)_agentMgr.send(hostId, cmd);
} catch (Exception e) { } catch (Exception e) {
if(host!=null) { if (host != null) {
volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore);
} }
throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage()); throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage());
@ -2739,7 +2794,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
} finally { } finally {
Volume.Event ev = Volume.Event.OperationFailed; Volume.Event ev = Volume.Event.OperationFailed;
VolumeInfo volInfo = volFactory.getVolume(volumeToAttach.getId()); VolumeInfo volInfo = volFactory.getVolume(volumeToAttach.getId());
if(attached) { if (attached) {
ev = Volume.Event.OperationSucceeded; ev = Volume.Event.OperationSucceeded;
s_logger.debug("Volume: " + volInfo.getName() + " successfully attached to VM: " + volInfo.getAttachedVmName()); s_logger.debug("Volume: " + volInfo.getName() + " successfully attached to VM: " + volInfo.getAttachedVmName());
} else { } else {
@ -2776,7 +2831,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
int maxDeviceId = maxDevices - 1; int maxDeviceId = maxDevices - 1;
List<VolumeVO> vols = _volsDao.findByInstance(vm.getId()); List<VolumeVO> vols = _volsDao.findByInstance(vm.getId());
if (deviceId != null) { if (deviceId != null) {
if (deviceId.longValue() < 0 || deviceId.longValue() > maxDeviceId || deviceId.longValue() == 3) { if (deviceId.longValue() < 0 || deviceId.longValue() > maxDeviceId || deviceId.longValue() == 3) {
throw new RuntimeException("deviceId should be 0,1,2,4-" + maxDeviceId); throw new RuntimeException("deviceId should be 0,1,2,4-" + maxDeviceId);
} }
for (VolumeVO vol : vols) { for (VolumeVO vol : vols) {
@ -2907,8 +2962,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkAttachVolume workInfo = new VmWorkAttachVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VmWorkAttachVolume workInfo = new VmWorkAttachVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, deviceId);
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, deviceId);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
@ -2942,8 +2996,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkDetachVolume workInfo = new VmWorkDetachVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VmWorkDetachVolume workInfo = new VmWorkDetachVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId);
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
@ -2953,9 +3006,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return new VmJobVolumeOutcome(workJob, volumeId); return new VmJobVolumeOutcome(workJob, volumeId);
} }
public Outcome<Volume> resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, final long currentSize, final long newSize, public Outcome<Volume> resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, final long currentSize, final long newSize, final Long newMinIops, final Long newMaxIops,
final Long newMinIops, final Long newMaxIops, final Integer newHypervisorSnapshotReserve, final Integer newHypervisorSnapshotReserve, final Long newServiceOfferingId, final boolean shrinkOk) {
final Long newServiceOfferingId, final boolean shrinkOk) {
final CallContext context = CallContext.current(); final CallContext context = CallContext.current();
final User callingUser = context.getCallingUser(); final User callingUser = context.getCallingUser();
final Account callingAccount = context.getCallingAccount(); final Account callingAccount = context.getCallingAccount();
@ -2975,19 +3027,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkResizeVolume workInfo = new VmWorkResizeVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VmWorkResizeVolume workInfo = new VmWorkResizeVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize,
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, newServiceOfferingId, shrinkOk); newMinIops, newMaxIops, newHypervisorSnapshotReserve, newServiceOfferingId, shrinkOk);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId());
return new VmJobVolumeOutcome(workJob,volumeId); return new VmJobVolumeOutcome(workJob, volumeId);
} }
public Outcome<String> extractVolumeThroughJobQueue(final Long vmId, final long volumeId, public Outcome<String> extractVolumeThroughJobQueue(final Long vmId, final long volumeId, final long zoneId) {
final long zoneId) {
final CallContext context = CallContext.current(); final CallContext context = CallContext.current();
final User callingUser = context.getCallingUser(); final User callingUser = context.getCallingUser();
@ -3008,8 +3059,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkExtractVolume workInfo = new VmWorkExtractVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VmWorkExtractVolume workInfo = new VmWorkExtractVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, zoneId);
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, zoneId);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
@ -3019,14 +3069,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return new VmJobVolumeUrlOutcome(workJob); return new VmJobVolumeUrlOutcome(workJob);
} }
public Outcome<Volume> migrateVolumeThroughJobQueue(final Long vmId, final long volumeId, private Outcome<Volume> migrateVolumeThroughJobQueue(VMInstanceVO vm, VolumeVO vol, StoragePool destPool, boolean liveMigrateVolume, DiskOfferingVO newDiskOffering) {
final long destPoolId, final boolean liveMigrate) { CallContext context = CallContext.current();
User callingUser = context.getCallingUser();
final CallContext context = CallContext.current(); Account callingAccount = context.getCallingAccount();
final User callingUser = context.getCallingUser();
final Account callingAccount = context.getCallingAccount();
final VMInstanceVO vm = _vmInstanceDao.findById(vmId);
VmWorkJobVO workJob = new VmWorkJobVO(context.getContextId()); VmWorkJobVO workJob = new VmWorkJobVO(context.getContextId());
@ -3040,20 +3086,22 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setVmInstanceId(vm.getId()); workJob.setVmInstanceId(vm.getId());
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
Long newDiskOfferingId = newDiskOffering != null ? newDiskOffering.getId() : null;
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkMigrateVolume workInfo = new VmWorkMigrateVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VmWorkMigrateVolume workInfo = new VmWorkMigrateVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, vol.getId(), destPool.getId(),
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, destPoolId, liveMigrate); liveMigrateVolume, newDiskOfferingId);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId());
return new VmJobVolumeOutcome(workJob,volumeId); return new VmJobVolumeOutcome(workJob, vol.getId());
} }
public Outcome<Snapshot> takeVolumeSnapshotThroughJobQueue(final Long vmId, final Long volumeId, public Outcome<Snapshot> takeVolumeSnapshotThroughJobQueue(final Long vmId, final Long volumeId, final Long policyId, final Long snapshotId, final Long accountId, final boolean quiesceVm,
final Long policyId, final Long snapshotId, final Long accountId, final boolean quiesceVm, final Snapshot.LocationType locationType, final boolean asyncBackup) { final Snapshot.LocationType locationType, final boolean asyncBackup) {
final CallContext context = CallContext.current(); final CallContext context = CallContext.current();
final User callingUser = context.getCallingUser(); final User callingUser = context.getCallingUser();
@ -3074,16 +3122,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); workJob.setRelated(AsyncJobExecutionContext.getOriginJobId());
// save work context info (there are some duplications) // save work context info (there are some duplications)
VmWorkTakeVolumeSnapshot workInfo = new VmWorkTakeVolumeSnapshot( VmWorkTakeVolumeSnapshot workInfo = new VmWorkTakeVolumeSnapshot(callingUser.getId(), accountId != null ? accountId : callingAccount.getId(), vm.getId(),
callingUser.getId(), accountId != null ? accountId : callingAccount.getId(), vm.getId(), VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, policyId, snapshotId, quiesceVm, locationType, asyncBackup);
VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, policyId, snapshotId, quiesceVm, locationType, asyncBackup);
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));
_jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId());
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId());
return new VmJobSnapshotOutcome(workJob,snapshotId); return new VmJobSnapshotOutcome(workJob, snapshotId);
} }
@ReflectionUse @ReflectionUse
@ -3096,39 +3143,38 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
private Pair<JobInfo.Status, String> orchestrateAttachVolumeToVM(VmWorkAttachVolume work) throws Exception { private Pair<JobInfo.Status, String> orchestrateAttachVolumeToVM(VmWorkAttachVolume work) throws Exception {
Volume vol = orchestrateAttachVolumeToVM(work.getVmId(), work.getVolumeId(), work.getDeviceId()); Volume vol = orchestrateAttachVolumeToVM(work.getVmId(), work.getVolumeId(), work.getDeviceId());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(new Long(vol.getId())));
_jobMgr.marshallResultObject(new Long(vol.getId())));
} }
@ReflectionUse @ReflectionUse
private Pair<JobInfo.Status, String> orchestrateDetachVolumeFromVM(VmWorkDetachVolume work) throws Exception { private Pair<JobInfo.Status, String> orchestrateDetachVolumeFromVM(VmWorkDetachVolume work) throws Exception {
Volume vol = orchestrateDetachVolumeFromVM(work.getVmId(), work.getVolumeId()); Volume vol = orchestrateDetachVolumeFromVM(work.getVmId(), work.getVolumeId());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(new Long(vol.getId())));
_jobMgr.marshallResultObject(new Long(vol.getId())));
} }
@ReflectionUse @ReflectionUse
private Pair<JobInfo.Status, String> orchestrateResizeVolume(VmWorkResizeVolume work) throws Exception { private Pair<JobInfo.Status, String> orchestrateResizeVolume(VmWorkResizeVolume work) throws Exception {
Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), work.getNewMinIops(), work.getNewMaxIops(), Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), work.getNewMinIops(), work.getNewMaxIops(), work.getNewHypervisorSnapshotReserve(),
work.getNewHypervisorSnapshotReserve(), work.getNewServiceOfferingId(), work.isShrinkOk()); work.getNewServiceOfferingId(), work.isShrinkOk());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(new Long(vol.getId())));
_jobMgr.marshallResultObject(new Long(vol.getId())));
} }
@ReflectionUse @ReflectionUse
private Pair<JobInfo.Status, String> orchestrateMigrateVolume(VmWorkMigrateVolume work) throws Exception { private Pair<JobInfo.Status, String> orchestrateMigrateVolume(VmWorkMigrateVolume work) throws Exception {
Volume newVol = orchestrateMigrateVolume(work.getVolumeId(), work.getDestPoolId(), work.isLiveMigrate()); VolumeVO volume = _volsDao.findById(work.getVolumeId());
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, StoragePoolVO targetStoragePool = _storagePoolDao.findById(work.getDestPoolId());
_jobMgr.marshallResultObject(new Long(newVol.getId()))); DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(work.getNewDiskOfferingId());
Volume newVol = orchestrateMigrateVolume(volume, targetStoragePool, work.isLiveMigrate(), newDiskOffering);
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(newVol.getId()));
} }
@ReflectionUse @ReflectionUse
private Pair<JobInfo.Status, String> orchestrateTakeVolumeSnapshot(VmWorkTakeVolumeSnapshot work) throws Exception { private Pair<JobInfo.Status, String> orchestrateTakeVolumeSnapshot(VmWorkTakeVolumeSnapshot work) throws Exception {
Account account = _accountDao.findById(work.getAccountId()); Account account = _accountDao.findById(work.getAccountId());
orchestrateTakeVolumeSnapshot(work.getVolumeId(), work.getPolicyId(), work.getSnapshotId(), orchestrateTakeVolumeSnapshot(work.getVolumeId(), work.getPolicyId(), work.getSnapshotId(), account, work.isQuiesceVm(), work.getLocationType(), work.isAsyncBackup());
account, work.isQuiesceVm(), work.getLocationType(), work.isAsyncBackup()); return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(work.getSnapshotId()));
return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED,
_jobMgr.marshallResultObject(work.getSnapshotId()));
} }
@Override @Override

View File

@ -45,6 +45,8 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao; import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -53,6 +55,7 @@ import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.rules.ExpectedException; import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith; import org.junit.runner.RunWith;
import org.mockito.InOrder;
import org.mockito.InjectMocks; import org.mockito.InjectMocks;
import org.mockito.Mock; import org.mockito.Mock;
import org.mockito.Mockito; import org.mockito.Mockito;
@ -68,6 +71,7 @@ import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Grouping; import com.cloud.org.Grouping;
import com.cloud.serializer.GsonHelper; import com.cloud.serializer.GsonHelper;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account; import com.cloud.user.Account;
import com.cloud.user.AccountManager; import com.cloud.user.AccountManager;
@ -128,13 +132,24 @@ public class VolumeApiServiceImplTest {
private AccountDao _accountDao; private AccountDao _accountDao;
@Mock @Mock
private HostDao _hostDao; private HostDao _hostDao;
@Mock
private StoragePoolDetailsDao storagePoolDetailsDao;
private DetachVolumeCmd detachCmd = new DetachVolumeCmd(); private DetachVolumeCmd detachCmd = new DetachVolumeCmd();
private Class<?> _detachCmdClass = detachCmd.getClass(); private Class<?> _detachCmdClass = detachCmd.getClass();
@Mock
private StoragePool storagePoolMock;
private long storagePoolMockId = 1;
@Mock
private VolumeVO volumeVOMock;
@Mock
private DiskOfferingVO newDiskOfferingMock;
@Before @Before
public void setup() throws Exception { public void before() throws Exception {
Mockito.when(storagePoolMock.getId()).thenReturn(storagePoolMockId);
volumeApiServiceImpl._gson = GsonHelper.getGsonLogger(); volumeApiServiceImpl._gson = GsonHelper.getGsonLogger();
// mock caller context // mock caller context
@ -151,38 +166,31 @@ public class VolumeApiServiceImplTest {
TransactionLegacy txn = TransactionLegacy.open("runVolumeDaoImplTest"); TransactionLegacy txn = TransactionLegacy.open("runVolumeDaoImplTest");
try { try {
// volume of running vm id=1 // volume of running vm id=1
VolumeVO volumeOfRunningVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO volumeOfRunningVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 1L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
when(_volumeDao.findById(1L)).thenReturn(volumeOfRunningVm); when(_volumeDao.findById(1L)).thenReturn(volumeOfRunningVm);
UserVmVO runningVm = new UserVmVO(1L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, UserVmVO runningVm = new UserVmVO(1L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, false, 1L, 1L, 1, 1L, null, "vm", null);
false, 1L, 1L, 1, 1L, null, "vm", null);
runningVm.setState(State.Running); runningVm.setState(State.Running);
runningVm.setDataCenterId(1L); runningVm.setDataCenterId(1L);
when(_userVmDao.findById(1L)).thenReturn(runningVm); when(_userVmDao.findById(1L)).thenReturn(runningVm);
// volume of stopped vm id=2 // volume of stopped vm id=2
VolumeVO volumeOfStoppedVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO volumeOfStoppedVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
volumeOfStoppedVm.setPoolId(1L); volumeOfStoppedVm.setPoolId(1L);
when(_volumeDao.findById(2L)).thenReturn(volumeOfStoppedVm); when(_volumeDao.findById(2L)).thenReturn(volumeOfStoppedVm);
UserVmVO stoppedVm = new UserVmVO(2L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, UserVmVO stoppedVm = new UserVmVO(2L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, false, 1L, 1L, 1, 1L, null, "vm", null);
false, 1L, 1L, 1, 1L, null, "vm", null);
stoppedVm.setState(State.Stopped); stoppedVm.setState(State.Stopped);
stoppedVm.setDataCenterId(1L); stoppedVm.setDataCenterId(1L);
when(_userVmDao.findById(2L)).thenReturn(stoppedVm); when(_userVmDao.findById(2L)).thenReturn(stoppedVm);
// volume of hyperV vm id=3 // volume of hyperV vm id=3
UserVmVO hyperVVm = new UserVmVO(3L, "vm", "vm", 1, HypervisorType.Hyperv, 1L, false, UserVmVO hyperVVm = new UserVmVO(3L, "vm", "vm", 1, HypervisorType.Hyperv, 1L, false, false, 1L, 1L, 1, 1L, null, "vm", null);
false, 1L, 1L, 1, 1L, null, "vm", null);
hyperVVm.setState(State.Stopped); hyperVVm.setState(State.Stopped);
hyperVVm.setDataCenterId(1L); hyperVVm.setDataCenterId(1L);
when(_userVmDao.findById(3L)).thenReturn(hyperVVm); when(_userVmDao.findById(3L)).thenReturn(hyperVVm);
VolumeVO volumeOfStoppeHyperVVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 3L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO volumeOfStoppeHyperVVm = new VolumeVO("root", 1L, 1L, 1L, 1L, 3L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
volumeOfStoppeHyperVVm.setPoolId(1L); volumeOfStoppeHyperVVm.setPoolId(1L);
when(_volumeDao.findById(3L)).thenReturn(volumeOfStoppeHyperVVm); when(_volumeDao.findById(3L)).thenReturn(volumeOfStoppeHyperVVm);
@ -194,8 +202,7 @@ public class VolumeApiServiceImplTest {
StoragePoolVO managedPool = new StoragePoolVO(); StoragePoolVO managedPool = new StoragePoolVO();
managedPool.setManaged(true); managedPool.setManaged(true);
when(_storagePoolDao.findById(2L)).thenReturn(managedPool); when(_storagePoolDao.findById(2L)).thenReturn(managedPool);
VolumeVO managedPoolVolume = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO managedPoolVolume = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
managedPoolVolume.setPoolId(2L); managedPoolVolume.setPoolId(2L);
when(_volumeDao.findById(4L)).thenReturn(managedPoolVolume); when(_volumeDao.findById(4L)).thenReturn(managedPoolVolume);
@ -216,8 +223,7 @@ public class VolumeApiServiceImplTest {
when(correctRootVolume.getPoolId()).thenReturn(1L); when(correctRootVolume.getPoolId()).thenReturn(1L);
when(_volFactory.getVolume(6L)).thenReturn(correctRootVolume); when(_volFactory.getVolume(6L)).thenReturn(correctRootVolume);
VolumeVO correctRootVolumeVO = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO correctRootVolumeVO = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
when(_volumeDao.findById(6L)).thenReturn(correctRootVolumeVO); when(_volumeDao.findById(6L)).thenReturn(correctRootVolumeVO);
// managed root volume // managed root volume
@ -229,15 +235,13 @@ public class VolumeApiServiceImplTest {
when(managedVolume.getPoolId()).thenReturn(2L); when(managedVolume.getPoolId()).thenReturn(2L);
when(_volFactory.getVolume(7L)).thenReturn(managedVolume); when(_volFactory.getVolume(7L)).thenReturn(managedVolume);
VolumeVO managedVolume1 = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO managedVolume1 = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
managedVolume1.setPoolId(2L); managedVolume1.setPoolId(2L);
managedVolume1.setDataCenterId(1L); managedVolume1.setDataCenterId(1L);
when(_volumeDao.findById(7L)).thenReturn(managedVolume1); when(_volumeDao.findById(7L)).thenReturn(managedVolume1);
// vm having root volume // vm having root volume
UserVmVO vmHavingRootVolume = new UserVmVO(4L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, UserVmVO vmHavingRootVolume = new UserVmVO(4L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, false, 1L, 1L, 1, 1L, null, "vm", null);
false, 1L, 1L, 1, 1L, null, "vm", null);
vmHavingRootVolume.setState(State.Stopped); vmHavingRootVolume.setState(State.Stopped);
vmHavingRootVolume.setDataCenterId(1L); vmHavingRootVolume.setDataCenterId(1L);
when(_userVmDao.findById(4L)).thenReturn(vmHavingRootVolume); when(_userVmDao.findById(4L)).thenReturn(vmHavingRootVolume);
@ -255,8 +259,7 @@ public class VolumeApiServiceImplTest {
when(uploadedVolume.getState()).thenReturn(Volume.State.Uploaded); when(uploadedVolume.getState()).thenReturn(Volume.State.Uploaded);
when(_volFactory.getVolume(8L)).thenReturn(uploadedVolume); when(_volFactory.getVolume(8L)).thenReturn(uploadedVolume);
VolumeVO upVolume = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, VolumeVO upVolume = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
null, "root", Volume.Type.ROOT);
upVolume.setPoolId(1L); upVolume.setPoolId(1L);
upVolume.setDataCenterId(1L); upVolume.setDataCenterId(1L);
upVolume.setState(Volume.State.Uploaded); upVolume.setState(Volume.State.Uploaded);
@ -283,7 +286,6 @@ public class VolumeApiServiceImplTest {
/** /**
* TESTS FOR DETACH ROOT VOLUME, COUNT=4 * TESTS FOR DETACH ROOT VOLUME, COUNT=4
* @throws Exception
*/ */
@Test(expected = InvalidParameterValueException.class) @Test(expected = InvalidParameterValueException.class)
@ -384,7 +386,7 @@ public class VolumeApiServiceImplTest {
when(volumeInfoMock.getState()).thenReturn(Volume.State.Ready); when(volumeInfoMock.getState()).thenReturn(Volume.State.Ready);
when(volumeInfoMock.getInstanceId()).thenReturn(null); when(volumeInfoMock.getInstanceId()).thenReturn(null);
when(volumeInfoMock.getPoolId()).thenReturn(1L); when(volumeInfoMock.getPoolId()).thenReturn(1L);
when (volService.takeSnapshot(Mockito.any(VolumeInfo.class))).thenReturn(snapshotInfoMock); when(volService.takeSnapshot(Mockito.any(VolumeInfo.class))).thenReturn(snapshotInfoMock);
volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false); volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false);
} }
@ -427,10 +429,10 @@ public class VolumeApiServiceImplTest {
verify(userVmManager, times(1)).persistDeviceBusInfo(any(UserVmVO.class), eq("scsi")); verify(userVmManager, times(1)).persistDeviceBusInfo(any(UserVmVO.class), eq("scsi"));
} }
@Test
/** /**
* Setting locationType for a non-managed storage should give an error * Setting locationType for a non-managed storage should give an error
*/ */
@Test
public void testAllocSnapshotNonManagedStorageArchive() { public void testAllocSnapshotNonManagedStorageArchive() {
try { try {
volumeApiServiceImpl.allocSnapshot(6L, 1L, "test", Snapshot.LocationType.SECONDARY); volumeApiServiceImpl.allocSnapshot(6L, 1L, "test", Snapshot.LocationType.SECONDARY);
@ -446,9 +448,6 @@ public class VolumeApiServiceImplTest {
/** /**
* The resource limit check for primary storage should not be skipped for Volume in 'Uploaded' state. * The resource limit check for primary storage should not be skipped for Volume in 'Uploaded' state.
* @throws NoSuchFieldException
* @throws IllegalAccessException
* @throws ResourceAllocationException
*/ */
@Test @Test
public void testResourceLimitCheckForUploadedVolume() throws NoSuchFieldException, IllegalAccessException, ResourceAllocationException { public void testResourceLimitCheckForUploadedVolume() throws NoSuchFieldException, IllegalAccessException, ResourceAllocationException {
@ -477,9 +476,114 @@ public class VolumeApiServiceImplTest {
} }
} }
@After @After
public void tearDown() { public void tearDown() {
CallContext.unregister(); CallContext.unregister();
} }
@Test
public void getStoragePoolTagsTestStorageWithoutTags() {
    // A storage pool with no detail entries must yield a null tag string.
    Mockito.doReturn(new ArrayList<>()).when(storagePoolDetailsDao).listDetails(storagePoolMockId);

    String tags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);

    Assert.assertNull(tags);
}
@Test
public void getStoragePoolTagsTestStorageWithTags() {
    // A storage pool with detail entries must yield its tag names joined by
    // commas, in the order returned by the DAO.
    // Note: long literals use an uppercase 'L' suffix; the original lowercase
    // 'l' is easily misread as the digit '1'.
    ArrayList<StoragePoolDetailVO> tags = new ArrayList<>();
    tags.add(new StoragePoolDetailVO(1L, "tag1", "value", true));
    tags.add(new StoragePoolDetailVO(1L, "tag2", "value", true));
    tags.add(new StoragePoolDetailVO(1L, "tag3", "value", true));
    Mockito.when(storagePoolDetailsDao.listDetails(storagePoolMockId)).thenReturn(tags);

    String returnedStoragePoolTags = volumeApiServiceImpl.getStoragePoolTags(storagePoolMock);

    Assert.assertEquals("tag1,tag2,tag3", returnedStoragePoolTags);
}
@Test
public void validateConditionsToReplaceDiskOfferingOfVolumeTestNoNewDiskOffering() {
    // With no new disk offering supplied, validation is a no-op: the volume
    // must not even be inspected.
    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, null, storagePoolMock);

    Mockito.verify(volumeVOMock, Mockito.never()).getVolumeType();
}
@Test(expected = InvalidParameterValueException.class)
public void validateConditionsToReplaceDiskOfferingOfVolumeTestRootVolume() {
    // Replacing the disk offering of a ROOT volume is rejected outright.
    Mockito.doReturn(Type.ROOT).when(volumeVOMock).getVolumeType();

    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, newDiskOfferingMock, storagePoolMock);
}
@Test(expected = InvalidParameterValueException.class)
public void validateConditionsToReplaceDiskOfferingOfVolumeTestTargetPoolSharedDiskOfferingLocal() {
    // A local-storage disk offering must not be applied when the target pool is shared.
    Mockito.doReturn(Type.DATADISK).when(volumeVOMock).getVolumeType();
    Mockito.doReturn(true).when(storagePoolMock).isShared();
    Mockito.doReturn(true).when(newDiskOfferingMock).getUseLocalStorage();

    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, newDiskOfferingMock, storagePoolMock);
}
@Test(expected = InvalidParameterValueException.class)
public void validateConditionsToReplaceDiskOfferingOfVolumeTestTargetPoolLocalDiskOfferingShared() {
    // A shared disk offering must not be applied when the target pool is local.
    Mockito.doReturn(Type.DATADISK).when(volumeVOMock).getVolumeType();
    Mockito.doReturn(true).when(storagePoolMock).isLocal();
    Mockito.doReturn(true).when(newDiskOfferingMock).isShared();

    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, newDiskOfferingMock, storagePoolMock);
}
@Test(expected = InvalidParameterValueException.class)
public void validateConditionsToReplaceDiskOfferingOfVolumeTestTagsDoNotMatch() {
    // Storage mode checks all pass, but the offering requires "tag1" while the
    // target pool exposes no tags at all -> tag mismatch is rejected.
    Mockito.doReturn(Type.DATADISK).when(volumeVOMock).getVolumeType();
    Mockito.doReturn(true).when(storagePoolMock).isShared();
    Mockito.doReturn(false).when(storagePoolMock).isLocal();
    Mockito.doReturn(false).when(newDiskOfferingMock).getUseLocalStorage();
    Mockito.doReturn(true).when(newDiskOfferingMock).isShared();
    Mockito.doReturn("tag1").when(newDiskOfferingMock).getTags();
    Mockito.doReturn(null).when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);

    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, newDiskOfferingMock, storagePoolMock);
}
/**
 * Happy path: DATADISK volume moving onto a shared, non-local pool with a
 * shared disk offering whose tags match the pool's tags. No exception is
 * expected; the InOrder block pins the exact sequence of checks performed by
 * validateConditionsToReplaceDiskOfferingOfVolume.
 */
@Test
public void validateConditionsToReplaceDiskOfferingOfVolumeTestEverythingWorking() {
    // Arrange: every validation condition is satisfied.
    Mockito.when(volumeVOMock.getVolumeType()).thenReturn(Type.DATADISK);
    Mockito.when(newDiskOfferingMock.getUseLocalStorage()).thenReturn(false);
    Mockito.when(storagePoolMock.isShared()).thenReturn(true);
    Mockito.when(newDiskOfferingMock.isShared()).thenReturn(true);
    Mockito.when(storagePoolMock.isLocal()).thenReturn(false);
    Mockito.when(newDiskOfferingMock.getTags()).thenReturn("tag1");
    // doReturn/when form because volumeApiServiceImpl is a spy: avoids invoking the real method.
    Mockito.doReturn("tag1").when(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
    volumeApiServiceImpl.validateConditionsToReplaceDiskOfferingOfVolume(volumeVOMock, newDiskOfferingMock, storagePoolMock);
    // Assert: the checks run in this exact order.
    InOrder inOrder = Mockito.inOrder(volumeVOMock, newDiskOfferingMock, storagePoolMock, volumeApiServiceImpl);
    inOrder.verify(volumeVOMock).getVolumeType();
    inOrder.verify(storagePoolMock).isShared();
    inOrder.verify(newDiskOfferingMock).getUseLocalStorage();
    inOrder.verify(storagePoolMock).isLocal();
    // isShared() on the offering is apparently only consulted for local pools,
    // so on this shared-pool path it must never be called — TODO confirm against the impl.
    inOrder.verify(newDiskOfferingMock, times(0)).isShared();
    inOrder.verify(volumeApiServiceImpl).getStoragePoolTags(storagePoolMock);
    inOrder.verify(newDiskOfferingMock).getTags();
    // Size comparison runs last; per the feature spec a size mismatch only logs
    // a warning rather than failing validation.
    inOrder.verify(volumeVOMock).getSize();
    inOrder.verify(newDiskOfferingMock).getDiskSize();
}
} }

View File

@ -37,7 +37,8 @@ from marvin.lib.common import (get_domain,
get_zone, get_zone,
get_template, get_template,
find_storage_pool_type, find_storage_pool_type,
get_pod) get_pod,
list_disk_offering)
from marvin.lib.utils import checkVolumeSize from marvin.lib.utils import checkVolumeSize
from marvin.codes import SUCCESS, FAILED, XEN_SERVER from marvin.codes import SUCCESS, FAILED, XEN_SERVER
from nose.plugins.attrib import attr from nose.plugins.attrib import attr
@ -875,3 +876,75 @@ class TestVolumes(cloudstackTestCase):
if not res: if not res:
self.fail("Failed to return root volume response") self.fail("Failed to return root volume response")
return response return response
@attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_11_migrate_volume_and_change_offering(self):
    """Migrate a volume to another primary storage and replace its disk offering.

    Validates the following:
      1. Creates a new volume with the built-in "Small" disk offering.
      2. Migrates the volume to another primary storage, passing
         newdiskofferingid pointing at the "Large" offering.
      3. Verifies the volume reports the new offering after the migration.
    """
    small_offering = list_disk_offering(self.apiclient, name="Small")[0]
    large_offering = list_disk_offering(self.apiclient, name="Large")[0]
    volume = Volume.create(
        self.apiclient,  # fixed: was self.apiClient, an AttributeError at runtime
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=small_offering.id
    )
    self.debug("Created a small volume: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume=volume)
    # KVM cannot migrate a volume of a running VM; stop it first and restart after.
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.stop(self.apiclient)
    pools = StoragePool.listForMigration(self.apiclient, id=volume.id)
    if not pools:
        # skipTest raises unittest.SkipTest itself; no 'raise' needed.
        self.skipTest("Not enough storage pools found, skipping test")
    pool = pools[0]
    # Clear the pool's tags so the offering-vs-pool tag check cannot fail spuriously.
    if hasattr(pool, 'tags'):
        StoragePool.update(self.apiclient, id=pool.id, tags="")
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
    Volume.migrate(
        self.apiclient,
        volumeid=volume.id,
        storageid=pool.id,
        newdiskofferingid=large_offering.id
    )
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.start(self.apiclient)
    migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
    self.assertEqual(
        migrated_vol.diskofferingname,
        large_offering.name,
        "Offering name did not match with the new one "
    )
    return

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)", "label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering", "label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type", "label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)", "label.disk.read.bytes": "Disk Read (Bytes)",
@ -1091,6 +1093,8 @@ var dictionary = {
"label.migrate.to.host": "التحول إلى المضيف", "label.migrate.to.host": "التحول إلى المضيف",
"label.migrate.to.storage": "التحول إلى التخزين", "label.migrate.to.storage": "التحول إلى التخزين",
"label.migrate.volume": "Migrate Volume", "label.migrate.volume": "Migrate Volume",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage", "label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)", "label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering", "label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type", "label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)", "label.disk.read.bytes": "Disk Read (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrate to storage", "label.migrate.to.storage": "Migrate to storage",
"label.migrate.volume": "Migrate Volume", "label.migrate.volume": "Migrate Volume",
"label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage", "label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Festplatten-Schreibrate (IOPS)", "label.disk.iops.write.rate": "Festplatten-Schreibrate (IOPS)",
"label.disk.offering": "Festplattenangebot", "label.disk.offering": "Festplattenangebot",
"label.disk.offering.details": "Festplattenangebotdetails", "label.disk.offering.details": "Festplattenangebotdetails",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisionierungstyp", "label.disk.provisioningtype": "Provisionierungstyp",
"label.disk.read.bytes": "Festplatte Lesen (Bytes)", "label.disk.read.bytes": "Festplatte Lesen (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Zu Speicher migrieren", "label.migrate.to.storage": "Zu Speicher migrieren",
"label.migrate.volume": "Volumen migrieren", "label.migrate.volume": "Volumen migrieren",
"label.migrate.volume.to.primary.storage": "Migriere ein Speichervolumen zu einem anderen Hauptspeicher", "label.migrate.volume.to.primary.storage": "Migriere ein Speichervolumen zu einem anderen Hauptspeicher",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instanzen", "label.min.instances": "Min Instanzen",
"label.min.past.the.hr": "min seit Std. vergangen", "label.min.past.the.hr": "min seit Std. vergangen",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -680,6 +680,8 @@ var dictionary = {
"label.disk.iops.write.rate":"Disk Write Rate (IOPS)", "label.disk.iops.write.rate":"Disk Write Rate (IOPS)",
"label.disk.offering":"Disk Offering", "label.disk.offering":"Disk Offering",
"label.disk.offering.details":"Disk offering details", "label.disk.offering.details":"Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype":"Provisioning Type", "label.disk.provisioningtype":"Provisioning Type",
"label.disk.read.bytes":"Disk Read (Bytes)", "label.disk.read.bytes":"Disk Read (Bytes)",
@ -1122,6 +1124,8 @@ var dictionary = {
"label.migrate.to.storage":"Migrate to storage", "label.migrate.to.storage":"Migrate to storage",
"label.migrate.volume":"Migrate Volume", "label.migrate.volume":"Migrate Volume",
"label.migrate.volume.to.primary.storage":"Migrate volume to another primary storage", "label.migrate.volume.to.primary.storage":"Migrate volume to another primary storage",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances":"Min Instances", "label.min.instances":"Min Instances",
"label.min.past.the.hr":"min past the hr", "label.min.past.the.hr":"min past the hr",
"label.minimum":"Minimum", "label.minimum":"Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Tasa Escritura de Disco (IOPS)", "label.disk.iops.write.rate": "Tasa Escritura de Disco (IOPS)",
"label.disk.offering": "Oferta de Disco", "label.disk.offering": "Oferta de Disco",
"label.disk.offering.details": "Detalles de Oferta de Disco", "label.disk.offering.details": "Detalles de Oferta de Disco",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo de Aprovisionamiento", "label.disk.provisioningtype": "Tipo de Aprovisionamiento",
"label.disk.read.bytes": "Lectura Disco (Bytes)", "label.disk.read.bytes": "Lectura Disco (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrar a almacenamiento", "label.migrate.to.storage": "Migrar a almacenamiento",
"label.migrate.volume": "Migrar Volumen", "label.migrate.volume": "Migrar Volumen",
"label.migrate.volume.to.primary.storage": "Migrar volumen a otro almacenamiento primario", "label.migrate.volume.to.primary.storage": "Migrar volumen a otro almacenamiento primario",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Instancias Mínimas", "label.min.instances": "Instancias Mínimas",
"label.min.past.the.hr": "minuto(s) después de la hora", "label.min.past.the.hr": "minuto(s) después de la hora",
"label.minimum": "Mínimo", "label.minimum": "Mínimo",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Débit écriture disque (IOPS)", "label.disk.iops.write.rate": "Débit écriture disque (IOPS)",
"label.disk.offering": "Offre de Disque", "label.disk.offering": "Offre de Disque",
"label.disk.offering.details": "Détails offre de disque", "label.disk.offering.details": "Détails offre de disque",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Type de provisionnement", "label.disk.provisioningtype": "Type de provisionnement",
"label.disk.read.bytes": "Lecture Disque (Octets)", "label.disk.read.bytes": "Lecture Disque (Octets)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrer vers un stockage", "label.migrate.to.storage": "Migrer vers un stockage",
"label.migrate.volume": "Volume Migré", "label.migrate.volume": "Volume Migré",
"label.migrate.volume.to.primary.storage": "Migration du volume vers un autre stockage primaire", "label.migrate.volume.to.primary.storage": "Migration du volume vers un autre stockage primaire",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Instances Min.", "label.min.instances": "Instances Min.",
"label.min.past.the.hr": "min ap. l'heure", "label.min.past.the.hr": "min ap. l'heure",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Írási ráta (IOPS)", "label.disk.iops.write.rate": "Írási ráta (IOPS)",
"label.disk.offering": "Merevlemez ajánlat", "label.disk.offering": "Merevlemez ajánlat",
"label.disk.offering.details": "Merevlemez ajánlat részletei", "label.disk.offering.details": "Merevlemez ajánlat részletei",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Létrehozás típusa", "label.disk.provisioningtype": "Létrehozás típusa",
"label.disk.read.bytes": "Merevlemez olvasás (Byte)", "label.disk.read.bytes": "Merevlemez olvasás (Byte)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Mozgatás tárra", "label.migrate.to.storage": "Mozgatás tárra",
"label.migrate.volume": "Kötet mozgatása", "label.migrate.volume": "Kötet mozgatása",
"label.migrate.volume.to.primary.storage": "Kötet mozgatása másik elsődleges tárra", "label.migrate.volume.to.primary.storage": "Kötet mozgatása másik elsődleges tárra",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Példányok minimális száma", "label.min.instances": "Példányok minimális száma",
"label.min.past.the.hr": "percben", "label.min.past.the.hr": "percben",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)", "label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Offerta Disco", "label.disk.offering": "Offerta Disco",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo di Provisioning", "label.disk.provisioningtype": "Tipo di Provisioning",
"label.disk.read.bytes": "Disk Read (Bytes)", "label.disk.read.bytes": "Disk Read (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrare verso uno storage", "label.migrate.to.storage": "Migrare verso uno storage",
"label.migrate.volume": "Migrate Volume", "label.migrate.volume": "Migrate Volume",
"label.migrate.volume.to.primary.storage": "Migrare un volume verso un altro primary storage", "label.migrate.volume.to.primary.storage": "Migrare un volume verso un altro primary storage",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "ディスク書き込み速度 (IOPS)", "label.disk.iops.write.rate": "ディスク書き込み速度 (IOPS)",
"label.disk.offering": "ディスク オファリング", "label.disk.offering": "ディスク オファリング",
"label.disk.offering.details": "ディスクオファリングの詳細", "label.disk.offering.details": "ディスクオファリングの詳細",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "プロビジョニングの種類", "label.disk.provisioningtype": "プロビジョニングの種類",
"label.disk.read.bytes": "ディスク読み取り (バイト)", "label.disk.read.bytes": "ディスク読み取り (バイト)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "ストレージへ移行", "label.migrate.to.storage": "ストレージへ移行",
"label.migrate.volume": "ボリュームの移行", "label.migrate.volume": "ボリュームの移行",
"label.migrate.volume.to.primary.storage": "別のプライマリ ストレージへのボリュームの移行", "label.migrate.volume.to.primary.storage": "別のプライマリ ストレージへのボリュームの移行",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "最小インスタンス数", "label.min.instances": "最小インスタンス数",
"label.min.past.the.hr": "分(毎時)", "label.min.past.the.hr": "分(毎時)",
"label.minimum": "最小", "label.minimum": "最小",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)", "label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "디스크 제공", "label.disk.offering": "디스크 제공",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type", "label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)", "label.disk.read.bytes": "Disk Read (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrate to storage", "label.migrate.to.storage": "Migrate to storage",
"label.migrate.volume": "Migrate Volume", "label.migrate.volume": "Migrate Volume",
"label.migrate.volume.to.primary.storage": "다른 기본 스토리지에 볼륨 이전", "label.migrate.volume.to.primary.storage": "다른 기본 스토리지에 볼륨 이전",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",
"label.minimum": "최소", "label.minimum": "최소",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Diskskrivehastighet (IOPS)", "label.disk.iops.write.rate": "Diskskrivehastighet (IOPS)",
"label.disk.offering": "Disktilbud", "label.disk.offering": "Disktilbud",
"label.disk.offering.details": "Disktilbud detaljer", "label.disk.offering.details": "Disktilbud detaljer",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisjoneringstype", "label.disk.provisioningtype": "Provisjoneringstype",
"label.disk.read.bytes": "Disk lese (Bytes)", "label.disk.read.bytes": "Disk lese (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrer til lagring", "label.migrate.to.storage": "Migrer til lagring",
"label.migrate.volume": "Migrer volum", "label.migrate.volume": "Migrer volum",
"label.migrate.volume.to.primary.storage": "Migrer volumet til en annen primærlagring.", "label.migrate.volume.to.primary.storage": "Migrer volumet til en annen primærlagring.",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instanser", "label.min.instances": "Min Instanser",
"label.min.past.the.hr": "minutter etter time", "label.min.past.the.hr": "minutter etter time",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Schrijf snelheid Schijf (IOPS)", "label.disk.iops.write.rate": "Schrijf snelheid Schijf (IOPS)",
"label.disk.offering": "Schijf Aanbieding", "label.disk.offering": "Schijf Aanbieding",
"label.disk.offering.details": "schijfe offerte gegevens", "label.disk.offering.details": "schijfe offerte gegevens",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning type", "label.disk.provisioningtype": "Provisioning type",
"label.disk.read.bytes": "Schijf lezen (Bytes)", "label.disk.read.bytes": "Schijf lezen (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migreer naar opslag", "label.migrate.to.storage": "Migreer naar opslag",
"label.migrate.volume": "Migreer volume", "label.migrate.volume": "Migreer volume",
"label.migrate.volume.to.primary.storage": "Migreer volume naar andere primaire opslag", "label.migrate.volume.to.primary.storage": "Migreer volume naar andere primaire opslag",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min na het uur", "label.min.past.the.hr": "min na het uur",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Disk Write Rate (IOPS)", "label.disk.iops.write.rate": "Disk Write Rate (IOPS)",
"label.disk.offering": "Disk Offering", "label.disk.offering": "Disk Offering",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type", "label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Disk Read (Bytes)", "label.disk.read.bytes": "Disk Read (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrate to storage", "label.migrate.to.storage": "Migrate to storage",
"label.migrate.volume": "Migrate Volume", "label.migrate.volume": "Migrate Volume",
"label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage", "label.migrate.volume.to.primary.storage": "Migrate volume to another primary storage",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",
"label.minimum": "Minimum", "label.minimum": "Minimum",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Taxa de Escrita no Disco (IOPS)", "label.disk.iops.write.rate": "Taxa de Escrita no Disco (IOPS)",
"label.disk.offering": "Oferta de Disco", "label.disk.offering": "Oferta de Disco",
"label.disk.offering.details": "Detalhes da oferta de disco", "label.disk.offering.details": "Detalhes da oferta de disco",
"label.disk.newOffering": "Nova oferta de disco",
"label.disk.newOffering.description": "Oferta de disco a ser aplicada no volume após migração.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Tipo de Provisionamento", "label.disk.provisioningtype": "Tipo de Provisionamento",
"label.disk.read.bytes": "Leitura do Disco (Bytes)", "label.disk.read.bytes": "Leitura do Disco (Bytes)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Migrar para storage", "label.migrate.to.storage": "Migrar para storage",
"label.migrate.volume": "Migrar Volume", "label.migrate.volume": "Migrar Volume",
"label.migrate.volume.to.primary.storage": "Migrar volume para outro storage primário", "label.migrate.volume.to.primary.storage": "Migrar volume para outro storage primário",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Instâncias Min", "label.min.instances": "Instâncias Min",
"label.min.past.the.hr": "minutos passados da última hora", "label.min.past.the.hr": "minutos passados da última hora",
"label.minimum": "Mínimo", "label.minimum": "Mínimo",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "Скорость записи диска (IOPS)", "label.disk.iops.write.rate": "Скорость записи диска (IOPS)",
"label.disk.offering": "Услуга дискового пространства", "label.disk.offering": "Услуга дискового пространства",
"label.disk.offering.details": "Disk offering details", "label.disk.offering.details": "Disk offering details",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "Provisioning Type", "label.disk.provisioningtype": "Provisioning Type",
"label.disk.read.bytes": "Прочитано с диска (Байт)", "label.disk.read.bytes": "Прочитано с диска (Байт)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "Перенести на хранилище", "label.migrate.to.storage": "Перенести на хранилище",
"label.migrate.volume": "Перенос диска", "label.migrate.volume": "Перенос диска",
"label.migrate.volume.to.primary.storage": "Перенести диск в другое основное хранилище", "label.migrate.volume.to.primary.storage": "Перенести диск в другое основное хранилище",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "Min Instances", "label.min.instances": "Min Instances",
"label.min.past.the.hr": "min past the hr", "label.min.past.the.hr": "min past the hr",
"label.minimum": "Минимум", "label.minimum": "Минимум",

View File

@ -666,6 +666,8 @@ var dictionary = {
"label.disk.iops.write.rate": "磁盘写入速度(IOPS)", "label.disk.iops.write.rate": "磁盘写入速度(IOPS)",
"label.disk.offering": "磁盘方案", "label.disk.offering": "磁盘方案",
"label.disk.offering.details": "磁盘方案详情", "label.disk.offering.details": "磁盘方案详情",
"label.disk.newOffering": "New Disk Offering",
"label.disk.newOffering.description": "New disk offering to be used by this volume after the migration.",
"label.disk.physicalsize":"Physical Size", "label.disk.physicalsize":"Physical Size",
"label.disk.provisioningtype": "置备类型", "label.disk.provisioningtype": "置备类型",
"label.disk.read.bytes": "磁盘读取(字节)", "label.disk.read.bytes": "磁盘读取(字节)",
@ -1092,6 +1094,8 @@ var dictionary = {
"label.migrate.to.storage": "迁移到存储", "label.migrate.to.storage": "迁移到存储",
"label.migrate.volume": "迁移卷", "label.migrate.volume": "迁移卷",
"label.migrate.volume.to.primary.storage": "将卷迁移到其他主存储", "label.migrate.volume.to.primary.storage": "将卷迁移到其他主存储",
"label.migrate.volume.newDiskOffering": "Replace disk offering?",
"label.migrate.volume.newDiskOffering.desc": "This option allows administrators to replace the old disk offering, using one that better suits the new placement of the volume.",
"label.min.instances": "最小实例数", "label.min.instances": "最小实例数",
"label.min.past.the.hr": "分 每小时", "label.min.past.the.hr": "分 每小时",
"label.minimum": "最小值", "label.minimum": "最小值",

View File

@ -15,6 +15,155 @@
// specific language governing permissions and limitations // specific language governing permissions and limitations
// under the License. // under the License.
(function(cloudStack) { (function(cloudStack) {
// Shared createForm definition used by both migrate-volume actions
// (attached volumes: "migrateVolume"; detached volumes: "migrateToAnotherStorage").
// Fields:
//   storagePool        - target primary storage selector (required)
//   useNewDiskOffering - checkbox, shown only when the selected target pool's
//                        scope (local/shared) differs from the current pool's
//   newDiskOffering    - replacement disk offering selector, shown only while
//                        useNewDiskOffering is visible and checked
var migrateVolumeCreateFormAction = {
    title: 'label.migrate.volume',
    fields: {
        storagePool: {
            label: 'label.storage.pool',
            validation: {
                required: true
            },
            select: function(args) {
                var mapStoragePoolsByUuid = new Map();
                var volumeId = args.context.volumes[0].id;
                // NOTE(review): the two synchronous AJAX calls below block the UI
                // thread; kept deliberately because currentStoragePool must be
                // known before the change handlers further down are wired up.
                var volumeBeingMigrated = undefined;
                $.ajax({
                    url: createURL("listVolumes&id=" + volumeId),
                    dataType: "json",
                    async: false,
                    success: function(json){
                        volumeBeingMigrated = json.listvolumesresponse.volume[0];
                    }
                });
                var currentStoragePool = undefined;
                $.ajax({
                    url: createURL("listStoragePools&id=" + volumeBeingMigrated.storageid),
                    dataType: "json",
                    async: false,
                    success: function(json){
                        currentStoragePool = json.liststoragepoolsresponse.storagepool[0];
                    }
                });
                // findStoragePoolsForMigration only applies to volumes attached to
                // a VM; for detached volumes fall back to listing the zone's pools.
                var isVolumeNotAttachedToVm = volumeBeingMigrated.virtualmachineid == undefined;
                var urlToRetrieveStoragePools = "findStoragePoolsForMigration&id=" + args.context.volumes[0].id;
                if(isVolumeNotAttachedToVm){
                    urlToRetrieveStoragePools = "listStoragePools&zoneid=" + args.context.volumes[0].zoneid;
                }
                $.ajax({
                    url: createURL(urlToRetrieveStoragePools),
                    dataType: "json",
                    async: true,
                    success: function(json) {
                        var pools = undefined;
                        if(isVolumeNotAttachedToVm){
                            pools = json.liststoragepoolsresponse.storagepool;
                        }else{
                            pools = json.findstoragepoolsformigrationresponse.storagepool;
                        }
                        var items = [];
                        $(pools).each(function() {
                            mapStoragePoolsByUuid.set(this.id, this);
                            var description = this.name;
                            // Suitability info is only available from
                            // findStoragePoolsForMigration (attached volumes).
                            if(!isVolumeNotAttachedToVm){
                                description = description + " (" + (this.suitableformigration ? "Suitable" : "Not Suitable") + ")";
                            }
                            items.push({
                                id: this.id,
                                description: description
                            });
                        });
                        args.response.success({
                            data: items
                        });
                        // Toggle the new-disk-offering widgets whenever the target
                        // pool's scope differs from the current pool's scope.
                        $('select[name=storagePool]').change(function(){
                            var uuidOfStoragePoolSelected = $(this).val();
                            var storagePoolSelected = mapStoragePoolsByUuid.get(uuidOfStoragePoolSelected);
                            if(currentStoragePool.scope === storagePoolSelected.scope){
                                $('div[rel=newDiskOffering],div[rel=useNewDiskOffering]').hide();
                            }else{
                                $('div[rel=newDiskOffering],div[rel=useNewDiskOffering]').show();
                            }
                        });
                        // Show the offering selector only while the checkbox is checked.
                        var functionHideShowNewDiskOffering = function(){
                            if($('div[rel=useNewDiskOffering] input[type=checkbox]').is(':checked')){
                                $('div[rel=newDiskOffering]').show();
                            }else{
                                $('div[rel=newDiskOffering]').hide();
                            }
                        };
                        $('div[rel=useNewDiskOffering] input[type=checkbox]').click(functionHideShowNewDiskOffering);
                        // Apply both visibility rules to the initial selection.
                        $('select[name=storagePool]').change();
                        functionHideShowNewDiskOffering();
                    }
                });
            }
        },
        useNewDiskOffering:{
            label: 'label.migrate.volume.newDiskOffering',
            desc: 'label.migrate.volume.newDiskOffering.desc',
            validation: {
                required: false
            },
            isEditable: true,
            isBoolean: true,
            defaultValue: 'Yes'
        },
        newDiskOffering: {
            label: 'label.disk.newOffering',
            desc: 'label.disk.newOffering.description',
            validation: {
                required: false
            },
            select: function(args){
                $.ajax({
                    url: createURL("listDiskOfferings&listall=true"),
                    dataType: "json",
                    async: true,
                    success: function(json){
                        var diskOfferings = json.listdiskofferingsresponse.diskoffering;
                        var items = [];
                        $(diskOfferings).each(function() {
                            items.push({
                                id: this.id,
                                description: this.name
                            });
                        });
                        args.response.success({
                            data: items
                        });
                    }
                });
            }
        }
    }
};
// Shared action handler for both migrate-volume actions. Builds the
// migrateVolume API call from the form selections of migrateVolumeCreateFormAction.
var functionMigrateVolume = function(args) {
    var volumeBeingMigrated = args.context.volumes[0];
    // A volume attached to a running VM must be live-migrated.
    var isLiveMigrate = volumeBeingMigrated.vmstate == 'Running';
    var migrateVolumeUrl = "migrateVolume&livemigrate=" + isLiveMigrate + "&storageid=" + args.data.storagePool + "&volumeid=" + volumeBeingMigrated.id;
    // Only send newdiskofferingid when the option is both visible (i.e. the
    // target pool's scope differs from the current pool's) and checked. The
    // checkbox defaults to checked, so testing ':checked' alone would silently
    // replace the disk offering even when its section is hidden.
    var $useNewDiskOffering = $('div[rel=useNewDiskOffering] input[name=useNewDiskOffering]:checkbox');
    if($useNewDiskOffering.is(':visible') && $useNewDiskOffering.is(':checked')){
        migrateVolumeUrl = migrateVolumeUrl + '&newdiskofferingid=' + $('div[rel=newDiskOffering] select').val();
    }
    $.ajax({
        url: createURL(migrateVolumeUrl),
        dataType: "json",
        async: true,
        success: function(json) {
            $(window).trigger('cloudStack.fullRefresh');
            var jid = json.migratevolumeresponse.jobid;
            // Hand the async job id back so the notification poller can track it.
            args.response.success({
                _custom: {
                    jobId: jid
                }
            });
        }
    });
};
var diskofferingObjs, selectedDiskOfferingObj; var diskofferingObjs, selectedDiskOfferingObj;
@ -746,7 +895,6 @@
label: 'label.snapshots' label: 'label.snapshots'
}, },
actions: { actions: {
migrateVolume: { migrateVolume: {
label: 'label.migrate.volume', label: 'label.migrate.volume',
messages: { messages: {
@ -758,56 +906,9 @@
} }
}, },
createForm: { createForm: migrateVolumeCreateFormAction,
title: 'label.migrate.volume',
desc: '',
fields: {
storagePool: {
label: 'label.storage.pool',
validation: {
required: true
},
select: function(args) {
$.ajax({
url: createURL("findStoragePoolsForMigration&id=" + args.context.volumes[0].id),
dataType: "json",
async: true,
success: function(json) {
var pools = json.findstoragepoolsformigrationresponse.storagepool;
var items = [];
$(pools).each(function() {
items.push({
id: this.id,
description: this.name + " (" + (this.suitableformigration ? "Suitable" : "Not Suitable") + ")"
});
});
args.response.success({
data: items
});
} action: functionMigrateVolume,
});
}
}
}
},
action: function(args) {
$.ajax({
url: createURL("migrateVolume&livemigrate=true&storageid=" + args.data.storagePool + "&volumeid=" + args.context.volumes[0].id),
dataType: "json",
async: true,
success: function(json) {
var jid = json.migratevolumeresponse.jobid;
args.response.success({
_custom: {
jobId: jid
}
});
}
});
},
notification: { notification: {
poll: pollAsyncJobResult poll: pollAsyncJobResult
} }
@ -1314,59 +1415,8 @@
return 'label.migrate.volume.to.primary.storage'; return 'label.migrate.volume.to.primary.storage';
} }
}, },
createForm: { createForm: $.extend({}, migrateVolumeCreateFormAction, {title: 'label.migrate.volume.to.primary.storage'}),
title: 'label.migrate.volume.to.primary.storage', action: functionMigrateVolume,
desc: '',
fields: {
storageId: {
label: 'label.primary.storage',
validation: {
required: true
},
select: function(args) {
$.ajax({
url: createURL("listStoragePools&zoneid=" + args.context.volumes[0].zoneid),
dataType: "json",
async: true,
success: function(json) {
var pools = json.liststoragepoolsresponse.storagepool;
var items = [];
$(pools).each(function() {
items.push({
id: this.id,
description: this.name
});
});
args.response.success({
data: items
});
}
});
}
}
}
},
action: function(args) {
$.ajax({
url: createURL("migrateVolume&storageid=" + args.data.storageId + "&volumeid=" + args.context.volumes[0].id),
dataType: "json",
async: true,
success: function(json) {
var jid = json.migratevolumeresponse.jobid;
args.response.success({
_custom: {
jobId: jid,
getUpdatedItem: function(json) {
return json.queryasyncjobresultresponse.jobresult.volume;
},
getActionFilter: function() {
return volumeActionfilter;
}
}
});
}
});
},
notification: { notification: {
poll: pollAsyncJobResult poll: pollAsyncJobResult
} }
@ -2539,7 +2589,7 @@
} }
} else { // Disk not attached } else { // Disk not attached
allowedActions.push("remove"); allowedActions.push("remove");
if (jsonObj.state == "Ready" && isAdmin() && jsonObj.storagetype == "shared") { if (jsonObj.state == "Ready" && isAdmin()) {
allowedActions.push("migrateToAnotherStorage"); allowedActions.push("migrateToAnotherStorage");
} }
allowedActions.push("attachDisk"); allowedActions.push("attachDisk");