From b080eaf3270ffae32f7809e5d492139f33603e89 Mon Sep 17 00:00:00 2001 From: Mike Tutkowski Date: Wed, 16 Jul 2014 08:23:49 -0600 Subject: [PATCH] Updates to the way resizing a volume works --- .../command/user/volume/ResizeVolumeCmd.java | 16 +- .../subsystem/api/storage/VolumeService.java | 2 + .../storage/volume/VolumeServiceImpl.java | 30 +++ .../SolidFirePrimaryDataStoreDriver.java | 58 ++++- .../SolidFirePrimaryDataStoreLifeCycle.java | 2 +- .../com/cloud/network/NetworkServiceImpl.java | 4 +- .../cloud/storage/ResizeVolumePayload.java | 6 +- .../com/cloud/storage/VmWorkResizeVolume.java | 14 +- .../cloud/storage/VolumeApiServiceImpl.java | 242 ++++++++++++------ utils/src/com/cloud/utils/StringUtils.java | 32 +++ 10 files changed, 320 insertions(+), 86 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java index 1cf4f29dc29..49fd6ca194f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java @@ -55,7 +55,13 @@ public class ResizeVolumeCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.ID, entityType = VolumeResponse.class, required = true, type = CommandType.UUID, description = "the ID of the disk volume") private Long id; - @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, required = false, description = "New volume size in G") + @Parameter(name = ApiConstants.MIN_IOPS, type = CommandType.LONG, required = false, description = "New minimum number of IOPS") + private Long minIops; + + @Parameter(name = ApiConstants.MAX_IOPS, type = CommandType.LONG, required = false, description = "New maximum number of IOPS") + private Long maxIops; + + @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, required = false, description = "New volume size in GB") private Long size; @Parameter(name = ApiConstants.SHRINK_OK, type = CommandType.BOOLEAN, required = false, description = "Verify OK to Shrink") @@ -81,6 +87,14 @@ public class ResizeVolumeCmd extends BaseAsyncCmd { return getEntityId(); } + public Long getMinIops() { + return minIops; + } + + public Long getMaxIops() { + return maxIops; + } + public Long getSize() { return size; } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index b6e61069e77..cadce56c588 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -96,6 +96,8 @@ public interface VolumeService { AsyncCallFuture resize(VolumeInfo volume); + void resizeVolumeOnHypervisor(long volumeId, long newSize, long destHostId, String instanceName); + void handleVolumeSync(DataStore store); SnapshotInfo takeSnapshot(VolumeInfo volume); diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 3a71147f8aa..3fc43ea2d87 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -51,6 +51,7 @@ import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import 
org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -65,6 +66,8 @@ import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ListVolumeAnswer; import com.cloud.agent.api.storage.ListVolumeCommand; +import com.cloud.agent.api.storage.ResizeVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; @@ -1280,6 +1283,33 @@ public class VolumeServiceImpl implements VolumeService { return future; } + @Override + public void resizeVolumeOnHypervisor(long volumeId, long newSize, long destHostId, String instanceName) { + final String errMsg = "Resize command failed"; + + try { + Answer answer = null; + Host destHost = _hostDao.findById(destHostId); + EndPoint ep = RemoteHostEndPoint.getHypervisorHostEndPoint(destHost); + + if (ep != null) { + VolumeVO volume = _volumeDao.findById(volumeId); + PrimaryDataStore primaryDataStore = this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(primaryDataStore), volume.getSize(), newSize, true, instanceName); + + answer = ep.sendMessage(resizeCmd); + } else { + throw new CloudRuntimeException("Could not find a remote endpoint to send command to. Check if host or SSVM is down."); + } + + if (answer == null || !answer.getResult()) { + throw new CloudRuntimeException(answer != null ? 
answer.getDetails() : errMsg); } } catch (Exception e) { throw new CloudRuntimeException(errMsg, e); } } + protected Void resizeVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { CreateCmdResult result = callback.getResult(); AsyncCallFuture future = context.future; diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index c06a72895ad..5a23fcf3991 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -42,6 +42,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.capacity.CapacityManager; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.dao.DataCenterDao; @@ -49,6 +50,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; @@ -57,10 +59,12 @@ import com.cloud.user.AccountDetailVO; import com.cloud.user.AccountDetailsDao; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.exception.CloudRuntimeException; public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Inject private AccountDao _accountDao; @Inject private AccountDetailsDao _accountDetailsDao; + @Inject private CapacityManager _capacityMgr; @Inject private ClusterDetailsDao _clusterDetailsDao; @Inject private DataCenterDao _zoneDao; @Inject private HostDao _hostDao; @@ -400,8 +404,58 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { } @Override - public void resize(DataObject data, AsyncCompletionCallback callback) { - throw new UnsupportedOperationException(); + public void resize(DataObject dataObject, AsyncCompletionCallback callback) { + String iqn = null; + String errMsg = null; + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volumeInfo = (VolumeInfo)dataObject; + iqn = volumeInfo.get_iScsiName(); + long storagePoolId = volumeInfo.getPoolId(); + long sfVolumeId = Long.parseLong(volumeInfo.getFolder()); + ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); + + SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, _storagePoolDetailsDao); + SolidFireUtil.SolidFireVolume sfVolume = SolidFireUtil.getSolidFireVolume(sfConnection, sfVolumeId); + + verifySufficientIopsForStoragePool(storagePoolId, volumeInfo.getId(), payload.newMinIops); + + SolidFireUtil.modifySolidFireVolume(sfConnection, sfVolumeId, sfVolume.getTotalSize(), payload.newMinIops, payload.newMaxIops, + getDefaultBurstIops(storagePoolId, payload.newMaxIops)); + + // look up the CloudStack volume by its own ID (sfVolumeId is the SolidFire-side ID and must not be used against the CloudStack volume DAO) + VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); + + volume.setMinIops(payload.newMinIops); + volume.setMaxIops(payload.newMaxIops); + + _volumeDao.update(volume.getId(), volume); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") 
passed to resize"; + } + + CreateCmdResult result = new CreateCmdResult(iqn, new Answer(null, errMsg == null, errMsg)); + + result.setResult(errMsg); + + callback.complete(result); + } + + private void verifySufficientIopsForStoragePool(long storagePoolId, long volumeId, long newMinIops) { + StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); + VolumeVO volume = _volumeDao.findById(volumeId); + + long currentMinIops = volume.getMinIops(); + long diffInMinIops = newMinIops - currentMinIops; + + // if the desire is for more IOPS + if (diffInMinIops > 0) { + long usedIops = _capacityMgr.getUsedIops(storagePool); + long capacityIops = storagePool.getCapacityIops(); + + if (usedIops + diffInMinIops > capacityIops) { + throw new CloudRuntimeException("Insufficient number of IOPS available in this storage pool"); + } + } } @Override diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index c23db14a6a7..bc08704519f 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -53,7 +53,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class); - @Inject CapacityManager _capacityMgr; + @Inject private CapacityManager _capacityMgr; @Inject private DataCenterDao zoneDao; @Inject private PrimaryDataStoreDao storagePoolDao; @Inject private PrimaryDataStoreHelper dataStoreHelper; diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index c8105e82744..c5d71348a24 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -158,6 +158,7 @@ import com.cloud.user.dao.UserDao; import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; @@ -2403,7 +2404,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { s_logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); return false; } - if (!oldNetworkOffering.getTags().equalsIgnoreCase(newNetworkOffering.getTags())) { + + if (!StringUtils.areTagsEqual(oldNetworkOffering.getTags(), newNetworkOffering.getTags())) { s_logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); return false; } diff --git a/server/src/com/cloud/storage/ResizeVolumePayload.java b/server/src/com/cloud/storage/ResizeVolumePayload.java index 55dc66161f8..7a927b2179e 100644 --- a/server/src/com/cloud/storage/ResizeVolumePayload.java +++ b/server/src/com/cloud/storage/ResizeVolumePayload.java @@ -19,12 +19,16 @@ package com.cloud.storage; public class ResizeVolumePayload { public final Long newSize; + public final Long 
newMinIops; + public final Long newMaxIops; public final boolean shrinkOk; public final String instanceName; public final long[] hosts; - public ResizeVolumePayload(Long newSize, boolean shrinkOk, String instanceName, long[] hosts) { + public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, boolean shrinkOk, String instanceName, long[] hosts) { this.newSize = newSize; + this.newMinIops = newMinIops; + this.newMaxIops = newMaxIops; this.shrinkOk = shrinkOk; this.instanceName = instanceName; this.hosts = hosts; diff --git a/server/src/com/cloud/storage/VmWorkResizeVolume.java b/server/src/com/cloud/storage/VmWorkResizeVolume.java index 3ccaecd2429..1caab10c9ce 100644 --- a/server/src/com/cloud/storage/VmWorkResizeVolume.java +++ b/server/src/com/cloud/storage/VmWorkResizeVolume.java @@ -24,17 +24,21 @@ public class VmWorkResizeVolume extends VmWork { private long volumeId; private long currentSize; private long newSize; + private Long newMinIops; + private Long newMaxIops; private Long newServiceOfferingId; private boolean shrinkOk; public VmWorkResizeVolume(long userId, long accountId, long vmId, String handlerName, - long volumeId, long currentSize, long newSize, Long newServiceOfferingId, boolean shrinkOk) { + long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newServiceOfferingId, boolean shrinkOk) { super(userId, accountId, vmId, handlerName); this.volumeId = volumeId; this.currentSize = currentSize; this.newSize = newSize; + this.newMinIops = newMinIops; + this.newMaxIops = newMaxIops; this.newServiceOfferingId = newServiceOfferingId; this.shrinkOk = shrinkOk; } @@ -51,6 +55,14 @@ public class VmWorkResizeVolume extends VmWork { return newSize; } + public Long getNewMinIops() { + return newMinIops; + } + + public Long getNewMaxIops() { + return newMaxIops; + } + public Long getNewServiceOfferingId() { return newServiceOfferingId; } diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index e788cb2971c..49cb520cda0 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -121,6 +121,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Predicate; import com.cloud.utils.ReflectionUse; +import com.cloud.utils.StringUtils; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; @@ -637,9 +638,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic public boolean validateVolumeSizeRange(long size) { if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { - throw new InvalidParameterValueException("Please specify a size of at least 1 Gb."); + throw new InvalidParameterValueException("Please specify a size of at least 1 GB."); } else if (size > (_maxVolumeSizeInGb * 1024 * 1024 * 1024)) { - throw new InvalidParameterValueException("volume size " + size + ", but the maximum size allowed is " + _maxVolumeSizeInGb + " Gb."); + throw new InvalidParameterValueException("Requested volume size is " + size + ", but the maximum size allowed is " + _maxVolumeSizeInGb + " GB."); } return true; @@ -716,67 +717,99 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws 
ResourceAllocationException { Long newSize = null; + Long newMinIops = null; + Long newMaxIops = null; boolean shrinkOk = cmd.getShrinkOk(); VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + if (volume == null) { throw new InvalidParameterValueException("No such volume"); } + /* Does the caller have authority to act on this volume? */ + _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume); + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskOfferingVO newDiskOffering = null; + DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); - newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); + /* Only works for KVM/XenServer/VMware for now, and volumes with 'None' since they're just allocated in DB */ - /* Only works for KVM/Xen/VMware for now, and volumes with 'None' since they're just allocated in db */ - if (_volsDao.getHypervisorType(volume.getId()) != HypervisorType.KVM - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.XenServer - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.VMware - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.None) { - throw new InvalidParameterValueException("Cloudstack currently only supports volumes marked as KVM, VMware, XenServer hypervisor for resize"); + HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); + + if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.XenServer && + hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.None) { + throw new InvalidParameterValueException("CloudStack currently only supports volumes marked as the KVM, VMware, or XenServer hypervisor type for resize."); } if (volume.getState() != Volume.State.Ready && volume.getState() != Volume.State.Allocated) { - throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. " - + "Volume " + volume.getUuid() + " state is:" + volume.getState()); + throw new InvalidParameterValueException("Volume should be in ready or allocated state before attempting a resize. 
Volume " + + volume.getUuid() + " is in state " + volume.getState() + "."); } - /* - * figure out whether or not a new disk offering or size parameter is - * required, get the correct size value - */ + // if we are to use the existing disk offering if (newDiskOffering == null) { - if (diskOffering.isCustomized() || volume.getVolumeType().equals(Volume.Type.ROOT)) { - newSize = cmd.getSize(); + newSize = cmd.getSize(); - if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); + // if the caller is looking to change the size of the volume + if (newSize != null) { + if (!diskOffering.isCustomized() && !volume.getVolumeType().equals(Volume.Type.ROOT)) { + throw new InvalidParameterValueException("To change a volume's size without providing a new disk offering, its current disk offering must be " + + "customizable or it must be a root volume."); } - newSize = (newSize << 30); - } else { - throw new InvalidParameterValueException("current offering" + volume.getDiskOfferingId() + " cannot be resized, need to specify a disk offering"); + // convert from bytes to GiB + newSize = newSize << 30; } - } else { - if (!volume.getVolumeType().equals(Volume.Type.DATADISK)) { - throw new InvalidParameterValueException("Can only resize Data volumes via new disk offering"); + else { + // no parameter provided; just use the original size of the volume + newSize = volume.getSize(); } - if (newDiskOffering.getRemoved() != null || !DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) { - throw new InvalidParameterValueException("Disk offering ID is missing or invalid"); + newMinIops = cmd.getMinIops(); + + if (newMinIops != null) { + if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) { + throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Min IOPS' parameter."); + } + } + else { + // no parameter provided; just use the original min IOPS of the volume + newMinIops = volume.getMinIops(); + } + + newMaxIops = cmd.getMaxIops(); + + if (newMaxIops != null) { + if (diskOffering.isCustomizedIops() == null || !diskOffering.isCustomizedIops()) { + throw new InvalidParameterValueException("The current disk offering does not support customization of the 'Max IOPS' parameter."); + } + } + else { + // no parameter provided; just use the original max IOPS of the volume + newMaxIops = volume.getMaxIops(); + } + + validateIops(newMinIops, newMaxIops); + } else { + if (newDiskOffering.getRemoved() != null) { + throw new InvalidParameterValueException("Requested disk offering has been removed."); + } + + if (!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) { + throw new InvalidParameterValueException("Requested disk offering type is invalid."); } if (diskOffering.getTags() != null) { - if (newDiskOffering.getTags() == null || !newDiskOffering.getTags().equals(diskOffering.getTags())) { - throw new InvalidParameterValueException("Tags on new and old disk offerings must match"); + if (!StringUtils.areTagsEqual(diskOffering.getTags(), newDiskOffering.getTags())) { + throw new InvalidParameterValueException("The tags on the new and old disk offerings must match."); } } else if (newDiskOffering.getTags() != null) { - throw new InvalidParameterValueException("There are no tags on current disk offering, new disk offering needs to have no tags"); + throw new InvalidParameterValueException("There are no tags on the current disk offering. 
The new disk offering needs to have no tags, as well."); } - if (newDiskOffering.getDomainId() == null) { - // do nothing as offering is public - } else { + if (newDiskOffering.getDomainId() != null) { + // not a public offering; check access _configMgr.checkDiskOfferingAccess(CallContext.current().getCallingAccount(), newDiskOffering); } @@ -784,108 +817,147 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic newSize = cmd.getSize(); if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); + throw new InvalidParameterValueException("The new disk offering requires that a size be specified."); } - newSize = (newSize << 30); + // convert from GiB to bytes + newSize = newSize << 30; } else { newSize = newDiskOffering.getDiskSize(); } - } - if (newSize == null) { - throw new InvalidParameterValueException("could not detect a size parameter or fetch one from the diskofferingid parameter"); - } + if (!newSize.equals(volume.getSize()) && !volume.getVolumeType().equals(Volume.Type.DATADISK)) { + throw new InvalidParameterValueException("Only data volumes can be resized via a new disk offering."); + } - if (!validateVolumeSizeRange(newSize)) { - throw new InvalidParameterValueException("Requested size out of range"); - } + if (newDiskOffering.isCustomizedIops() != null && newDiskOffering.isCustomizedIops()) { + newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops(); + newMaxIops = cmd.getMaxIops() != null ? cmd.getMaxIops() : volume.getMaxIops(); - /* does the caller have the authority to act on this volume? */ - _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume); + validateIops(newMinIops, newMaxIops); + } + else { + newMinIops = newDiskOffering.getMinIops(); + newMaxIops = newDiskOffering.getMaxIops(); + } + } long currentSize = volume.getSize(); - /* - * lets make certain they (think they) know what they're doing if they - * want to shrink, by forcing them to provide the shrinkok parameter. - * This will be checked again at the hypervisor level where we can see - * the actual disk size - */ - if (currentSize > newSize && !shrinkOk) { - throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize - + " would shrink the volume, need to sign off by supplying the shrinkok parameter with value of true"); + // if the caller is looking to change the size of the volume + if (currentSize != newSize) { + if (!validateVolumeSizeRange(newSize)) { + throw new InvalidParameterValueException("Requested size out of range"); + } + + /* + * Let's make certain they (think they) know what they're doing if they + * want to shrink by forcing them to provide the shrinkok parameter. + * This will be checked again at the hypervisor level where we can see + * the actual disk size. + */ + if (currentSize > newSize && !shrinkOk) { + throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " + newSize + " would shrink the volume. " 
+ + "Need to sign off by supplying the shrinkok parameter with value of true."); + } + + if (newSize > currentSize) { + /* Check resource limit for this account on primary storage resource */ + _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), + new Long(newSize - currentSize).longValue()); + } } - if (!shrinkOk) { - /* Check resource limit for this account on primary storage resource */ - _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), new Long(newSize - - currentSize).longValue()); - } + // Note: The storage plug-in in question should perform validation on the IOPS to check if a sufficient number of IOPS are available to perform + // the requested change - /* If this volume has never been beyond allocated state, short circuit everything and simply update the database */ + /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */ if (volume.getState() == Volume.State.Allocated) { - s_logger.debug("Volume is allocated, but never created, simply updating database with new size"); + s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS."); + volume.setSize(newSize); + volume.setMinIops(newMinIops); + volume.setMaxIops(newMaxIops); + if (newDiskOffering != null) { volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); } + _volsDao.update(volume.getId(), volume); + return volume; } UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); - if (userVm != null) { // serialize VM operation AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); + if (!VmJobEnabled.value() || jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance VmWorkJobVO placeHolder = null; + if (VmJobEnabled.value()) { placeHolder = createPlaceHolderWork(userVm.getId()); } + try { - return orchestrateResizeVolume(volume.getId(), currentSize, newSize, + return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); } finally { - if (VmJobEnabled.value()) + if (VmJobEnabled.value()) { _workJobDao.expunge(placeHolder.getId()); + } } - } else { - Outcome outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, + Outcome outcome = resizeVolumeThroughJobQueue(userVm.getId(), volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? 
cmd.getNewDiskOfferingId() : null, shrinkOk); - Volume vol = null; try { outcome.get(); } catch (InterruptedException e) { - throw new RuntimeException("Operation is interrupted", e); + throw new RuntimeException("Operation was interrupted", e); } catch (java.util.concurrent.ExecutionException e) { - throw new RuntimeException("Execution excetion", e); + throw new RuntimeException("Execution exception", e); } Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); + if (jobResult != null) { - if (jobResult instanceof ConcurrentOperationException) + if (jobResult instanceof ConcurrentOperationException) { throw (ConcurrentOperationException)jobResult; - else if (jobResult instanceof Throwable) + } + else if (jobResult instanceof Throwable) { throw new RuntimeException("Unexpected exception", (Throwable)jobResult); + } else if (jobResult instanceof Long) { - vol = _volsDao.findById((Long)jobResult); + return _volsDao.findById((Long)jobResult); } } + return volume; } } - return orchestrateResizeVolume(volume.getId(), currentSize, newSize, + + return orchestrateResizeVolume(volume.getId(), currentSize, newSize, newMinIops, newMaxIops, newDiskOffering != null ? cmd.getNewDiskOfferingId() : null, shrinkOk); + } + + private void validateIops(Long minIops, Long maxIops) { + if ((minIops == null && maxIops != null) || (minIops != null && maxIops == null)) { + throw new InvalidParameterValueException("Either 'miniops' and 'maxiops' must both be provided or neither must be provided."); } - private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newDiskOfferingId, boolean shrinkOk) { + if (minIops != null && maxIops != null) { + if (minIops > maxIops) { + throw new InvalidParameterValueException("The 'miniops' parameter must be less than or equal to the 'maxiops' parameter."); + } + } + } + + private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, boolean shrinkOk) { VolumeVO volume = _volsDao.findById(volumeId); UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); /* @@ -905,12 +977,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } /* Xen only works offline, SR does not support VDI.resizeOnline */ - if (_volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer && !userVm.getState().equals(State.Stopped)) { + if (currentSize != newSize && _volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer && !userVm.getState().equals(State.Stopped)) { throw new InvalidParameterValueException("VM must be stopped or disk detached in order to resize with the Xen HV"); } } - ResizeVolumePayload payload = new ResizeVolumePayload(newSize, shrinkOk, instanceName, hosts); + ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, shrinkOk, instanceName, hosts); try { VolumeInfo vol = volFactory.getVolume(volume.getId()); @@ -925,6 +997,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volume = _volsDao.findById(volume.getId()); + StoragePoolVO storagePool = _storagePoolDao.findById(vol.getPoolId()); + + if (storagePool.isManaged()) { + if (hosts.length > 0) { + volService.resizeVolumeOnHypervisor(volumeId, newSize, hosts[0], instanceName); + } + + volume.setSize(newSize); + + /** @todo let the storage driver know the CloudStack volume within the storage volume in question has a new size */ + } + if (newDiskOfferingId != null) { 
volume.setDiskOfferingId(newDiskOfferingId); } @@ -2364,7 +2448,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } public Outcome resizeVolumeThroughJobQueue(final Long vmId, final long volumeId, - final long currentSize, final long newSize, final Long newServiceOfferingId, final boolean shrinkOk) { + final long currentSize, final long newSize, final Long newMinIops, final Long newMaxIops, final Long newServiceOfferingId, final boolean shrinkOk) { final CallContext context = CallContext.current(); final User callingUser = context.getCallingUser(); @@ -2394,7 +2478,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // save work context info (there are some duplications) VmWorkResizeVolume workInfo = new VmWorkResizeVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), - VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newServiceOfferingId, shrinkOk); + VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, currentSize, newSize, newMinIops, newMaxIops, newServiceOfferingId, shrinkOk); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); @@ -2529,7 +2613,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @ReflectionUse private Pair orchestrateResizeVolume(VmWorkResizeVolume work) throws Exception { - Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), + Volume vol = orchestrateResizeVolume(work.getVolumeId(), work.getCurrentSize(), work.getNewSize(), work.getNewMinIops(), work.getNewMaxIops(), work.getNewServiceOfferingId(), work.isShrinkOk()); return new Pair(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(new Long(vol.getId()))); diff --git a/utils/src/com/cloud/utils/StringUtils.java b/utils/src/com/cloud/utils/StringUtils.java index 09045aa7352..3b8cf71ca30 100644 --- a/utils/src/com/cloud/utils/StringUtils.java +++ b/utils/src/com/cloud/utils/StringUtils.java @@ -198,6 +198,38 @@ public class StringUtils { return cleanResult; } + public static boolean areTagsEqual(String tags1, String tags2) { + if (tags1 == null && tags2 == null) { + return true; + } + + if (tags1 != null && tags2 == null) { + return false; + } + + if (tags1 == null && tags2 != null) { + return false; + } + + final String delimiter = ","; + + List lstTags1 = new ArrayList(); + String[] aTags1 = tags1.split(delimiter); + + for (String tag1 : aTags1) { + lstTags1.add(tag1.toLowerCase()); + } + + List lstTags2 = new ArrayList(); + String[] aTags2 = tags2.split(delimiter); + + for (String tag2 : aTags2) { + lstTags2.add(tag2.toLowerCase()); + } + + return lstTags1.containsAll(lstTags2) && lstTags2.containsAll(lstTags1); + } + public static String stripControlCharacters(String s) { return StringUtilities.stripControls(s); }
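Note on the tag comparison introduced in this patch: the network-offering upgrade check in NetworkServiceImpl and the disk-offering check in VolumeApiServiceImpl now delegate to the new StringUtils.areTagsEqual helper, which treats comma-separated tag lists as equal regardless of order and case. The standalone sketch below inlines the same comparison logic purely for illustration; the class and helper names (TagEqualityDemo, toLowerCaseList) are hypothetical and not part of this patch.

import java.util.ArrayList;
import java.util.List;

// Illustrative only: mirrors the semantics of the areTagsEqual helper added above.
public class TagEqualityDemo {
    static boolean areTagsEqual(String tags1, String tags2) {
        if (tags1 == null && tags2 == null) {
            return true;
        }

        if (tags1 == null || tags2 == null) {
            return false;
        }

        List<String> lstTags1 = toLowerCaseList(tags1);
        List<String> lstTags2 = toLowerCaseList(tags2);

        // order- and case-insensitive comparison of the two tag lists
        return lstTags1.containsAll(lstTags2) && lstTags2.containsAll(lstTags1);
    }

    private static List<String> toLowerCaseList(String tags) {
        List<String> result = new ArrayList<String>();

        for (String tag : tags.split(",")) {
            result.add(tag.toLowerCase());
        }

        return result;
    }

    public static void main(String[] args) {
        System.out.println(areTagsEqual("SSD,fast", "fast,ssd")); // true
        System.out.println(areTagsEqual("SSD", "ssd,fast"));      // false
        System.out.println(areTagsEqual(null, null));             // true
    }
}

With this behavior, two offerings whose tags differ only in order or case are considered compatible for the upgrade and resize checks.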