From 6ba5e082217ab7af0ee5042bfdff1053fad41430 Mon Sep 17 00:00:00 2001 From: Rene Peinthor Date: Thu, 29 Jan 2026 10:08:12 +0100 Subject: [PATCH 1/4] Linstor: support live migration from other primary storage (#12532) * Linstor: Refactor resource creation methods to LinstorUtil Move reusable methods from LinstorPrimaryDataStoreDriverImpl to LinstorUtil to enable sharing with other components: - logLinstorAnswer, logLinstorAnswers, checkLinstorAnswersThrow - getRscGrp, getEncryptedLayerList, applyQoSSettings - createResourceBase, createResource, spawnResource - canShareTemplateForResourceGroup, foundShareableTemplate Add LIN_PROP_DRBDOPT_EXACT_SIZE constant and exactSize parameter support for DRBD exact-size property handling during resource creation. * Linstor: Add LinstorDataMotionStrategy for VM live migration Implement DataMotionStrategy for live migration of VMs with volumes on Linstor or other primary storage. Key features: - Support live migration with storage from other primary storages - Preserve DRBD exact-size property during migration --- plugins/storage/volume/linstor/CHANGELOG.md | 6 + .../LinstorPrimaryDataStoreDriverImpl.java | 316 +------------ .../storage/datastore/util/LinstorUtil.java | 293 +++++++++++- .../motion/LinstorDataMotionStrategy.java | 437 ++++++++++++++++++ .../spring-storage-volume-linstor-context.xml | 2 + ...LinstorPrimaryDataStoreDriverImplTest.java | 7 +- 6 files changed, 764 insertions(+), 297 deletions(-) create mode 100644 plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java diff --git a/plugins/storage/volume/linstor/CHANGELOG.md b/plugins/storage/volume/linstor/CHANGELOG.md index 47d1ddeb06c..a5d609325d6 100644 --- a/plugins/storage/volume/linstor/CHANGELOG.md +++ b/plugins/storage/volume/linstor/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to Linstor CloudStack plugin will be documented in this file The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2026-01-17] + +### Added + +- Support live migration from other primary storage + ## [2025-12-18] ### Changed diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index c2bce6e5a04..27b5f7ef7ec 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@ -21,33 +21,25 @@ import com.linbit.linstor.api.CloneWaiter; import com.linbit.linstor.api.DevelopersApi; import com.linbit.linstor.api.model.ApiCallRc; import com.linbit.linstor.api.model.ApiCallRcList; -import com.linbit.linstor.api.model.AutoSelectFilter; import com.linbit.linstor.api.model.LayerType; -import com.linbit.linstor.api.model.Properties; import com.linbit.linstor.api.model.ResourceDefinition; import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest; import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted; import com.linbit.linstor.api.model.ResourceDefinitionCreate; import com.linbit.linstor.api.model.ResourceDefinitionModify; -import com.linbit.linstor.api.model.ResourceGroup; -import com.linbit.linstor.api.model.ResourceGroupSpawn; import com.linbit.linstor.api.model.ResourceMakeAvailable; import com.linbit.linstor.api.model.ResourceWithVolumes; import com.linbit.linstor.api.model.Snapshot; import com.linbit.linstor.api.model.SnapshotRestore; -import com.linbit.linstor.api.model.VolumeDefinition; import com.linbit.linstor.api.model.VolumeDefinitionModify; import javax.annotation.Nonnull; import 
javax.annotation.Nullable; import javax.inject.Inject; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; @@ -117,10 +109,9 @@ import org.apache.cloudstack.storage.snapshot.SnapshotObject; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.nio.charset.StandardCharsets; @@ -335,275 +326,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } } - private void logLinstorAnswer(@Nonnull ApiCallRc answer) { - if (answer.isError()) { - logger.error(answer.getMessage()); - } else if (answer.isWarning()) { - logger.warn(answer.getMessage()); - } else if (answer.isInfo()) { - logger.info(answer.getMessage()); - } - } - - private void logLinstorAnswers(@Nonnull ApiCallRcList answers) { - answers.forEach(this::logLinstorAnswer); - } - - private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) { - logLinstorAnswers(answers); - if (answers.hasError()) - { - String errMsg = answers.stream() - .filter(ApiCallRc::isError) - .findFirst() - .map(ApiCallRc::getMessage).orElse("Unknown linstor error"); - throw new CloudRuntimeException(errMsg); - } - } - private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) { - logLinstorAnswers(answers); + LinstorUtil.logLinstorAnswers(answers); return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null); } - private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops) - throws 
ApiException - { - Long currentQosIops = null; - List vlmDfns = api.volumeDefinitionList(rscName, null, null); - if (!vlmDfns.isEmpty()) - { - Properties props = vlmDfns.get(0).getProps(); - long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0")); - currentQosIops = iops > 0 ? iops : null; - } - - if (!Objects.equals(maxIops, currentQosIops)) - { - VolumeDefinitionModify vdm = new VolumeDefinitionModify(); - if (maxIops != null) - { - Properties props = new Properties(); - props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops); - props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops); - vdm.overrideProps(props); - logger.info("Apply qos setting: " + maxIops + " to " + rscName); - } - else - { - logger.info("Remove QoS setting for " + rscName); - vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops")); - } - ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm); - checkLinstorAnswersThrow(answers); - - Long capacityIops = storagePool.getCapacityIops(); - if (capacityIops != null) - { - long vcIops = currentQosIops != null ? currentQosIops * -1 : 0; - long vMaxIops = maxIops != null ? maxIops : 0; - long newIops = vcIops + vMaxIops; - capacityIops -= newIops; - logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops)); - storagePool.setCapacityIops(Math.max(0, capacityIops)); - _storagePoolDao.update(storagePool.getId(), storagePool); - } - } - } - - private String getRscGrp(StoragePool storagePool) { - return storagePool.getUserInfo() != null && !storagePool.getUserInfo().isEmpty() ? - storagePool.getUserInfo() : "DfltRscGrp"; - } - - /** - * Returns the layerlist of the resourceGroup with encryption(LUKS) added above STORAGE. - * If the resourceGroup layer list already contains LUKS this layer list will be returned. 
- * @param api Linstor developers API - * @param resourceGroup Resource group to get the encryption layer list - * @return layer list with LUKS added - */ - public List getEncryptedLayerList(DevelopersApi api, String resourceGroup) { - try { - List rscGrps = api.resourceGroupList( - Collections.singletonList(resourceGroup), Collections.emptyList(), null, null); - - if (CollectionUtils.isEmpty(rscGrps)) { - throw new CloudRuntimeException( - String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); - } - - final ResourceGroup rscGrp = rscGrps.get(0); - List layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE); - List curLayerStack = rscGrp.getSelectFilter() != null ? - rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList(); - if (CollectionUtils.isNotEmpty(curLayerStack)) { - layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList()); - if (!layers.contains(LayerType.LUKS)) { - layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE - } - } - return layers; - } catch (ApiException e) { - throw new CloudRuntimeException( - String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); - } - } - - /** - * Spawns a new Linstor resource with the given arguments. 
- * @param api - * @param newRscName - * @param sizeInBytes - * @param isTemplate - * @param rscGrpName - * @param volName - * @param vmName - * @throws ApiException - */ - private void spawnResource( - DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName, - String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase) throws ApiException - { - ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn(); - rscGrpSpawn.setResourceDefinitionName(newRscName); - rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024); - if (passPhraseId != null) { - AutoSelectFilter asf = new AutoSelectFilter(); - List luksLayers = getEncryptedLayerList(api, rscGrpName); - asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList())); - rscGrpSpawn.setSelectFilter(asf); - if (passPhrase != null) { - String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8); - rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase)); - } - } - - if (isTemplate) { - Properties props = new Properties(); - props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true"); - rscGrpSpawn.setResourceDefinitionProps(props); - } - - logger.info("Linstor: Spawn resource " + newRscName); - ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn); - checkLinstorAnswersThrow(answers); - - answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName); - checkLinstorAnswersThrow(answers); - } - - /** - * Condition if a template resource can be shared with the given resource group. - * @param tgtRscGrp - * @param tgtLayerStack - * @param rg - * @return True if the template resource can be shared, else false. - */ - private boolean canShareTemplateForResourceGroup( - ResourceGroup tgtRscGrp, List tgtLayerStack, ResourceGroup rg) { - List rgLayerStack = rg.getSelectFilter() != null ? 
- rg.getSelectFilter().getLayerStack() : null; - return Objects.equals(tgtLayerStack, rgLayerStack) && - Objects.equals(tgtRscGrp.getSelectFilter().getStoragePoolList(), - rg.getSelectFilter().getStoragePoolList()); - } - - /** - * Searches for a shareable template for this rscGrpName and sets the aux template property. - * @param api - * @param rscName - * @param rscGrpName - * @param existingRDs - * @return - * @throws ApiException - */ - private boolean foundShareableTemplate( - DevelopersApi api, String rscName, String rscGrpName, - List> existingRDs) throws ApiException { - if (!existingRDs.isEmpty()) { - ResourceGroup tgtRscGrp = api.resourceGroupList( - Collections.singletonList(rscGrpName), null, null, null).get(0); - List tgtLayerStack = tgtRscGrp.getSelectFilter() != null ? - tgtRscGrp.getSelectFilter().getLayerStack() : null; - - // check if there is already a template copy, that we could reuse - // this means if select filters are similar enough to allow cloning from - for (Pair rdPair : existingRDs) { - ResourceGroup rg = rdPair.second(); - if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) { - LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName); - return true; - } - } - } - return false; - } - - /** - * Creates a new Linstor resource. - * @param rscName - * @param sizeInBytes - * @param volName - * @param vmName - * @param api - * @param rscGrp - * @param poolId - * @param isTemplate indicates if the resource is a template - * @return true if a new resource was created, false if it already existed or was reused. 
- */ - private boolean createResourceBase( - String rscName, long sizeInBytes, String volName, String vmName, - @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api, - String rscGrp, long poolId, boolean isTemplate) - { - try - { - logger.debug("createRscBase: {} :: {} :: {}", rscName, rscGrp, isTemplate); - List> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName); - - String fullRscName = String.format("%s-%d", rscName, poolId); - boolean alreadyCreated = existingRDs.stream() - .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) || - existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp))); - if (!alreadyCreated) { - boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs); - if (createNewRsc) { - String newRscName = existingRDs.isEmpty() ? rscName : fullRscName; - spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp, - volName, vmName, passPhraseId, passPhrase); - } - return createNewRsc; - } - return false; - } catch (ApiException apiEx) - { - logger.error("Linstor: ApiEx - " + apiEx.getMessage()); - throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); - } - } - - private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) { - DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); - final String rscGrp = getRscGrp(storagePoolVO); - - final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid(); - createResourceBase( - rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(), - linstorApi, rscGrp, storagePoolVO.getId(), false); - - try - { - applyQoSSettings(storagePoolVO, linstorApi, rscName, vol.getMaxIops()); - - return LinstorUtil.getDevicePath(linstorApi, rscName); - } catch (ApiException apiEx) - { - logger.error("Linstor: ApiEx - " + apiEx.getMessage()); - throw new 
CloudRuntimeException(apiEx.getBestMessage(), apiEx); - } - } - private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException { VolumeDefinitionModify dfm = new VolumeDefinitionModify(); dfm.setSizeKib(sizeByte / 1024); @@ -688,13 +415,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver try { ResourceDefinition templateRD = LinstorUtil.findResourceDefinition( - linstorApi, templateRscName, getRscGrp(storagePoolVO)); + linstorApi, templateRscName, LinstorUtil.getRscGrp(storagePoolVO)); final String cloneRes = templateRD != null ? templateRD.getName() : templateRscName; logger.info("Clone resource definition {} to {}", cloneRes, rscName); ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest(); cloneRequest.setName(rscName); if (volumeInfo.getPassphraseId() != null) { - List encryptionLayer = getEncryptedLayerList(linstorApi, getRscGrp(storagePoolVO)); + List encryptionLayer = LinstorUtil.getEncryptedLayerList( + linstorApi, LinstorUtil.getRscGrp(storagePoolVO)); cloneRequest.setLayerList(encryptionLayer); if (volumeInfo.getPassphrase() != null) { String utf8Passphrase = new String(volumeInfo.getPassphrase(), StandardCharsets.UTF_8); @@ -704,7 +432,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone( cloneRes, cloneRequest); - checkLinstorAnswersThrow(cloneStarted.getMessages()); + LinstorUtil.checkLinstorAnswersThrow(cloneStarted.getMessages()); if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) { throw new CloudRuntimeException("Clone for resource " + rscName + " failed."); @@ -716,11 +444,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver resizeResource(linstorApi, rscName, volumeInfo.getSize()); } - updateRscGrpIfNecessary(linstorApi, rscName, getRscGrp(storagePoolVO)); + updateRscGrpIfNecessary(linstorApi, 
rscName, LinstorUtil.getRscGrp(storagePoolVO)); deleteTemplateForProps(linstorApi, rscName); LinstorUtil.applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName()); - applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); + LinstorUtil.applyQoSSettings( + _storagePoolDao, storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); return LinstorUtil.getDevicePath(linstorApi, rscName); } catch (ApiException apiEx) { @@ -744,7 +473,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) { - final String rscGrp = getRscGrp(storagePoolVO); + final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO); final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId); @@ -757,22 +486,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver logger.debug("Create new resource definition: " + rscName); ResourceDefinitionCreate rdCreate = createResourceDefinitionCreate(rscName, rscGrp); ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate); - checkLinstorAnswersThrow(answers); + LinstorUtil.checkLinstorAnswersThrow(answers); SnapshotRestore snapshotRestore = new SnapshotRestore(); snapshotRestore.toResource(rscName); logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore); - checkLinstorAnswersThrow(answers); + LinstorUtil.checkLinstorAnswersThrow(answers); // restore snapshot to new resource logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore); - checkLinstorAnswersThrow(answers); + 
LinstorUtil.checkLinstorAnswersThrow(answers); LinstorUtil.applyAuxProps(linstorApi, rscName, volumeVO.getName(), null); - applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops()); + LinstorUtil.applyQoSSettings(_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops()); return LinstorUtil.getDevicePath(linstorApi, rscName); } catch (ApiException apiEx) { @@ -790,7 +519,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } else if (csTemplateId > 0) { return cloneResource(csTemplateId, volumeInfo, storagePoolVO); } else { - return createResource(volumeInfo, storagePoolVO); + return LinstorUtil.createResource(volumeInfo, storagePoolVO, _storagePoolDao); } } @@ -1140,7 +869,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver String rscName, String snapshotName, String restoredName) throws ApiException { - final String rscGrp = getRscGrp(storagePoolVO); + final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO); // try to delete -rst resource, could happen if the copy failed and noone deleted it. 
deleteResourceDefinition(storagePoolVO, restoredName); ResourceDefinitionCreate rdc = createResourceDefinitionCreate(restoredName, rscGrp); @@ -1185,7 +914,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver final StoragePoolVO pool = _storagePoolDao.findById(dstData.getDataStore().getId()); final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress()); final String rscName = LinstorUtil.RSC_PREFIX + dstData.getUuid(); - boolean newCreated = createResourceBase( + boolean newCreated = LinstorUtil.createResourceBase( LinstorUtil.RSC_PREFIX + dstData.getUuid(), tInfo.getSize(), tInfo.getName(), @@ -1193,9 +922,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver null, null, api, - getRscGrp(pool), + LinstorUtil.getRscGrp(pool), pool.getId(), - true); + true, + false); Answer answer; if (newCreated) { @@ -1429,7 +1159,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { resizeResource(api, rscName, resizeParameter.newSize); - applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops); + LinstorUtil.applyQoSSettings(_storagePoolDao, pool, api, rscName, resizeParameter.newMaxIops); { final VolumeVO volume = _volumeDao.findById(vol.getId()); volume.setMinIops(resizeParameter.newMinIops); @@ -1534,7 +1264,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver @Override public Pair getStorageStats(StoragePool storagePool) { logger.debug(String.format("Requesting storage stats: %s", storagePool)); - return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool)); + return LinstorUtil.getStorageStats(storagePool.getHostAddress(), LinstorUtil.getRscGrp(storagePool)); } @Override diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java 
b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java index 4196c12b116..7c45493dddc 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java @@ -22,6 +22,8 @@ import com.linbit.linstor.api.ApiException; import com.linbit.linstor.api.DevelopersApi; import com.linbit.linstor.api.model.ApiCallRc; import com.linbit.linstor.api.model.ApiCallRcList; +import com.linbit.linstor.api.model.AutoSelectFilter; +import com.linbit.linstor.api.model.LayerType; import com.linbit.linstor.api.model.Node; import com.linbit.linstor.api.model.Properties; import com.linbit.linstor.api.model.ProviderKind; @@ -29,24 +31,36 @@ import com.linbit.linstor.api.model.Resource; import com.linbit.linstor.api.model.ResourceDefinition; import com.linbit.linstor.api.model.ResourceDefinitionModify; import com.linbit.linstor.api.model.ResourceGroup; +import com.linbit.linstor.api.model.ResourceGroupSpawn; import com.linbit.linstor.api.model.ResourceWithVolumes; import com.linbit.linstor.api.model.StoragePool; import com.linbit.linstor.api.model.Volume; +import com.linbit.linstor.api.model.VolumeDefinition; +import com.linbit.linstor.api.model.VolumeDefinitionModify; import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.nio.charset.StandardCharsets; public class LinstorUtil { protected static Logger LOGGER = LogManager.getLogger(LinstorUtil.class); @@ -56,6 +70,8 @@ public class LinstorUtil { public static final String RSC_GROUP = "resourceGroup"; public static final String CS_TEMPLATE_FOR_PREFIX = "_cs-template-for-"; + public static final String LIN_PROP_DRBDOPT_EXACT_SIZE = "DrbdOptions/ExactSize"; + public static final String TEMP_VOLUME_ID = "tempVolumeId"; public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops"; @@ -76,6 +92,32 @@ public class LinstorUtil { .orElse((answers.get(0)).getMessage()) : null; } + public static void logLinstorAnswer(@Nonnull ApiCallRc answer) { + if (answer.isError()) { + LOGGER.error(answer.getMessage()); + } else if (answer.isWarning()) { + LOGGER.warn(answer.getMessage()); + } else if (answer.isInfo()) { + LOGGER.info(answer.getMessage()); + } + } + + public static void logLinstorAnswers(@Nonnull ApiCallRcList answers) { + answers.forEach(LinstorUtil::logLinstorAnswer); + } + + public static void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) { + logLinstorAnswers(answers); + if (answers.hasError()) + { + String errMsg = answers.stream() + .filter(ApiCallRc::isError) + .findFirst() + .map(ApiCallRc::getMessage).orElse("Unknown linstor error"); + throw new CloudRuntimeException(errMsg); + } + } + public static List getLinstorNodeNames(@Nonnull DevelopersApi api) throws ApiException { List nodes = api.nodeList( @@ -488,4 +530,253 @@ public class LinstorUtil { } return false; } + + public static String getRscGrp(com.cloud.storage.StoragePool storagePool) { + return storagePool.getUserInfo() != null && !storagePool.getUserInfo().isEmpty() ? 
+ storagePool.getUserInfo() : "DfltRscGrp"; + } + + /** + * Condition if a template resource can be shared with the given resource group. + * @param tgtRscGrp + * @param tgtLayerStack + * @param rg + * @return True if the template resource can be shared, else false. + */ + private static boolean canShareTemplateForResourceGroup( + ResourceGroup tgtRscGrp, List tgtLayerStack, ResourceGroup rg) { + List rgLayerStack = rg.getSelectFilter() != null ? + rg.getSelectFilter().getLayerStack() : null; + return Objects.equals(tgtLayerStack, rgLayerStack) && + Objects.equals(tgtRscGrp.getSelectFilter().getStoragePoolList(), + rg.getSelectFilter().getStoragePoolList()); + } + + /** + * Searches for a shareable template for this rscGrpName and sets the aux template property. + * @param api + * @param rscName + * @param rscGrpName + * @param existingRDs + * @return + * @throws ApiException + */ + private static boolean foundShareableTemplate( + DevelopersApi api, String rscName, String rscGrpName, + List> existingRDs) throws ApiException { + if (!existingRDs.isEmpty()) { + ResourceGroup tgtRscGrp = api.resourceGroupList( + Collections.singletonList(rscGrpName), null, null, null).get(0); + List tgtLayerStack = tgtRscGrp.getSelectFilter() != null ? + tgtRscGrp.getSelectFilter().getLayerStack() : null; + + // check if there is already a template copy, that we could reuse + // this means if select filters are similar enough to allow cloning from + for (Pair rdPair : existingRDs) { + ResourceGroup rg = rdPair.second(); + if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) { + LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName); + return true; + } + } + } + return false; + } + + /** + * Returns the layerlist of the resourceGroup with encryption(LUKS) added above STORAGE. + * If the resourceGroup layer list already contains LUKS this layer list will be returned. 
+ * @param api Linstor developers API + * @param resourceGroup Resource group to get the encryption layer list + * @return layer list with LUKS added + */ + public static List getEncryptedLayerList(DevelopersApi api, String resourceGroup) { + try { + List rscGrps = api.resourceGroupList( + Collections.singletonList(resourceGroup), Collections.emptyList(), null, null); + + if (CollectionUtils.isEmpty(rscGrps)) { + throw new CloudRuntimeException( + String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); + } + + final ResourceGroup rscGrp = rscGrps.get(0); + List layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE); + List curLayerStack = rscGrp.getSelectFilter() != null ? + rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList(); + if (CollectionUtils.isNotEmpty(curLayerStack)) { + layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList()); + if (!layers.contains(LayerType.LUKS)) { + layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE + } + } + return layers; + } catch (ApiException e) { + throw new CloudRuntimeException( + String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); + } + } + + /** + * Spawns a new Linstor resource with the given arguments. 
+ * @param api + * @param newRscName + * @param sizeInBytes + * @param isTemplate + * @param rscGrpName + * @param volName + * @param vmName + * @throws ApiException + */ + private static void spawnResource( + DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName, + String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase, + boolean exactSize) throws ApiException + { + ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn(); + rscGrpSpawn.setResourceDefinitionName(newRscName); + rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024); + if (passPhraseId != null) { + AutoSelectFilter asf = new AutoSelectFilter(); + List luksLayers = getEncryptedLayerList(api, rscGrpName); + asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList())); + rscGrpSpawn.setSelectFilter(asf); + if (passPhrase != null) { + String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8); + rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase)); + } + } + + Properties props = new Properties(); + if (isTemplate) { + props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true"); + } + if (exactSize) { + props.put(LIN_PROP_DRBDOPT_EXACT_SIZE, "true"); + } + rscGrpSpawn.setResourceDefinitionProps(props); + + LOGGER.info("Linstor: Spawn resource " + newRscName); + ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn); + checkLinstorAnswersThrow(answers); + + answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName); + checkLinstorAnswersThrow(answers); + } + + /** + * Creates a new Linstor resource. + * @param rscName + * @param sizeInBytes + * @param volName + * @param vmName + * @param api + * @param rscGrp + * @param poolId + * @param isTemplate indicates if the resource is a template + * @return true if a new resource was created, false if it already existed or was reused. 
+ */ + public static boolean createResourceBase( + String rscName, long sizeInBytes, String volName, String vmName, + @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api, + String rscGrp, long poolId, boolean isTemplate, boolean exactSize) + { + try + { + LOGGER.debug("createRscBase: {} :: {} :: {} :: {}", rscName, rscGrp, isTemplate, exactSize); + List> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName); + + String fullRscName = String.format("%s-%d", rscName, poolId); + boolean alreadyCreated = existingRDs.stream() + .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) || + existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp))); + if (!alreadyCreated) { + boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs); + if (createNewRsc) { + String newRscName = existingRDs.isEmpty() ? rscName : fullRscName; + spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp, + volName, vmName, passPhraseId, passPhrase, exactSize); + } + return createNewRsc; + } + return false; + } catch (ApiException apiEx) + { + LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } + + public static void applyQoSSettings(PrimaryDataStoreDao primaryDataStoreDao, + StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops) + throws ApiException + { + Long currentQosIops = null; + List vlmDfns = api.volumeDefinitionList(rscName, null, null); + if (!vlmDfns.isEmpty()) + { + Properties props = vlmDfns.get(0).getProps(); + long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0")); + currentQosIops = iops > 0 ? 
iops : null; + } + + if (!Objects.equals(maxIops, currentQosIops)) + { + VolumeDefinitionModify vdm = new VolumeDefinitionModify(); + if (maxIops != null) + { + Properties props = new Properties(); + props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops); + props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops); + vdm.overrideProps(props); + LOGGER.info("Apply qos setting: {} to {}", maxIops, rscName); + } + else + { + LOGGER.info("Remove QoS setting for {}", rscName); + vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops")); + } + ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm); + LinstorUtil.checkLinstorAnswersThrow(answers); + + Long capacityIops = storagePool.getCapacityIops(); + if (capacityIops != null) + { + long vcIops = currentQosIops != null ? currentQosIops * -1 : 0; + long vMaxIops = maxIops != null ? maxIops : 0; + long newIops = vcIops + vMaxIops; + capacityIops -= newIops; + LOGGER.info("Current storagepool {} iops capacity: {}", storagePool, capacityIops); + storagePool.setCapacityIops(Math.max(0, capacityIops)); + primaryDataStoreDao.update(storagePool.getId(), storagePool); + } + } + } + + public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO, + PrimaryDataStoreDao primaryDataStoreDao) { + return createResource(vol, storagePoolVO, primaryDataStoreDao, false); + } + + public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO, + PrimaryDataStoreDao primaryDataStoreDao, boolean exactSize) { + DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); + final String rscGrp = getRscGrp(storagePoolVO); + + final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid(); + createResourceBase( + rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(), + linstorApi, rscGrp, storagePoolVO.getId(), false, exactSize); + + try + { + 
applyQoSSettings(primaryDataStoreDao, storagePoolVO, linstorApi, rscName, vol.getMaxIops()); + + return LinstorUtil.getDevicePath(linstorApi, rscName); + } catch (ApiException apiEx) + { + LOGGER.error("Linstor: ApiEx - " + apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java new file mode 100644 index 00000000000..cab2820f09a --- /dev/null +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java @@ -0,0 +1,437 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.cloudstack.storage.motion; + +import com.linbit.linstor.api.ApiException; +import com.linbit.linstor.api.DevelopersApi; +import com.linbit.linstor.api.model.ApiCallRcList; +import com.linbit.linstor.api.model.ResourceDefinition; +import com.linbit.linstor.api.model.ResourceDefinitionModify; + +import javax.inject.Inject; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.MigrateAnswer; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.LinstorUtil; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.springframework.stereotype.Component; + + +/** + * Current state: + * Just changing the resource-group on the same storage pool is not really good enough. + * Linstor currently lacks a good way to move resources to another resource-group while respecting + * every auto-filter setting. + * Also, a Linstor clone would simply set the new resource-group without any adjustment of storage pools or + * auto-select resource placement. + * So currently, we will create a new resource in the wanted primary storage and let qemu copy the data into the + devices.
+ */ + +@Component +public class LinstorDataMotionStrategy implements DataMotionStrategy { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + private SnapshotDataStoreDao _snapshotStoreDao; + @Inject + private PrimaryDataStoreDao _storagePool; + @Inject + private VolumeDao _volumeDao; + @Inject + private VolumeDataFactory _volumeDataFactory; + @Inject + private VMInstanceDao _vmDao; + @Inject + private GuestOSDao _guestOsDao; + @Inject + private VolumeService _volumeService; + @Inject + private GuestOSCategoryDao _guestOsCategoryDao; + @Inject + private SnapshotDao _snapshotDao; + @Inject + private AgentManager _agentManager; + @Inject + private PrimaryDataStoreDao _storagePoolDao; + + @Override + public StrategyPriority canHandle(DataObject srcData, DataObject dstData) { + DataObjectType srcType = srcData.getType(); + DataObjectType dstType = dstData.getType(); + logger.debug("canHandle: {} -> {}", srcType, dstType); + return StrategyPriority.CANT_HANDLE; + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, + AsyncCompletionCallback callback) { + throw new CloudRuntimeException("not implemented"); + } + + private boolean isDestinationLinstorPrimaryStorage(Map volumeMap) { + if (MapUtils.isNotEmpty(volumeMap)) { + for (DataStore dataStore : volumeMap.values()) { + StoragePoolVO storagePoolVO = _storagePool.findById(dataStore.getId()); + if (storagePoolVO == null + || !storagePoolVO.getStorageProviderName().equals(LinstorUtil.PROVIDER_NAME)) { + return false; + } + } + } else { + return false; + } + return true; + } + + @Override + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { + logger.debug("canHandle -- {}: {} -> {}", volumeMap, srcHost, destHost); + if (srcHost.getId() != destHost.getId() && isDestinationLinstorPrimaryStorage(volumeMap)) { + return StrategyPriority.HIGHEST; + } + return StrategyPriority.CANT_HANDLE; + } + + private VolumeVO 
createNewVolumeVO(Volume volume, StoragePoolVO storagePoolVO) { + VolumeVO newVol = new VolumeVO(volume); + newVol.setInstanceId(null); + newVol.setChainInfo(null); + newVol.setPath(newVol.getUuid()); + newVol.setFolder(null); + newVol.setPodId(storagePoolVO.getPodId()); + newVol.setPoolId(storagePoolVO.getId()); + newVol.setLastPoolId(volume.getPoolId()); + + return _volumeDao.persist(newVol); + } + + private void removeExactSizeProperty(VolumeInfo volumeInfo) { + StoragePoolVO destStoragePool = _storagePool.findById(volumeInfo.getDataStore().getId()); + DevelopersApi api = LinstorUtil.getLinstorAPI(destStoragePool.getHostAddress()); + + ResourceDefinitionModify rdm = new ResourceDefinitionModify(); + rdm.setDeleteProps(Collections.singletonList(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE)); + try { + String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getPath(); + ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm); + LinstorUtil.checkLinstorAnswersThrow(answers); + } catch (ApiException apiEx) { + logger.error("Linstor: ApiEx - {}", apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } + + private void handlePostMigration(boolean success, Map srcVolumeInfoToDestVolumeInfo, + VirtualMachineTO vmTO, Host destHost) { + if (!success) { + try { + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + + pfmc.setRollback(true); + + Answer pfma = _agentManager.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; + String msg = "Unable to rollback prepare for migration due to the following: " + details; + + throw new AgentUnavailableException(msg, destHost.getId()); + } + } catch (Exception e) { + logger.debug("Failed to disconnect one or more (original) dest volumes", e); + } + } + + for (Map.Entry entry : srcVolumeInfoToDestVolumeInfo.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + VolumeInfo destVolumeInfo = entry.getValue(); + + if (success) { + srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded); + + _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId()); + + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + + volumeVO.setFormat(Storage.ImageFormat.QCOW2); + + _volumeDao.update(volumeVO.getId(), volumeVO); + + // remove exact size property + removeExactSizeProperty(destVolumeInfo); + + try { + _volumeService.destroyVolume(srcVolumeInfo.getId()); + + srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = + _volumeService.expungeVolumeAsync(srcVolumeInfo); + + if (destroyFuture.get().isFailed()) { + logger.debug("Failed to clean up source volume on storage"); + } + } catch (Exception e) { + logger.debug("Failed to clean up source volume on storage", e); + } + + // Update the volume ID for snapshots on secondary storage + if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) { + _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + _snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + } + } else { + try { + _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore()); + } catch (Exception e) { + logger.debug("Failed to revoke access from dest volume", e); + } + + 
destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + + try { + _volumeService.destroyVolume(destVolumeInfo.getId()); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = + _volumeService.expungeVolumeAsync(destVolumeInfo); + + if (destroyFuture.get().isFailed()) { + logger.debug("Failed to clean up dest volume on storage"); + } + } catch (Exception e) { + logger.debug("Failed to clean up dest volume on storage", e); + } + } + } + } + + /** + * Determines whether the destination volume should have the DRBD exact-size property set + * during migration. + * + *

This method queries the Linstor API to check if the source volume's resource definition + * has the exact-size DRBD option enabled. The exact-size property ensures that DRBD uses + * the precise volume size rather than rounding, which is important for maintaining size + * consistency during migrations.

+ * + * @param srcVolumeInfo the source volume information to check + * @return {@code true} if the exact-size property should be set on the destination volume, + * which occurs when the source volume has this property enabled, or when the + * property cannot be determined (defaults to {@code true} for safety); + * {@code false} only when the source is confirmed to not have the exact-size property + */ + private boolean needsExactSizeProp(VolumeInfo srcVolumeInfo) { + StoragePoolVO srcStoragePool = _storagePool.findById(srcVolumeInfo.getDataStore().getId()); + if (srcStoragePool.getPoolType() == Storage.StoragePoolType.Linstor) { + DevelopersApi api = LinstorUtil.getLinstorAPI(srcStoragePool.getHostAddress()); + + String rscName = LinstorUtil.RSC_PREFIX + srcVolumeInfo.getPath(); + try { + List rscDfns = api.resourceDefinitionList( + Collections.singletonList(rscName), + false, + Collections.emptyList(), + null, + null); + if (!CollectionUtils.isEmpty(rscDfns)) { + ResourceDefinition srcRsc = rscDfns.get(0); + String exactSizeProp = srcRsc.getProps().get(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE); + return "true".equalsIgnoreCase(exactSizeProp); + } else { + logger.warn("Unknown resource {} on {}", rscName, srcStoragePool.getHostAddress()); + } + } catch (ApiException apiEx) { + logger.error("Unable to fetch resource definition {}: {}", rscName, apiEx.getBestMessage()); + } + } + return true; + } + + @Override + public void copyAsync(Map volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost, + Host destHost, AsyncCompletionCallback callback) { + + if (srcHost.getHypervisorType() != Hypervisor.HypervisorType.KVM) { + throw new CloudRuntimeException( + String.format("Invalid hypervisor type [%s]. 
Only KVM supported", srcHost.getHypervisorType())); + } + + String errMsg = null; + VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId()); + vmTO.setState(vmInstance.getState()); + List migrateDiskInfoList = new ArrayList<>(); + + Map migrateStorage = new HashMap<>(); + Map srcVolumeInfoToDestVolumeInfo = new HashMap<>(); + + try { + for (Map.Entry entry : volumeDataStoreMap.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + DataStore destDataStore = entry.getValue(); + VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId()); + StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId()); + + if (srcVolumeInfo.getPassphraseId() != null) { + throw new CloudRuntimeException( + String.format("Cannot live migrate encrypted volume: %s", srcVolumeInfo.getVolume())); + } + + VolumeVO destVolume = createNewVolumeVO(srcVolume, destStoragePool); + + VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopyRequested); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopySucceeded); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + + boolean exactSize = needsExactSizeProp(srcVolumeInfo); + + String devPath = LinstorUtil.createResource( + destVolumeInfo, destStoragePool, _storagePoolDao, exactSize); + + _volumeDao.update(destVolume.getId(), destVolume); + destVolume = _volumeDao.findById(destVolume.getId()); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + MigrateCommand.MigrateDiskInfo migrateDiskInfo = new MigrateCommand.MigrateDiskInfo( + srcVolumeInfo.getPath(), + MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, + MigrateCommand.MigrateDiskInfo.DriverType.RAW, + MigrateCommand.MigrateDiskInfo.Source.DEV, + devPath); + migrateDiskInfoList.add(migrateDiskInfo); + + migrateStorage.put(srcVolumeInfo.getPath(), 
migrateDiskInfo); + + srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo); + } + + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + try { + Answer pfma = _agentManager.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? pfma.getDetails() : "null answer returned"; + errMsg = String.format("Unable to prepare for migration due to the following: %s", details); + + throw new AgentUnavailableException(errMsg, destHost.getId()); + } + } catch (final OperationTimedoutException e) { + errMsg = String.format("Operation timed out due to %s", e.getMessage()); + throw new AgentUnavailableException(errMsg, destHost.getId()); + } + + VMInstanceVO vm = _vmDao.findById(vmTO.getId()); + boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()) + .getName().equalsIgnoreCase("Windows"); + + MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(), + destHost.getPrivateIpAddress(), isWindows, vmTO, true); + migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value()); + migrateCommand.setMigrateStorage(migrateStorage); + migrateCommand.setMigrateStorageManaged(true); + migrateCommand.setNewVmCpuShares( + vmTO.getCpus() * ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed())); + migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList); + + boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); + migrateCommand.setAutoConvergence(kvmAutoConvergence); + + MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand); + boolean success = migrateAnswer != null && migrateAnswer.getResult(); + + handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost); + + if (migrateAnswer == null) { + throw new CloudRuntimeException("Unable to get an answer to the migrate command"); + } + + if (!migrateAnswer.getResult()) { + errMsg = 
migrateAnswer.getDetails(); + + throw new CloudRuntimeException(errMsg); + } + } catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) { + errMsg = String.format( + "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in LinstorDataMotionStrategy.copyAsync. Error message: [%s].", + vmTO, srcHost, destHost, ex.getMessage()); + logger.error(errMsg, ex); + + throw new CloudRuntimeException(errMsg); + } finally { + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); + } + } +} diff --git a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml index a900323ede5..88d1051c71e 100644 --- a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml +++ b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml @@ -33,4 +33,6 @@ class="org.apache.cloudstack.storage.snapshot.LinstorVMSnapshotStrategy" /> + diff --git a/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java b/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java index 75276739468..4653cfa358b 100644 --- a/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java +++ b/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java @@ -26,6 +26,7 @@ 
import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.apache.cloudstack.storage.datastore.util.LinstorUtil; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -75,13 +76,13 @@ public class LinstorPrimaryDataStoreDriverImplTest { when(api.resourceGroupList(Collections.singletonList("EncryptedGrp"), Collections.emptyList(), null, null)) .thenReturn(Collections.singletonList(encryptedGrp)); - List layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "DfltRscGrp"); + List layers = LinstorUtil.getEncryptedLayerList(api, "DfltRscGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers); - layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "BcacheGrp"); + layers = LinstorUtil.getEncryptedLayerList(api, "BcacheGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.BCACHE, LayerType.LUKS, LayerType.STORAGE), layers); - layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "EncryptedGrp"); + layers = LinstorUtil.getEncryptedLayerList(api, "EncryptedGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers); } } From 3ba5c2b610f1a13cae612462720ffaf624b76501 Mon Sep 17 00:00:00 2001 From: Andrija Panic <45762285+andrijapanicsb@users.noreply.github.com> Date: Thu, 29 Jan 2026 11:27:46 +0100 Subject: [PATCH 2/4] Fix logs mismatch between Network GC wait and interval (#10776) Co-authored-by: nvazquez Co-authored-by: dahn Co-authored-by: Wei Zhou --- .../engine/orchestration/NetworkOrchestrator.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index d19a4f1d4a0..899ce51022b 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -3559,8 +3559,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final HashMap stillFree = new HashMap(); final List networkIds = _networksDao.findNetworksToGarbageCollect(); - final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60); - logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait); + final int netGcWait = NetworkGcWait.value(); + final int netGcInterval = NetworkGcInterval.value(); + logger.info("NetworkGarbageCollector uses '{}' seconds for GC wait and '{}' seconds for GC interval.", netGcWait, netGcInterval); for (final Long networkId : networkIds) { if (!_networkModel.isNetworkReadyForGc(networkId)) { @@ -4882,9 +4883,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } public static final ConfigKey NetworkGcWait = new ConfigKey(Integer.class, "network.gc.wait", "Advanced", "600", - "Time (in seconds) to wait before shutting down a network that's not in used", false, Scope.Global, null); + "Time (in seconds) to wait before shutting down a network that's not in used", true, Scope.Global, null); public static final ConfigKey NetworkGcInterval = new ConfigKey(Integer.class, "network.gc.interval", "Advanced", "600", - "Seconds to wait before checking for networks to shutdown", true, Scope.Global, null); + "Seconds to wait before checking for networks to shutdown", false, Scope.Global, null); @Override public ConfigKey[] getConfigKeys() { From aef3df75771ecb2eaf237b3b803b87cf332a8554 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 29 Jan 2026 13:47:08 +0100 Subject: [PATCH 3/4] server: pass network label of physical network when plug nic for private gateway on hypervisor (#11846) --- 
.../network/dao/PhysicalNetworkTrafficTypeDaoImpl.java | 4 +++- .../com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java | 2 +- .../src/main/java/com/cloud/network/NetworkModelImpl.java | 6 +++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java index 4811b59d31e..6504bb1f3c8 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java @@ -137,7 +137,9 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase tag = customSearch(sc, null); return tag.size() == 0 ? null : tag.get(0); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java index 3b66529ccaf..b4f7fbd6dac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java @@ -232,7 +232,7 @@ public class BridgeVifDriver extends VifDriverBase { String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } else { - String brName = createVnetBr(vNetId, _bridges.get("private"), protocol); + String brName = createVnetBr(vNetId, _bridges.get("guest"), protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } } else { diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 4a5b7199430..86791b87851 100644 --- 
a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -1442,11 +1442,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return null; } + NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); Long physicalNetworkId = null; if (effectiveTrafficType != TrafficType.Guest) { physicalNetworkId = getNonGuestNetworkPhysicalNetworkId(network, effectiveTrafficType); } else { - NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); physicalNetworkId = network.getPhysicalNetworkId(); if (physicalNetworkId == null) { physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), offering.getTags(), offering.getTrafficType()); @@ -1459,6 +1459,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return null; } + if (offering != null && TrafficType.Guest.equals(offering.getTrafficType()) && offering.isSystemOnly()) { + // For private gateway, do not check the Guest traffic type + return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, null, hType); + } return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, effectiveTrafficType, hType); } From 7d52cd0e43a5e225f10bce251f7cd357060382cd Mon Sep 17 00:00:00 2001 From: Fabricio Duarte Date: Thu, 29 Jan 2026 10:38:12 -0300 Subject: [PATCH 4/4] Fix calculation of the next time that Usage will execute in `removeRawUsageRecords` (#12518) * Fix calculation of the next time that Usage will execute in `removeRawUsageRecords` * Address copilot reviews --- .../com/cloud/usage/UsageServiceImpl.java | 62 ++++---- .../com/cloud/usage/UsageManagerImpl.java | 37 ++--- .../cloudstack/utils/usage/UsageUtils.java | 51 +++++++ .../utils/usage/UsageUtilsTest.java | 135 ++++++++++++++++++ 4 files changed, 232 insertions(+), 53 deletions(-) create mode 100644 
utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index edaa22c3bcf..de8d4633d22 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -17,7 +17,6 @@ package com.cloud.usage; import java.util.ArrayList; -import java.util.Calendar; import java.util.Date; import java.util.List; import java.util.Map; @@ -35,6 +34,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; +import org.apache.cloudstack.utils.usage.UsageUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.jetbrains.annotations.NotNull; @@ -127,14 +127,25 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag @Inject private NetworkOfferingDao _networkOfferingDao; + private TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT"); + + private static final long REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS = 15 * 60 * 1000; + public UsageServiceImpl() { } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT"); _usageTimezone = TimeZone.getTimeZone(timeZoneStr); + + String executionTimeZone = _configDao.getValue(Config.UsageExecutionTimezone.toString()); + if (executionTimeZone != null) { + usageExecutionTimeZone = TimeZone.getTimeZone(executionTimeZone); + } + return true; } @@ -465,35 +476,28 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag @Override public boolean removeRawUsageRecords(RemoveRawUsageRecordsCmd cmd) 
throws InvalidParameterValueException { Integer interval = cmd.getInterval(); - if (interval != null && interval > 0 ) { - String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString()); - if (jobExecTime != null ) { - String[] segments = jobExecTime.split(":"); - if (segments.length == 2) { - String timeZoneStr = _configDao.getValue(Config.UsageExecutionTimezone.toString()); - if (timeZoneStr == null) { - timeZoneStr = "GMT"; - } - TimeZone tz = TimeZone.getTimeZone(timeZoneStr); - Calendar cal = Calendar.getInstance(tz); - cal.setTime(new Date()); - long curTS = cal.getTimeInMillis(); - cal.set(Calendar.HOUR_OF_DAY, Integer.parseInt(segments[0])); - cal.set(Calendar.MINUTE, Integer.parseInt(segments[1])); - cal.set(Calendar.SECOND, 0); - cal.set(Calendar.MILLISECOND, 0); - long execTS = cal.getTimeInMillis(); - logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); - // Let's avoid cleanup when job runs and around a 15 min interval - if (Math.abs(curTS - execTS) < 15 * 60 * 1000) { - return false; - } - } - } - _usageDao.expungeAllOlderThan(interval, ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value()); - } else { - throw new InvalidParameterValueException("Invalid interval value. 
Interval to remove cloud_usage records should be greater than 0"); + if (interval == null || interval <= 0) { + throw new InvalidParameterValueException("Interval should be greater than 0."); } + + String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString()); + Date previousJobExecTime = UsageUtils.getPreviousJobExecutionTime(usageExecutionTimeZone, jobExecTime); + Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, jobExecTime); + if (ObjectUtils.allNotNull(previousJobExecTime, nextJobExecTime)) { + logger.debug("Next Usage job is scheduled to execute at [{}]; previous execution was at [{}].", + DateUtil.displayDateInTimezone(usageExecutionTimeZone, nextJobExecTime), DateUtil.displayDateInTimezone(usageExecutionTimeZone, previousJobExecTime)); + Date now = new Date(); + if (nextJobExecTime.getTime() - now.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) { + logger.info("Not removing any cloud_usage records because the next Usage job is scheduled to execute in less than {} minute(s).", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000); + return false; + } else if (now.getTime() - previousJobExecTime.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) { + logger.info("Not removing any cloud_usage records because the last Usage job executed in less than {} minute(s) ago.", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000); + return false; + } + } + + logger.info("Removing cloud_usage records older than {} day(s).", interval); + _usageDao.expungeAllOlderThan(interval, ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value()); return true; } } diff --git a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java index 30cdfcf21f0..9da64889fc3 100644 --- a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java @@ -198,7 +198,9 @@ public class UsageManagerImpl extends ManagerBase implements 
UsageManager, Runna private Future _heartbeat = null; private Future _sanity = null; private boolean usageSnapshotSelection = false; + private static TimeZone usageAggregationTimeZone = TimeZone.getTimeZone("GMT"); + private static TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT"); public UsageManagerImpl() { } @@ -253,6 +255,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna if (aggregationTimeZone != null && !aggregationTimeZone.isEmpty()) { usageAggregationTimeZone = TimeZone.getTimeZone(aggregationTimeZone); } + if (execTimeZone != null) { + usageExecutionTimeZone = TimeZone.getTimeZone(execTimeZone); + } try { if ((execTime == null) || (aggregationRange == null)) { @@ -261,34 +266,18 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna throw new ConfigurationException("Missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); } - String[] execTimeSegments = execTime.split(":"); - if (execTimeSegments.length != 2) { - logger.error("Unable to parse usage.stats.job.exec.time"); - throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "'"); - } - int hourOfDay = Integer.parseInt(execTimeSegments[0]); - int minutes = Integer.parseInt(execTimeSegments[1]); - - Date currentDate = new Date(); - _jobExecTime.setTime(currentDate); - - _jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay); - _jobExecTime.set(Calendar.MINUTE, minutes); - _jobExecTime.set(Calendar.SECOND, 0); - _jobExecTime.set(Calendar.MILLISECOND, 0); - - TimeZone jobExecTimeZone = execTimeZone != null ? 
TimeZone.getTimeZone(execTimeZone) : Calendar.getInstance().getTimeZone(); - _jobExecTime.setTimeZone(jobExecTimeZone); - - // if the hour to execute the job has already passed, roll the day forward to the next day - if (_jobExecTime.getTime().before(currentDate)) { - _jobExecTime.roll(Calendar.DAY_OF_YEAR, true); + + Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, execTime); + if (nextJobExecTime == null) { + throw new ConfigurationException(String.format("Unable to parse configuration 'usage.stats.job.exec.time' value [%s].", execTime)); } + _jobExecTime.setTimeZone(usageExecutionTimeZone); + _jobExecTime.setTime(nextJobExecTime); logger.info("Usage is configured to execute in time zone [{}], at [{}], each [{}] minutes; the current time in that timezone is [{}] and the " + "next job is scheduled to execute at [{}]. During its execution, Usage will aggregate stats according to the time zone [{}] defined in global setting [usage.aggregation.timezone].", - jobExecTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(jobExecTimeZone, currentDate), - DateUtil.displayDateInTimezone(jobExecTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID()); + usageExecutionTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(usageExecutionTimeZone, new Date()), + DateUtil.displayDateInTimezone(usageExecutionTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID()); _aggregationDuration = Integer.parseInt(aggregationRange); if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { diff --git a/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java b/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java index a97aed15d36..861788d1918 100644 --- a/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java +++ b/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java @@ -19,6 +19,57 @@ package 
org.apache.cloudstack.utils.usage; +import com.cloud.utils.DateUtil; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Calendar; +import java.util.Date; +import java.util.TimeZone; + public class UsageUtils { + protected static Logger logger = LogManager.getLogger(UsageUtils.class); + public static final int USAGE_AGGREGATION_RANGE_MIN = 1; + + public static Date getNextJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) { + return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, true); + } + + public static Date getPreviousJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) { + return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, false); + } + + protected static Date getJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig, boolean next) { + String[] execTimeSegments = jobExecTimeConfig.split(":"); + if (execTimeSegments.length != 2) { + logger.warn("Unable to parse configuration 'usage.stats.job.exec.time'."); + return null; + } + int hourOfDay; + int minutes; + try { + hourOfDay = Integer.parseInt(execTimeSegments[0]); + minutes = Integer.parseInt(execTimeSegments[1]); + } catch (NumberFormatException e) { + logger.warn("Unable to parse configuration 'usage.stats.job.exec.time' due to non-numeric values in [{}].", jobExecTimeConfig, e); + return null; + } + + Date currentDate = DateUtil.currentGMTTime(); + Calendar jobExecTime = Calendar.getInstance(usageTimeZone); + jobExecTime.setTime(currentDate); + jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay); + jobExecTime.set(Calendar.MINUTE, minutes); + jobExecTime.set(Calendar.SECOND, 0); + jobExecTime.set(Calendar.MILLISECOND, 0); + + if (next && jobExecTime.getTime().before(currentDate)) { + jobExecTime.add(Calendar.DAY_OF_YEAR, 1); + } else if (!next && jobExecTime.getTime().after(currentDate)) { + jobExecTime.add(Calendar.DAY_OF_YEAR, -1); + } + + return jobExecTime.getTime(); + } } diff --git 
a/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java b/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java new file mode 100644 index 00000000000..8b9b4910e39 --- /dev/null +++ b/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java @@ -0,0 +1,135 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.utils.usage; + +import com.cloud.utils.DateUtil; +import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; +import java.util.TimeZone; + +@RunWith(MockitoJUnitRunner.class) +public class UsageUtilsTest extends TestCase { + + TimeZone usageTimeZone = TimeZone.getTimeZone("GMT-3"); + + @Test + public void getJobExecutionTimeTestReturnsNullWhenConfigurationValueIsInvalid() { + Date result = UsageUtils.getNextJobExecutionTime(usageTimeZone, "test"); + assertNull(result); + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasNotPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724296800000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1724297400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724297460000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1724383800000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasNotPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724296800000L); + + try (MockedStatic dateUtilMockedStatic = 
Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1724211000000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724297460000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1724297400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextExecutionIsOnNextYear() { + Date currentDate = new Date(); + currentDate.setTime(1767236340000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:00", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1767236400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenPreviousExecutionWasOnPreviousYear() { + Date currentDate = new Date(); + currentDate.setTime(1767236460000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "23:59", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1767236340000L, result.getTime()); + } + } + +}