Merge branch '4.20' into 4.22

This commit is contained in:
Suresh Kumar Anaparti 2026-01-29 19:41:29 +05:30
commit f5778eaa32
No known key found for this signature in database
GPG Key ID: D7CEAE3A9E71D0AA
14 changed files with 1011 additions and 359 deletions

View File

@ -3586,8 +3586,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final HashMap<Long, Long> stillFree = new HashMap<>();
final List<Long> networkIds = _networksDao.findNetworksToGarbageCollect();
final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60);
logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait);
final int netGcWait = NetworkGcWait.value();
final int netGcInterval = NetworkGcInterval.value();
logger.info("NetworkGarbageCollector uses '{}' seconds for GC wait and '{}' seconds for GC interval.", netGcWait, netGcInterval);
for (final Long networkId : networkIds) {
if (!_networkModel.isNetworkReadyForGc(networkId)) {
@ -4908,10 +4909,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return NetworkOrchestrationService.class.getSimpleName();
}
public static final ConfigKey<Integer> NetworkGcWait = new ConfigKey<>(Integer.class, "network.gc.wait", "Advanced", "600",
"Time (in seconds) to wait before shutting down a network that's not in used", false, Scope.Global, null);
public static final ConfigKey<Integer> NetworkGcInterval = new ConfigKey<>(Integer.class, "network.gc.interval", "Advanced", "600",
"Seconds to wait before checking for networks to shutdown", true, Scope.Global, null);
public static final ConfigKey<Integer> NetworkGcWait = new ConfigKey<Integer>(Integer.class, "network.gc.wait", "Advanced", "600",
"Time (in seconds) to wait before shutting down a network that's not in used", true, Scope.Global, null);
public static final ConfigKey<Integer> NetworkGcInterval = new ConfigKey<Integer>(Integer.class, "network.gc.interval", "Advanced", "600",
"Seconds to wait before checking for networks to shutdown", false, Scope.Global, null);
@Override
public ConfigKey<?>[] getConfigKeys() {

View File

@ -137,7 +137,9 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase<PhysicalNe
}
sc.setParameters("physicalNetworkId", physicalNetworkId);
sc.setParameters("trafficType", trafficType);
if (trafficType != null) {
sc.setParameters("trafficType", trafficType);
}
List<String> tag = customSearch(sc, null);
return tag.size() == 0 ? null : tag.get(0);

View File

@ -236,7 +236,7 @@ public class BridgeVifDriver extends VifDriverBase {
String brName = createVnetBr(vNetId, trafficLabel, protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
} else {
String brName = createVnetBr(vNetId, _bridges.get("private"), protocol);
String brName = createVnetBr(vNetId, _bridges.get("guest"), protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
}
} else {

View File

@ -24,6 +24,12 @@ All notable changes to Linstor CloudStack plugin will be documented in this file
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [2026-01-17]
### Added
- Support live migration from other primary storage
## [2025-12-18]
### Changed

View File

@ -21,33 +21,25 @@ import com.linbit.linstor.api.CloneWaiter;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRc;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.AutoSelectFilter;
import com.linbit.linstor.api.model.LayerType;
import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest;
import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
import com.linbit.linstor.api.model.ResourceDefinitionCreate;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceMakeAvailable;
import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.Snapshot;
import com.linbit.linstor.api.model.SnapshotRestore;
import com.linbit.linstor.api.model.VolumeDefinition;
import com.linbit.linstor.api.model.VolumeDefinitionModify;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@ -117,10 +109,9 @@ import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.nio.charset.StandardCharsets;
@ -335,275 +326,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
}
/** Logs a single Linstor API call result at the level matching its severity. */
private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
    final String message = answer.getMessage();
    if (answer.isError()) {
        logger.error(message);
    } else if (answer.isWarning()) {
        logger.warn(message);
    } else if (answer.isInfo()) {
        logger.info(message);
    }
}
/** Logs every answer of the given list via {@code logLinstorAnswer}. */
private void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
    for (final ApiCallRc answer : answers) {
        logLinstorAnswer(answer);
    }
}
/**
 * Logs all answers and, if any of them reports an error, throws a
 * {@link CloudRuntimeException} carrying the first error message found.
 */
private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
    logLinstorAnswers(answers);
    if (!answers.hasError()) {
        return;
    }
    String errMsg = answers.stream()
        .filter(ApiCallRc::isError)
        .findFirst()
        .map(ApiCallRc::getMessage)
        .orElse("Unknown linstor error");
    throw new CloudRuntimeException(errMsg);
}
private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) {
logLinstorAnswers(answers);
LinstorUtil.logLinstorAnswers(answers);
return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null);
}
/**
 * Applies (or removes) blkio IOPS throttling on the first volume definition of the
 * given Linstor resource, and books the IOPS delta against the storage pool's
 * capacity when the pool tracks one.
 *
 * @param storagePool pool whose IOPS capacity is adjusted and persisted
 * @param api         Linstor developers API client
 * @param rscName     Linstor resource definition name to modify
 * @param maxIops     desired IOPS limit; {@code null} removes any existing limit
 * @throws ApiException on Linstor API communication errors
 */
private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops)
    throws ApiException
{
    // Read the currently applied limit from the write-iops property; 0 or absent
    // is treated as "no QoS set".
    Long currentQosIops = null;
    List<VolumeDefinition> vlmDfns = api.volumeDefinitionList(rscName, null, null);
    if (!vlmDfns.isEmpty())
    {
        Properties props = vlmDfns.get(0).getProps();
        long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0"));
        currentQosIops = iops > 0 ? iops : null;
    }
    // Only touch the volume definition when the desired limit differs.
    if (!Objects.equals(maxIops, currentQosIops))
    {
        VolumeDefinitionModify vdm = new VolumeDefinitionModify();
        if (maxIops != null)
        {
            // Set read and write throttles to the same value.
            Properties props = new Properties();
            props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops);
            props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops);
            vdm.overrideProps(props);
            logger.info("Apply qos setting: " + maxIops + " to " + rscName);
        }
        else
        {
            logger.info("Remove QoS setting for " + rscName);
            vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops"));
        }
        // Modification targets volume number 0 (the only volume these resources use here).
        ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm);
        checkLinstorAnswersThrow(answers);
        // Account the change against the pool's IOPS capacity: release the old
        // reservation (negative) and take the new one, clamping at 0.
        Long capacityIops = storagePool.getCapacityIops();
        if (capacityIops != null)
        {
            long vcIops = currentQosIops != null ? currentQosIops * -1 : 0;
            long vMaxIops = maxIops != null ? maxIops : 0;
            long newIops = vcIops + vMaxIops;
            capacityIops -= newIops;
            logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops));
            storagePool.setCapacityIops(Math.max(0, capacityIops));
            _storagePoolDao.update(storagePool.getId(), storagePool);
        }
    }
}
/** Returns the resource group configured on the pool, or the Linstor default group. */
private String getRscGrp(StoragePool storagePool) {
    final String userInfo = storagePool.getUserInfo();
    if (userInfo == null || userInfo.isEmpty()) {
        return "DfltRscGrp";
    }
    return userInfo;
}
/**
 * Returns the layer list of the resourceGroup with encryption (LUKS) added above STORAGE.
 * If the resource group's layer list already contains LUKS, that layer list is returned
 * unchanged; if the group declares no layer stack, the default DRBD,LUKS,STORAGE stack
 * is returned.
 *
 * @param api Linstor developers API
 * @param resourceGroup Resource group to get the encryption layer list
 * @return layer list with LUKS added
 * @throws CloudRuntimeException if the group is missing or the Linstor API call fails
 */
public List<LayerType> getEncryptedLayerList(DevelopersApi api, String resourceGroup) {
    try {
        List<ResourceGroup> rscGrps = api.resourceGroupList(
            Collections.singletonList(resourceGroup), Collections.emptyList(), null, null);
        if (CollectionUtils.isEmpty(rscGrps)) {
            throw new CloudRuntimeException(
                String.format("Resource Group %s not found on Linstor cluster.", resourceGroup));
        }
        final ResourceGroup rscGrp = rscGrps.get(0);
        // Default stack used when the group does not declare an explicit layer stack.
        List<LayerType> layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE);
        List<String> curLayerStack = rscGrp.getSelectFilter() != null ?
            rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList();
        if (CollectionUtils.isNotEmpty(curLayerStack)) {
            layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList());
            if (!layers.contains(LayerType.LUKS)) {
                layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE
            }
        }
        return layers;
    } catch (ApiException e) {
        // Preserve the underlying API error instead of reporting every failure as "not found".
        throw new CloudRuntimeException(
            String.format("Unable to list resource group %s on Linstor cluster: %s",
                resourceGroup, e.getBestMessage()), e);
    }
}
/**
 * Spawns a new Linstor resource with the given arguments via the resource group
 * spawn API, then applies the CloudStack aux properties (volume/VM name).
 *
 * @param api          Linstor developers API client
 * @param newRscName   name of the resource definition to create
 * @param sizeInBytes  volume size in bytes (converted to KiB for Linstor)
 * @param isTemplate   marks the spawned resource as a template via an aux property
 * @param rscGrpName   resource group to spawn from
 * @param volName      CloudStack volume name, stored as an aux property
 * @param vmName       attached VM name, stored as an aux property
 * @param passPhraseId when non-null, the resource is created with a LUKS layer
 * @param passPhrase   optional LUKS passphrase bytes (interpreted as UTF-8)
 * @throws ApiException on Linstor API communication errors
 */
private void spawnResource(
    DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName,
    String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase) throws ApiException
{
    ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
    rscGrpSpawn.setResourceDefinitionName(newRscName);
    // Linstor expects KiB.
    rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024);
    if (passPhraseId != null) {
        // Encryption requested: force a layer stack that contains LUKS.
        AutoSelectFilter asf = new AutoSelectFilter();
        List<LayerType> luksLayers = getEncryptedLayerList(api, rscGrpName);
        asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList()));
        rscGrpSpawn.setSelectFilter(asf);
        if (passPhrase != null) {
            String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8);
            rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase));
        }
    }
    if (isTemplate) {
        // Tag the resource as a template for this resource group.
        Properties props = new Properties();
        props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true");
        rscGrpSpawn.setResourceDefinitionProps(props);
    }
    logger.info("Linstor: Spawn resource " + newRscName);
    ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn);
    checkLinstorAnswersThrow(answers);
    answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName);
    checkLinstorAnswersThrow(answers);
}
/**
 * Condition if a template resource can be shared with the given resource group:
 * both groups must have equal layer stacks and equal storage pool lists.
 *
 * @param tgtRscGrp     target resource group
 * @param tgtLayerStack layer stack of the target group ({@code null} if it has no select filter)
 * @param rg            candidate resource group
 * @return True if the template resource can be shared, else false.
 */
private boolean canShareTemplateForResourceGroup(
    ResourceGroup tgtRscGrp, List<String> tgtLayerStack, ResourceGroup rg) {
    // Either group may lack a select filter entirely; guard every access
    // (the original only guarded rg's layer-stack access and NPE'd on the pool lists).
    AutoSelectFilter tgtFilter = tgtRscGrp.getSelectFilter();
    AutoSelectFilter rgFilter = rg.getSelectFilter();
    List<String> rgLayerStack = rgFilter != null ? rgFilter.getLayerStack() : null;
    if (!Objects.equals(tgtLayerStack, rgLayerStack)) {
        return false;
    }
    List<String> tgtPools = tgtFilter != null ? tgtFilter.getStoragePoolList() : null;
    List<String> rgPools = rgFilter != null ? rgFilter.getStoragePoolList() : null;
    return Objects.equals(tgtPools, rgPools);
}
/**
 * Searches for a shareable template for this rscGrpName and sets the aux template property.
 *
 * @param api         Linstor developers API client
 * @param rscName     base resource name of the template
 * @param rscGrpName  target resource group name
 * @param existingRDs resource definitions (with their groups) whose names start with rscName
 * @return {@code true} if an existing template was marked shareable for this group,
 *         {@code false} otherwise
 * @throws ApiException on Linstor API communication errors
 */
private boolean foundShareableTemplate(
    DevelopersApi api, String rscName, String rscGrpName,
    List<Pair<ResourceDefinition, ResourceGroup>> existingRDs) throws ApiException {
    if (!existingRDs.isEmpty()) {
        // NOTE(review): assumes rscGrpName always resolves to at least one group;
        // an empty resourceGroupList result would throw IndexOutOfBounds here — confirm.
        ResourceGroup tgtRscGrp = api.resourceGroupList(
            Collections.singletonList(rscGrpName), null, null, null).get(0);
        List<String> tgtLayerStack = tgtRscGrp.getSelectFilter() != null ?
            tgtRscGrp.getSelectFilter().getLayerStack() : null;
        // check if there is already a template copy, that we could reuse
        // this means if select filters are similar enough to allow cloning from
        for (Pair<ResourceDefinition, ResourceGroup> rdPair : existingRDs) {
            ResourceGroup rg = rdPair.second();
            if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) {
                LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName);
                return true;
            }
        }
    }
    return false;
}
/**
 * Creates a new Linstor resource, unless a matching resource or a shareable
 * template already exists.
 *
 * @param rscName      base resource name
 * @param sizeInBytes  volume size in bytes
 * @param volName      CloudStack volume name (stored as aux property)
 * @param vmName       attached VM name (stored as aux property)
 * @param passPhraseId when non-null, the resource is created encrypted (LUKS)
 * @param passPhrase   optional LUKS passphrase bytes
 * @param api          Linstor developers API client
 * @param rscGrp       resource group to create in
 * @param poolId       storage pool id, used to disambiguate the resource name
 * @param isTemplate   indicates if the resource is a template
 * @return true if a new resource was created, false if it already existed or was reused.
 */
private boolean createResourceBase(
    String rscName, long sizeInBytes, String volName, String vmName,
    @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api,
    String rscGrp, long poolId, boolean isTemplate)
{
    try
    {
        logger.debug("createRscBase: {} :: {} :: {}", rscName, rscGrp, isTemplate);
        List<Pair<ResourceDefinition, ResourceGroup>> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName);
        // Pool-qualified name used when the plain name is already taken.
        String fullRscName = String.format("%s-%d", rscName, poolId);
        // Already created if the pool-qualified name exists, or a definition
        // carries the template marker for this resource group.
        boolean alreadyCreated = existingRDs.stream()
            .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) ||
            existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp)));
        if (!alreadyCreated) {
            // Reuse a shareable template if one matches; otherwise spawn a new resource.
            boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs);
            if (createNewRsc) {
                String newRscName = existingRDs.isEmpty() ? rscName : fullRscName;
                spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp,
                    volName, vmName, passPhraseId, passPhrase);
            }
            return createNewRsc;
        }
        return false;
    } catch (ApiException apiEx)
    {
        logger.error("Linstor: ApiEx - " + apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
/**
 * Creates a Linstor resource for the given volume on the given pool, applies the
 * volume's QoS settings and returns the local device path.
 */
private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) {
    final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
    final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
    createResourceBase(rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(),
        vol.getPassphraseId(), vol.getPassphrase(), linstorApi, getRscGrp(storagePoolVO),
        storagePoolVO.getId(), false);
    try {
        applyQoSSettings(storagePoolVO, linstorApi, rscName, vol.getMaxIops());
        return LinstorUtil.getDevicePath(linstorApi, rscName);
    } catch (ApiException apiEx) {
        logger.error("Linstor: ApiEx - " + apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException {
VolumeDefinitionModify dfm = new VolumeDefinitionModify();
dfm.setSizeKib(sizeByte / 1024);
@ -688,13 +415,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
try {
ResourceDefinition templateRD = LinstorUtil.findResourceDefinition(
linstorApi, templateRscName, getRscGrp(storagePoolVO));
linstorApi, templateRscName, LinstorUtil.getRscGrp(storagePoolVO));
final String cloneRes = templateRD != null ? templateRD.getName() : templateRscName;
logger.info("Clone resource definition {} to {}", cloneRes, rscName);
ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest();
cloneRequest.setName(rscName);
if (volumeInfo.getPassphraseId() != null) {
List<LayerType> encryptionLayer = getEncryptedLayerList(linstorApi, getRscGrp(storagePoolVO));
List<LayerType> encryptionLayer = LinstorUtil.getEncryptedLayerList(
linstorApi, LinstorUtil.getRscGrp(storagePoolVO));
cloneRequest.setLayerList(encryptionLayer);
if (volumeInfo.getPassphrase() != null) {
String utf8Passphrase = new String(volumeInfo.getPassphrase(), StandardCharsets.UTF_8);
@ -704,7 +432,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone(
cloneRes, cloneRequest);
checkLinstorAnswersThrow(cloneStarted.getMessages());
LinstorUtil.checkLinstorAnswersThrow(cloneStarted.getMessages());
if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) {
throw new CloudRuntimeException("Clone for resource " + rscName + " failed.");
@ -716,11 +444,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
resizeResource(linstorApi, rscName, volumeInfo.getSize());
}
updateRscGrpIfNecessary(linstorApi, rscName, getRscGrp(storagePoolVO));
updateRscGrpIfNecessary(linstorApi, rscName, LinstorUtil.getRscGrp(storagePoolVO));
deleteTemplateForProps(linstorApi, rscName);
LinstorUtil.applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName());
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
LinstorUtil.applyQoSSettings(
_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
return LinstorUtil.getDevicePath(linstorApi, rscName);
} catch (ApiException apiEx) {
@ -744,7 +473,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) {
final String rscGrp = getRscGrp(storagePoolVO);
final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO);
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId);
@ -757,22 +486,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
logger.debug("Create new resource definition: " + rscName);
ResourceDefinitionCreate rdCreate = createResourceDefinitionCreate(rscName, rscGrp);
ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
SnapshotRestore snapshotRestore = new SnapshotRestore();
snapshotRestore.toResource(rscName);
logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName);
answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
// restore snapshot to new resource
logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName);
answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
LinstorUtil.applyAuxProps(linstorApi, rscName, volumeVO.getName(), null);
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops());
LinstorUtil.applyQoSSettings(_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops());
return LinstorUtil.getDevicePath(linstorApi, rscName);
} catch (ApiException apiEx) {
@ -790,7 +519,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
} else if (csTemplateId > 0) {
return cloneResource(csTemplateId, volumeInfo, storagePoolVO);
} else {
return createResource(volumeInfo, storagePoolVO);
return LinstorUtil.createResource(volumeInfo, storagePoolVO, _storagePoolDao);
}
}
@ -1141,7 +870,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
String rscName,
String snapshotName,
String restoredName) throws ApiException {
final String rscGrp = getRscGrp(storagePoolVO);
final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO);
// try to delete -rst resource, could happen if the copy failed and noone deleted it.
deleteResourceDefinition(storagePoolVO, restoredName);
ResourceDefinitionCreate rdc = createResourceDefinitionCreate(restoredName, rscGrp);
@ -1186,7 +915,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
final StoragePoolVO pool = _storagePoolDao.findById(dstData.getDataStore().getId());
final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
final String rscName = LinstorUtil.RSC_PREFIX + dstData.getUuid();
boolean newCreated = createResourceBase(
boolean newCreated = LinstorUtil.createResourceBase(
LinstorUtil.RSC_PREFIX + dstData.getUuid(),
tInfo.getSize(),
tInfo.getName(),
@ -1194,9 +923,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
null,
null,
api,
getRscGrp(pool),
LinstorUtil.getRscGrp(pool),
pool.getId(),
true);
true,
false);
Answer answer;
if (newCreated) {
@ -1430,7 +1160,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
{
resizeResource(api, rscName, resizeParameter.newSize);
applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops);
LinstorUtil.applyQoSSettings(_storagePoolDao, pool, api, rscName, resizeParameter.newMaxIops);
{
final VolumeVO volume = _volumeDao.findById(vol.getId());
volume.setMinIops(resizeParameter.newMinIops);
@ -1535,7 +1265,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
@Override
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
logger.debug(String.format("Requesting storage stats: %s", storagePool));
return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool));
return LinstorUtil.getStorageStats(storagePool.getHostAddress(), LinstorUtil.getRscGrp(storagePool));
}
@Override

View File

@ -22,6 +22,8 @@ import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRc;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.AutoSelectFilter;
import com.linbit.linstor.api.model.LayerType;
import com.linbit.linstor.api.model.Node;
import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ProviderKind;
@ -29,24 +31,36 @@ import com.linbit.linstor.api.model.Resource;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.StoragePool;
import com.linbit.linstor.api.model.Volume;
import com.linbit.linstor.api.model.VolumeDefinition;
import com.linbit.linstor.api.model.VolumeDefinitionModify;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.logging.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.nio.charset.StandardCharsets;
public class LinstorUtil {
protected static Logger LOGGER = LogManager.getLogger(LinstorUtil.class);
@ -56,6 +70,8 @@ public class LinstorUtil {
public static final String RSC_GROUP = "resourceGroup";
public static final String CS_TEMPLATE_FOR_PREFIX = "_cs-template-for-";
public static final String LIN_PROP_DRBDOPT_EXACT_SIZE = "DrbdOptions/ExactSize";
public static final String TEMP_VOLUME_ID = "tempVolumeId";
public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops";
@ -76,6 +92,32 @@ public class LinstorUtil {
.orElse((answers.get(0)).getMessage()) : null;
}
/** Logs a single Linstor API call result at the level matching its severity. */
public static void logLinstorAnswer(@Nonnull ApiCallRc answer) {
    final String message = answer.getMessage();
    if (answer.isError()) {
        LOGGER.error(message);
    } else if (answer.isWarning()) {
        LOGGER.warn(message);
    } else if (answer.isInfo()) {
        LOGGER.info(message);
    }
}
/** Logs every answer of the given list via {@code logLinstorAnswer}. */
public static void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
    for (final ApiCallRc answer : answers) {
        logLinstorAnswer(answer);
    }
}
/**
 * Logs all answers and, if any of them reports an error, throws a
 * {@link CloudRuntimeException} carrying the first error message found.
 */
public static void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
    logLinstorAnswers(answers);
    if (!answers.hasError()) {
        return;
    }
    String errMsg = answers.stream()
        .filter(ApiCallRc::isError)
        .findFirst()
        .map(ApiCallRc::getMessage)
        .orElse("Unknown linstor error");
    throw new CloudRuntimeException(errMsg);
}
public static List<String> getLinstorNodeNames(@Nonnull DevelopersApi api) throws ApiException
{
List<Node> nodes = api.nodeList(
@ -488,4 +530,253 @@ public class LinstorUtil {
}
return false;
}
/** Returns the resource group configured on the pool, or the Linstor default group. */
public static String getRscGrp(com.cloud.storage.StoragePool storagePool) {
    final String userInfo = storagePool.getUserInfo();
    if (userInfo == null || userInfo.isEmpty()) {
        return "DfltRscGrp";
    }
    return userInfo;
}
/**
 * Condition if a template resource can be shared with the given resource group:
 * both groups must have equal layer stacks and equal storage pool lists.
 *
 * @param tgtRscGrp     target resource group
 * @param tgtLayerStack layer stack of the target group ({@code null} if it has no select filter)
 * @param rg            candidate resource group
 * @return True if the template resource can be shared, else false.
 */
private static boolean canShareTemplateForResourceGroup(
    ResourceGroup tgtRscGrp, List<String> tgtLayerStack, ResourceGroup rg) {
    // Either group may lack a select filter entirely; guard every access
    // (the original only guarded rg's layer-stack access and NPE'd on the pool lists).
    AutoSelectFilter tgtFilter = tgtRscGrp.getSelectFilter();
    AutoSelectFilter rgFilter = rg.getSelectFilter();
    List<String> rgLayerStack = rgFilter != null ? rgFilter.getLayerStack() : null;
    if (!Objects.equals(tgtLayerStack, rgLayerStack)) {
        return false;
    }
    List<String> tgtPools = tgtFilter != null ? tgtFilter.getStoragePoolList() : null;
    List<String> rgPools = rgFilter != null ? rgFilter.getStoragePoolList() : null;
    return Objects.equals(tgtPools, rgPools);
}
/**
 * Searches for a shareable template for this rscGrpName and sets the aux template property.
 *
 * @param api         Linstor developers API client
 * @param rscName     base resource name of the template
 * @param rscGrpName  target resource group name
 * @param existingRDs resource definitions (with their groups) whose names start with rscName
 * @return {@code true} if an existing template was marked shareable for this group,
 *         {@code false} otherwise
 * @throws ApiException on Linstor API communication errors
 */
private static boolean foundShareableTemplate(
    DevelopersApi api, String rscName, String rscGrpName,
    List<Pair<ResourceDefinition, ResourceGroup>> existingRDs) throws ApiException {
    if (!existingRDs.isEmpty()) {
        // NOTE(review): assumes rscGrpName always resolves to at least one group;
        // an empty resourceGroupList result would throw IndexOutOfBounds here — confirm.
        ResourceGroup tgtRscGrp = api.resourceGroupList(
            Collections.singletonList(rscGrpName), null, null, null).get(0);
        List<String> tgtLayerStack = tgtRscGrp.getSelectFilter() != null ?
            tgtRscGrp.getSelectFilter().getLayerStack() : null;
        // check if there is already a template copy, that we could reuse
        // this means if select filters are similar enough to allow cloning from
        for (Pair<ResourceDefinition, ResourceGroup> rdPair : existingRDs) {
            ResourceGroup rg = rdPair.second();
            if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) {
                LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName);
                return true;
            }
        }
    }
    return false;
}
/**
 * Returns the layer list of the resourceGroup with encryption (LUKS) added above STORAGE.
 * If the resource group's layer list already contains LUKS, that layer list is returned
 * unchanged; if the group declares no layer stack, the default DRBD,LUKS,STORAGE stack
 * is returned.
 *
 * @param api Linstor developers API
 * @param resourceGroup Resource group to get the encryption layer list
 * @return layer list with LUKS added
 * @throws CloudRuntimeException if the group is missing or the Linstor API call fails
 */
public static List<LayerType> getEncryptedLayerList(DevelopersApi api, String resourceGroup) {
    try {
        List<ResourceGroup> rscGrps = api.resourceGroupList(
            Collections.singletonList(resourceGroup), Collections.emptyList(), null, null);
        if (CollectionUtils.isEmpty(rscGrps)) {
            throw new CloudRuntimeException(
                String.format("Resource Group %s not found on Linstor cluster.", resourceGroup));
        }
        final ResourceGroup rscGrp = rscGrps.get(0);
        // Default stack used when the group does not declare an explicit layer stack.
        List<LayerType> layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE);
        List<String> curLayerStack = rscGrp.getSelectFilter() != null ?
            rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList();
        if (CollectionUtils.isNotEmpty(curLayerStack)) {
            layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList());
            if (!layers.contains(LayerType.LUKS)) {
                layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE
            }
        }
        return layers;
    } catch (ApiException e) {
        // Preserve the underlying API error instead of reporting every failure as "not found".
        throw new CloudRuntimeException(
            String.format("Unable to list resource group %s on Linstor cluster: %s",
                resourceGroup, e.getBestMessage()), e);
    }
}
/**
 * Spawns a new Linstor resource with the given arguments via the resource group
 * spawn API, then applies the CloudStack aux properties (volume/VM name).
 *
 * @param api          Linstor developers API client
 * @param newRscName   name of the resource definition to create
 * @param sizeInBytes  volume size in bytes (converted to KiB for Linstor)
 * @param isTemplate   marks the spawned resource as a template via an aux property
 * @param rscGrpName   resource group to spawn from
 * @param volName      CloudStack volume name, stored as an aux property
 * @param vmName       attached VM name, stored as an aux property
 * @param passPhraseId when non-null, the resource is created with a LUKS layer
 * @param passPhrase   optional LUKS passphrase bytes (interpreted as UTF-8)
 * @param exactSize    when true, sets the DrbdOptions/ExactSize property on the definition
 * @throws ApiException on Linstor API communication errors
 */
private static void spawnResource(
    DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName,
    String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase,
    boolean exactSize) throws ApiException
{
    ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
    rscGrpSpawn.setResourceDefinitionName(newRscName);
    // Linstor expects KiB.
    rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024);
    if (passPhraseId != null) {
        // Encryption requested: force a layer stack that contains LUKS.
        AutoSelectFilter asf = new AutoSelectFilter();
        List<LayerType> luksLayers = getEncryptedLayerList(api, rscGrpName);
        asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList()));
        rscGrpSpawn.setSelectFilter(asf);
        if (passPhrase != null) {
            String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8);
            rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase));
        }
    }
    // Resource definition properties: template marker and/or exact-size flag.
    Properties props = new Properties();
    if (isTemplate) {
        props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true");
    }
    if (exactSize) {
        props.put(LIN_PROP_DRBDOPT_EXACT_SIZE, "true");
    }
    rscGrpSpawn.setResourceDefinitionProps(props);
    LOGGER.info("Linstor: Spawn resource " + newRscName);
    ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn);
    checkLinstorAnswersThrow(answers);
    answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName);
    checkLinstorAnswersThrow(answers);
}
/**
 * Creates a new Linstor resource, unless a matching resource or a shareable
 * template already exists.
 *
 * @param rscName      base resource name
 * @param sizeInBytes  volume size in bytes
 * @param volName      CloudStack volume name (stored as aux property)
 * @param vmName       attached VM name (stored as aux property)
 * @param passPhraseId when non-null, the resource is created encrypted (LUKS)
 * @param passPhrase   optional LUKS passphrase bytes
 * @param api          Linstor developers API client
 * @param rscGrp       resource group to create in
 * @param poolId       storage pool id, used to disambiguate the resource name
 * @param isTemplate   indicates if the resource is a template
 * @param exactSize    when true, requests an exact-size DRBD resource
 * @return true if a new resource was created, false if it already existed or was reused.
 */
public static boolean createResourceBase(
    String rscName, long sizeInBytes, String volName, String vmName,
    @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api,
    String rscGrp, long poolId, boolean isTemplate, boolean exactSize)
{
    try
    {
        LOGGER.debug("createRscBase: {} :: {} :: {} :: {}", rscName, rscGrp, isTemplate, exactSize);
        List<Pair<ResourceDefinition, ResourceGroup>> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName);
        // Pool-qualified name used when the plain name is already taken.
        String fullRscName = String.format("%s-%d", rscName, poolId);
        // Already created if the pool-qualified name exists, or a definition
        // carries the template marker for this resource group.
        boolean alreadyCreated = existingRDs.stream()
            .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) ||
            existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp)));
        if (!alreadyCreated) {
            // Reuse a shareable template if one matches; otherwise spawn a new resource.
            boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs);
            if (createNewRsc) {
                String newRscName = existingRDs.isEmpty() ? rscName : fullRscName;
                spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp,
                    volName, vmName, passPhraseId, passPhrase, exactSize);
            }
            return createNewRsc;
        }
        return false;
    } catch (ApiException apiEx)
    {
        LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
/**
 * Applies (or removes) the IOPS throttle on a Linstor volume definition and adjusts the
 * storage pool's IOPS capacity accounting accordingly.
 *
 * <p>The currently applied QoS is read back from the first volume definition's
 * {@code sys/fs/blkio_throttle_write_iops} property; nothing is done when the requested
 * value already matches.</p>
 *
 * @param primaryDataStoreDao DAO used to persist the updated pool IOPS capacity
 * @param storagePool storage pool whose capacity accounting is updated; mutated in place
 * @param api Linstor developers API client
 * @param rscName name of the resource whose volume definition is modified
 * @param maxIops desired IOPS limit, or null to remove any existing limit
 * @throws ApiException if listing the volume definitions fails
 */
public static void applyQoSSettings(PrimaryDataStoreDao primaryDataStoreDao,
    StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops)
    throws ApiException
{
    Long currentQosIops = null;
    List<VolumeDefinition> vlmDfns = api.volumeDefinitionList(rscName, null, null);
    if (!vlmDfns.isEmpty())
    {
        // Only the write-iops property is inspected; read/write limits are always set in pairs below.
        Properties props = vlmDfns.get(0).getProps();
        long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0"));
        currentQosIops = iops > 0 ? iops : null;
    }
    if (!Objects.equals(maxIops, currentQosIops))
    {
        VolumeDefinitionModify vdm = new VolumeDefinitionModify();
        if (maxIops != null)
        {
            Properties props = new Properties();
            props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops);
            props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops);
            vdm.overrideProps(props);
            LOGGER.info("Apply qos setting: {} to {}", maxIops, rscName);
        }
        else
        {
            LOGGER.info("Remove QoS setting for {}", rscName);
            vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops"));
        }
        // Volume number 0: only single-volume resources are handled here.
        ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm);
        LinstorUtil.checkLinstorAnswersThrow(answers);
        Long capacityIops = storagePool.getCapacityIops();
        if (capacityIops != null)
        {
            // Return the previously reserved IOPS to the pool and subtract the new
            // reservation: capacity += currentQosIops - maxIops (nulls treated as 0).
            long vcIops = currentQosIops != null ? currentQosIops * -1 : 0;
            long vMaxIops = maxIops != null ? maxIops : 0;
            long newIops = vcIops + vMaxIops;
            capacityIops -= newIops;
            LOGGER.info("Current storagepool {} iops capacity: {}", storagePool, capacityIops);
            // Clamp at 0 so over-subscription never drives the stored capacity negative.
            storagePool.setCapacityIops(Math.max(0, capacityIops));
            primaryDataStoreDao.update(storagePool.getId(), storagePool);
        }
    }
}
/**
 * Convenience overload of {@code createResource(vol, storagePoolVO, primaryDataStoreDao, exactSize)}
 * that creates the resource without the DRBD exact-size property.
 *
 * @param vol volume to create the Linstor resource for
 * @param storagePoolVO Linstor primary storage pool to create the resource on
 * @param primaryDataStoreDao DAO used for the pool's IOPS capacity bookkeeping
 * @return the device path of the created resource
 */
public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO,
                                    PrimaryDataStoreDao primaryDataStoreDao) {
    return createResource(vol, storagePoolVO, primaryDataStoreDao, false);
}
/**
 * Creates a Linstor resource for the given volume on the specified storage pool,
 * applies the volume's QoS (max IOPS) setting, and returns the resulting device path.
 *
 * @param vol volume to create the Linstor resource for
 * @param storagePoolVO Linstor primary storage pool to create the resource on
 * @param primaryDataStoreDao DAO used for the pool's IOPS capacity bookkeeping
 * @param exactSize if true, the DRBD exact-size property is set on the new resource
 * @return the device path of the created resource
 * @throws CloudRuntimeException if a Linstor API call fails
 */
public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO,
                                    PrimaryDataStoreDao primaryDataStoreDao, boolean exactSize) {
    DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
    final String rscGrp = getRscGrp(storagePoolVO);
    final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
    createResourceBase(
        rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(),
        linstorApi, rscGrp, storagePoolVO.getId(), false, exactSize);
    try
    {
        applyQoSSettings(primaryDataStoreDao, storagePoolVO, linstorApi, rscName, vol.getMaxIops());
        return LinstorUtil.getDevicePath(linstorApi, rscName);
    } catch (ApiException apiEx)
    {
        // Parameterized logging for consistency with the rest of this class
        // (and to avoid eager string concatenation).
        LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
}

View File

@ -0,0 +1,437 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.motion;
import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;
/**
 * Current state:
 * Just changing the resource-group on the same storage-pool resource-group is not really good enough.
 * Linstor currently lacks a good way to move resources to another resource-group while respecting
 * every auto-filter setting.
 * Also a Linstor clone would simply set the new resource-group without any adjustment of storage pools or
 * auto-select resource placement.
 * So currently, we create a new resource on the wanted primary storage and let qemu copy the data into the
 * devices.
 */
@Component
public class LinstorDataMotionStrategy implements DataMotionStrategy {
    protected Logger logger = LogManager.getLogger(getClass());
    @Inject
    private SnapshotDataStoreDao _snapshotStoreDao;
    @Inject
    private PrimaryDataStoreDao _storagePool;
    @Inject
    private VolumeDao _volumeDao;
    @Inject
    private VolumeDataFactory _volumeDataFactory;
    @Inject
    private VMInstanceDao _vmDao;
    @Inject
    private GuestOSDao _guestOsDao;
    @Inject
    private VolumeService _volumeService;
    @Inject
    private GuestOSCategoryDao _guestOsCategoryDao;
    @Inject
    private SnapshotDao _snapshotDao;
    @Inject
    private AgentManager _agentManager;
    // NOTE(review): _storagePoolDao injects the same DAO type as _storagePool above;
    // consider consolidating into a single field - confirm no Spring-qualifier reason exists.
    @Inject
    private PrimaryDataStoreDao _storagePoolDao;

    /**
     * Single-object copy is not supported by this strategy; always returns CANT_HANDLE.
     */
    @Override
    public StrategyPriority canHandle(DataObject srcData, DataObject dstData) {
        DataObjectType srcType = srcData.getType();
        DataObjectType dstType = dstData.getType();
        logger.debug("canHandle: {} -> {}", srcType, dstType);
        return StrategyPriority.CANT_HANDLE;
    }

    /**
     * Single-object copy is not implemented; see {@link #canHandle(DataObject, DataObject)}.
     */
    @Override
    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
                          AsyncCompletionCallback<CopyCommandResult> callback) {
        throw new CloudRuntimeException("not implemented");
    }

    /**
     * Returns true only when the map is non-empty and every destination data store is a
     * Linstor-provided primary storage pool.
     */
    private boolean isDestinationLinstorPrimaryStorage(Map<VolumeInfo, DataStore> volumeMap) {
        if (MapUtils.isNotEmpty(volumeMap)) {
            for (DataStore dataStore : volumeMap.values()) {
                StoragePoolVO storagePoolVO = _storagePool.findById(dataStore.getId());
                if (storagePoolVO == null
                        || !storagePoolVO.getStorageProviderName().equals(LinstorUtil.PROVIDER_NAME)) {
                    return false;
                }
            }
        } else {
            return false;
        }
        return true;
    }

    /**
     * Handles live VM migration with storage: claims the highest priority when the VM moves
     * between different hosts and every target pool is Linstor-backed.
     */
    @Override
    public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
        logger.debug("canHandle -- {}: {} -> {}", volumeMap, srcHost, destHost);
        if (srcHost.getId() != destHost.getId() && isDestinationLinstorPrimaryStorage(volumeMap)) {
            return StrategyPriority.HIGHEST;
        }
        return StrategyPriority.CANT_HANDLE;
    }

    /**
     * Persists a new volume record cloned from the source volume, detached from any
     * instance and re-homed on the destination storage pool.
     */
    private VolumeVO createNewVolumeVO(Volume volume, StoragePoolVO storagePoolVO) {
        VolumeVO newVol = new VolumeVO(volume);
        newVol.setInstanceId(null);
        newVol.setChainInfo(null);
        // Path is set to the new volume's UUID; the Linstor resource name is derived from it later.
        newVol.setPath(newVol.getUuid());
        newVol.setFolder(null);
        newVol.setPodId(storagePoolVO.getPodId());
        newVol.setPoolId(storagePoolVO.getId());
        newVol.setLastPoolId(volume.getPoolId());
        return _volumeDao.persist(newVol);
    }

    /**
     * Deletes the DRBD exact-size property from the destination volume's resource
     * definition once migration has completed successfully.
     *
     * @throws CloudRuntimeException if the Linstor API call fails
     */
    private void removeExactSizeProperty(VolumeInfo volumeInfo) {
        StoragePoolVO destStoragePool = _storagePool.findById(volumeInfo.getDataStore().getId());
        DevelopersApi api = LinstorUtil.getLinstorAPI(destStoragePool.getHostAddress());
        ResourceDefinitionModify rdm = new ResourceDefinitionModify();
        rdm.setDeleteProps(Collections.singletonList(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE));
        try {
            String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getPath();
            ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm);
            LinstorUtil.checkLinstorAnswersThrow(answers);
        } catch (ApiException apiEx) {
            logger.error("Linstor: ApiEx - {}", apiEx.getMessage());
            throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
        }
    }

    /**
     * Finalizes the migration: on success, swaps volume identities to the destination
     * volumes and expunges the sources; on failure, rolls back the prepared migration
     * on the destination host and destroys the destination volumes. Per-volume cleanup
     * failures are logged and swallowed so the remaining volumes are still processed.
     *
     * @param success whether the migrate command succeeded
     * @param srcVolumeInfoToDestVolumeInfo source-to-destination volume mapping built during copyAsync
     * @param vmTO the migrated VM (used for the rollback PrepareForMigrationCommand)
     * @param destHost destination host
     */
    private void handlePostMigration(boolean success, Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo,
                                     VirtualMachineTO vmTO, Host destHost) {
        if (!success) {
            try {
                // Roll back the earlier PrepareForMigrationCommand on the destination host.
                PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
                pfmc.setRollback(true);
                Answer pfma = _agentManager.send(destHost.getId(), pfmc);
                if (pfma == null || !pfma.getResult()) {
                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
                    String msg = "Unable to rollback prepare for migration due to the following: " + details;
                    throw new AgentUnavailableException(msg, destHost.getId());
                }
            } catch (Exception e) {
                logger.debug("Failed to disconnect one or more (original) dest volumes", e);
            }
        }
        for (Map.Entry<VolumeInfo, VolumeInfo> entry : srcVolumeInfoToDestVolumeInfo.entrySet()) {
            VolumeInfo srcVolumeInfo = entry.getKey();
            VolumeInfo destVolumeInfo = entry.getValue();
            if (success) {
                srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded);
                // Swap UUIDs so the destination volume takes over the source volume's identity.
                _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId());
                VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
                volumeVO.setFormat(Storage.ImageFormat.QCOW2);
                _volumeDao.update(volumeVO.getId(), volumeVO);
                // remove exact size property
                removeExactSizeProperty(destVolumeInfo);
                try {
                    _volumeService.destroyVolume(srcVolumeInfo.getId());
                    // Re-fetch so the expunge sees the post-destroy state.
                    srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId());
                    AsyncCallFuture<VolumeService.VolumeApiResult> destroyFuture =
                            _volumeService.expungeVolumeAsync(srcVolumeInfo);
                    if (destroyFuture.get().isFailed()) {
                        logger.debug("Failed to clean up source volume on storage");
                    }
                } catch (Exception e) {
                    logger.debug("Failed to clean up source volume on storage", e);
                }
                // Update the volume ID for snapshots on secondary storage
                if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) {
                    _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
                    _snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
                }
            } else {
                try {
                    _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore());
                } catch (Exception e) {
                    logger.debug("Failed to revoke access from dest volume", e);
                }
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
                srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
                try {
                    _volumeService.destroyVolume(destVolumeInfo.getId());
                    destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId());
                    AsyncCallFuture<VolumeService.VolumeApiResult> destroyFuture =
                            _volumeService.expungeVolumeAsync(destVolumeInfo);
                    if (destroyFuture.get().isFailed()) {
                        logger.debug("Failed to clean up dest volume on storage");
                    }
                } catch (Exception e) {
                    logger.debug("Failed to clean up dest volume on storage", e);
                }
            }
        }
    }

    /**
     * Determines whether the destination volume should have the DRBD exact-size property set
     * during migration.
     *
     * <p>This method queries the Linstor API to check if the source volume's resource definition
     * has the exact-size DRBD option enabled. The exact-size property ensures that DRBD uses
     * the precise volume size rather than rounding, which is important for maintaining size
     * consistency during migrations.</p>
     *
     * @param srcVolumeInfo the source volume information to check
     * @return {@code true} if the exact-size property should be set on the destination volume,
     *         which occurs when the source volume has this property enabled, or when the
     *         property cannot be determined (defaults to {@code true} for safety);
     *         {@code false} only when the source is confirmed to not have the exact-size property
     */
    private boolean needsExactSizeProp(VolumeInfo srcVolumeInfo) {
        StoragePoolVO srcStoragePool = _storagePool.findById(srcVolumeInfo.getDataStore().getId());
        if (srcStoragePool.getPoolType() == Storage.StoragePoolType.Linstor) {
            DevelopersApi api = LinstorUtil.getLinstorAPI(srcStoragePool.getHostAddress());
            String rscName = LinstorUtil.RSC_PREFIX + srcVolumeInfo.getPath();
            try {
                List<ResourceDefinition> rscDfns = api.resourceDefinitionList(
                        Collections.singletonList(rscName),
                        false,
                        Collections.emptyList(),
                        null,
                        null);
                if (!CollectionUtils.isEmpty(rscDfns)) {
                    ResourceDefinition srcRsc = rscDfns.get(0);
                    String exactSizeProp = srcRsc.getProps().get(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE);
                    return "true".equalsIgnoreCase(exactSizeProp);
                } else {
                    logger.warn("Unknown resource {} on {}", rscName, srcStoragePool.getHostAddress());
                }
            } catch (ApiException apiEx) {
                logger.error("Unable to fetch resource definition {}: {}", rscName, apiEx.getBestMessage());
            }
        }
        return true;
    }

    /**
     * Performs a KVM live migration with storage to Linstor pools: for every volume it
     * creates a destination resource, prepares the destination host, issues the
     * MigrateCommand to the source host (qemu copies the data), then finalizes via
     * {@link #handlePostMigration}. The callback is always completed in the finally
     * block; errMsg is null on success and carries the failure reason otherwise.
     *
     * @throws CloudRuntimeException for non-KVM hypervisors, encrypted volumes, or any migration failure
     */
    @Override
    public void copyAsync(Map<VolumeInfo, DataStore> volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost,
                          Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
        if (srcHost.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
            throw new CloudRuntimeException(
                    String.format("Invalid hypervisor type [%s]. Only KVM supported", srcHost.getHypervisorType()));
        }
        String errMsg = null;
        VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
        vmTO.setState(vmInstance.getState());
        List<MigrateCommand.MigrateDiskInfo> migrateDiskInfoList = new ArrayList<>();
        Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
        Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo = new HashMap<>();
        try {
            for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
                VolumeInfo srcVolumeInfo = entry.getKey();
                DataStore destDataStore = entry.getValue();
                VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId());
                StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId());
                if (srcVolumeInfo.getPassphraseId() != null) {
                    throw new CloudRuntimeException(
                            String.format("Cannot live migrate encrypted volume: %s", srcVolumeInfo.getVolume()));
                }
                VolumeVO destVolume = createNewVolumeVO(srcVolume, destStoragePool);
                VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
                // Drive the destination volume through the migration-copy state machine
                // before requesting the actual migration.
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopyRequested);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopySucceeded);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested);
                // Mirror the source's DRBD exact-size setting so sizes match during the copy.
                boolean exactSize = needsExactSizeProp(srcVolumeInfo);
                String devPath = LinstorUtil.createResource(
                        destVolumeInfo, destStoragePool, _storagePoolDao, exactSize);
                _volumeDao.update(destVolume.getId(), destVolume);
                destVolume = _volumeDao.findById(destVolume.getId());
                destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
                MigrateCommand.MigrateDiskInfo migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(
                        srcVolumeInfo.getPath(),
                        MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
                        MigrateCommand.MigrateDiskInfo.DriverType.RAW,
                        MigrateCommand.MigrateDiskInfo.Source.DEV,
                        devPath);
                migrateDiskInfoList.add(migrateDiskInfo);
                migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
                srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo);
            }
            PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
            try {
                Answer pfma = _agentManager.send(destHost.getId(), pfmc);
                if (pfma == null || !pfma.getResult()) {
                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
                    errMsg = String.format("Unable to prepare for migration due to the following: %s", details);
                    throw new AgentUnavailableException(errMsg, destHost.getId());
                }
            } catch (final OperationTimedoutException e) {
                errMsg = String.format("Operation timed out due to %s", e.getMessage());
                throw new AgentUnavailableException(errMsg, destHost.getId());
            }
            VMInstanceVO vm = _vmDao.findById(vmTO.getId());
            boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId())
                    .getName().equalsIgnoreCase("Windows");
            MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(),
                    destHost.getPrivateIpAddress(), isWindows, vmTO, true);
            migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
            migrateCommand.setMigrateStorage(migrateStorage);
            migrateCommand.setMigrateStorageManaged(true);
            migrateCommand.setNewVmCpuShares(
                    vmTO.getCpus() * ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed()));
            migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);
            boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
            migrateCommand.setAutoConvergence(kvmAutoConvergence);
            MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand);
            boolean success = migrateAnswer != null && migrateAnswer.getResult();
            handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost);
            if (migrateAnswer == null) {
                throw new CloudRuntimeException("Unable to get an answer to the migrate command");
            }
            if (!migrateAnswer.getResult()) {
                errMsg = migrateAnswer.getDetails();
                throw new CloudRuntimeException(errMsg);
            }
        } catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) {
            errMsg = String.format(
                    "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in LinstorDataMotionStrategy.copyAsync. Error message: [%s].",
                    vmTO, srcHost, destHost, ex.getMessage());
            logger.error(errMsg, ex);
            throw new CloudRuntimeException(errMsg);
        } finally {
            // Always complete the callback; a null errMsg marks success to the caller.
            CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg);
            CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
            result.setResult(errMsg);
            callback.complete(result);
        }
    }
}

View File

@ -33,4 +33,6 @@
class="org.apache.cloudstack.storage.snapshot.LinstorVMSnapshotStrategy" />
<bean id="linstorConfigManager"
class="org.apache.cloudstack.storage.datastore.util.LinstorConfigurationManager" />
<bean id="linstorDataMotionStrategy"
class="org.apache.cloudstack.storage.motion.LinstorDataMotionStrategy" />
</beans>

View File

@ -26,6 +26,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -75,13 +76,13 @@ public class LinstorPrimaryDataStoreDriverImplTest {
when(api.resourceGroupList(Collections.singletonList("EncryptedGrp"), Collections.emptyList(), null, null))
.thenReturn(Collections.singletonList(encryptedGrp));
List<LayerType> layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "DfltRscGrp");
List<LayerType> layers = LinstorUtil.getEncryptedLayerList(api, "DfltRscGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers);
layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "BcacheGrp");
layers = LinstorUtil.getEncryptedLayerList(api, "BcacheGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.BCACHE, LayerType.LUKS, LayerType.STORAGE), layers);
layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "EncryptedGrp");
layers = LinstorUtil.getEncryptedLayerList(api, "EncryptedGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers);
}
}

View File

@ -1443,11 +1443,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
return null;
}
NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId());
Long physicalNetworkId = null;
if (effectiveTrafficType != TrafficType.Guest) {
physicalNetworkId = getNonGuestNetworkPhysicalNetworkId(network, effectiveTrafficType);
} else {
NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId());
physicalNetworkId = network.getPhysicalNetworkId();
if (physicalNetworkId == null) {
physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), offering.getTags(), offering.getTrafficType());
@ -1460,6 +1460,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
return null;
}
if (offering != null && TrafficType.Guest.equals(offering.getTrafficType()) && offering.isSystemOnly()) {
// For private gateway, do not check the Guest traffic type
return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, null, hType);
}
return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, effectiveTrafficType, hType);
}

View File

@ -17,7 +17,6 @@
package com.cloud.usage;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.Map;
@ -35,6 +34,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.usage.Usage;
import org.apache.cloudstack.usage.UsageService;
import org.apache.cloudstack.usage.UsageTypes;
import org.apache.cloudstack.utils.usage.UsageUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.jetbrains.annotations.NotNull;
@ -127,14 +127,25 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
@Inject
private NetworkOfferingDao _networkOfferingDao;
private TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT");
private static final long REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS = 15 * 60 * 1000;
public UsageServiceImpl() {
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT");
_usageTimezone = TimeZone.getTimeZone(timeZoneStr);
String executionTimeZone = _configDao.getValue(Config.UsageExecutionTimezone.toString());
if (executionTimeZone != null) {
usageExecutionTimeZone = TimeZone.getTimeZone(executionTimeZone);
}
return true;
}
@ -465,35 +476,28 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag
@Override
public boolean removeRawUsageRecords(RemoveRawUsageRecordsCmd cmd) throws InvalidParameterValueException {
Integer interval = cmd.getInterval();
if (interval != null && interval > 0 ) {
String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString());
if (jobExecTime != null ) {
String[] segments = jobExecTime.split(":");
if (segments.length == 2) {
String timeZoneStr = _configDao.getValue(Config.UsageExecutionTimezone.toString());
if (timeZoneStr == null) {
timeZoneStr = "GMT";
}
TimeZone tz = TimeZone.getTimeZone(timeZoneStr);
Calendar cal = Calendar.getInstance(tz);
cal.setTime(new Date());
long curTS = cal.getTimeInMillis();
cal.set(Calendar.HOUR_OF_DAY, Integer.parseInt(segments[0]));
cal.set(Calendar.MINUTE, Integer.parseInt(segments[1]));
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
long execTS = cal.getTimeInMillis();
logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS);
// Let's avoid cleanup when job runs and around a 15 min interval
if (Math.abs(curTS - execTS) < 15 * 60 * 1000) {
return false;
}
}
}
_usageDao.expungeAllOlderThan(interval, ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value());
} else {
throw new InvalidParameterValueException("Invalid interval value. Interval to remove cloud_usage records should be greater than 0");
if (interval == null || interval <= 0) {
throw new InvalidParameterValueException("Interval should be greater than 0.");
}
String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString());
Date previousJobExecTime = UsageUtils.getPreviousJobExecutionTime(usageExecutionTimeZone, jobExecTime);
Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, jobExecTime);
if (ObjectUtils.allNotNull(previousJobExecTime, nextJobExecTime)) {
logger.debug("Next Usage job is scheduled to execute at [{}]; previous execution was at [{}].",
DateUtil.displayDateInTimezone(usageExecutionTimeZone, nextJobExecTime), DateUtil.displayDateInTimezone(usageExecutionTimeZone, previousJobExecTime));
Date now = new Date();
if (nextJobExecTime.getTime() - now.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) {
logger.info("Not removing any cloud_usage records because the next Usage job is scheduled to execute in less than {} minute(s).", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000);
return false;
} else if (now.getTime() - previousJobExecTime.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) {
logger.info("Not removing any cloud_usage records because the last Usage job executed in less than {} minute(s) ago.", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000);
return false;
}
}
logger.info("Removing cloud_usage records older than {} day(s).", interval);
_usageDao.expungeAllOlderThan(interval, ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value());
return true;
}
}

View File

@ -187,6 +187,7 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
private boolean usageSnapshotSelection = false;
private static TimeZone usageAggregationTimeZone = TimeZone.getTimeZone("GMT");
private static TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT");
public UsageManagerImpl() {
}
@ -241,6 +242,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
if (aggregationTimeZone != null && !aggregationTimeZone.isEmpty()) {
usageAggregationTimeZone = TimeZone.getTimeZone(aggregationTimeZone);
}
if (execTimeZone != null) {
usageExecutionTimeZone = TimeZone.getTimeZone(execTimeZone);
}
try {
if ((execTime == null) || (aggregationRange == null)) {
@ -249,34 +253,18 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna
throw new ConfigurationException("Missing configuration values for usage job, usage.stats.job.exec.time = " + execTime +
", usage.stats.job.aggregation.range = " + aggregationRange);
}
String[] execTimeSegments = execTime.split(":");
if (execTimeSegments.length != 2) {
logger.error("Unable to parse usage.stats.job.exec.time");
throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "'");
}
int hourOfDay = Integer.parseInt(execTimeSegments[0]);
int minutes = Integer.parseInt(execTimeSegments[1]);
Date currentDate = new Date();
_jobExecTime.setTime(currentDate);
_jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay);
_jobExecTime.set(Calendar.MINUTE, minutes);
_jobExecTime.set(Calendar.SECOND, 0);
_jobExecTime.set(Calendar.MILLISECOND, 0);
TimeZone jobExecTimeZone = execTimeZone != null ? TimeZone.getTimeZone(execTimeZone) : Calendar.getInstance().getTimeZone();
_jobExecTime.setTimeZone(jobExecTimeZone);
// if the hour to execute the job has already passed, roll the day forward to the next day
if (_jobExecTime.getTime().before(currentDate)) {
_jobExecTime.roll(Calendar.DAY_OF_YEAR, true);
Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, execTime);
if (nextJobExecTime == null) {
throw new ConfigurationException(String.format("Unable to parse configuration 'usage.stats.job.exec.time' value [%s].", execTime));
}
_jobExecTime.setTimeZone(usageExecutionTimeZone);
_jobExecTime.setTime(nextJobExecTime);
logger.info("Usage is configured to execute in time zone [{}], at [{}], each [{}] minutes; the current time in that timezone is [{}] and the " +
"next job is scheduled to execute at [{}]. During its execution, Usage will aggregate stats according to the time zone [{}] defined in global setting [usage.aggregation.timezone].",
jobExecTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(jobExecTimeZone, currentDate),
DateUtil.displayDateInTimezone(jobExecTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID());
usageExecutionTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(usageExecutionTimeZone, new Date()),
DateUtil.displayDateInTimezone(usageExecutionTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID());
_aggregationDuration = Integer.parseInt(aggregationRange);
if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) {

View File

@ -19,6 +19,57 @@
package org.apache.cloudstack.utils.usage;
import com.cloud.utils.DateUtil;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
public class UsageUtils {
    protected static Logger logger = LogManager.getLogger(UsageUtils.class);
    public static final int USAGE_AGGREGATION_RANGE_MIN = 1;

    /**
     * Returns the next scheduled Usage job execution time, i.e. the configured HH:mm
     * of today in the given time zone, rolled forward one day if already past.
     *
     * @param usageTimeZone time zone the job schedule is interpreted in
     * @param jobExecTimeConfig value of 'usage.stats.job.exec.time' in HH:mm format
     * @return the next execution time, or null if the configuration cannot be parsed
     */
    public static Date getNextJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) {
        return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, true);
    }

    /**
     * Returns the previous scheduled Usage job execution time, i.e. the configured HH:mm
     * of today in the given time zone, rolled back one day if still in the future.
     *
     * @param usageTimeZone time zone the job schedule is interpreted in
     * @param jobExecTimeConfig value of 'usage.stats.job.exec.time' in HH:mm format
     * @return the previous execution time, or null if the configuration cannot be parsed
     */
    public static Date getPreviousJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) {
        return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, false);
    }

    /**
     * Resolves the configured HH:mm execution time to a concrete {@link Date} relative
     * to the current time, shifting one day so the result lies in the requested direction.
     *
     * @param usageTimeZone time zone the schedule is interpreted in
     * @param jobExecTimeConfig value of 'usage.stats.job.exec.time' in HH:mm format
     * @param next true for the next execution, false for the previous one
     * @return the resolved execution time, or null when the configuration is malformed
     */
    protected static Date getJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig, boolean next) {
        final String[] parts = jobExecTimeConfig.split(":");
        if (parts.length != 2) {
            logger.warn("Unable to parse configuration 'usage.stats.job.exec.time'.");
            return null;
        }
        final int hour;
        final int minute;
        try {
            hour = Integer.parseInt(parts[0]);
            minute = Integer.parseInt(parts[1]);
        } catch (NumberFormatException e) {
            logger.warn("Unable to parse configuration 'usage.stats.job.exec.time' due to non-numeric values in [{}].", jobExecTimeConfig, e);
            return null;
        }
        final Date now = DateUtil.currentGMTTime();
        final Calendar scheduled = Calendar.getInstance(usageTimeZone);
        scheduled.setTime(now);
        scheduled.set(Calendar.HOUR_OF_DAY, hour);
        scheduled.set(Calendar.MINUTE, minute);
        scheduled.set(Calendar.SECOND, 0);
        scheduled.set(Calendar.MILLISECOND, 0);
        // Shift by one day when today's occurrence falls on the wrong side of "now".
        if (next) {
            if (scheduled.getTime().before(now)) {
                scheduled.add(Calendar.DAY_OF_YEAR, 1);
            }
        } else if (scheduled.getTime().after(now)) {
            scheduled.add(Calendar.DAY_OF_YEAR, -1);
        }
        return scheduled.getTime();
    }
}

View File

@ -0,0 +1,135 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package org.apache.cloudstack.utils.usage;
import com.cloud.utils.DateUtil;
import junit.framework.TestCase;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.Date;
import java.util.TimeZone;
@RunWith(MockitoJUnitRunner.class)
public class UsageUtilsTest {

    // Fixed offset zone so the epoch fixtures below have a stable local wall-clock meaning.
    TimeZone usageTimeZone = TimeZone.getTimeZone("GMT-3");

    @Test
    public void getJobExecutionTimeTestReturnsNullWhenConfigurationValueIsInvalid() {
        Date result = UsageUtils.getNextJobExecutionTime(usageTimeZone, "test");
        Assert.assertNull(result);
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasNotPassed() {
        Date currentDate = new Date();
        // 2024-08-22 00:20:00 GMT-3 — ten minutes before the configured 00:30 run.
        currentDate.setTime(1724296800000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true);
            Assert.assertNotNull(result);
            // Expects the same day's 00:30 GMT-3.
            Assert.assertEquals(1724297400000L, result.getTime());
        }
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasPassed() {
        Date currentDate = new Date();
        // 2024-08-22 00:31:00 GMT-3 — one minute after the configured 00:30 run.
        currentDate.setTime(1724297460000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true);
            Assert.assertNotNull(result);
            // Expects the next day's 00:30 GMT-3 (exactly 24h after the missed run).
            Assert.assertEquals(1724383800000L, result.getTime());
        }
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasNotPassed() {
        Date currentDate = new Date();
        // 2024-08-22 00:20:00 GMT-3 — before today's 00:30, so the previous run was yesterday.
        currentDate.setTime(1724296800000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false);
            Assert.assertNotNull(result);
            // Expects the previous day's 00:30 GMT-3.
            Assert.assertEquals(1724211000000L, result.getTime());
        }
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasPassed() {
        Date currentDate = new Date();
        // 2024-08-22 00:31:00 GMT-3 — just after today's 00:30 run.
        currentDate.setTime(1724297460000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false);
            Assert.assertNotNull(result);
            // Expects the same day's 00:30 GMT-3.
            Assert.assertEquals(1724297400000L, result.getTime());
        }
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenNextExecutionIsOnNextYear() {
        Date currentDate = new Date();
        // 2025-12-31 23:59:00 GMT-3 — the next 00:00 run crosses the year boundary.
        currentDate.setTime(1767236340000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:00", true);
            Assert.assertNotNull(result);
            // Expects 2026-01-01 00:00:00 GMT-3.
            Assert.assertEquals(1767236400000L, result.getTime());
        }
    }

    @Test
    public void getJobExecutionTimeTestReturnsExpectedDateWhenPreviousExecutionWasOnPreviousYear() {
        Date currentDate = new Date();
        // 2026-01-01 00:01:00 GMT-3 — the previous 23:59 run was in the prior year.
        currentDate.setTime(1767236460000L);
        try (MockedStatic<DateUtil> dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) {
            dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate);
            Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "23:59", false);
            Assert.assertNotNull(result);
            // Expects 2025-12-31 23:59:00 GMT-3.
            Assert.assertEquals(1767236340000L, result.getTime());
        }
    }
}