Merge release branch 4.22 to main

* 4.22:
  Update templateConfig.sh to not break with directorys with space on t… (#10898)
  Fix VM and volume metrics listing regressions (#12284)
  packaging: use latest cmk release link directly (#11429)
  api:rename RegisterCmd.java => RegisterUserKeyCmd.java (#12259)
  Prioritize copying templates from other secondary storages instead of downloading them (#10363)
  Show time correctly in the backup schedule UI  (#12012)
  kvm: use preallocation option for fat disk resize (#11986)
  Python exception processing static routes fixed (#11967)
  KVM memballooning requires free page reporting and autodeflate (#11932)
  api: create/register/upload template with empty template tag (#12234)
Daan Hoogland 2025-12-22 10:29:24 +01:00
commit e2d3773362
37 changed files with 934 additions and 254 deletions


@ -25,7 +25,7 @@ import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd; import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd;
import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd;
import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import org.apache.cloudstack.api.command.admin.user.RegisterUserKeyCmd;
import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd;
import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter;
@ -95,7 +95,7 @@ public interface AccountService {
void markUserRegistered(long userId); void markUserRegistered(long userId);
public String[] createApiKeyAndSecretKey(RegisterCmd cmd); public String[] createApiKeyAndSecretKey(RegisterUserKeyCmd cmd);
public String[] createApiKeyAndSecretKey(final long userId); public String[] createApiKeyAndSecretKey(final long userId);


@ -26,14 +26,14 @@ import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.RegisterResponse; import org.apache.cloudstack.api.response.RegisterUserKeyResponse;
import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserResponse;
import java.util.Map; import java.util.Map;
@APICommand(name = "getUserKeys", @APICommand(name = "getUserKeys",
description = "This command allows the user to query the seceret and API keys for the account", description = "This command allows the user to query the seceret and API keys for the account",
responseObject = RegisterResponse.class, responseObject = RegisterUserKeyResponse.class,
requestHasSensitiveInfo = false, requestHasSensitiveInfo = false,
responseHasSensitiveInfo = true, responseHasSensitiveInfo = true,
authorized = {RoleType.User, RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin}, authorized = {RoleType.User, RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin},
@ -57,7 +57,7 @@ public class GetUserKeysCmd extends BaseCmd{
public void execute(){ public void execute(){
Pair<Boolean, Map<String, String>> keys = _accountService.getKeys(this); Pair<Boolean, Map<String, String>> keys = _accountService.getKeys(this);
RegisterResponse response = new RegisterResponse(); RegisterUserKeyResponse response = new RegisterUserKeyResponse();
if(keys != null){ if(keys != null){
response.setApiKeyAccess(keys.first()); response.setApiKeyAccess(keys.first());
response.setApiKey(keys.second().get("apikey")); response.setApiKey(keys.second().get("apikey"));


@ -22,17 +22,17 @@ import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.RegisterResponse; import org.apache.cloudstack.api.response.RegisterUserKeyResponse;
import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserResponse;
import com.cloud.user.Account; import com.cloud.user.Account;
import com.cloud.user.User; import com.cloud.user.User;
@APICommand(name = "registerUserKeys", @APICommand(name = "registerUserKeys",
responseObject = RegisterResponse.class, responseObject = RegisterUserKeyResponse.class,
description = "This command allows a user to register for the developer API, returning a secret key and an API key. This request is made through the integration API port, so it is a privileged command and must be made on behalf of a user. It is up to the implementer just how the username and password are entered, and then how that translates to an integration API request. Both secret key and API key should be returned to the user", description = "This command allows a user to register for the developer API, returning a secret key and an API key. This request is made through the integration API port, so it is a privileged command and must be made on behalf of a user. It is up to the implementer just how the username and password are entered, and then how that translates to an integration API request. Both secret key and API key should be returned to the user",
requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) requestHasSensitiveInfo = false, responseHasSensitiveInfo = true)
public class RegisterCmd extends BaseCmd { public class RegisterUserKeyCmd extends BaseCmd {
///////////////////////////////////////////////////// /////////////////////////////////////////////////////
@ -81,7 +81,7 @@ public class RegisterCmd extends BaseCmd {
@Override @Override
public void execute() { public void execute() {
String[] keys = _accountService.createApiKeyAndSecretKey(this); String[] keys = _accountService.createApiKeyAndSecretKey(this);
RegisterResponse response = new RegisterResponse(); RegisterUserKeyResponse response = new RegisterUserKeyResponse();
if (keys != null) { if (keys != null) {
response.setApiKey(keys[0]); response.setApiKey(keys[0]);
response.setSecretKey(keys[1]); response.setSecretKey(keys[1]);


@ -211,7 +211,7 @@ public class CreateTemplateCmd extends BaseAsyncCreateCmd implements UserCmd {
} }
public String getTemplateTag() { public String getTemplateTag() {
return templateTag; return StringUtils.isBlank(templateTag) ? null : templateTag;
} }
public Map getDetails() { public Map getDetails() {


@ -170,7 +170,7 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd {
} }
public String getTemplateTag() { public String getTemplateTag() {
return templateTag; return StringUtils.isBlank(templateTag) ? null : templateTag;
} }
public boolean isDeployAsIs() { public boolean isDeployAsIs() {


@ -279,7 +279,7 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd {
} }
public String getTemplateTag() { public String getTemplateTag() {
return templateTag; return StringUtils.isBlank(templateTag) ? null : templateTag;
} }
public Map getDetails() { public Map getDetails() {
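
The three hunks above (CreateTemplateCmd, GetUploadParamsForTemplateCmd, RegisterTemplateCmd) apply the same normalization: a blank template tag is returned as null rather than passed through as an empty string. A minimal standalone sketch of that check, assuming Apache Commons Lang 3's StringUtils (the exact StringUtils import used by the CloudStack classes is not visible in this diff; commons-lang3 must be on the classpath):

```java
// Hypothetical class, not one of the CloudStack command classes above.
import org.apache.commons.lang3.StringUtils;

public class TemplateTagExample {
    private final String templateTag;

    public TemplateTagExample(String templateTag) {
        this.templateTag = templateTag;
    }

    // An empty or whitespace-only tag is treated as "no tag" (null) instead of
    // being returned verbatim.
    public String getTemplateTag() {
        return StringUtils.isBlank(templateTag) ? null : templateTag;
    }

    public static void main(String[] args) {
        System.out.println(new TemplateTagExample("").getTemplateTag());     // null
        System.out.println(new TemplateTagExample("   ").getTemplateTag());  // null
        System.out.println(new TemplateTagExample("prod").getTemplateTag()); // prod
    }
}
```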


@ -23,7 +23,7 @@ import org.apache.cloudstack.api.BaseResponse;
import com.cloud.serializer.Param; import com.cloud.serializer.Param;
public class RegisterResponse extends BaseResponse { public class RegisterUserKeyResponse extends BaseResponse {
@SerializedName(ApiConstants.API_KEY) @SerializedName(ApiConstants.API_KEY)
@Param(description = "the api key of the registered user", isSensitive = true) @Param(description = "the api key of the registered user", isSensitive = true)
private String apiKey; private String apiKey;

debian/rules

@ -4,7 +4,6 @@ VERSION := $(shell grep '<version>' pom.xml | head -2 | tail -1 | cut -d'>' -f2
PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1) PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1)
SYSCONFDIR = "/etc" SYSCONFDIR = "/etc"
DESTDIR = "debian/tmp" DESTDIR = "debian/tmp"
CMK_REL := $(shell wget -O - "https://api.github.com/repos/apache/cloudstack-cloudmonkey/releases" 2>/dev/null | jq -r '.[0].tag_name')
%: %:
dh $@ --with systemd dh $@ --with systemd
@ -90,7 +89,7 @@ override_dh_auto_install:
rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/sha512sum.txt rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/sha512sum.txt
# Bundle cmk in cloudstack-management # Bundle cmk in cloudstack-management
wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/$(CMK_REL)/cmk.linux.x86-64 -O $(DESTDIR)/usr/bin/cmk wget https://github.com/apache/cloudstack-cloudmonkey/releases/latest/download/cmk.linux.x86-64 -O $(DESTDIR)/usr/bin/cmk
chmod +x $(DESTDIR)/usr/bin/cmk chmod +x $(DESTDIR)/usr/bin/cmk
# nast hack for a couple of configuration files # nast hack for a couple of configuration files
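
The debian/rules change above stops querying the GitHub releases API (and parsing it with jq) for the latest cloudmonkey tag and instead downloads through GitHub's stable releases/latest/download redirect, which the asset URL in the new wget line relies on. Purely as an illustration of that redirect, and not part of the packaging itself, a small Java sketch that resolves the same URL:

```java
// Illustrative helper only; the actual packaging uses wget in debian/rules.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Path;

public class FetchLatestCmk {
    public static void main(String[] args) throws Exception {
        // GitHub serves ".../releases/latest/download/<asset>" as a redirect to the
        // newest release asset, so no API call or jq parsing is needed to find the tag.
        String url = "https://github.com/apache/cloudstack-cloudmonkey/releases/latest/download/cmk.linux.x86-64";
        HttpClient client = HttpClient.newBuilder()
                .followRedirects(HttpClient.Redirect.ALWAYS)
                .build();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        HttpResponse<Path> response = client.send(request, HttpResponse.BodyHandlers.ofFile(Path.of("cmk")));
        System.out.println("Downloaded " + response.uri() + " -> " + response.body());
    }
}
```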


@ -18,12 +18,18 @@
package org.apache.cloudstack.engine.orchestration.service; package org.apache.cloudstack.engine.orchestration.service;
import java.util.List; import java.util.List;
import java.util.concurrent.Future;
import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.api.response.MigrationResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy;
public interface StorageOrchestrationService { public interface StorageOrchestrationService {
MigrationResponse migrateData(Long srcDataStoreId, List<Long> destDatastores, MigrationPolicy migrationPolicy); MigrationResponse migrateData(Long srcDataStoreId, List<Long> destDatastores, MigrationPolicy migrationPolicy);
MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List<Long> templateIdList, List<Long> snapshotIdList); MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List<Long> templateIdList, List<Long> snapshotIdList);
Future<TemplateApiResult> orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore);
} }


@ -78,4 +78,6 @@ public interface TemplateService {
AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable); AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable);
List<DatadiskTO> getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId); List<DatadiskTO> getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId);
AsyncCallFuture<TemplateApiResult> copyTemplateToImageStore(DataObject source, DataStore destStore);
} }


@ -161,22 +161,22 @@ public interface StorageManager extends StorageService {
ConfigKey.Scope.StoragePool, ConfigKey.Scope.StoragePool,
null); null);
ConfigKey<Integer> PRIMARY_STORAGE_DOWNLOAD_WAIT = new ConfigKey<Integer>("Storage", Integer.class, "primary.storage.download.wait", "10800", ConfigKey<Integer> PRIMARY_STORAGE_DOWNLOAD_WAIT = new ConfigKey<>("Storage", Integer.class, "primary.storage.download.wait", "10800",
"In second, timeout for download template to primary storage", false); "In second, timeout for download template to primary storage", false);
ConfigKey<Integer> SecStorageMaxMigrateSessions = new ConfigKey<Integer>("Advanced", Integer.class, "secstorage.max.migrate.sessions", "2", ConfigKey<Integer> SecStorageMaxMigrateSessions = new ConfigKey<>("Advanced", Integer.class, "secstorage.max.migrate.sessions", "2",
"The max number of concurrent copy command execution sessions that an SSVM can handle", false, ConfigKey.Scope.Global); "The max number of concurrent copy command execution sessions that an SSVM can handle", false, ConfigKey.Scope.Global);
ConfigKey<Boolean> SecStorageVMAutoScaleDown = new ConfigKey<Boolean>("Advanced", Boolean.class, "secstorage.vm.auto.scale.down", "false", ConfigKey<Boolean> SecStorageVMAutoScaleDown = new ConfigKey<>("Advanced", Boolean.class, "secstorage.vm.auto.scale.down", "false",
"Setting this to 'true' will auto scale down SSVMs", true, ConfigKey.Scope.Global); "Setting this to 'true' will auto scale down SSVMs", true, ConfigKey.Scope.Global);
ConfigKey<Integer> MaxDataMigrationWaitTime = new ConfigKey<Integer>("Advanced", Integer.class, "max.data.migration.wait.time", "15", ConfigKey<Integer> MaxDataMigrationWaitTime = new ConfigKey<>("Advanced", Integer.class, "max.data.migration.wait.time", "15",
"Maximum wait time (in minutes) for a data migration task before spawning a new SSVM", false, ConfigKey.Scope.Global); "Maximum wait time (in minutes) for a data migration task before spawning a new SSVM", false, ConfigKey.Scope.Global);
ConfigKey<Boolean> DiskProvisioningStrictness = new ConfigKey<Boolean>("Storage", Boolean.class, "disk.provisioning.type.strictness", "false", ConfigKey<Boolean> DiskProvisioningStrictness = new ConfigKey<>("Storage", Boolean.class, "disk.provisioning.type.strictness", "false",
"If set to true, the disk is created only when there is a suitable storage pool that supports the disk provisioning type specified by the service/disk offering. " + "If set to true, the disk is created only when there is a suitable storage pool that supports the disk provisioning type specified by the service/disk offering. " +
"If set to false, the disk is created with a disk provisioning type supported by the pool. Default value is false, and this is currently supported for VMware only.", "If set to false, the disk is created with a disk provisioning type supported by the pool. Default value is false, and this is currently supported for VMware only.",
true, ConfigKey.Scope.Zone); true, ConfigKey.Scope.Zone);
ConfigKey<String> PreferredStoragePool = new ConfigKey<String>(String.class, "preferred.storage.pool", "Advanced", "", ConfigKey<String> PreferredStoragePool = new ConfigKey<>(String.class, "preferred.storage.pool", "Advanced", "",
"The UUID of preferred storage pool for allocation.", true, ConfigKey.Scope.Account, null); "The UUID of preferred storage pool for allocation.", true, ConfigKey.Scope.Account, null);
ConfigKey<Boolean> MountDisabledStoragePool = new ConfigKey<>(Boolean.class, ConfigKey<Boolean> MountDisabledStoragePool = new ConfigKey<>(Boolean.class,
@ -203,7 +203,7 @@ public interface StorageManager extends StorageService {
true, true,
ConfigKey.Scope.Global, ConfigKey.Scope.Global,
null); null);
static final ConfigKey<Boolean> DataStoreDownloadFollowRedirects = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, ConfigKey<Boolean> DataStoreDownloadFollowRedirects = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED,
Boolean.class, "store.download.follow.redirects", "false", Boolean.class, "store.download.follow.redirects", "false",
"Whether HTTP redirect is followed during store downloads for objects such as template, volume etc.", "Whether HTTP redirect is followed during store downloads for objects such as template, volume etc.",
true, ConfigKey.Scope.Global); true, ConfigKey.Scope.Global);
@ -211,7 +211,7 @@ public interface StorageManager extends StorageService {
ConfigKey<Long> HEURISTICS_SCRIPT_TIMEOUT = new ConfigKey<>("Advanced", Long.class, "heuristics.script.timeout", "3000", ConfigKey<Long> HEURISTICS_SCRIPT_TIMEOUT = new ConfigKey<>("Advanced", Long.class, "heuristics.script.timeout", "3000",
"The maximum runtime, in milliseconds, to execute the heuristic rule; if it is reached, a timeout will happen.", true); "The maximum runtime, in milliseconds, to execute the heuristic rule; if it is reached, a timeout will happen.", true);
ConfigKey<Boolean> AllowVolumeReSizeBeyondAllocation = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.resize.allowed.beyond.allocation", "false", ConfigKey<Boolean> AllowVolumeReSizeBeyondAllocation = new ConfigKey<>("Advanced", Boolean.class, "volume.resize.allowed.beyond.allocation", "false",
"Determines whether volume size can exceed the pool capacity allocation disable threshold (pool.storage.allocated.capacity.disablethreshold) " + "Determines whether volume size can exceed the pool capacity allocation disable threshold (pool.storage.allocated.capacity.disablethreshold) " +
"when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)", "when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)",
true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone)); true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
@ -228,6 +228,10 @@ public interface StorageManager extends StorageService {
ConfigKey.Scope.Global, ConfigKey.Scope.Global,
null); null);
ConfigKey<Boolean> COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES = new ConfigKey<>(Boolean.class, "copy.public.templates.from.other.storages",
"Storage", "true", "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.",
true, ConfigKey.Scope.Zone, null);
/** /**
* should we execute in sequence not involving any storages? * should we execute in sequence not involving any storages?
* @return true if commands should execute in sequence * @return true if commands should execute in sequence
@ -252,14 +256,14 @@ public interface StorageManager extends StorageService {
/** /**
* Returns a comma separated list of tags for the specified storage pool * Returns a comma separated list of tags for the specified storage pool
* @param poolId * @param poolId the id of the pool to get tags for
* @return comma separated list of tags * @return comma separated list of tags
*/ */
String getStoragePoolTags(long poolId); String getStoragePoolTags(long poolId);
/** /**
* Returns a list of Strings with tags for the specified storage pool * Returns a list of Strings with tags for the specified storage pool
* @param poolId * @param poolId the id of the pool to get tags for
* @return comma separated list of tags * @return comma separated list of tags
*/ */
List<String> getStoragePoolTagList(long poolId); List<String> getStoragePoolTagList(long poolId);
@ -276,7 +280,7 @@ public interface StorageManager extends StorageService {
Pair<Long, Answer> sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List<Long> hostIdsToAvoid, Command cmd) throws StorageUnavailableException; Pair<Long, Answer> sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List<Long> hostIdsToAvoid, Command cmd) throws StorageUnavailableException;
public Answer getVolumeStats(StoragePool pool, Command cmd); Answer getVolumeStats(StoragePool pool, Command cmd);
boolean canPoolProvideStorageStats(StoragePool pool); boolean canPoolProvideStorageStats(StoragePool pool);
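
For reference, the new copy.public.templates.from.other.storages setting added above is a zone-scoped ConfigKey with a global default of true, and it is read later in this commit via valueIn(zoneId) in TemplateServiceImpl. A sketch that keeps the declaration and the lookup side by side; it depends on CloudStack's framework-config module and is not standalone:

```java
// Sketch based on the ConfigKey constructor and valueIn(zoneId) call that appear in this diff.
import org.apache.cloudstack.framework.config.ConfigKey;

public interface CopyTemplatesSetting {
    ConfigKey<Boolean> COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES = new ConfigKey<>(Boolean.class,
            "copy.public.templates.from.other.storages", "Storage", "true",
            "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.",
            true, ConfigKey.Scope.Zone, null);

    // Resolves the effective value for a zone; the zone-scoped override wins over the global default.
    static boolean isEnabledIn(Long zoneId) {
        return COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES.valueIn(zoneId);
    }
}
```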


@ -22,10 +22,12 @@ import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap; import java.util.LinkedHashMap;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
import javax.inject.Inject; import javax.inject.Inject;
@ -206,12 +208,22 @@ public class DataMigrationUtility {
protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates, List<TemplateDataStoreVO> templates) { protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates, List<TemplateDataStoreVO> templates) {
List<TemplateInfo> files = new LinkedList<>(); List<TemplateInfo> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();
for (TemplateDataStoreVO template : templates) { for (TemplateDataStoreVO template : templates) {
VMTemplateVO templateVO = templateDao.findById(template.getTemplateId()); long templateId = template.getTemplateId();
if (shouldMigrateTemplate(template, templateVO)) { if (idsForMigration.contains(templateId)) {
logger.warn("Template store reference [{}] is duplicated; not considering it for migration.", template);
continue;
}
VMTemplateVO templateVO = templateDao.findById(templateId);
if (!shouldMigrateTemplate(template, templateVO)) {
continue;
}
files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore)); files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore));
idsForMigration.add(templateId);
} }
}
for (TemplateInfo template: files) { for (TemplateInfo template: files) {
List<VMTemplateVO> children = templateDao.listByParentTemplatetId(template.getId()); List<VMTemplateVO> children = templateDao.listByParentTemplatetId(template.getId());
List<TemplateInfo> temps = new ArrayList<>(); List<TemplateInfo> temps = new ArrayList<>();
@ -221,6 +233,7 @@ public class DataMigrationUtility {
} }
childTemplates.put(template, new Pair<>(temps, getTotalChainSize(temps))); childTemplates.put(template, new Pair<>(temps, getTotalChainSize(temps)));
} }
return (List<DataObject>) (List<?>) files; return (List<DataObject>) (List<?>) files;
} }
@ -263,16 +276,37 @@ public class DataMigrationUtility {
*/ */
protected List<DataObject> getAllReadySnapshotsAndChains(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, List<SnapshotDataStoreVO> snapshots) { protected List<DataObject> getAllReadySnapshotsAndChains(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, List<SnapshotDataStoreVO> snapshots) {
List<SnapshotInfo> files = new LinkedList<>(); List<SnapshotInfo> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();
for (SnapshotDataStoreVO snapshot : snapshots) { for (SnapshotDataStoreVO snapshot : snapshots) {
SnapshotVO snapshotVO = snapshotDao.findById(snapshot.getSnapshotId()); long snapshotId = snapshot.getSnapshotId();
if (snapshot.getState() == ObjectInDataStoreStateMachine.State.Ready && if (idsForMigration.contains(snapshotId)) {
snapshotVO != null && snapshotVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator logger.warn("Snapshot store reference [{}] is duplicated; not considering it for migration.", snapshot);
&& snapshot.getParentSnapshotId() == 0 ) { continue;
}
if (snapshot.getState() != ObjectInDataStoreStateMachine.State.Ready) {
logger.warn("Not migrating snapshot [{}] because its state is not ready.", snapshot);
continue;
}
SnapshotVO snapshotVO = snapshotDao.findById(snapshotId);
if (snapshotVO == null) {
logger.debug("Not migrating snapshot [{}] because we could not find its database entry.", snapshot);
continue;
}
if (snapshotVO.getHypervisorType() == Hypervisor.HypervisorType.Simulator) {
logger.debug("Not migrating snapshot [{}] because its hypervisor type is simulator.", snapshot);
continue;
}
if (snapshot.getParentSnapshotId() != 0) {
continue; // The child snapshot will be migrated in the for loop below.
}
SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), snapshot.getDataStoreId(), snapshot.getRole()); SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), snapshot.getDataStoreId(), snapshot.getRole());
if (snap != null) { if (snap == null) {
logger.debug("Not migrating snapshot [{}] because we could not get its information.", snapshot);
continue;
}
files.add(snap); files.add(snap);
} idsForMigration.add(snapshotId);
}
} }
for (SnapshotInfo parent : files) { for (SnapshotInfo parent : files) {
@ -285,7 +319,7 @@ public class DataMigrationUtility {
chain.addAll(children); chain.addAll(children);
} }
} }
snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getTotalChainSize(chain))); snapshotChains.put(parent, new Pair<>(chain, getTotalChainSize(chain)));
} }
return (List<DataObject>) (List<?>) files; return (List<DataObject>) (List<?>) files;
@ -306,14 +340,31 @@ public class DataMigrationUtility {
protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore, List<VolumeDataStoreVO> volumes) { protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore, List<VolumeDataStoreVO> volumes) {
List<DataObject> files = new LinkedList<>(); List<DataObject> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();
for (VolumeDataStoreVO volume : volumes) { for (VolumeDataStoreVO volume : volumes) {
if (volume.getState() == ObjectInDataStoreStateMachine.State.Ready) { long volumeId = volume.getVolumeId();
if (idsForMigration.contains(volumeId)) {
logger.warn("Volume store reference [{}] is duplicated; not considering it for migration.", volume);
continue;
}
if (volume.getState() != ObjectInDataStoreStateMachine.State.Ready) {
logger.debug("Not migrating volume [{}] because its state is not ready.", volume);
continue;
}
VolumeInfo volumeInfo = volumeFactory.getVolume(volume.getVolumeId(), srcDataStore); VolumeInfo volumeInfo = volumeFactory.getVolume(volume.getVolumeId(), srcDataStore);
if (volumeInfo != null && volumeInfo.getHypervisorType() != Hypervisor.HypervisorType.Simulator) { if (volumeInfo == null) {
logger.debug("Not migrating volume [{}] because we could not get its information.", volume);
continue;
}
if (volumeInfo.getHypervisorType() == Hypervisor.HypervisorType.Simulator) {
logger.debug("Not migrating volume [{}] because its hypervisor type is simulator.", volume);
continue;
}
files.add(volumeInfo); files.add(volumeInfo);
idsForMigration.add(volumeId);
} }
}
}
return files; return files;
} }
@ -325,10 +376,9 @@ public class DataMigrationUtility {
/** Returns the count of active SSVMs - SSVM with agents in connected state, so as to dynamically increase the thread pool /** Returns the count of active SSVMs - SSVM with agents in connected state, so as to dynamically increase the thread pool
* size when SSVMs scale * size when SSVMs scale
*/ */
protected int activeSSVMCount(DataStore dataStore) { protected int activeSSVMCount(Long zoneId) {
long datacenterId = dataStore.getScope().getScopeId();
List<SecondaryStorageVmVO> ssvms = List<SecondaryStorageVmVO> ssvms =
secStorageVmDao.getSecStorageVmListInStates(null, datacenterId, VirtualMachine.State.Running, VirtualMachine.State.Migrating); secStorageVmDao.getSecStorageVmListInStates(null, zoneId, VirtualMachine.State.Running, VirtualMachine.State.Migrating);
int activeSSVMs = 0; int activeSSVMs = 0;
for (SecondaryStorageVmVO vm : ssvms) { for (SecondaryStorageVmVO vm : ssvms) {
String name = "s-"+vm.getId()+"-VM"; String name = "s-"+vm.getId()+"-VM";
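
The DataMigrationUtility changes above replace one large compound condition per loop with early continue checks and add a per-id HashSet so duplicated store references are queued for migration only once. A self-contained sketch of that guard pattern, with illustrative names only:

```java
// Hypothetical types; the real code works with template/snapshot/volume store VOs.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

record StoreRef(long id, boolean ready) {}

class DedupExample {
    static List<StoreRef> selectForMigration(List<StoreRef> refs) {
        List<StoreRef> selected = new ArrayList<>();
        Set<Long> idsForMigration = new HashSet<>();
        for (StoreRef ref : refs) {
            if (idsForMigration.contains(ref.id())) {
                continue; // duplicated store reference, already queued
            }
            if (!ref.ready()) {
                continue; // early exit instead of one large nested condition
            }
            selected.add(ref);
            idsForMigration.add(ref.id());
        }
        return selected;
    }
}
```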


@ -46,6 +46,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageServic
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.Configurable;
@ -71,9 +73,12 @@ import com.cloud.storage.dao.SnapshotDao;
import com.cloud.utils.Pair; import com.cloud.utils.Pair;
import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.logging.log4j.ThreadContext;
public class StorageOrchestrator extends ManagerBase implements StorageOrchestrationService, Configurable { public class StorageOrchestrator extends ManagerBase implements StorageOrchestrationService, Configurable {
private static final String LOGCONTEXTID = "logcontextid";
@Inject @Inject
SnapshotDataStoreDao snapshotDataStoreDao; SnapshotDataStoreDao snapshotDataStoreDao;
@Inject @Inject
@ -91,6 +96,8 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
@Inject @Inject
private SecondaryStorageService secStgSrv; private SecondaryStorageService secStgSrv;
@Inject @Inject
TemplateService templateService;
@Inject
TemplateDataStoreDao templateDataStoreDao; TemplateDataStoreDao templateDataStoreDao;
@Inject @Inject
VolumeDataStoreDao volumeDataStoreDao; VolumeDataStoreDao volumeDataStoreDao;
@ -106,6 +113,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
Integer numConcurrentCopyTasksPerSSVM = 2; Integer numConcurrentCopyTasksPerSSVM = 2;
private final Map<Long, ThreadPoolExecutor> zoneExecutorMap = new HashMap<>();
private final Map<Long, Integer> zonePendingWorkCountMap = new HashMap<>();
@Override @Override
public String getConfigComponentName() { public String getConfigComponentName() {
return StorageOrchestrationService.class.getName(); return StorageOrchestrationService.class.getName();
@ -167,8 +177,6 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
double meanstddev = getStandardDeviation(storageCapacities); double meanstddev = getStandardDeviation(storageCapacities);
double threshold = ImageStoreImbalanceThreshold.value(); double threshold = ImageStoreImbalanceThreshold.value();
MigrationResponse response = null; MigrationResponse response = null;
ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30,
TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM));
Date start = new Date(); Date start = new Date();
if (meanstddev < threshold && migrationPolicy == MigrationPolicy.BALANCE) { if (meanstddev < threshold && migrationPolicy == MigrationPolicy.BALANCE) {
logger.debug("mean std deviation of the image stores is below threshold, no migration required"); logger.debug("mean std deviation of the image stores is below threshold, no migration required");
@ -177,7 +185,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
int skipped = 0; int skipped = 0;
List<Future<AsyncCallFuture<DataObjectResult>>> futures = new ArrayList<>(); List<Future<DataObjectResult>> futures = new ArrayList<>();
while (true) { while (true) {
DataObject chosenFileForMigration = null; DataObject chosenFileForMigration = null;
if (files.size() > 0) { if (files.size() > 0) {
@ -206,7 +214,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, childTemplates, migrationPolicy)) { if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, childTemplates, migrationPolicy)) {
storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destDatastoreId, executor, futures); storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destDatastoreId, futures);
} else { } else {
if (migrationPolicy == MigrationPolicy.BALANCE) { if (migrationPolicy == MigrationPolicy.BALANCE) {
continue; continue;
@ -217,7 +225,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
} }
Date end = new Date(); Date end = new Date();
handleSnapshotMigration(srcDataStoreId, start, end, migrationPolicy, futures, storageCapacities, executor); handleSnapshotMigration(srcDataStoreId, start, end, migrationPolicy, futures, storageCapacities);
return handleResponse(futures, migrationPolicy, message, success); return handleResponse(futures, migrationPolicy, message, success);
} }
@ -250,9 +258,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
storageCapacities = getStorageCapacities(storageCapacities, srcImgStoreId); storageCapacities = getStorageCapacities(storageCapacities, srcImgStoreId);
storageCapacities = getStorageCapacities(storageCapacities, destImgStoreId); storageCapacities = getStorageCapacities(storageCapacities, destImgStoreId);
ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM, numConcurrentCopyTasksPerSSVM, 30, List<Future<DataObjectResult>> futures = new ArrayList<>();
TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM));
List<Future<AsyncCallFuture<DataObjectResult>>> futures = new ArrayList<>();
Date start = new Date(); Date start = new Date();
while (true) { while (true) {
@ -272,7 +278,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
if (storageCapacityBelowThreshold(storageCapacities, destImgStoreId)) { if (storageCapacityBelowThreshold(storageCapacities, destImgStoreId)) {
storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destImgStoreId, executor, futures); storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destImgStoreId, futures);
} else { } else {
message = "Migration failed. Destination store doesn't have enough capacity for migration"; message = "Migration failed. Destination store doesn't have enough capacity for migration";
success = false; success = false;
@ -289,7 +295,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snap.getSnapshotId(), snap.getDataStoreId(), DataStoreRole.Image); SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snap.getSnapshotId(), snap.getDataStoreId(), DataStoreRole.Image);
SnapshotInfo parentSnapshot = snapshotInfo.getParent(); SnapshotInfo parentSnapshot = snapshotInfo.getParent();
if (snapshotInfo.getDataStore().getId() == srcImgStoreId && parentSnapshot != null && migratedSnapshotIdList.contains(parentSnapshot.getSnapshotId())) { if (snapshotInfo.getDataStore().getId() == srcImgStoreId && parentSnapshot != null && migratedSnapshotIdList.contains(parentSnapshot.getSnapshotId())) {
futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, dataStoreManager.getDataStore(destImgStoreId, DataStoreRole.Image)))); futures.add(submit(srcDatastore.getScope().getScopeId(), new MigrateDataTask(snapshotInfo, srcDatastore, dataStoreManager.getDataStore(destImgStoreId, DataStoreRole.Image))));
} }
}); });
} }
@ -297,6 +303,11 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
return handleResponse(futures, null, message, success); return handleResponse(futures, null, message, success);
} }
@Override
public Future<TemplateApiResult> orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore) {
return submit(destStore.getScope().getScopeId(), new CopyTemplateTask(source, destStore));
}
protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy, int skipped) { protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy, int skipped) {
String message = ""; String message = "";
boolean success = true; boolean success = true;
@ -332,19 +343,10 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains, Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains,
DataStore srcDatastore, DataStore srcDatastore,
Long destDatastoreId, Long destDatastoreId,
ThreadPoolExecutor executor, List<Future<DataObjectResult>> futures) {
List<Future<AsyncCallFuture<DataObjectResult>>> futures) {
Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains, templateChains); Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains, templateChains);
storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize);
long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore);
long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM;
// Increase thread pool size with increase in number of SSVMs
if ( totalJobs > executor.getCorePoolSize()) {
executor.setMaximumPoolSize((int) (totalJobs));
executor.setCorePoolSize((int) (totalJobs));
}
MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image));
if (chosenFileForMigration instanceof SnapshotInfo ) { if (chosenFileForMigration instanceof SnapshotInfo ) {
@ -353,19 +355,64 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
if (chosenFileForMigration instanceof TemplateInfo) { if (chosenFileForMigration instanceof TemplateInfo) {
task.setTemplateChain(templateChains); task.setTemplateChain(templateChains);
} }
futures.add((executor.submit(task))); futures.add(submit(srcDatastore.getScope().getScopeId(), task));
logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid());
return storageCapacities; return storageCapacities;
} }
protected <T> Future<T> submit(Long zoneId, Callable<T> task) {
ThreadPoolExecutor executor;
synchronized (this) {
if (!zoneExecutorMap.containsKey(zoneId)) {
zoneExecutorMap.put(zoneId, new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM, numConcurrentCopyTasksPerSSVM,
30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)));
zonePendingWorkCountMap.put(zoneId, 0);
}
zonePendingWorkCountMap.merge(zoneId, 1, Integer::sum);
scaleExecutorIfNecessary(zoneId);
executor = zoneExecutorMap.get(zoneId);
}
return executor.submit(task);
}
private MigrationResponse handleResponse(List<Future<AsyncCallFuture<DataObjectResult>>> futures, MigrationPolicy migrationPolicy, String message, boolean success) { protected void scaleExecutorIfNecessary(Long zoneId) {
long activeSsvms = migrationHelper.activeSSVMCount(zoneId);
long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM;
ThreadPoolExecutor executor = zoneExecutorMap.get(zoneId);
if (totalJobs > executor.getCorePoolSize()) {
logger.debug("Scaling up executor of zone [{}] from [{}] to [{}] threads.", zoneId, executor.getCorePoolSize(),
totalJobs);
executor.setMaximumPoolSize((int) (totalJobs));
executor.setCorePoolSize((int) (totalJobs));
}
}
protected synchronized void tryCleaningUpExecutor(Long zoneId) {
if (!zoneExecutorMap.containsKey(zoneId)) {
logger.debug("No executor exists for zone [{}].", zoneId);
return;
}
zonePendingWorkCountMap.merge(zoneId, -1, Integer::sum);
Integer pendingWorkCount = zonePendingWorkCountMap.get(zoneId);
if (pendingWorkCount > 0) {
logger.debug("Not cleaning executor of zone [{}] yet, as there is [{}] pending work.", zoneId, pendingWorkCount);
return;
}
logger.debug("Cleaning executor of zone [{}].", zoneId);
ThreadPoolExecutor executor = zoneExecutorMap.get(zoneId);
zoneExecutorMap.remove(zoneId);
executor.shutdown();
}
private MigrationResponse handleResponse(List<Future<DataObjectResult>> futures, MigrationPolicy migrationPolicy, String message, boolean success) {
int successCount = 0; int successCount = 0;
for (Future<AsyncCallFuture<DataObjectResult>> future : futures) { for (Future<DataObjectResult> future : futures) {
try { try {
AsyncCallFuture<DataObjectResult> res = future.get(); DataObjectResult res = future.get();
if (res.get().isSuccess()) { if (res.isSuccess()) {
successCount++; successCount++;
} }
} catch ( InterruptedException | ExecutionException e) { } catch ( InterruptedException | ExecutionException e) {
@ -379,7 +426,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
private void handleSnapshotMigration(Long srcDataStoreId, Date start, Date end, MigrationPolicy policy, private void handleSnapshotMigration(Long srcDataStoreId, Date start, Date end, MigrationPolicy policy,
List<Future<AsyncCallFuture<DataObjectResult>>> futures, Map<Long, Pair<Long, Long>> storageCapacities, ThreadPoolExecutor executor) { List<Future<DataObjectResult>> futures, Map<Long, Pair<Long, Long>> storageCapacities) {
DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image);
List<SnapshotDataStoreVO> snaps = snapshotDataStoreDao.findSnapshots(srcDataStoreId, start, end); List<SnapshotDataStoreVO> snaps = snapshotDataStoreDao.findSnapshots(srcDataStoreId, start, end);
if (!snaps.isEmpty()) { if (!snaps.isEmpty()) {
@ -395,12 +442,12 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
storeId = dstores.get(1); storeId = dstores.get(1);
} }
DataStore datastore = dataStoreManager.getDataStore(storeId, DataStoreRole.Image); DataStore datastore = dataStoreManager.getDataStore(storeId, DataStoreRole.Image);
futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, datastore))); futures.add(submit(srcDatastore.getScope().getScopeId(), new MigrateDataTask(snapshotInfo, srcDatastore, datastore)));
} }
if (parentSnapshot != null) { if (parentSnapshot != null) {
DataStore parentDS = dataStoreManager.getDataStore(parentSnapshot.getDataStore().getId(), DataStoreRole.Image); DataStore parentDS = dataStoreManager.getDataStore(parentSnapshot.getDataStore().getId(), DataStoreRole.Image);
if (parentDS.getId() != snapshotInfo.getDataStore().getId()) { if (parentDS.getId() != snapshotInfo.getDataStore().getId()) {
futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, parentDS))); futures.add(submit(srcDatastore.getScope().getScopeId(), new MigrateDataTask(snapshotInfo, srcDatastore, parentDS)));
} }
} }
} }
@ -527,16 +574,19 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
return standardDeviation.evaluate(metricValues, mean); return standardDeviation.evaluate(metricValues, mean);
} }
private class MigrateDataTask implements Callable<AsyncCallFuture<DataObjectResult>> { private class MigrateDataTask implements Callable<DataObjectResult> {
private DataObject file; private DataObject file;
private DataStore srcDataStore; private DataStore srcDataStore;
private DataStore destDataStore; private DataStore destDataStore;
private Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain; private Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain;
private Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain; private Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain;
private String logid;
public MigrateDataTask(DataObject file, DataStore srcDataStore, DataStore destDataStore) { public MigrateDataTask(DataObject file, DataStore srcDataStore, DataStore destDataStore) {
this.file = file; this.file = file;
this.srcDataStore = srcDataStore; this.srcDataStore = srcDataStore;
this.destDataStore = destDataStore; this.destDataStore = destDataStore;
this.logid = ThreadContext.get(LOGCONTEXTID);
} }
public void setSnapshotChains(Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) { public void setSnapshotChains(Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) {
@ -557,8 +607,50 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
} }
@Override @Override
public AsyncCallFuture<DataObjectResult> call() throws Exception { public DataObjectResult call() {
return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain, templateChain); ThreadContext.put(LOGCONTEXTID, logid);
DataObjectResult result;
AsyncCallFuture<DataObjectResult> future = secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain, templateChain);
try {
result = future.get();
} catch (ExecutionException | InterruptedException e) {
logger.warn("Exception while migrating data to another secondary storage: {}", e.toString());
result = new DataObjectResult(file);
result.setResult(e.toString());
}
tryCleaningUpExecutor(srcDataStore.getScope().getScopeId());
ThreadContext.clearAll();
return result;
}
}
private class CopyTemplateTask implements Callable<TemplateApiResult> {
private TemplateInfo sourceTmpl;
private DataStore destStore;
private String logid;
public CopyTemplateTask(TemplateInfo sourceTmpl, DataStore destStore) {
this.sourceTmpl = sourceTmpl;
this.destStore = destStore;
this.logid = ThreadContext.get(LOGCONTEXTID);
}
@Override
public TemplateApiResult call() {
ThreadContext.put(LOGCONTEXTID, logid);
TemplateApiResult result;
AsyncCallFuture<TemplateApiResult> future = templateService.copyTemplateToImageStore(sourceTmpl, destStore);
try {
result = future.get();
} catch (ExecutionException | InterruptedException e) {
logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}",
sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString());
result = new TemplateApiResult(sourceTmpl);
result.setResult(e.getMessage());
}
tryCleaningUpExecutor(destStore.getScope().getScopeId());
ThreadContext.clearAll();
return result;
} }
} }
} }
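
Simplified, self-contained sketch of the per-zone executor bookkeeping introduced above (zoneExecutorMap, zonePendingWorkCountMap, submit, tryCleaningUpExecutor): one pool per zone created lazily on first submit, a pending-work counter per zone, and shutdown of the pool once its last task finishes. Pool sizing and the queue type here are placeholders; the real code also grows the pool with the number of active SSVMs in the zone.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ZoneExecutorRegistry {
    private final Map<Long, ThreadPoolExecutor> zoneExecutorMap = new HashMap<>();
    private final Map<Long, Integer> zonePendingWorkCountMap = new HashMap<>();

    <T> Future<T> submit(Long zoneId, Callable<T> task) {
        ThreadPoolExecutor executor;
        synchronized (this) {
            executor = zoneExecutorMap.computeIfAbsent(zoneId,
                    id -> new ThreadPoolExecutor(2, 2, 30, TimeUnit.MINUTES, new LinkedBlockingQueue<>()));
            zonePendingWorkCountMap.merge(zoneId, 1, Integer::sum);
        }
        // Tasks must call taskFinished(zoneId) when done, as the migration/copy tasks above do
        // via tryCleaningUpExecutor.
        return executor.submit(task);
    }

    synchronized void taskFinished(Long zoneId) {
        ThreadPoolExecutor executor = zoneExecutorMap.get(zoneId);
        if (executor == null) {
            return;
        }
        int pending = zonePendingWorkCountMap.merge(zoneId, -1, Integer::sum);
        if (pending <= 0) {
            zoneExecutorMap.remove(zoneId);
            zonePendingWorkCountMap.remove(zoneId);
            executor.shutdown(); // already-running tasks still complete
        }
    }
}
```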


@ -62,8 +62,8 @@ function getChecksum() {
} }
function createMetadataFile() { function createMetadataFile() {
local fileData=$(cat $SOURCEFILE) local fileData=$(cat "$SOURCEFILE")
echo -e "["default"]\nversion = $VERSION.${securityversion}\n" >> $METADATAFILE echo -e "["default"]\nversion = $VERSION.${securityversion}\n" >> "$METADATAFILE"
for template in "${templates[@]}" for template in "${templates[@]}"
do do
section="${template%%:*}" section="${template%%:*}"
@ -76,7 +76,7 @@ function createMetadataFile() {
templatename="systemvm-${sectionHv%.*}-${VERSION}-${arch}" templatename="systemvm-${sectionHv%.*}-${VERSION}-${arch}"
checksum=$(getChecksum "$fileData" "$VERSION-${arch}-$hvName") checksum=$(getChecksum "$fileData" "$VERSION-${arch}-$hvName")
filename=$(echo ${downloadurl##*'/'}) filename=$(echo ${downloadurl##*'/'})
echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\nguestos = $guestos\n" >> $METADATAFILE echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\nguestos = $guestos\n" >> "$METADATAFILE"
done done
} }
@ -91,8 +91,8 @@ templates=( "kvm-x86_64:https://download.cloudstack.org/systemvm/${CS_VERSION}/s
"ovm3:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-ovm.raw.bz2" ) "ovm3:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-ovm.raw.bz2" )
PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/"
mkdir -p $PARENTPATH mkdir -p "$PARENTPATH"
METADATAFILE=${PARENTPATH}"metadata.ini" METADATAFILE="${PARENTPATH}metadata.ini"
echo > $METADATAFILE echo > "$METADATAFILE"
SOURCEFILE=${PARENTPATH}'sha512sum.txt' SOURCEFILE="${PARENTPATH}sha512sum.txt"
createMetadataFile createMetadataFile


@ -31,6 +31,7 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject; import javax.inject.Inject;
import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
@ -42,7 +43,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
@ -58,6 +58,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageBus;
import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.framework.messagebus.PublishScope;
import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DeleteCommand;
import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.datastore.DataObjectManager;
import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
@ -77,7 +78,6 @@ import com.cloud.agent.api.storage.ListTemplateCommand;
import com.cloud.agent.api.to.DatadiskTO; import com.cloud.agent.api.to.DatadiskTO;
import com.cloud.alert.AlertManager; import com.cloud.alert.AlertManager;
import com.cloud.configuration.Config; import com.cloud.configuration.Config;
import com.cloud.configuration.Resource;
import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.DataCenterVO; import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterDao;
@ -157,6 +157,8 @@ public class TemplateServiceImpl implements TemplateService {
ImageStoreDetailsUtil imageStoreDetailsUtil; ImageStoreDetailsUtil imageStoreDetailsUtil;
@Inject @Inject
TemplateDataFactory imageFactory; TemplateDataFactory imageFactory;
@Inject
StorageOrchestrationService storageOrchestrator;
class TemplateOpContext<T> extends AsyncRpcContext<T> { class TemplateOpContext<T> extends AsyncRpcContext<T> {
final TemplateObject template; final TemplateObject template;
@ -321,7 +323,6 @@ public class TemplateServiceImpl implements TemplateService {
if (syncLock.lock(3)) { if (syncLock.lock(3)) {
try { try {
Long zoneId = store.getScope().getScopeId(); Long zoneId = store.getScope().getScopeId();
Map<String, TemplateProp> templateInfos = listTemplate(store); Map<String, TemplateProp> templateInfos = listTemplate(store);
if (templateInfos == null) { if (templateInfos == null) {
return; return;
@ -531,10 +532,6 @@ public class TemplateServiceImpl implements TemplateService {
availHypers.add(HypervisorType.None); // bug 9809: resume ISO availHypers.add(HypervisorType.None); // bug 9809: resume ISO
// download. // download.
for (VMTemplateVO tmplt : toBeDownloaded) { for (VMTemplateVO tmplt : toBeDownloaded) {
if (tmplt.getUrl() == null) { // If url is null, skip downloading
logger.info("Skip downloading template {} since no url is specified.", tmplt);
continue;
}
// if this is private template, skip sync to a new image store // if this is private template, skip sync to a new image store
if (isSkipTemplateStoreDownload(tmplt, zoneId)) { if (isSkipTemplateStoreDownload(tmplt, zoneId)) {
logger.info("Skip sync downloading private template {} to a new image store", tmplt); logger.info("Skip sync downloading private template {} to a new image store", tmplt);
@ -553,14 +550,10 @@ public class TemplateServiceImpl implements TemplateService {
} }
if (availHypers.contains(tmplt.getHypervisorType())) { if (availHypers.contains(tmplt.getHypervisorType())) {
logger.info("Downloading template {} to image store {}", tmplt, store); boolean copied = isCopyFromOtherStoragesEnabled(zoneId) && tryCopyingTemplateToImageStore(tmplt, store);
associateTemplateToZone(tmplt.getId(), zoneId); if (!copied) {
TemplateInfo tmpl = _templateFactory.getTemplate(tmplt.getId(), store); tryDownloadingTemplateToImageStore(tmplt, store);
TemplateOpContext<TemplateApiResult> context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null); }
AsyncCallbackDispatcher<TemplateServiceImpl, TemplateApiResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createTemplateAsyncCallBack(null, null));
caller.setContext(context);
createTemplateAsync(tmpl, store, caller);
} else { } else {
logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType());
} }
@ -607,6 +600,127 @@ public class TemplateServiceImpl implements TemplateService {
} }
protected void tryDownloadingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) {
if (tmplt.getUrl() == null) {
logger.info("Not downloading template [{}] to image store [{}], as it has no URL.", tmplt.getUniqueName(),
destStore.getName());
return;
}
logger.info("Downloading template [{}] to image store [{}].", tmplt.getUniqueName(), destStore.getName());
associateTemplateToZone(tmplt.getId(), destStore.getScope().getScopeId());
TemplateInfo tmpl = _templateFactory.getTemplate(tmplt.getId(), destStore);
TemplateOpContext<TemplateApiResult> context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null);
AsyncCallbackDispatcher<TemplateServiceImpl, TemplateApiResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().createTemplateAsyncCallBack(null, null));
caller.setContext(context);
createTemplateAsync(tmpl, destStore, caller);
}
protected boolean tryCopyingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) {
Long zoneId = destStore.getScope().getScopeId();
List<DataStore> storesInZone = _storeMgr.getImageStoresByZoneIds(zoneId);
for (DataStore sourceStore : storesInZone) {
Map<String, TemplateProp> existingTemplatesInSourceStore = listTemplate(sourceStore);
if (existingTemplatesInSourceStore == null || !existingTemplatesInSourceStore.containsKey(tmplt.getUniqueName())) {
logger.debug("Template [{}] does not exist on image store [{}]; searching on another one.",
tmplt.getUniqueName(), sourceStore.getName());
continue;
}
TemplateObject sourceTmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), sourceStore);
if (sourceTmpl.getInstallPath() == null) {
logger.warn("Can not copy template [{}] from image store [{}], as it returned a null install path.", tmplt.getUniqueName(),
sourceStore.getName());
continue;
}
storageOrchestrator.orchestrateTemplateCopyToImageStore(sourceTmpl, destStore);
return true;
}
logger.debug("Can't copy template [{}] from another image store.", tmplt.getUniqueName());
return false;
}
@Override
public AsyncCallFuture<TemplateApiResult> copyTemplateToImageStore(DataObject source, DataStore destStore) {
TemplateObject sourceTmpl = (TemplateObject) source;
logger.debug("Copying template [{}] from image store [{}] to [{}].", sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(),
destStore.getName());
TemplateObject destTmpl = (TemplateObject) destStore.create(sourceTmpl);
destTmpl.processEvent(Event.CreateOnlyRequested);
AsyncCallFuture<TemplateApiResult> future = new AsyncCallFuture<>();
TemplateOpContext<TemplateApiResult> context = new TemplateOpContext<>(null, destTmpl, future);
AsyncCallbackDispatcher<TemplateServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
caller.setCallback(caller.getTarget().copyTemplateToImageStoreCallback(null, null)).setContext(context);
if (source.getDataStore().getId() == destStore.getId()) {
logger.debug("Destination image store [{}] is the same as the origin; returning success to normalize the metadata.", destStore.getName());
CopyCmdAnswer answer = new CopyCmdAnswer(source.getTO());
CopyCommandResult result = new CopyCommandResult("", answer);
caller.complete(result);
return future;
}
_motionSrv.copyAsync(sourceTmpl, destTmpl, caller);
return future;
}
protected Void copyTemplateToImageStoreCallback(AsyncCallbackDispatcher<TemplateServiceImpl, CopyCommandResult> callback, TemplateOpContext<TemplateApiResult> context) {
TemplateInfo tmplt = context.getTemplate();
CopyCommandResult result = callback.getResult();
AsyncCallFuture<TemplateApiResult> future = context.getFuture();
TemplateApiResult res = new TemplateApiResult(tmplt);
if (result.isSuccess()) {
logger.info("Copied template [{}] to image store [{}].", tmplt.getUniqueName(), tmplt.getDataStore().getName());
tmplt.processEvent(Event.OperationSuccessed, result.getAnswer());
publishTemplateCreation(tmplt);
} else {
logger.warn("Failed to copy template [{}] to image store [{}].", tmplt.getUniqueName(), tmplt.getDataStore().getName());
res.setResult(result.getResult());
tmplt.processEvent(Event.OperationFailed);
}
future.complete(res);
return null;
}
protected boolean isCopyFromOtherStoragesEnabled(Long zoneId) {
return StorageManager.COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES.valueIn(zoneId);
}
protected void publishTemplateCreation(TemplateInfo tmplt) {
VMTemplateVO tmpltVo = _templateDao.findById(tmplt.getId());
if (tmpltVo.isPublicTemplate()) {
_messageBus.publish(null, TemplateManager.MESSAGE_REGISTER_PUBLIC_TEMPLATE_EVENT, PublishScope.LOCAL, tmpltVo.getId());
}
Long size = tmplt.getSize();
if (size == null) {
return;
}
DataStore store = tmplt.getDataStore();
TemplateDataStoreVO tmpltStore = _vmTemplateStoreDao.findByStoreTemplate(store.getId(), tmpltVo.getId());
long physicalSize = 0;
if (tmpltStore != null) {
physicalSize = tmpltStore.getPhysicalSize();
} else {
logger.warn("No entry found in template_store_ref for template [{}] and image store [{}] at the end of registering template!",
tmpltVo.getUniqueName(), store.getName());
}
Long zoneId = store.getScope().getScopeId();
if (zoneId != null) {
String usageEvent = tmplt.getFormat() == ImageFormat.ISO ? EventTypes.EVENT_ISO_CREATE : EventTypes.EVENT_TEMPLATE_CREATE;
UsageEventUtils.publishUsageEvent(usageEvent, tmpltVo.getAccountId(), zoneId, tmpltVo.getId(), tmpltVo.getName(),
null, null, physicalSize, size, VirtualMachineTemplate.class.getName(), tmpltVo.getUuid());
} else {
logger.warn("Zone-wide image store [{}] has a null scope ID.", store);
}
_resourceLimitMgr.incrementResourceCount(tmpltVo.getAccountId(), ResourceType.secondary_storage, size);
}
// persist entry in template_zone_ref table. zoneId can be empty for
// region-wide image store, in that case,
// we will associate the template to all the zones.
@@ -652,45 +766,14 @@
protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher<TemplateServiceImpl, TemplateApiResult> callback,
TemplateOpContext<TemplateApiResult> context) {
- TemplateInfo template = context.template;
TemplateApiResult result = callback.getResult();
if (result.isSuccess()) {
- VMTemplateVO tmplt = _templateDao.findById(template.getId());
- // need to grant permission for public templates
- if (tmplt.isPublicTemplate()) {
- _messageBus.publish(null, TemplateManager.MESSAGE_REGISTER_PUBLIC_TEMPLATE_EVENT, PublishScope.LOCAL, tmplt.getId());
- }
- long accountId = tmplt.getAccountId();
- if (template.getSize() != null) {
- // publish usage event
- String etype = EventTypes.EVENT_TEMPLATE_CREATE;
- if (tmplt.getFormat() == ImageFormat.ISO) {
- etype = EventTypes.EVENT_ISO_CREATE;
- }
- // get physical size from template_store_ref table
- long physicalSize = 0;
- DataStore ds = template.getDataStore();
- TemplateDataStoreVO tmpltStore = _vmTemplateStoreDao.findByStoreTemplate(ds.getId(), template.getId());
- if (tmpltStore != null) {
- physicalSize = tmpltStore.getPhysicalSize();
- } else {
- logger.warn("No entry found in template_store_ref for template: {} and image store: {} at the end of registering template!", template, ds);
- }
- Scope dsScope = ds.getScope();
- if (dsScope.getScopeId() != null) {
- UsageEventUtils.publishUsageEvent(etype, template.getAccountId(), dsScope.getScopeId(), template.getId(), template.getName(), null, null,
- physicalSize, template.getSize(), VirtualMachineTemplate.class.getName(), template.getUuid());
- } else {
- logger.warn("Zone scope image store {} has a null scope id", ds);
- }
- _resourceLimitMgr.incrementResourceCount(accountId, Resource.ResourceType.secondary_storage, template.getSize());
- }
- }
publishTemplateCreation(context.template);
}
return null;
}
- private Map<String, TemplateProp> listTemplate(DataStore ssStore) {
protected Map<String, TemplateProp> listTemplate(DataStore ssStore) {
String nfsVersion = imageStoreDetailsUtil.getNfsVersion(ssStore.getId());
ListTemplateCommand cmd = new ListTemplateCommand(ssStore.getTO(), nfsVersion);
EndPoint ep = _epSelector.select(ssStore);


@@ -18,9 +18,17 @@
*/
package org.apache.cloudstack.storage.image;
import com.cloud.storage.template.TemplateProp;
import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.image.store.TemplateObject;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
@@ -33,6 +41,10 @@ import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.storage.VMTemplateVO;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@RunWith(MockitoJUnitRunner.class)
public class TemplateServiceImplTest {
@@ -43,6 +55,49 @@ public class TemplateServiceImplTest {
@Mock
TemplateDataStoreDao templateDataStoreDao;
@Mock
TemplateDataFactoryImpl templateDataFactoryMock;
@Mock
DataStoreManager dataStoreManagerMock;
@Mock
VMTemplateVO tmpltMock;
@Mock
TemplateProp tmpltPropMock;
@Mock
TemplateObject templateInfoMock;
@Mock
DataStore sourceStoreMock;
@Mock
DataStore destStoreMock;
@Mock
Scope zoneScopeMock;
@Mock
StorageOrchestrationService storageOrchestrator;
Map<String, TemplateProp> templatesInSourceStore = new HashMap<>();
@Before
public void setUp() {
Long zoneId = 1L;
Mockito.doReturn(2L).when(tmpltMock).getId();
Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName();
Mockito.doReturn(zoneId).when(zoneScopeMock).getScopeId();
Mockito.doReturn(zoneScopeMock).when(destStoreMock).getScope();
Mockito.doReturn(List.of(sourceStoreMock, destStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(zoneId);
Mockito.doReturn(templatesInSourceStore).when(templateService).listTemplate(sourceStoreMock);
Mockito.doReturn(null).when(templateService).listTemplate(destStoreMock);
Mockito.doReturn("install-path").when(templateInfoMock).getInstallPath();
Mockito.doReturn(templateInfoMock).when(templateDataFactoryMock).getTemplate(2L, sourceStoreMock);
}
@Test
public void testIsSkipTemplateStoreDownloadPublicTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
@@ -81,4 +136,51 @@ public class TemplateServiceImplTest {
Mockito.when(templateDataStoreDao.findByTemplateZone(id, id, DataStoreRole.Image)).thenReturn(Mockito.mock(TemplateDataStoreVO.class));
Assert.assertTrue(templateService.isSkipTemplateStoreDownload(templateVO, id));
}
@Test
public void tryDownloadingTemplateToImageStoreTestDownloadsTemplateWhenUrlIsNotNull() {
Mockito.doReturn("url").when(tmpltMock).getUrl();
Mockito.doNothing().when(templateService).associateTemplateToZone(Mockito.anyLong(), Mockito.any(Long.class));
templateService.tryDownloadingTemplateToImageStore(tmpltMock, destStoreMock);
Mockito.verify(templateService).createTemplateAsync(Mockito.any(), Mockito.any(), Mockito.any());
}
@Test
public void tryDownloadingTemplateToImageStoreTestDoesNothingWhenUrlIsNull() {
templateService.tryDownloadingTemplateToImageStore(tmpltMock, destStoreMock);
Mockito.verify(templateService, Mockito.never()).createTemplateAsync(Mockito.any(), Mockito.any(), Mockito.any());
}
@Test
public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenTemplateDoesNotExistOnAnotherImageStore() {
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
}
@Test
public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenInstallPathIsNull() {
templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock);
Mockito.doReturn(null).when(templateInfoMock).getInstallPath();
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
}
@Test
public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenTemplateExistsInAnotherStorageAndTaskWasScheduled() {
templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock);
Mockito.doReturn(new AsyncCallFuture<>()).when(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertTrue(result);
Mockito.verify(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
}
}


@@ -271,8 +271,7 @@ install -D client/target/utilities/bin/cloud-setup-baremetal ${RPM_BUILD_ROOT}%{
install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir}/%{name}-sysvmadm
install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses
# Bundle cmk in cloudstack-management
- CMK_REL=$(wget -O - "https://api.github.com/repos/apache/cloudstack-cloudmonkey/releases" 2>/dev/null | jq -r '.[0].tag_name')
- wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/$CMK_REL/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk
wget https://github.com/apache/cloudstack-cloudmonkey/releases/latest/download/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk
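# The /releases/latest/download/ path on GitHub redirects straight to the newest release asset, so the previous API query and jq parsing are no longer needed.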
chmod +x ${RPM_BUILD_ROOT}%{_bindir}/cmk
cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup


@@ -107,15 +107,6 @@
</execution>
</executions>
</plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <excludes>
- <exclude>**/QemuImg*.java</exclude>
- </excludes>
- </configuration>
- </plugin>
</plugins>
</build>
<profiles>


@@ -1514,7 +1514,13 @@ public class LibvirtVMDef {
@Override
public String toString() {
StringBuilder memBalloonBuilder = new StringBuilder();
- memBalloonBuilder.append("<memballoon model='" + memBalloonModel + "'>\n");
memBalloonBuilder.append("<memballoon model='" + memBalloonModel + "'");
/* Version integer format: major * 1,000,000 + minor * 1,000 + release.
* Require: libvirt 6.9.0, qemu 5.1.0 */
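// For example, libvirt 6.9.0 encodes as 6 * 1000000 + 9 * 1000 + 0 = 6009000 and qemu 5.1.0 as 5001000,
// which are exactly the thresholds compared against below.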
if (memBalloonModel != MemBalloonModel.NONE && s_qemuVersion >= 5001000 && s_libvirtVersion >= 6009000) {
memBalloonBuilder.append(" autodeflate='on' freePageReporting='on'");
}
memBalloonBuilder.append(">\n");
if (StringUtils.isNotBlank(memBalloonStatsPeriod)) {
memBalloonBuilder.append("<stats period='" + memBalloonStatsPeriod +"'/>\n");
}


@@ -1081,7 +1081,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
destFile.setSize(size);
Map<String, String> options = new HashMap<String, String>();
if (List.of(StoragePoolType.NetworkFilesystem, StoragePoolType.Filesystem).contains(pool.getType())) {
- options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString());
options.put(QemuImg.PREALLOCATION, QemuImg.PreallocationType.getPreallocationType(provisioningType).toString());
}
try (KeyFile keyFile = new KeyFile(passphrase)) {


@@ -25,6 +25,7 @@ import java.util.Map;
import java.util.regex.Pattern;
import org.apache.cloudstack.storage.formatinspector.Qcow2Inspector;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;
@@ -51,6 +52,7 @@ public class QemuImg {
public static final String ENCRYPT_FORMAT = "encrypt.format";
public static final String ENCRYPT_KEY_SECRET = "encrypt.key-secret";
public static final String TARGET_ZERO_FLAG = "--target-is-zero";
public static final String PREALLOCATION = "preallocation";
public static final long QEMU_2_10 = 2010000;
public static final long QEMU_5_10 = 5010000;
@@ -393,6 +395,17 @@ public class QemuImg {
convert(srcFile, destFile, options, qemuObjects, srcImageOpts, snapshotName, forceSourceFormat, false);
}
protected Map<String, String> getResizeOptionsFromConvertOptions(final Map<String, String> options) {
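// Only the preallocation entry is propagated to the implicit resize that convert() performs when the destination is larger than the source.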
if (MapUtils.isEmpty(options)) {
return null;
}
Map<String, String> resizeOpts = new HashMap<>();
if (options.containsKey(PREALLOCATION)) {
resizeOpts.put(PREALLOCATION, options.get(PREALLOCATION));
}
return resizeOpts;
}
/**
* Converts an image from source to destination.
*
@@ -420,7 +433,6 @@ public class QemuImg {
public void convert(final QemuImgFile srcFile, final QemuImgFile destFile,
final Map<String, String> options, final List<QemuObject> qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat,
boolean keepBitmaps) throws QemuImgException {
Script script = new Script(_qemuImgPath, timeout);
if (StringUtils.isNotBlank(snapshotName)) {
String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript);
@@ -484,7 +496,7 @@
}
if (srcFile.getSize() < destFile.getSize()) {
- this.resize(destFile, destFile.getSize());
this.resize(destFile, destFile.getSize(), getResizeOptionsFromConvertOptions(options));
}
}
@@ -691,8 +703,10 @@
}
}
- private void addScriptOptionsFromMap(Map<String, String> options, Script s) {
- if (options != null && !options.isEmpty()) {
protected void addScriptOptionsFromMap(Map<String, String> options, Script s) {
if (MapUtils.isEmpty(options)) {
return;
}
s.add("-o");
final StringBuffer optionsBuffer = new StringBuffer();
for (final Map.Entry<String, String> option : options.entrySet()) {
@@ -702,6 +716,16 @@
optionsStr = optionsStr.replaceAll(",$", "");
s.add(optionsStr);
}
protected void addScriptResizeOptionsFromMap(Map<String, String> options, Script s) {
if (MapUtils.isEmpty(options)) {
return;
}
if (options.containsKey(PREALLOCATION)) {
s.add(String.format("--%s=%s", PREALLOCATION, options.get(PREALLOCATION)));
options.remove(PREALLOCATION);
}
addScriptOptionsFromMap(options, s);
}
/**
@@ -747,19 +771,17 @@
/**
* Resizes an image.
* <p>
* This method is a facade for 'qemu-img resize'.
* <p>
* A negative size value will get prefixed with '-' and a positive with '+'. Sizes are in bytes and will be passed on that way.
*
* @param file The file to be resized.
* @param size The new size.
* @param delta Flag to inform if the new size is a delta.
* @param options Script options for resizing. Takes a Map<String, String> with key value
*/
- public void resize(final QemuImgFile file, final long size, final boolean delta) throws QemuImgException {
public void resize(final QemuImgFile file, final long size, final boolean delta, Map<String, String> options) throws QemuImgException {
String newSize = null;
if (size == 0) {
@@ -781,6 +803,7 @@
final Script s = new Script(_qemuImgPath);
s.add("resize");
addScriptResizeOptionsFromMap(options, s);
s.add(file.getFileName());
s.add(newSize);
s.execute();
@@ -789,7 +812,7 @@
/**
* Resizes an image.
*
- * This method is a facade for {@link QemuImg#resize(QemuImgFile, long, boolean)}.
* This method is a facade for {@link QemuImg#resize(QemuImgFile, long, boolean, Map)}.
*
* A negative size value will get prefixed with - and a positive with +. Sizes are in bytes and will be passed on that way.
*
@@ -818,18 +841,17 @@
/**
* Resizes an image.
* <p>
* This method is a facade for {@link QemuImg#resize(QemuImgFile, long, boolean, Map)}.
* <p>
* A negative size value will get prefixed with - and a positive with +. Sizes are in bytes and will be passed on that way.
*
* @param file The file to be resized.
* @param size The new size.
* @param options Script options for resizing. Takes a Map<String, String> with key value
*/
- public void resize(final QemuImgFile file, final long size) throws QemuImgException {
- this.resize(file, size, false);
public void resize(final QemuImgFile file, final long size, Map<String, String> options) throws QemuImgException {
this.resize(file, size, false, options);
}
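As a usage illustration (not part of the patch): a minimal sketch of calling the extended resize API with a preallocation option. The method name, file path, image size and preallocation mode are placeholders; only QemuImg, QemuImgFile and the new resize(file, size, delta, options) signature come from the code above.

void growImageWithPreallocation() throws QemuImgException, LibvirtException {
    QemuImg qemu = new QemuImg(0);   // timeout value as used in the unit tests below
    QemuImgFile file = new QemuImgFile("/tmp/example.qcow2", 20L * 1024 * 1024 * 1024, PhysicalDiskFormat.QCOW2);
    Map<String, String> options = new HashMap<>();
    options.put(QemuImg.PREALLOCATION, "full");
    // delta resize of +1 GiB; addScriptResizeOptionsFromMap translates the option into "--preallocation=full"
    qemu.resize(file, 1024L * 1024 * 1024, true, options);
}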
/**


@@ -196,7 +196,7 @@ public class LibvirtDomainXMLParserTest extends TestCase {
"<alias name='video0'/>" +
"<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>" +
"</video>" +
- "<memballoon model='virtio'>" +
"<memballoon model='virtio' autodeflate='on' freePageReporting='on'>" +
"<stats period='60'/>" +
"<alias name='balloon0'/>" +
"<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>" +
@@ -379,7 +379,7 @@
" <redirdev bus='usb' type='spicevmc'>\n" +
" <address type='usb' bus='0' port='3'/>\n" +
" </redirdev>\n" +
- " <memballoon model='virtio'>\n" +
" <memballoon model='virtio' autodeflate='on' freePageReporting='on'>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>\n" +
" </memballoon>\n" +
" </devices>\n" +


@@ -465,6 +465,21 @@
@Test
public void memBalloonDefTestVirtio() {
LibvirtVMDef.setGlobalQemuVersion(5001000L);
LibvirtVMDef.setGlobalLibvirtVersion(6009000L);
String expectedXml = "<memballoon model='virtio' autodeflate='on' freePageReporting='on'>\n<stats period='60'/>\n</memballoon>";
MemBalloonDef memBalloonDef = new MemBalloonDef();
memBalloonDef.defVirtioMemBalloon("60");
String xmlDef = memBalloonDef.toString();
assertEquals(expectedXml, xmlDef);
}
@Test
public void memBalloonDefTestVirtioOld() {
LibvirtVMDef.setGlobalQemuVersion(2006000L);
LibvirtVMDef.setGlobalLibvirtVersion(9008L);
String expectedXml = "<memballoon model='virtio'>\n<stats period='60'/>\n</memballoon>";
MemBalloonDef memBalloonDef = new MemBalloonDef();
memBalloonDef.defVirtioMemBalloon("60");


@@ -208,7 +208,7 @@ public class LibvirtMigrateVolumeCommandWrapperTest {
" <alias name='watchdog0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>\n" +
" </watchdog>\n" +
- " <memballoon model='virtio'>\n" +
" <memballoon model='virtio' autodeflate='on' freePageReporting='on'>\n" +
" <alias name='balloon0'/>\n" +
" <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>\n" +
" </memballoon>\n" +


@@ -22,26 +22,45 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
- import com.cloud.utils.script.Script;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.commons.collections.MapUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
- import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.libvirt.Connect;
import org.libvirt.LibvirtException;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
- @Ignore
import com.cloud.utils.script.Script;
@RunWith(MockitoJUnitRunner.class)
public class QemuImgTest {
@BeforeClass
public static void setUp() {
Assume.assumeTrue("qemu-img not found", Script.runSimpleBashScript("command -v qemu-img") != null);
boolean libVirtAvailable = false;
try {
Connect conn = new Connect("qemu:///system", false);
conn.getVersion();
libVirtAvailable = true;
} catch (LibvirtException ignored) {}
Assume.assumeTrue("libvirt not available", libVirtAvailable);
}
@Test
public void testCreateAndInfo() throws QemuImgException, LibvirtException {
String filename = "/tmp/" + UUID.randomUUID() + ".qcow2";
@@ -130,8 +149,7 @@ public class QemuImgTest {
public void testCreateSparseVolume() throws QemuImgException, LibvirtException {
String filename = "/tmp/" + UUID.randomUUID() + ".qcow2";
- /* 10TB virtual_size */
- long size = 10995116277760l;
long size = 10 * 1024 * 1024L;
QemuImgFile file = new QemuImgFile(filename, size, PhysicalDiskFormat.QCOW2);
String preallocation = "metadata";
Map<String, String> options = new HashMap<String, String>();
@@ -141,8 +159,8 @@
QemuImg qemu = new QemuImg(0);
qemu.create(file, options);
- String allocatedSize = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $1}'", file));
- String declaredSize = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $6}'", file));
String allocatedSize = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $1}'", filename));
String declaredSize = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $6}'", filename));
assertFalse(allocatedSize.equals(declaredSize));
@@ -162,7 +180,7 @@
try {
QemuImg qemu = new QemuImg(0);
qemu.create(file);
- qemu.resize(file, endSize);
qemu.resize(file, endSize, null);
Map<String, String> info = qemu.info(file);
if (info == null) {
@@ -191,7 +209,7 @@
try {
QemuImg qemu = new QemuImg(0);
qemu.create(file);
- qemu.resize(file, increment, true);
qemu.resize(file, increment, true, null);
Map<String, String> info = qemu.info(file);
if (info == null) {
@@ -208,6 +226,9 @@
f.delete();
}
// This test is failing and needs changes in QemuImg.resize to support shrinking images with delta sizes.
// Earlier, the whole test suite was ignored; now only this test is ignored so the other tests can run.
@Ignore
@Test
public void testCreateAndResizeDeltaNegative() throws QemuImgException, LibvirtException {
String filename = "/tmp/" + UUID.randomUUID() + ".qcow2";
@@ -219,7 +240,7 @@
try {
QemuImg qemu = new QemuImg(0);
qemu.create(file);
- qemu.resize(file, increment, true);
qemu.resize(file, increment, true, null);
Map<String, String> info = qemu.info(file);
if (info == null) {
@@ -249,7 +270,7 @@
QemuImg qemu = new QemuImg(0);
try {
qemu.create(file);
- qemu.resize(file, endSize);
qemu.resize(file, endSize, null);
} finally {
File f = new File(filename);
f.delete();
@@ -265,7 +286,7 @@
QemuImg qemu = new QemuImg(0);
qemu.create(file);
- qemu.resize(file, 0);
qemu.resize(file, 0, null);
File f = new File(filename);
f.delete();
@@ -377,7 +398,7 @@
try {
QemuImg qemu = new QemuImg(0);
- qemu.checkAndRepair(file, null, null, null);
qemu.checkAndRepair(file, new QemuImageOptions(Collections.emptyMap()), Collections.emptyList(), null);
} catch (QemuImgException e) {
fail(e.getMessage());
}
@@ -385,4 +406,160 @@
File f = new File(filename);
f.delete();
}
@Test
public void addScriptOptionsFromMapAddsValidOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
options.put("key1", "value1");
options.put("key2", "value2");
QemuImg qemu = new QemuImg(0);
qemu.addScriptOptionsFromMap(options, script);
Mockito.verify(script, Mockito.times(1)).add("-o");
Mockito.verify(script, Mockito.times(1)).add("key1=value1,key2=value2");
}
@Test
public void addScriptOptionsFromMapHandlesEmptyOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
QemuImg qemu = new QemuImg(0);
qemu.addScriptOptionsFromMap(options, script);
Mockito.verify(script, Mockito.never()).add(Mockito.anyString());
}
@Test
public void addScriptOptionsFromMapHandlesNullOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
QemuImg qemu = new QemuImg(0);
qemu.addScriptOptionsFromMap(null, script);
Mockito.verify(script, Mockito.never()).add(Mockito.anyString());
}
@Test
public void addScriptOptionsFromMapHandlesTrailingComma() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
options.put("key1", "value1");
QemuImg qemu = new QemuImg(0);
qemu.addScriptOptionsFromMap(options, script);
Mockito.verify(script, Mockito.times(1)).add("-o");
Mockito.verify(script, Mockito.times(1)).add("key1=value1");
}
@Test
public void getResizeOptionsFromConvertOptionsReturnsNullForEmptyOptions() throws LibvirtException, QemuImgException {
QemuImg qemuImg = new QemuImg(0);
Map<String, String> options = new HashMap<>();
Map<String, String> result = qemuImg.getResizeOptionsFromConvertOptions(options);
Assert.assertNull(result);
}
@Test
public void getResizeOptionsFromConvertOptionsReturnsNullForNullOptions() throws LibvirtException, QemuImgException {
QemuImg qemuImg = new QemuImg(0);
Map<String, String> result = qemuImg.getResizeOptionsFromConvertOptions(null);
Assert.assertNull(result);
}
@Test
public void getResizeOptionsFromConvertOptionsReturnsPreallocationOption() throws LibvirtException, QemuImgException {
QemuImg qemuImg = new QemuImg(0);
Map<String, String> options = new HashMap<>();
options.put(QemuImg.PREALLOCATION, "metadata");
Map<String, String> result = qemuImg.getResizeOptionsFromConvertOptions(options);
Assert.assertNotNull(result);
assertEquals(1, result.size());
assertEquals("metadata", result.get(QemuImg.PREALLOCATION));
}
@Test
public void getResizeOptionsFromConvertOptionsIgnoresUnrelatedOptions() throws LibvirtException, QemuImgException {
QemuImg qemuImg = new QemuImg(0);
Map<String, String> options = new HashMap<>();
options.put("unrelatedKey", "unrelatedValue");
Map<String, String> result = qemuImg.getResizeOptionsFromConvertOptions(options);
Assert.assertTrue(MapUtils.isEmpty(result));
}
@Test
public void getResizeOptionsFromConvertOptionsHandlesMixedOptions() throws LibvirtException, QemuImgException {
QemuImg qemuImg = new QemuImg(0);
Map<String, String> options = new HashMap<>();
options.put(QemuImg.PREALLOCATION, "full");
options.put("unrelatedKey", "unrelatedValue");
Map<String, String> result = qemuImg.getResizeOptionsFromConvertOptions(options);
Assert.assertNotNull(result);
assertEquals(1, result.size());
assertEquals("full", result.get(QemuImg.PREALLOCATION));
}
@Test
public void addScriptResizeOptionsFromMapAddsPreallocationOption() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
options.put(QemuImg.PREALLOCATION, "metadata");
QemuImg qemuImg = new QemuImg(0);
qemuImg.addScriptResizeOptionsFromMap(options, script);
Mockito.verify(script, Mockito.times(1)).add("--preallocation=metadata");
Mockito.verify(script, Mockito.never()).add("-o");
assertTrue(options.isEmpty());
}
@Test
public void addScriptResizeOptionsFromMapHandlesEmptyOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
QemuImg qemuImg = new QemuImg(0);
qemuImg.addScriptResizeOptionsFromMap(options, script);
Mockito.verify(script, Mockito.never()).add(Mockito.anyString());
}
@Test
public void addScriptResizeOptionsFromMapHandlesNullOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
QemuImg qemuImg = new QemuImg(0);
qemuImg.addScriptResizeOptionsFromMap(null, script);
Mockito.verify(script, Mockito.never()).add(Mockito.anyString());
}
@Test
public void addScriptResizeOptionsFromMapHandlesMixedOptions() throws LibvirtException, QemuImgException {
Script script = Mockito.mock(Script.class);
Map<String, String> options = new HashMap<>();
options.put(QemuImg.PREALLOCATION, "full");
options.put("key", "value");
QemuImg qemuImg = new QemuImg(0);
qemuImg.addScriptResizeOptionsFromMap(options, script);
Mockito.verify(script, Mockito.times(1)).add("--preallocation=full");
Mockito.verify(script, Mockito.times(1)).add("-o");
Mockito.verify(script, Mockito.times(1)).add("key=value");
assertFalse(options.containsKey(QemuImg.PREALLOCATION));
}
}


@@ -21,7 +21,6 @@ import java.util.List;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.response.ListResponse;
- import org.apache.cloudstack.api.response.SystemVmResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.response.VolumeMetricsStatsResponse;
@@ -37,7 +36,7 @@ public class ListVolumesUsageHistoryCmd extends BaseResourceUsageHistoryCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = VolumeResponse.class, description = "the ID of the volume.")
private Long id;
- @Parameter(name=ApiConstants.IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType= SystemVmResponse.class, description="the IDs of the volumes, mutually exclusive with id.")
@Parameter(name = ApiConstants.IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = VolumeResponse.class, description = "the IDs of the volumes, mutually exclusive with id.")
private List<Long> ids;
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the volume (a substring match is made against the parameter value returning the data for all matching Volumes).")


@@ -248,6 +248,30 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
return createVolumeMetricsStatsResponse(volumeList, volumeStatsList);
}
/**
* Outputs the parameters that should be used for access control in the query of a resource to
* {@code permittedAccounts} and {@code domainIdRecursiveListProject}.
* @param isIdProvided indicates whether any ID was provided to the command
*/
private void buildBaseACLSearchParametersForMetrics(boolean isIdProvided, List<Long> permittedAccounts, Ternary<Long, Boolean,
Project.ListProjectResourcesCriteria> domainIdRecursiveListProject) {
Account caller = CallContext.current().getCallingAccount();
Account.Type callerType = caller.getType();
boolean recursive = AccountTypesWithRecursiveUsageAccess.contains(callerType);
domainIdRecursiveListProject.second(recursive);
// If no ID was provided, then the listing will skip project resources (null); otherwise, project resources should
// be listed as well (any long allows this)
Long id = isIdProvided ? 1L : null;
// Allow users to also list metrics of resources owned by projects they belong to (-1L), and admins to list all
// metrics belonging to their domains recursively (null)
Long projectId = isIdProvided && callerType == Account.Type.NORMAL ? -1L : null;
accountMgr.buildACLSearchParameters(caller, id, null, projectId, permittedAccounts, domainIdRecursiveListProject, true, false);
}
/**
* Searches VMs based on {@code ListVMsUsageHistoryCmd} parameters.
*
@@ -255,18 +279,18 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
* @return the list of VMs.
*/
protected Pair<List<UserVmVO>, Integer> searchForUserVmsInternal(ListVMsUsageHistoryCmd cmd) {
- final Long id = cmd.getId();
- Account caller = CallContext.current().getCallingAccount();
List<Long> ids = getIdsListFromCmd(cmd.getId(), cmd.getIds());
boolean isIdProvided = CollectionUtils.isNotEmpty(ids);
List<Long> permittedAccounts = new ArrayList<>();
- boolean recursive = AccountTypesWithRecursiveUsageAccess.contains(caller.getType());
- Ternary<Long, Boolean, Project.ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(null, recursive, null);
- accountMgr.buildACLSearchParameters(caller, id, null, null, permittedAccounts, domainIdRecursiveListProject, true, false);
Ternary<Long, Boolean, Project.ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(null, null, null);
buildBaseACLSearchParametersForMetrics(isIdProvided, permittedAccounts, domainIdRecursiveListProject);
Long domainId = domainIdRecursiveListProject.first();
Boolean isRecursive = domainIdRecursiveListProject.second();
Project.ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third();
Filter searchFilter = new Filter(UserVmVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
- List<Long> ids = getIdsListFromCmd(cmd.getId(), cmd.getIds());
String name = cmd.getName();
String keyword = cmd.getKeyword();
@@ -361,18 +385,18 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
* @return the list of volumes.
*/
protected Pair<List<VolumeVO>, Integer> searchForVolumesInternal(ListVolumesUsageHistoryCmd cmd) {
- final Long id = cmd.getId();
- Account caller = CallContext.current().getCallingAccount();
List<Long> ids = getIdsListFromCmd(cmd.getId(), cmd.getIds());
boolean isIdProvided = CollectionUtils.isNotEmpty(ids);
List<Long> permittedAccounts = new ArrayList<>();
- boolean recursive = AccountTypesWithRecursiveUsageAccess.contains(caller.getType());
- Ternary<Long, Boolean, Project.ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(null, recursive, null);
- accountMgr.buildACLSearchParameters(caller, id, null, null, permittedAccounts, domainIdRecursiveListProject, true, false);
Ternary<Long, Boolean, Project.ListProjectResourcesCriteria> domainIdRecursiveListProject = new Ternary<>(null, null, null);
buildBaseACLSearchParametersForMetrics(isIdProvided, permittedAccounts, domainIdRecursiveListProject);
Long domainId = domainIdRecursiveListProject.first();
Boolean isRecursive = domainIdRecursiveListProject.second();
Project.ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third();
Filter searchFilter = new Filter(VolumeVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal());
- List<Long> ids = getIdsListFromCmd(cmd.getId(), cmd.getIds());
String name = cmd.getName();
String keyword = cmd.getKeyword();


@@ -37,7 +37,7 @@ import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd;
import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd;
- import org.apache.cloudstack.api.command.admin.user.RegisterCmd;
import org.apache.cloudstack.api.command.admin.user.RegisterUserKeyCmd;
import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd;
import org.apache.cloudstack.context.CallContext;
@@ -118,7 +118,7 @@ public class MockAccountManager extends ManagerBase implements AccountManager {
}
@Override
- public String[] createApiKeyAndSecretKey(RegisterCmd arg0) {
public String[] createApiKeyAndSecretKey(RegisterUserKeyCmd arg0) {
// TODO Auto-generated method stub
return null;
}


@@ -736,7 +736,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
logger.debug("Unable to allocate proxy {} with {} in {} due to [{}]. Retrying with another template", proxy, template, dc, e.getMessage(), e);
continue;
}
- throw new CloudRuntimeException("Failed to allocate proxy [%s] in zone [%s] with available templates", e);
throw new CloudRuntimeException(String.format("Failed to allocate proxy [%s] in zone [%s] with available templates", proxy, dc), e);
}
}


@@ -282,7 +282,7 @@ import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd;
import org.apache.cloudstack.api.command.admin.user.ListUsersCmd;
import org.apache.cloudstack.api.command.admin.user.LockUserCmd;
import org.apache.cloudstack.api.command.admin.user.MoveUserCmd;
- import org.apache.cloudstack.api.command.admin.user.RegisterCmd;
import org.apache.cloudstack.api.command.admin.user.RegisterUserKeyCmd;
import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd;
import org.apache.cloudstack.api.command.admin.vlan.CreateVlanIpRangeCmd;
import org.apache.cloudstack.api.command.admin.vlan.DedicatePublicIpRangeCmd;
@@ -3931,7 +3931,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(ListUsersCmd.class);
cmdList.add(LockUserCmd.class);
cmdList.add(MoveUserCmd.class);
- cmdList.add(RegisterCmd.class);
cmdList.add(RegisterUserKeyCmd.class);
cmdList.add(UpdateUserCmd.class);
cmdList.add(CreateVlanIpRangeCmd.class);
cmdList.add(UpdateVlanIpRangeCmd.class);


@@ -3354,7 +3354,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
throw new InvalidParameterValueException(String.format("host: %s is not a secondary storage", secHost));
}
- URI uri = null;
URI uri;
try {
uri = new URI(UriUtils.encodeURIComponent(newUrl));
if (uri.getScheme() == null) {
@@ -3377,7 +3377,7 @@
String oldUrl = secHost.getStorageUrl();
- URI oldUri = null;
URI oldUri;
try {
oldUri = new URI(UriUtils.encodeURIComponent(oldUrl));
if (!oldUri.getScheme().equalsIgnoreCase(uri.getScheme())) {
@@ -4602,7 +4602,8 @@
DataStoreDownloadFollowRedirects,
AllowVolumeReSizeBeyondAllocation,
StoragePoolHostConnectWorkers,
- ObjectStorageCapacityThreshold
ObjectStorageCapacityThreshold,
COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES
};
}


@@ -61,7 +61,7 @@ import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd;
import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd;
import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd;
import org.apache.cloudstack.api.command.admin.user.MoveUserCmd;
- import org.apache.cloudstack.api.command.admin.user.RegisterCmd;
import org.apache.cloudstack.api.command.admin.user.RegisterUserKeyCmd;
import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd;
import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse;
import org.apache.cloudstack.auth.UserAuthenticator;
@@ -3129,7 +3129,7 @@
@Override
@DB
@ActionEvent(eventType = EventTypes.EVENT_REGISTER_FOR_SECRET_API_KEY, eventDescription = "register for the developer API keys")
- public String[] createApiKeyAndSecretKey(RegisterCmd cmd) {
public String[] createApiKeyAndSecretKey(RegisterUserKeyCmd cmd) {
Account caller = getCurrentCallingAccount();
final Long userId = cmd.getId();


@@ -603,7 +603,7 @@ class CsIP:
if item == "id":
continue
static_route = static_routes.get_bag()[item]
- if static_route['ip_address'] == self.address['public_ip'] and not static_route['revoke']:
if 'ip_address' in static_route and static_route['ip_address'] == self.address['public_ip'] and not static_route['revoke']:
self.fw.append(["mangle", "",
"-A PREROUTING -m state --state NEW -i %s -s %s ! -d %s/32 -j ACL_OUTBOUND_%s" %
(self.dev, static_route['network'], static_route['ip_address'], self.dev)])


@@ -99,7 +99,7 @@ export default {
default: false
},
dataSource: {
- type: Object,
type: Array,
required: true
},
deleteFn: {
@@ -128,6 +128,7 @@
dataIndex: 'intervaltype'
},
{
key: 'time',
title: this.$t('label.time'),
dataIndex: 'schedule'
},


@@ -35,16 +35,16 @@
v-model:value="form.intervaltype"
button-style="solid"
@change="handleChangeIntervalType">
- <a-radio-button value="hourly">
<a-radio-button value="hourly" :disabled="isIntervalDisabled('hourly')">
{{ $t('label.hourly') }}
</a-radio-button>
- <a-radio-button value="daily">
<a-radio-button value="daily" :disabled="isIntervalDisabled('daily')">
{{ $t('label.daily') }}
</a-radio-button>
- <a-radio-button value="weekly">
<a-radio-button value="weekly" :disabled="isIntervalDisabled('weekly')">
{{ $t('label.weekly') }}
</a-radio-button>
- <a-radio-button value="monthly">
<a-radio-button value="monthly" :disabled="isIntervalDisabled('monthly')">
{{ $t('label.monthly') }}
</a-radio-button>
</a-radio-group>
@@ -54,6 +54,7 @@
<a-form-item :label="$t('label.time')" ref="time" name="time">
<a-input-number
style="width: 100%"
:disabled="isIntervalDisabled(form.intervaltype)"
v-model:value="form.time"
:placeholder="$t('label.minute.past.hour')"
:min="1"
@@ -70,6 +71,7 @@
<a-time-picker
use12Hours
format="h:mm A"
:disabled="isIntervalDisabled(form.intervaltype)"
v-model:value="form.timeSelect"
style="width: 100%;" />
</a-form-item>
@@ -79,6 +81,7 @@
<a-select
v-model:value="form['day-of-week']"
showSearch
:disabled="isIntervalDisabled(form.intervaltype)"
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
@@ -94,6 +97,7 @@
<a-select
v-model:value="form['day-of-month']"
showSearch
:disabled="isIntervalDisabled(form.intervaltype)"
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
@@ -180,6 +184,10 @@ export default {
type: Boolean,
default: false
},
dataSource: {
type: Array,
required: true
},
resource: {
type: Object,
required: true
@@ -210,6 +218,38 @@
this.fetchTimeZone()
this.fetchBackupOffering()
},
mounted () {
if (this.form.intervaltype && this.isIntervalDisabled(this.form.intervaltype)) {
const nextAvailable = this.getNextAvailableIntervalType(this.form.intervaltype)
if (nextAvailable) {
this.form.intervaltype = nextAvailable
this.handleChangeIntervalType()
}
}
},
watch: {
dataSource: {
handler () {
if (this.form.intervaltype && this.getNextAvailableIntervalType && this.isIntervalDisabled(this.form.intervaltype)) {
const nextAvailable = this.getNextAvailableIntervalType(this.form.intervaltype)
if (nextAvailable) {
this.form.intervaltype = nextAvailable
this.handleChangeIntervalType()
}
}
},
deep: true
},
'form.intervaltype' (newVal) {
if (newVal && this.getNextAvailableIntervalType && this.isIntervalDisabled(newVal)) {
const nextAvailable = this.getNextAvailableIntervalType(newVal)
if (nextAvailable) {
this.form.intervaltype = nextAvailable
this.handleChangeIntervalType()
}
}
}
},
inject: ['refreshSchedule', 'closeSchedule'], inject: ['refreshSchedule', 'closeSchedule'],
computed: { computed: {
isQuiesceVmSupported () { isQuiesceVmSupported () {
@ -274,19 +314,39 @@ export default {
}) })
} }
}, },
handleChangeIntervalType (e) { handleChangeIntervalType () {
switch (this.form.intervaltype) { if (this.form.intervaltype === 'weekly') {
case 'weekly':
this.fetchDayOfWeek() this.fetchDayOfWeek()
break } else if (this.form.intervaltype === 'monthly') {
case 'monthly':
this.intervalValue = 'MONTHLY'
this.fetchDayOfMonth() this.fetchDayOfMonth()
break
default:
break
} }
}, },
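// Walks hourly -> daily -> weekly -> monthly starting just after the current type and
// returns the first type without an existing schedule, or null when all four are taken.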
getNextAvailableIntervalType (currentIntervalType) {
const intervalTypes = ['hourly', 'daily', 'weekly', 'monthly']
const currentIndex = intervalTypes.indexOf(currentIntervalType ? currentIntervalType.toLowerCase() : '')
const startIndex = currentIndex >= 0 ? currentIndex : -1
for (let i = 1; i <= intervalTypes.length; i++) {
const nextIndex = (startIndex + i) % intervalTypes.length
const nextIntervalType = intervalTypes[nextIndex]
if (!this.isIntervalDisabled(nextIntervalType)) {
return nextIntervalType
}
}
return null
},
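// An interval type is disabled when a schedule of that type already exists; entries in
// dataSource carry upper-case interval types, hence the toUpperCase() before filtering.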
isIntervalDisabled (intervalType) {
intervalType = intervalType.toUpperCase()
if (this.dataSource?.length === 0) {
return false
}
const dataSource = this.dataSource.filter(item => item.intervaltype === intervalType)
if (dataSource && dataSource.length > 0) {
return true
}
return false
},
handleSubmit (e) { handleSubmit (e) {
if (this.actionLoading) return if (this.actionLoading) return
this.formRef.value.validate().then(() => { this.formRef.value.validate().then(() => {
@ -294,7 +354,7 @@ export default {
const values = this.handleRemoveFields(formRaw) const values = this.handleRemoveFields(formRaw)
const params = {} const params = {}
params.virtualmachineid = this.resource.id params.virtualmachineid = this.resource.id
params.intervaltype = values.intervaltype params.intervaltype = values.intervaltype.toUpperCase()
params.maxbackups = values.maxbackups params.maxbackups = values.maxbackups
params.timezone = values.timezone params.timezone = values.timezone
if (values.quiescevm) { if (values.quiescevm) {

View File

@ -55,6 +55,7 @@
@cancel="closeModals"> @cancel="closeModals">
<form-schedule <form-schedule
:resource="addFormResource" :resource="addFormResource"
:dataSource="dataSource"
:submitFn="handleAddBackupSchedule" /> :submitFn="handleAddBackupSchedule" />
</a-modal> </a-modal>
</div> </div>
@ -90,7 +91,8 @@ export default {
return { return {
backupOffering: null, backupOffering: null,
showAddBackupSchedule: false, showAddBackupSchedule: false,
localBackupOfferingId: this.backupOfferingId localBackupOfferingId: this.backupOfferingId,
dataSource: []
} }
}, },
provide () { provide () {

View File

@ -38,16 +38,16 @@
v-model:value="form.intervaltype" v-model:value="form.intervaltype"
buttonStyle="solid" buttonStyle="solid"
@change="handleChangeIntervalType"> @change="handleChangeIntervalType">
<a-radio-button value="hourly" :disabled="handleVisibleInterval(0)"> <a-radio-button value="hourly" :disabled="isIntervalDisabled('hourly')">
{{ $t('label.hourly') }} {{ $t('label.hourly') }}
</a-radio-button> </a-radio-button>
<a-radio-button value="daily" :disabled="handleVisibleInterval(1)"> <a-radio-button value="daily" :disabled="isIntervalDisabled('daily')">
{{ $t('label.daily') }} {{ $t('label.daily') }}
</a-radio-button> </a-radio-button>
<a-radio-button value="weekly" :disabled="handleVisibleInterval(2)"> <a-radio-button value="weekly" :disabled="isIntervalDisabled('weekly')">
{{ $t('label.weekly') }} {{ $t('label.weekly') }}
</a-radio-button> </a-radio-button>
<a-radio-button value="monthly" :disabled="handleVisibleInterval(3)"> <a-radio-button value="monthly" :disabled="isIntervalDisabled('monthly')">
{{ $t('label.monthly') }} {{ $t('label.monthly') }}
</a-radio-button> </a-radio-button>
</a-radio-group> </a-radio-group>
@ -60,6 +60,7 @@
:title="$t('label.minute.past.hour')"> :title="$t('label.minute.past.hour')">
<a-input-number <a-input-number
style="width: 100%" style="width: 100%"
:disabled="isIntervalDisabled(form.intervaltype)"
v-model:value="form.time" v-model:value="form.time"
:min="1" :min="1"
:max="59" :max="59"
@ -76,6 +77,7 @@
<a-time-picker <a-time-picker
use12Hours use12Hours
format="h:mm A" format="h:mm A"
:disabled="isIntervalDisabled(form.intervaltype)"
v-model:value="form.timeSelect" v-model:value="form.timeSelect"
style="width: 100%;" /> style="width: 100%;" />
</a-form-item> </a-form-item>
@ -85,6 +87,7 @@
<a-select <a-select
v-model:value="form['day-of-week']" v-model:value="form['day-of-week']"
showSearch showSearch
:disabled="isIntervalDisabled(form.intervaltype)"
optionFilterProp="label" optionFilterProp="label"
:filterOption="(input, option) => { :filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0 return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
@ -100,6 +103,7 @@
<a-select <a-select
v-model:value="form['day-of-month']" v-model:value="form['day-of-month']"
showSearch showSearch
:disabled="isIntervalDisabled(form.intervaltype)"
optionFilterProp="value" optionFilterProp="value"
:filterOption="(input, option) => { :filterOption="(input, option) => {
return option.value.toLowerCase().indexOf(input.toLowerCase()) >= 0 return option.value.toLowerCase().indexOf(input.toLowerCase()) >= 0
@ -311,6 +315,15 @@ export default {
this.volumeId = this.resource.id this.volumeId = this.resource.id
this.fetchTimeZone() this.fetchTimeZone()
}, },
mounted () {
if (this.form.intervaltype && this.isIntervalDisabled(this.form.intervaltype)) {
const nextAvailable = this.getNextAvailableIntervalType(this.form.intervaltype)
if (nextAvailable) {
this.form.intervaltype = nextAvailable
this.handleChangeIntervalType()
}
}
},
computed: { computed: {
formattedAdditionalZoneMessage () { formattedAdditionalZoneMessage () {
return `${this.$t('message.snapshot.additional.zones').replace('%x', this.resource.zonename)}` return `${this.$t('message.snapshot.additional.zones').replace('%x', this.resource.zonename)}`
@ -319,6 +332,20 @@ export default {
return isAdmin() return isAdmin()
} }
}, },
watch: {
dataSource: {
handler () {
if (this.form.intervaltype && this.getNextAvailableIntervalType && this.isIntervalDisabled(this.form.intervaltype)) {
const nextAvailable = this.getNextAvailableIntervalType(this.form.intervaltype)
if (nextAvailable) {
this.form.intervaltype = nextAvailable
this.handleChangeIntervalType()
}
}
},
deep: true
}
},
methods: { methods: {
initForm () { initForm () {
this.formRef = ref() this.formRef = ref()
@ -404,28 +431,46 @@ export default {
} }
}, },
handleChangeIntervalType () { handleChangeIntervalType () {
switch (this.form.intervaltype) { if (this.form.intervaltype === 'weekly') {
case 'hourly':
this.intervalValue = 0
break
case 'daily':
this.intervalValue = 1
break
case 'weekly':
this.intervalValue = 2
this.fetchDayOfWeek() this.fetchDayOfWeek()
break } else if (this.form.intervaltype === 'monthly') {
case 'monthly':
this.intervalValue = 3
this.fetchDayOfMonth() this.fetchDayOfMonth()
break }
this.intervalValue = this.getIntervalValue(this.form.intervaltype)
},
getIntervalValue (intervalType) {
switch (intervalType) {
case 'hourly':
return 0
case 'daily':
return 1
case 'weekly':
return 2
case 'monthly':
return 3
} }
}, },
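// Cycles through the interval types starting after the current one and returns the
// first type that still has no schedule; null means every type is already scheduled.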
handleVisibleInterval (intervalType) { getNextAvailableIntervalType (currentIntervalType) {
const intervalTypes = ['hourly', 'daily', 'weekly', 'monthly']
const currentIndex = intervalTypes.indexOf(currentIntervalType)
const startIndex = currentIndex >= 0 ? currentIndex : -1
for (let i = 1; i <= intervalTypes.length; i++) {
const nextIndex = (startIndex + i) % intervalTypes.length
const nextIntervalType = intervalTypes[nextIndex]
if (!this.isIntervalDisabled(nextIntervalType)) {
return nextIntervalType
}
}
return null
},
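// Here the entries in dataSource use numeric interval types, so the string type is
// mapped through getIntervalValue() before comparing.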
isIntervalDisabled (intervalType) {
const intervalValue = this.getIntervalValue(intervalType)
if (this.dataSource.length === 0) { if (this.dataSource.length === 0) {
return false return false
} }
const dataSource = this.dataSource.filter(item => item.intervaltype === intervalType) const dataSource = this.dataSource.filter(item => item.intervaltype === intervalValue)
if (dataSource && dataSource.length > 0) { if (dataSource && dataSource.length > 0) {
return true return true
} }