Merge branch 'main' into cks-enhancements-upstream

This commit is contained in:
nvazquez 2025-02-23 23:13:36 -03:00
commit 0662117722
No known key found for this signature in database
GPG Key ID: 656E1BCC8CB54F84
217 changed files with 6534 additions and 2048 deletions

View File

@ -51,16 +51,13 @@ github:
collaborators:
- acs-robot
- rajujith
- GaOrtiga
- SadiJr
- winterhazel
- gpordeus
- hsato03
- bernardodemarco
- abh1sar
- FelipeM525
- lucas-a-martins
- nicoschmdt
protected_branches: ~

View File

@ -21,7 +21,7 @@ public interface Resource {
short RESOURCE_UNLIMITED = -1;
String UNLIMITED = "Unlimited";
enum ResourceType { // Primary and Secondary storage are allocated_storage and not the physical storage.
enum ResourceType { // All storage type resources are allocated_storage and not the physical storage.
user_vm("user_vm", 0),
public_ip("public_ip", 1),
volume("volume", 2),
@ -33,7 +33,11 @@ public interface Resource {
cpu("cpu", 8),
memory("memory", 9),
primary_storage("primary_storage", 10),
secondary_storage("secondary_storage", 11);
secondary_storage("secondary_storage", 11),
backup("backup", 12),
backup_storage("backup_storage", 13),
bucket("bucket", 14),
object_storage("object_storage", 15);
private String name;
private int ordinal;
@ -62,6 +66,10 @@ public interface Resource {
}
return null;
}
public static Boolean isStorageType(ResourceType type) {
return (type == primary_storage || type == secondary_storage || type == backup_storage || type == object_storage);
}
}
public static class ResourceOwnerType {

View File

@ -787,6 +787,9 @@ public class EventTypes {
public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE";
public static final String EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER";
// Resource Limit
public static final String EVENT_RESOURCE_LIMIT_UPDATE = "RESOURCE.LIMIT.UPDATE";
static {
// TODO: need a way to force author adding event types to declare the entity details as well, with out braking

View File

@ -26,7 +26,7 @@ import com.cloud.utils.SerialVersionUID;
public class StorageAccessException extends RuntimeException {
private static final long serialVersionUID = SerialVersionUID.StorageAccessException;
public StorageAccessException(String message) {
super(message);
public StorageAccessException(String message, Exception causer) {
super(message, causer);
}
}

View File

@ -16,14 +16,10 @@
// under the License.
package com.cloud.storage;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.lang3.StringUtils;
import java.util.ArrayList;
import java.util.List;
public class Storage {
public static enum ImageFormat {
@ -139,6 +135,21 @@ public class Storage {
ISODISK /* Template corresponding to a iso (non root disk) present in an OVA */
}
public enum EncryptionSupport {
/**
* Encryption not supported.
*/
Unsupported,
/**
* Will use hypervisor encryption driver (qemu -> luks)
*/
Hypervisor,
/**
* Storage pool handles encryption and just provides an encrypted volume
*/
Storage
}
/**
* StoragePoolTypes carry some details about the format and capabilities of a storage pool. While not necessarily a
* 1:1 with PrimaryDataStoreDriver (and for KVM agent, KVMStoragePool and StorageAdaptor) implementations, it is
@ -150,61 +161,37 @@ public class Storage {
* ensure this is available on the agent side as well. This is best done by defining the StoragePoolType in a common
* package available on both management server and agent plugin jars.
*/
public static class StoragePoolType {
private static final Map<String, StoragePoolType> map = new LinkedHashMap<>();
public static enum StoragePoolType {
Filesystem(false, true, EncryptionSupport.Hypervisor), // local directory
NetworkFilesystem(true, true, EncryptionSupport.Hypervisor), // NFS
IscsiLUN(true, false, EncryptionSupport.Unsupported), // shared LUN, with a clusterfs overlay
Iscsi(true, false, EncryptionSupport.Unsupported), // for e.g., ZFS Comstar
ISO(false, false, EncryptionSupport.Unsupported), // for iso image
LVM(false, false, EncryptionSupport.Unsupported), // XenServer local LVM SR
CLVM(true, false, EncryptionSupport.Unsupported),
RBD(true, true, EncryptionSupport.Unsupported), // http://libvirt.org/storage.html#StorageBackendRBD
SharedMountPoint(true, true, EncryptionSupport.Hypervisor),
VMFS(true, true, EncryptionSupport.Unsupported), // VMware VMFS storage
PreSetup(true, true, EncryptionSupport.Unsupported), // for XenServer, Storage Pool is set up by customers.
EXT(false, true, EncryptionSupport.Unsupported), // XenServer local EXT SR
OCFS2(true, false, EncryptionSupport.Unsupported),
SMB(true, false, EncryptionSupport.Unsupported),
Gluster(true, false, EncryptionSupport.Unsupported),
PowerFlex(true, true, EncryptionSupport.Hypervisor), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS)
ManagedNFS(true, false, EncryptionSupport.Unsupported),
Linstor(true, true, EncryptionSupport.Storage),
DatastoreCluster(true, true, EncryptionSupport.Unsupported), // for VMware, to abstract pool of clusters
StorPool(true, true, EncryptionSupport.Hypervisor),
FiberChannel(true, true, EncryptionSupport.Unsupported); // Fiber Channel Pool for KVM hypervisors is used to find the volume by WWN value (/dev/disk/by-id/wwn-<wwnvalue>)
public static final StoragePoolType Filesystem = new StoragePoolType("Filesystem", false, true, true);
public static final StoragePoolType NetworkFilesystem = new StoragePoolType("NetworkFilesystem", true, true, true);
public static final StoragePoolType IscsiLUN = new StoragePoolType("IscsiLUN", true, false, false);
public static final StoragePoolType Iscsi = new StoragePoolType("Iscsi", true, false, false);
public static final StoragePoolType ISO = new StoragePoolType("ISO", false, false, false);
public static final StoragePoolType LVM = new StoragePoolType("LVM", false, false, false);
public static final StoragePoolType CLVM = new StoragePoolType("CLVM", true, false, false);
public static final StoragePoolType RBD = new StoragePoolType("RBD", true, true, false);
public static final StoragePoolType SharedMountPoint = new StoragePoolType("SharedMountPoint", true, true, true);
public static final StoragePoolType VMFS = new StoragePoolType("VMFS", true, true, false);
public static final StoragePoolType PreSetup = new StoragePoolType("PreSetup", true, true, false);
public static final StoragePoolType EXT = new StoragePoolType("EXT", false, true, false);
public static final StoragePoolType OCFS2 = new StoragePoolType("OCFS2", true, false, false);
public static final StoragePoolType SMB = new StoragePoolType("SMB", true, false, false);
public static final StoragePoolType Gluster = new StoragePoolType("Gluster", true, false, false);
public static final StoragePoolType PowerFlex = new StoragePoolType("PowerFlex", true, true, true);
public static final StoragePoolType ManagedNFS = new StoragePoolType("ManagedNFS", true, false, false);
public static final StoragePoolType Linstor = new StoragePoolType("Linstor", true, true, false);
public static final StoragePoolType DatastoreCluster = new StoragePoolType("DatastoreCluster", true, true, false);
public static final StoragePoolType StorPool = new StoragePoolType("StorPool", true,true,true);
public static final StoragePoolType FiberChannel = new StoragePoolType("FiberChannel", true,true,false);
private final String name;
private final boolean shared;
private final boolean overProvisioning;
private final boolean encryption;
private final EncryptionSupport encryption;
/**
* New StoragePoolType, set the name to check with it in Dao (Note: Do not register it into the map of pool types).
* @param name name of the StoragePoolType.
*/
public StoragePoolType(String name) {
this.name = name;
this.shared = false;
this.overProvisioning = false;
this.encryption = false;
}
/**
* Define a new StoragePoolType, and register it into the map of pool types known to the management server.
* @param name Simple unique name of the StoragePoolType.
* @param shared Storage pool is shared/accessible to multiple hypervisors
* @param overProvisioning Storage pool supports overProvisioning
* @param encryption Storage pool supports encrypted volumes
*/
public StoragePoolType(String name, boolean shared, boolean overProvisioning, boolean encryption) {
this.name = name;
StoragePoolType(boolean shared, boolean overProvisioning, EncryptionSupport encryption) {
this.shared = shared;
this.overProvisioning = overProvisioning;
this.encryption = encryption;
addStoragePoolType(this);
}
public boolean isShared() {
@ -216,50 +203,12 @@ public class Storage {
}
public boolean supportsEncryption() {
return encryption == EncryptionSupport.Hypervisor || encryption == EncryptionSupport.Storage;
}
public EncryptionSupport encryptionSupportMode() {
return encryption;
}
private static void addStoragePoolType(StoragePoolType storagePoolType) {
map.putIfAbsent(storagePoolType.name, storagePoolType);
}
public static StoragePoolType[] values() {
return map.values().toArray(StoragePoolType[]::new).clone();
}
public static StoragePoolType valueOf(String name) {
if (StringUtils.isBlank(name)) {
return null;
}
StoragePoolType storage = map.get(name);
if (storage == null) {
throw new IllegalArgumentException("StoragePoolType '" + name + "' not found");
}
return storage;
}
@Override
public String toString() {
return name;
}
public String name() {
return name;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StoragePoolType that = (StoragePoolType) o;
return Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
}
public static List<StoragePoolType> getNonSharedStoragePoolTypes() {

View File

@ -190,4 +190,6 @@ public interface VolumeApiService {
boolean stateTransitTo(Volume vol, Volume.Event event) throws NoTransitionException;
Pair<String, String> checkAndRepairVolume(CheckAndRepairVolumeCmd cmd) throws ResourceAllocationException;
Long getVolumePhysicalSize(Storage.ImageFormat format, String path, String chainInfo);
}

View File

@ -51,12 +51,19 @@ public class ApiConstants {
public static final String AVAILABLE = "available";
public static final String AVAILABLE_SUBNETS = "availablesubnets";
public static final String AVAILABLE_VIRTUAL_MACHINE_COUNT = "availablevirtualmachinecount";
public static final String BACKUP_AVAILABLE = "backupavailable";
public static final String BACKUP_ID = "backupid";
public static final String BACKUP_LIMIT = "backuplimit";
public static final String BACKUP_OFFERING_NAME = "backupofferingname";
public static final String BACKUP_OFFERING_ID = "backupofferingid";
public static final String BACKUP_STORAGE_AVAILABLE = "backupstorageavailable";
public static final String BACKUP_STORAGE_LIMIT = "backupstoragelimit";
public static final String BACKUP_STORAGE_TOTAL = "backupstoragetotal";
public static final String BACKUP_TOTAL = "backuptotal";
public static final String BASE64_IMAGE = "base64image";
public static final String BGP_PEERS = "bgppeers";
public static final String BGP_PEER_IDS = "bgppeerids";
public static final String BATCH_SIZE = "batchsize";
public static final String BITS = "bits";
public static final String BOOTABLE = "bootable";
public static final String BIND_DN = "binddn";
@ -328,6 +335,7 @@ public class ApiConstants {
public static final String MANUAL_UPGRADE = "manualupgrade";
public static final String MAX = "max";
public static final String MAX_SNAPS = "maxsnaps";
public static final String MAX_BACKUPS = "maxbackups";
public static final String MAX_CPU_NUMBER = "maxcpunumber";
public static final String MAX_MEMORY = "maxmemory";
public static final String MIN_CPU_NUMBER = "mincpunumber";
@ -444,6 +452,7 @@ public class ApiConstants {
public static final String QUALIFIERS = "qualifiers";
public static final String QUERY_FILTER = "queryfilter";
public static final String SCHEDULE = "schedule";
public static final String SCHEDULE_ID = "scheduleid";
public static final String SCOPE = "scope";
public static final String SEARCH_BASE = "searchbase";
public static final String SECONDARY_IP = "secondaryip";
@ -455,7 +464,6 @@ public class ApiConstants {
public static final String SENT = "sent";
public static final String SENT_BYTES = "sentbytes";
public static final String SERIAL = "serial";
public static final String SERVICE_IP = "serviceip";
public static final String SERVICE_OFFERING_ID = "serviceofferingid";
public static final String SESSIONKEY = "sessionkey";
public static final String SHOW_CAPACITIES = "showcapacities";
@ -485,11 +493,12 @@ public class ApiConstants {
public static final String STATE = "state";
public static final String STATS = "stats";
public static final String STATUS = "status";
public static final String STORAGE_TYPE = "storagetype";
public static final String STORAGE_POLICY = "storagepolicy";
public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled";
public static final String STORAGE_CAPABILITIES = "storagecapabilities";
public static final String STORAGE_CUSTOM_STATS = "storagecustomstats";
public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled";
public static final String STORAGE_POLICY = "storagepolicy";
public static final String STORAGE_POOL = "storagepool";
public static final String STORAGE_TYPE = "storagetype";
public static final String SUBNET = "subnet";
public static final String OWNER = "owner";
public static final String SWAP_OWNER = "swapowner";
@ -969,7 +978,6 @@ public class ApiConstants {
public static final String AUTOSCALE_VMGROUP_NAME = "autoscalevmgroupname";
public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername";
public static final String BAREMETAL_RCT_URL = "baremetalrcturl";
public static final String BATCH_SIZE = "batchsize";
public static final String UCS_DN = "ucsdn";
public static final String GSLB_PROVIDER = "gslbprovider";
public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider";
@ -1169,7 +1177,6 @@ public class ApiConstants {
public static final String MTU = "mtu";
public static final String AUTO_ENABLE_KVM_HOST = "autoenablekvmhost";
public static final String LIST_APIS = "listApis";
public static final String OBJECT_STORAGE_ID = "objectstorageid";
public static final String VERSIONING = "versioning";
public static final String OBJECT_LOCKING = "objectlocking";
public static final String ENCRYPTION = "encryption";
@ -1183,7 +1190,6 @@ public class ApiConstants {
public static final String DISK_PATH = "diskpath";
public static final String IMPORT_SOURCE = "importsource";
public static final String TEMP_PATH = "temppath";
public static final String OBJECT_STORAGE = "objectstore";
public static final String HEURISTIC_RULE = "heuristicrule";
public static final String HEURISTIC_TYPE_VALID_OPTIONS = "Valid options are: ISO, SNAPSHOT, TEMPLATE and VOLUME.";
public static final String MANAGEMENT = "management";
@ -1211,6 +1217,16 @@ public class ApiConstants {
public static final String SHAREDFSVM_MIN_CPU_COUNT = "sharedfsvmmincpucount";
public static final String SHAREDFSVM_MIN_RAM_SIZE = "sharedfsvmminramsize";
// Object Storage related
public static final String BUCKET_AVAILABLE = "bucketavailable";
public static final String BUCKET_LIMIT = "bucketlimit";
public static final String BUCKET_TOTAL = "buckettotal";
public static final String OBJECT_STORAGE_ID = "objectstorageid";
public static final String OBJECT_STORAGE = "objectstore";
public static final String OBJECT_STORAGE_AVAILABLE = "objectstorageavailable";
public static final String OBJECT_STORAGE_LIMIT = "objectstoragelimit";
public static final String OBJECT_STORAGE_TOTAL = "objectstoragetotal";
public static final String PARAMETER_DESCRIPTION_ACTIVATION_RULE = "Quota tariff's activation rule. It can receive a JS script that results in either " +
"a boolean or a numeric value: if it results in a boolean value, the tariff value will be applied according to the result; if it results in a numeric value, the " +
"numeric value will be applied; if the result is neither a boolean nor a numeric value, the tariff will not be applied. If the rule is not informed, the tariff " +
@ -1224,6 +1240,8 @@ public class ApiConstants {
"however, the following formats are also accepted: \"yyyy-MM-dd HH:mm:ss\" (e.g.: \"2023-01-01 12:00:00\") and \"yyyy-MM-dd\" (e.g.: \"2023-01-01\" - if the time is not " +
"added, it will be interpreted as \"23:59:59\"). If the recommended format is not used, the date will be considered in the server timezone.";
public static final String VMWARE_DC = "vmwaredc";
/**
* This enum specifies IO Drivers, each option controls specific policies on I/O.
* Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0).

View File

@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.backup;
import javax.inject.Inject;
import com.cloud.storage.Snapshot;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
@ -27,6 +28,7 @@ import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseAsyncCreateCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.BackupScheduleResponse;
import org.apache.cloudstack.api.response.SuccessResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.backup.BackupManager;
@ -60,6 +62,13 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd {
description = "ID of the VM")
private Long vmId;
@Parameter(name = ApiConstants.SCHEDULE_ID,
type = CommandType.LONG,
entityType = BackupScheduleResponse.class,
description = "backup schedule ID of the VM, if this is null, it indicates that it is a manual backup.",
since = "4.21.0")
private Long scheduleId;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -68,6 +77,14 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd {
return vmId;
}
public Long getScheduleId() {
if (scheduleId != null) {
return scheduleId;
} else {
return Snapshot.MANUAL_POLICY_ID;
}
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@ -75,7 +92,7 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd {
@Override
public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException {
try {
boolean result = backupManager.createBackup(getVmId());
boolean result = backupManager.createBackup(getVmId(), getScheduleId());
if (result) {
SuccessResponse response = new SuccessResponse(getCommandName());
response.setResponseName(getCommandName());

View File

@ -75,6 +75,12 @@ public class CreateBackupScheduleCmd extends BaseCmd {
description = "Specifies a timezone for this command. For more information on the timezone parameter, see TimeZone Format.")
private String timezone;
@Parameter(name = ApiConstants.MAX_BACKUPS,
type = CommandType.INTEGER,
description = "maximum number of backups to retain",
since = "4.21.0")
private Integer maxBackups;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -95,6 +101,10 @@ public class CreateBackupScheduleCmd extends BaseCmd {
return timezone;
}
public Integer getMaxBackups() {
return maxBackups;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -72,7 +72,7 @@ public class CreateBucketCmd extends BaseAsyncCreateCmd implements UserCmd {
description = "Id of the Object Storage Pool where bucket is created")
private long objectStoragePoolId;
@Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER,description = "Bucket Quota in GB")
@Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER, required = true, description = "Bucket Quota in GiB")
private Integer quota;
@Parameter(name = ApiConstants.ENCRYPTION, type = CommandType.BOOLEAN, description = "Enable bucket encryption")

View File

@ -56,7 +56,7 @@ public class UpdateBucketCmd extends BaseCmd {
@Parameter(name = ApiConstants.POLICY, type = CommandType.STRING, description = "Bucket Access Policy")
private String policy;
@Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER,description = "Bucket Quota in GB")
@Parameter(name = ApiConstants.QUOTA, type = CommandType.INTEGER, description = "Bucket Quota in GiB")
private Integer quota;
/////////////////////////////////////////////////////

View File

@ -127,6 +127,30 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou
@Param(description = "the total number of snapshots available for this account")
private String snapshotAvailable;
@SerializedName(ApiConstants.BACKUP_LIMIT)
@Param(description = "the total number of backups which can be stored by this account", since = "4.21.0")
private String backupLimit;
@SerializedName(ApiConstants.BACKUP_TOTAL)
@Param(description = "the total number of backups stored by this account", since = "4.21.0")
private Long backupTotal;
@SerializedName(ApiConstants.BACKUP_AVAILABLE)
@Param(description = "the total number of backups available to this account", since = "4.21.0")
private String backupAvailable;
@SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT)
@Param(description = "the total backup storage space (in GiB) the account can own", since = "4.21.0")
private String backupStorageLimit;
@SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL)
@Param(description = "the total backup storage space (in GiB) owned by the account", since = "4.21.0")
private Long backupStorageTotal;
@SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE)
@Param(description = "the total backup storage space (in GiB) available to the account", since = "4.21.0")
private String backupStorageAvailable;
@SerializedName("templatelimit")
@Param(description = "the total number of templates which can be created by this account")
private String templateLimit;
@ -231,6 +255,30 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou
@Param(description = "the total secondary storage space (in GiB) available to be used for this account", since = "4.2.0")
private String secondaryStorageAvailable;
@SerializedName(ApiConstants.BUCKET_LIMIT)
@Param(description = "the total number of buckets which can be stored by this account", since = "4.21.0")
private String bucketLimit;
@SerializedName(ApiConstants.BUCKET_TOTAL)
@Param(description = "the total number of buckets stored by this account", since = "4.21.0")
private Long bucketTotal;
@SerializedName(ApiConstants.BUCKET_AVAILABLE)
@Param(description = "the total number of buckets available to this account", since = "4.21.0")
private String bucketAvailable;
@SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT)
@Param(description = "the total object storage space (in GiB) the account can own", since = "4.21.0")
private String objectStorageLimit;
@SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL)
@Param(description = "the total object storage space (in GiB) owned by the account", since = "4.21.0")
private Long objectStorageTotal;
@SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE)
@Param(description = "the total object storage space (in GiB) available to the account", since = "4.21.0")
private String objectStorageAvailable;
@SerializedName(ApiConstants.STATE)
@Param(description = "the state of the account")
private String state;
@ -386,6 +434,36 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou
this.snapshotAvailable = snapshotAvailable;
}
@Override
public void setBackupLimit(String backupLimit) {
this.backupLimit = backupLimit;
}
@Override
public void setBackupTotal(Long backupTotal) {
this.backupTotal = backupTotal;
}
@Override
public void setBackupAvailable(String backupAvailable) {
this.backupAvailable = backupAvailable;
}
@Override
public void setBackupStorageLimit(String backupStorageLimit) {
this.backupStorageLimit = backupStorageLimit;
}
@Override
public void setBackupStorageTotal(Long backupStorageTotal) {
this.backupStorageTotal = backupStorageTotal;
}
@Override
public void setBackupStorageAvailable(String backupStorageAvailable) {
this.backupStorageAvailable = backupStorageAvailable;
}
@Override
public void setTemplateLimit(String templateLimit) {
this.templateLimit = templateLimit;
@ -537,6 +615,36 @@ public class AccountResponse extends BaseResponse implements ResourceLimitAndCou
this.secondaryStorageAvailable = secondaryStorageAvailable;
}
@Override
public void setBucketLimit(String bucketLimit) {
this.bucketLimit = bucketLimit;
}
@Override
public void setBucketTotal(Long bucketTotal) {
this.bucketTotal = bucketTotal;
}
@Override
public void setBucketAvailable(String bucketAvailable) {
this.bucketAvailable = bucketAvailable;
}
@Override
public void setObjectStorageLimit(String objectStorageLimit) {
this.objectStorageLimit = objectStorageLimit;
}
@Override
public void setObjectStorageTotal(Long objectStorageTotal) {
this.objectStorageTotal = objectStorageTotal;
}
@Override
public void setObjectStorageAvailable(String objectStorageAvailable) {
this.objectStorageAvailable = objectStorageAvailable;
}
public void setDefaultZone(String defaultZoneId) {
this.defaultZoneId = defaultZoneId;
}

View File

@ -37,18 +37,22 @@ public class BackupScheduleResponse extends BaseResponse {
@Param(description = "ID of the VM")
private String vmId;
@SerializedName("schedule")
@SerializedName(ApiConstants.SCHEDULE)
@Param(description = "time the backup is scheduled to be taken.")
private String schedule;
@SerializedName("intervaltype")
@SerializedName(ApiConstants.INTERVAL_TYPE)
@Param(description = "the interval type of the backup schedule")
private DateUtil.IntervalType intervalType;
@SerializedName("timezone")
@SerializedName(ApiConstants.TIMEZONE)
@Param(description = "the time zone of the backup schedule")
private String timezone;
@SerializedName(ApiConstants.MAX_BACKUPS)
@Param(description = "maximum number of backups retained")
private Integer maxBakups;
public String getVmName() {
return vmName;
}
@ -88,4 +92,8 @@ public class BackupScheduleResponse extends BaseResponse {
public void setTimezone(String timezone) {
this.timezone = timezone;
}
public void setMaxBakups(Integer maxBakups) {
this.maxBakups = maxBakups;
}
}

View File

@ -75,7 +75,7 @@ public class BucketResponse extends BaseResponseWithTagInformation implements Co
private String state;
@SerializedName(ApiConstants.QUOTA)
@Param(description = "Bucket Quota in GB")
@Param(description = "Bucket Quota in GiB")
private Integer quota;
@SerializedName(ApiConstants.ENCRYPTION)

View File

@ -105,6 +105,30 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou
@SerializedName("snapshotavailable") @Param(description="the total number of snapshots available for this domain")
private String snapshotAvailable;
@SerializedName(ApiConstants.BACKUP_LIMIT)
@Param(description = "the total number of backups which can be stored by this domain", since = "4.21.0")
private String backupLimit;
@SerializedName(ApiConstants.BACKUP_TOTAL)
@Param(description = "the total number of backups stored by this domain", since = "4.21.0")
private Long backupTotal;
@SerializedName(ApiConstants.BACKUP_AVAILABLE)
@Param(description = "the total number of backups available to this domain", since = "4.21.0")
private String backupAvailable;
@SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT)
@Param(description = "the total backup storage space (in GiB) the domain can own", since = "4.21.0")
private String backupStorageLimit;
@SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL)
@Param(description = "the total backup storage space (in GiB) owned by the domain", since = "4.21.0")
private Long backupStorageTotal;
@SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE)
@Param(description = "the total backup storage space (in GiB) available to the domain", since = "4.21.0")
private String backupStorageAvailable;
@SerializedName("templatelimit") @Param(description="the total number of templates which can be created by this domain")
private String templateLimit;
@ -177,6 +201,30 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou
@SerializedName("secondarystorageavailable") @Param(description="the total secondary storage space (in GiB) available to be used for this domain", since="4.2.0")
private String secondaryStorageAvailable;
@SerializedName(ApiConstants.BUCKET_LIMIT)
@Param(description = "the total number of buckets which can be stored by this domain", since = "4.21.0")
private String bucketLimit;
@SerializedName(ApiConstants.BUCKET_TOTAL)
@Param(description = "the total number of buckets stored by this domain", since = "4.21.0")
private Long bucketTotal;
@SerializedName(ApiConstants.BUCKET_AVAILABLE)
@Param(description = "the total number of buckets available to this domain", since = "4.21.0")
private String bucketAvailable;
@SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT)
@Param(description = "the total object storage space (in GiB) the domain can own", since = "4.21.0")
private String objectStorageLimit;
@SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL)
@Param(description = "the total object storage space (in GiB) owned by the domain", since = "4.21.0")
private Long objectStorageTotal;
@SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE)
@Param(description = "the total object storage space (in GiB) available to the domain", since = "4.21.0")
private String objectStorageAvailable;
@SerializedName(ApiConstants.RESOURCE_ICON)
@Param(description = "Base64 string representation of the resource icon", since = "4.16.0.0")
ResourceIconResponse icon;
@ -313,6 +361,36 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou
this.snapshotAvailable = snapshotAvailable;
}
@Override
public void setBackupLimit(String backupLimit) {
this.backupLimit = backupLimit;
}
@Override
public void setBackupTotal(Long backupTotal) {
this.backupTotal = backupTotal;
}
@Override
public void setBackupAvailable(String backupAvailable) {
this.backupAvailable = backupAvailable;
}
@Override
public void setBackupStorageLimit(String backupStorageLimit) {
this.backupStorageLimit = backupStorageLimit;
}
@Override
public void setBackupStorageTotal(Long backupStorageTotal) {
this.backupStorageTotal = backupStorageTotal;
}
@Override
public void setBackupStorageAvailable(String backupStorageAvailable) {
this.backupStorageAvailable = backupStorageAvailable;
}
@Override
public void setTemplateLimit(String templateLimit) {
this.templateLimit = templateLimit;
@ -430,6 +508,36 @@ public class DomainResponse extends BaseResponseWithAnnotations implements Resou
this.secondaryStorageAvailable = secondaryStorageAvailable;
}
@Override
public void setBucketLimit(String bucketLimit) {
this.bucketLimit = bucketLimit;
}
@Override
public void setBucketTotal(Long bucketTotal) {
this.bucketTotal = bucketTotal;
}
@Override
public void setBucketAvailable(String bucketAvailable) {
this.bucketAvailable = bucketAvailable;
}
@Override
public void setObjectStorageLimit(String objectStorageLimit) {
this.objectStorageLimit = objectStorageLimit;
}
@Override
public void setObjectStorageTotal(Long objectStorageTotal) {
this.objectStorageTotal = objectStorageTotal;
}
@Override
public void setObjectStorageAvailable(String objectStorageAvailable) {
this.objectStorageAvailable = objectStorageAvailable;
}
public void setState(String state) {
this.state = state;
}

View File

@ -152,7 +152,7 @@ public class HostResponse extends BaseResponseWithAnnotations {
@Deprecated
@SerializedName("memoryallocated")
@Param(description = "the amount of the host's memory currently allocated")
private long memoryAllocated;
private Long memoryAllocated;
@SerializedName("memoryallocatedpercentage")
@Param(description = "the amount of the host's memory currently allocated in percentage")
@ -415,7 +415,7 @@ public class HostResponse extends BaseResponseWithAnnotations {
this.memWithOverprovisioning=memWithOverprovisioning;
}
public void setMemoryAllocated(long memoryAllocated) {
public void setMemoryAllocated(Long memoryAllocated) {
this.memoryAllocated = memoryAllocated;
}
@ -703,8 +703,8 @@ public class HostResponse extends BaseResponseWithAnnotations {
return memoryTotal;
}
public long getMemoryAllocated() {
return memoryAllocated;
public Long getMemoryAllocated() {
return memoryAllocated == null ? 0 : memoryAllocated;
}
public void setMemoryAllocatedPercentage(String memoryAllocatedPercentage) {

View File

@ -74,9 +74,9 @@ public class ManagementServerResponse extends BaseResponse {
@Param(description = "the running OS kernel version for this Management Server")
private String kernelVersion;
@SerializedName(ApiConstants.SERVICE_IP)
@SerializedName(ApiConstants.IP_ADDRESS)
@Param(description = "the IP Address for this Management Server")
private String serviceIp;
private String ipAddress;
@SerializedName(ApiConstants.PEERS)
@Param(description = "the Management Server Peers")
@ -130,8 +130,8 @@ public class ManagementServerResponse extends BaseResponse {
return lastBoot;
}
public String getServiceIp() {
return serviceIp;
public String getIpAddress() {
return ipAddress;
}
public Long getAgentsCount() {
@ -186,8 +186,8 @@ public class ManagementServerResponse extends BaseResponse {
this.kernelVersion = kernelVersion;
}
public void setServiceIp(String serviceIp) {
this.serviceIp = serviceIp;
public void setIpAddress(String ipAddress) {
this.ipAddress = ipAddress;
}
public void setAgentsCount(Long agentsCount) {

View File

@ -140,6 +140,30 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou
@Param(description = "the total secondary storage space (in GiB) available to be used for this project", since = "4.2.0")
private String secondaryStorageAvailable;
@SerializedName(ApiConstants.BUCKET_LIMIT)
@Param(description = "the total number of buckets which can be stored by this project", since = "4.21.0")
private String bucketLimit;
@SerializedName(ApiConstants.BUCKET_TOTAL)
@Param(description = "the total number of buckets stored by this project", since = "4.21.0")
private Long bucketTotal;
@SerializedName(ApiConstants.BUCKET_AVAILABLE)
@Param(description = "the total number of buckets available to this project", since = "4.21.0")
private String bucketAvailable;
@SerializedName(ApiConstants.OBJECT_STORAGE_LIMIT)
@Param(description = "the total object storage space (in GiB) the project can own", since = "4.21.0")
private String objectStorageLimit;
@SerializedName(ApiConstants.OBJECT_STORAGE_TOTAL)
@Param(description = "the total object storage space (in GiB) owned by the project", since = "4.21.0")
private Long objectStorageTotal;
@SerializedName(ApiConstants.OBJECT_STORAGE_AVAILABLE)
@Param(description = "the total object storage space (in GiB) available to the project", since = "4.21.0")
private String objectStorageAvailable;
@SerializedName(ApiConstants.VM_LIMIT)
@Param(description = "the total number of virtual machines that can be deployed by this project", since = "4.2.0")
private String vmLimit;
@ -188,6 +212,30 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou
@Param(description = "the total number of snapshots available for this project", since = "4.2.0")
private String snapshotAvailable;
@SerializedName(ApiConstants.BACKUP_LIMIT)
@Param(description = "the total number of backups which can be stored by this project", since = "4.21.0")
private String backupLimit;
@SerializedName(ApiConstants.BACKUP_TOTAL)
@Param(description = "the total number of backups stored by this project", since = "4.21.0")
private Long backupTotal;
@SerializedName(ApiConstants.BACKUP_AVAILABLE)
@Param(description = "the total number of backups available to this project", since = "4.21.0")
private String backupAvailable;
@SerializedName(ApiConstants.BACKUP_STORAGE_LIMIT)
@Param(description = "the total backup storage space (in GiB) the project can own", since = "4.21.0")
private String backupStorageLimit;
@SerializedName(ApiConstants.BACKUP_STORAGE_TOTAL)
@Param(description = "the total backup storage space (in GiB) owned by the project", since = "4.21.0")
private Long backupStorageTotal;
@SerializedName(ApiConstants.BACKUP_STORAGE_AVAILABLE)
@Param(description = "the total backup storage space (in GiB) available to the project", since = "4.21.0")
private String backupStorageAvailable;
@SerializedName("templatelimit")
@Param(description = "the total number of templates which can be created by this project", since = "4.2.0")
private String templateLimit;
@ -320,6 +368,36 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou
this.snapshotAvailable = snapshotAvailable;
}
@Override
public void setBackupLimit(String backupLimit) {
this.backupLimit = backupLimit;
}
@Override
public void setBackupTotal(Long backupTotal) {
this.backupTotal = backupTotal;
}
@Override
public void setBackupAvailable(String backupAvailable) {
this.backupAvailable = backupAvailable;
}
@Override
public void setBackupStorageLimit(String backupStorageLimit) {
this.backupStorageLimit = backupStorageLimit;
}
@Override
public void setBackupStorageTotal(Long backupStorageTotal) {
this.backupStorageTotal = backupStorageTotal;
}
@Override
public void setBackupStorageAvailable(String backupStorageAvailable) {
this.backupStorageAvailable = backupStorageAvailable;
}
@Override
public void setTemplateLimit(String templateLimit) {
this.templateLimit = templateLimit;
@ -435,6 +513,36 @@ public class ProjectResponse extends BaseResponse implements ResourceLimitAndCou
this.secondaryStorageAvailable = secondaryStorageAvailable;
}
@Override
public void setBucketLimit(String bucketLimit) {
this.bucketLimit = bucketLimit;
}
@Override
public void setBucketTotal(Long bucketTotal) {
this.bucketTotal = bucketTotal;
}
@Override
public void setBucketAvailable(String bucketAvailable) {
this.bucketAvailable = bucketAvailable;
}
@Override
public void setObjectStorageLimit(String objectStorageLimit) {
this.objectStorageLimit = objectStorageLimit;
}
@Override
public void setObjectStorageTotal(Long objectStorageTotal) {
this.objectStorageTotal = objectStorageTotal;
}
@Override
public void setObjectStorageAvailable(String objectStorageAvailable) {
this.objectStorageAvailable = objectStorageAvailable;
}
public void setOwners(List<Map<String, String>> owners) {
this.owners = owners;
}

View File

@ -84,6 +84,30 @@ public interface ResourceLimitAndCountResponse {
public void setSnapshotAvailable(String snapshotAvailable);
public void setBackupLimit(String backupLimit);
public void setBackupTotal(Long backupTotal);
public void setBackupAvailable(String backupAvailable);
public void setBackupStorageLimit(String backupStorageLimit);
public void setBackupStorageTotal(Long backupStorageTotal);
public void setBackupStorageAvailable(String backupStorageAvailable);
void setBucketLimit(String bucketLimit);
void setBucketTotal(Long bucketTotal);
void setBucketAvailable(String bucketAvailable);
void setObjectStorageLimit(String objectStorageLimit);
void setObjectStorageTotal(Long objectStorageTotal);
void setObjectStorageAvailable(String objectStorageAvailable);
public void setTemplateLimit(String templateLimit);
public void setTemplateTotal(Long templateTotal);

View File

@ -33,6 +33,28 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity {
Allocated, Queued, BackingUp, BackedUp, Error, Failed, Restoring, Removed, Expunged
}
/**
 * Recurrence type of a VM backup. {@code MANUAL} backups are user-initiated;
 * the remaining values correspond to scheduled recurring backups.
 */
public enum Type {
    MANUAL, HOURLY, DAILY, WEEKLY, MONTHLY;

    // Maximum number of backups retained for this recurrence type; defaults to 8.
    // NOTE(review): enum constants are global, so this mutable field is shared
    // process-wide state — confirm callers only set it from configuration.
    private int max = 8;

    public void setMax(int max) {
        this.max = max;
    }

    public int getMax() {
        return max;
    }

    @Override
    public String toString() {
        return name();
    }

    /**
     * Case-insensitive comparison against a type name. This overloads (does not
     * override) {@link Object#equals(Object)}.
     */
    public boolean equals(String snapshotType) {
        return name().equalsIgnoreCase(snapshotType);
    }
}
class Metric {
private Long backupSize = 0L;
private Long dataSize = 0L;

View File

@ -19,6 +19,7 @@ package org.apache.cloudstack.backup;
import java.util.List;
import com.cloud.exception.ResourceAllocationException;
import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd;
import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd;
import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd;
@ -56,6 +57,86 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer
"false",
"Enable volume attach/detach operations for VMs that are assigned to Backup Offerings.", true);
ConfigKey<Integer> BackupHourlyMax = new ConfigKey<Integer>("Advanced", Integer.class,
"backup.max.hourly",
"8",
"Maximum recurring hourly backups to be retained for an instance. If the limit is reached, early backups from the start of the hour are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring hourly backups can not be scheduled.",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Integer> BackupDailyMax = new ConfigKey<Integer>("Advanced", Integer.class,
"backup.max.daily",
"8",
"Maximum recurring daily backups to be retained for an instance. If the limit is reached, backups from the start of the day are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring daily backups can not be scheduled.",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Integer> BackupWeeklyMax = new ConfigKey<Integer>("Advanced", Integer.class,
"backup.max.weekly",
"8",
"Maximum recurring weekly backups to be retained for an instance. If the limit is reached, backups from the beginning of the week are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring weekly backups can not be scheduled.",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Integer> BackupMonthlyMax = new ConfigKey<Integer>("Advanced", Integer.class,
"backup.max.monthly",
"8",
"Maximum recurring monthly backups to be retained for an instance. If the limit is reached, backups from the beginning of the month are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring monthly backups can not be scheduled.",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxAccountBackups = new ConfigKey<Long>("Account Defaults", Long.class,
"max.account.backups",
"20",
"The default maximum number of backups that can be created for an account",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxAccountBackupStorage = new ConfigKey<Long>("Account Defaults", Long.class,
"max.account.backup.storage",
"400",
"The default maximum backup storage space (in GiB) that can be used for an account",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxProjectBackups = new ConfigKey<Long>("Project Defaults", Long.class,
"max.project.backups",
"20",
"The default maximum number of backups that can be created for a project",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxProjectBackupStorage = new ConfigKey<Long>("Project Defaults", Long.class,
"max.project.backup.storage",
"400",
"The default maximum backup storage space (in GiB) that can be used for a project",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxDomainBackups = new ConfigKey<Long>("Domain Defaults", Long.class,
"max.domain.backups",
"40",
"The default maximum number of backups that can be created for a domain",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxDomainBackupStorage = new ConfigKey<Long>("Domain Defaults", Long.class,
"max.domain.backup.storage",
"800",
"The default maximum backup storage space (in GiB) that can be used for a domain",
false,
ConfigKey.Scope.Global,
null);
/**
* List backup provider offerings
* @param zoneId zone id
@ -119,9 +200,10 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer
/**
* Creates backup of a VM
* @param vmId Virtual Machine ID
* @param scheduleId Virtual Machine Backup Schedule ID
* @return returns operation success
*/
boolean createBackup(final Long vmId);
boolean createBackup(final Long vmId, final Long scheduleId) throws ResourceAllocationException;
/**
* List existing backups for a VM

View File

@ -75,7 +75,7 @@ public interface BackupProvider {
* @param backup
* @return
*/
boolean takeBackup(VirtualMachine vm);
Pair<Boolean, Backup> takeBackup(VirtualMachine vm);
/**
* Delete an existing backup
@ -104,9 +104,16 @@ public interface BackupProvider {
Map<VirtualMachine, Backup.Metric> getBackupMetrics(Long zoneId, List<VirtualMachine> vms);
/**
 * Lists the restore points that currently exist on the backup provider for the
 * given VM, so that backups created out-of-band can be reconciled with
 * CloudStack's backup records.
 * @param vm the virtual machine whose provider-side restore points are queried
 */
public List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm);
/**
 * Creates a new CloudStack backup entry for a provider-side restore point
 * discovered during reconciliation (e.g. a backup taken out-of-band).
 * @param restorePoint the provider restore point to register
 * @param vm the virtual machine the restore point belongs to
 * @param metric backup/data size metrics to record on the new entry
 */
void syncBackups(VirtualMachine vm, Backup.Metric metric);
Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric);
}

View File

@ -30,4 +30,5 @@ public interface BackupSchedule extends InternalIdentity {
String getTimezone();
Date getScheduledTimestamp();
Long getAsyncJobId();
Integer getMaxBackups();
}

View File

@ -22,10 +22,59 @@ import com.cloud.exception.ResourceAllocationException;
import com.cloud.user.Account;
import org.apache.cloudstack.api.command.user.bucket.CreateBucketCmd;
import org.apache.cloudstack.api.command.user.bucket.UpdateBucketCmd;
import org.apache.cloudstack.framework.config.ConfigKey;
public interface BucketApiService {
ConfigKey<Long> DefaultMaxAccountBuckets = new ConfigKey<Long>("Account Defaults", Long.class,
"max.account.buckets",
"20",
"The default maximum number of buckets that can be created for an account",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxAccountObjectStorage = new ConfigKey<Long>("Account Defaults", Long.class,
"max.account.object.storage",
"400",
"The default maximum object storage space (in GiB) that can be used for an account",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxProjectBuckets = new ConfigKey<Long>("Project Defaults", Long.class,
"max.project.buckets",
"20",
"The default maximum number of buckets that can be created for a project",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxProjectObjectStorage = new ConfigKey<Long>("Project Defaults", Long.class,
"max.project.object.storage",
"400",
"The default maximum object storage space (in GiB) that can be used for a project",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxDomainBuckets = new ConfigKey<Long>("Domain Defaults", Long.class,
"max.domain.buckets",
"20",
"The default maximum number of buckets that can be created for a domain",
false,
ConfigKey.Scope.Global,
null);
ConfigKey<Long> DefaultMaxDomainObjectStorage = new ConfigKey<Long>("Domain Defaults", Long.class,
"max.domain.object.storage",
"400",
"The default maximum object storage space (in GiB) that can be used for a domain",
false,
ConfigKey.Scope.Global,
null);
/**
* Creates the database object for a Bucket based on the given criteria
*
@ -48,7 +97,7 @@ public interface BucketApiService {
boolean deleteBucket(long bucketId, Account caller);
boolean updateBucket(UpdateBucketCmd cmd, Account caller);
boolean updateBucket(UpdateBucketCmd cmd, Account caller) throws ResourceAllocationException;
void getBucketUsage();
}

View File

@ -0,0 +1,46 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;
/**
 * Agent command that destroys a leftover VM during the expunge process when it
 * was not destroyed earlier.
 */
public class CleanupVMCommand extends Command {
    String vmName;
    boolean executeInSequence;

    /**
     * Creates a cleanup command for the named VM that may execute in parallel
     * with other commands.
     */
    public CleanupVMCommand(String vmName) {
        this(vmName, false);
    }

    /**
     * Creates a cleanup command for the named VM.
     *
     * @param executeInSequence whether the agent must serialize this command
     *                          with respect to other commands
     */
    public CleanupVMCommand(String vmName, boolean executeInSequence) {
        this.vmName = vmName;
        this.executeInSequence = executeInSequence;
    }

    public String getVmName() {
        return vmName;
    }

    @Override
    public boolean executeInSequence() {
        return executeInSequence;
    }
}

2
debian/control vendored
View File

@ -1,7 +1,7 @@
Source: cloudstack
Section: libs
Priority: extra
Maintainer: Wido den Hollander <wido@widodh.nl>
Maintainer: The Apache CloudStack Team <dev@cloudstack.apache.org>
Build-Depends: debhelper (>= 9), openjdk-17-jdk | java17-sdk | java17-jdk | zulu-17 | openjdk-11-jdk | java11-sdk | java11-jdk | zulu-11, genisoimage,
python-mysql.connector | python3-mysql.connector | mysql-connector-python-py3, maven (>= 3) | maven3,
python (>= 2.7) | python2 (>= 2.7), python3 (>= 3), python-setuptools, python3-setuptools,

View File

@ -16,6 +16,8 @@
// under the License.
package com.cloud.capacity;
import java.util.List;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
@ -67,7 +69,7 @@ public interface CapacityManager {
"0.85",
"Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.",
true,
ConfigKey.Scope.Zone);
List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
static final ConfigKey<Double> StorageOverprovisioningFactor =
new ConfigKey<>(
"Storage",
@ -85,7 +87,7 @@ public interface CapacityManager {
"0.85",
"Percentage (as a value between 0 and 1) of allocated storage utilization above which allocators will disable using the pool for low allocated storage available.",
true,
ConfigKey.Scope.Zone);
List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
static final ConfigKey<Boolean> StorageOperationsExcludeCluster =
new ConfigKey<>(
Boolean.class,
@ -125,7 +127,7 @@ public interface CapacityManager {
"Percentage (as a value between 0 and 1) of allocated storage utilization above which allocators will disable using the pool for volume resize. " +
"This is applicable only when volume.resize.allowed.beyond.allocation is set to true.",
true,
ConfigKey.Scope.Zone);
List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
ConfigKey<Integer> CapacityCalculateWorkers = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
"capacity.calculate.workers", "1",

View File

@ -214,7 +214,7 @@ public interface StorageManager extends StorageService {
ConfigKey<Boolean> AllowVolumeReSizeBeyondAllocation = new ConfigKey<Boolean>("Advanced", Boolean.class, "volume.resize.allowed.beyond.allocation", "false",
"Determines whether volume size can exceed the pool capacity allocation disable threshold (pool.storage.allocated.capacity.disablethreshold) " +
"when resize a volume upto resize capacity disable threshold (pool.storage.allocated.resize.capacity.disablethreshold)",
true, ConfigKey.Scope.Zone);
true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
ConfigKey<Integer> StoragePoolHostConnectWorkers = new ConfigKey<>("Storage", Integer.class,
"storage.pool.host.connect.workers", "1",

View File

@ -1807,7 +1807,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host));
throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host), e);
}
}
@ -1847,7 +1847,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(volumeId), host, volumeStore);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e);
}
}
@ -1928,7 +1928,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
try {
volService.grantAccess(volFactory.getVolume(vol.getId()), host, store);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host));
throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e);
}
} else {
grantVolumeAccessToHostIfNeeded(store, vol.getId(), host, volToString);

View File

@ -20,8 +20,9 @@ import java.util.Collection;
import java.util.Map;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
public interface ClusterDetailsDao extends GenericDao<ClusterDetailsVO, Long> {
public interface ClusterDetailsDao extends GenericDao<ClusterDetailsVO, Long>, ResourceDetailsDao<ClusterDetailsVO> {
Map<String, String> findDetails(long clusterId);
void persist(long clusterId, Map<String, String> details);

View File

@ -22,18 +22,27 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.org.Cluster;
import com.cloud.utils.Pair;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
public class ClusterDetailsDaoImpl extends ResourceDetailsDaoBase<ClusterDetailsVO> implements ClusterDetailsDao, ScopedConfigStorage {
@Inject
ClusterDao clusterDao;
public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long> implements ClusterDetailsDao, ScopedConfigStorage {
protected final SearchBuilder<ClusterDetailsVO> ClusterSearch;
protected final SearchBuilder<ClusterDetailsVO> DetailSearch;
@ -44,11 +53,11 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long
protected ClusterDetailsDaoImpl() {
ClusterSearch = createSearchBuilder();
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
ClusterSearch.and("clusterId", ClusterSearch.entity().getResourceId(), SearchCriteria.Op.EQ);
ClusterSearch.done();
DetailSearch = createSearchBuilder();
DetailSearch.and("clusterId", DetailSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
DetailSearch.and("clusterId", DetailSearch.entity().getResourceId(), SearchCriteria.Op.EQ);
DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ);
DetailSearch.done();
}
@ -68,6 +77,11 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long
return detail;
}
// Persists a single detail row for the given cluster. The 'display' flag is
// ignored: ClusterDetailsVO has no display column and always reports
// isDisplay() == true, so every cluster detail is treated as displayable.
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
super.addDetail(new ClusterDetailsVO(resourceId, key, value));
}
@Override
public Map<String, String> findDetails(long clusterId) {
SearchCriteria<ClusterDetailsVO> sc = ClusterSearch.create();
@ -91,7 +105,7 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long
return new HashMap<>();
}
SearchBuilder<ClusterDetailsVO> sb = createSearchBuilder();
sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ);
sb.and("clusterId", sb.entity().getResourceId(), SearchCriteria.Op.EQ);
sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN);
sb.done();
SearchCriteria<ClusterDetailsVO> sc = sb.create();
@ -180,4 +194,13 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase<ClusterDetailsVO, Long
return name;
}
/**
 * Resolves the parent configuration scope of a cluster-scoped setting: the
 * zone (data center) that owns the cluster.
 *
 * @param id cluster id
 * @return pair of the parent scope and the cluster's zone id, or {@code null}
 *         when no cluster with the given id exists
 */
@Override
public Pair<Scope, Long> getParentScope(long id) {
    final Cluster cluster = clusterDao.findById(id);
    return cluster == null ? null : new Pair<>(getScope().getParent(), cluster.getDataCenterId());
}
}

View File

@ -23,11 +23,11 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.api.ResourceDetail;
@Entity
@Table(name = "cluster_details")
public class ClusterDetailsVO implements InternalIdentity {
public class ClusterDetailsVO implements ResourceDetail {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@ -35,7 +35,7 @@ public class ClusterDetailsVO implements InternalIdentity {
private long id;
@Column(name = "cluster_id")
private long clusterId;
private long resourceId;
@Column(name = "name")
private String name;
@ -47,13 +47,14 @@ public class ClusterDetailsVO implements InternalIdentity {
}
public ClusterDetailsVO(long clusterId, String name, String value) {
this.clusterId = clusterId;
this.resourceId = clusterId;
this.name = name;
this.value = value;
}
public long getClusterId() {
return clusterId;
@Override
public long getResourceId() {
return resourceId;
}
public String getName() {
@ -64,6 +65,11 @@ public class ClusterDetailsVO implements InternalIdentity {
return value;
}
@Override
public boolean isDisplay() {
return true;
}
public void setValue(String value) {
this.value = value;
}

View File

@ -23,18 +23,18 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.api.ResourceDetail;
@Entity
@Table(name = "domain_details")
public class DomainDetailVO implements InternalIdentity {
public class DomainDetailVO implements ResourceDetail {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "domain_id")
private long domainId;
private long resourceId;
@Column(name = "name")
private String name;
@ -46,13 +46,14 @@ public class DomainDetailVO implements InternalIdentity {
}
public DomainDetailVO(long domainId, String name, String value) {
this.domainId = domainId;
this.resourceId = domainId;
this.name = name;
this.value = value;
}
public long getDomainId() {
return domainId;
@Override
public long getResourceId() {
return resourceId;
}
public String getName() {
@ -63,6 +64,11 @@ public class DomainDetailVO implements InternalIdentity {
return value;
}
@Override
public boolean isDisplay() {
return true;
}
public void setValue(String value) {
this.value = value;
}

View File

@ -20,8 +20,9 @@ import java.util.Map;
import com.cloud.domain.DomainDetailVO;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
public interface DomainDetailsDao extends GenericDao<DomainDetailVO, Long> {
public interface DomainDetailsDao extends GenericDao<DomainDetailVO, Long>, ResourceDetailsDao<DomainDetailVO> {
Map<String, String> findDetails(long domainId);
void persist(long domainId, Map<String, String> details);
@ -31,6 +32,4 @@ public interface DomainDetailsDao extends GenericDao<DomainDetailVO, Long> {
void deleteDetails(long domainId);
void update(long domainId, Map<String, String> details);
String getActualValue(DomainDetailVO domainDetailVO);
}

View File

@ -25,19 +25,17 @@ import javax.inject.Inject;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import com.cloud.domain.DomainDetailVO;
import com.cloud.domain.DomainVO;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> implements DomainDetailsDao, ScopedConfigStorage {
public class DomainDetailsDaoImpl extends ResourceDetailsDaoBase<DomainDetailVO> implements DomainDetailsDao, ScopedConfigStorage {
protected final SearchBuilder<DomainDetailVO> domainSearch;
@Inject
@ -47,14 +45,14 @@ public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> i
protected DomainDetailsDaoImpl() {
domainSearch = createSearchBuilder();
domainSearch.and("domainId", domainSearch.entity().getDomainId(), Op.EQ);
domainSearch.and("domainId", domainSearch.entity().getResourceId(), Op.EQ);
domainSearch.done();
}
@Override
public Map<String, String> findDetails(long domainId) {
QueryBuilder<DomainDetailVO> sc = QueryBuilder.create(DomainDetailVO.class);
sc.and(sc.entity().getDomainId(), Op.EQ, domainId);
sc.and(sc.entity().getResourceId(), Op.EQ, domainId);
List<DomainDetailVO> results = sc.list();
Map<String, String> details = new HashMap<String, String>(results.size());
for (DomainDetailVO r : results) {
@ -80,11 +78,16 @@ public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> i
@Override
public DomainDetailVO findDetail(long domainId, String name) {
QueryBuilder<DomainDetailVO> sc = QueryBuilder.create(DomainDetailVO.class);
sc.and(sc.entity().getDomainId(), Op.EQ, domainId);
sc.and(sc.entity().getResourceId(), Op.EQ, domainId);
sc.and(sc.entity().getName(), Op.EQ, name);
return sc.find();
}
// Persists a single detail row for the given domain. The 'display' flag is
// ignored: DomainDetailVO has no display column and always reports
// isDisplay() == true, so every domain detail is treated as displayable.
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
super.addDetail(new DomainDetailVO(resourceId, key, value));
}
@Override
public void deleteDetails(long domainId) {
SearchCriteria<DomainDetailVO> sc = domainSearch.create();
@ -129,13 +132,4 @@ public class DomainDetailsDaoImpl extends GenericDaoBase<DomainDetailVO, Long> i
}
return vo == null ? null : getActualValue(vo);
}
@Override
public String getActualValue(DomainDetailVO domainDetailVO) {
ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName());
if (configurationVO != null && configurationVO.isEncrypted()) {
return DBEncryptionUtil.decrypt(domainDetailVO.getValue());
}
return domainDetailVO.getValue();
}
}

View File

@ -43,7 +43,9 @@ public interface FirewallRulesDao extends GenericDao<FirewallRuleVO, Long> {
List<FirewallRuleVO> listStaticNatByVmId(long vmId);
List<FirewallRuleVO> listByIpPurposeAndProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, FirewallRule.Purpose purpose);
List<FirewallRuleVO> listByIpPurposePortsProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol, FirewallRule.Purpose purpose);
List<FirewallRuleVO> listByIpPurposeProtocolAndNotRevoked(long ipAddressId, FirewallRule.Purpose purpose, String protocol);
FirewallRuleVO findByRelatedId(long ruleId);

View File

@ -270,8 +270,25 @@ public class FirewallRulesDaoImpl extends GenericDaoBase<FirewallRuleVO, Long> i
}
@Override
public List<FirewallRuleVO> listByIpPurposeAndProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol,
FirewallRule.Purpose purpose) {
public List<FirewallRuleVO> listByIpPurposeProtocolAndNotRevoked(long ipAddressId, Purpose purpose, String protocol) {
SearchCriteria<FirewallRuleVO> sc = NotRevokedSearch.create();
sc.setParameters("ipId", ipAddressId);
sc.setParameters("state", State.Revoke);
if (purpose != null) {
sc.setParameters("purpose", purpose);
}
if (protocol != null) {
sc.setParameters("protocol", protocol);
}
return listBy(sc);
}
@Override
public List<FirewallRuleVO> listByIpPurposePortsProtocolAndNotRevoked(long ipAddressId, Integer startPort, Integer endPort, String protocol,
FirewallRule.Purpose purpose) {
SearchCriteria<FirewallRuleVO> sc = NotRevokedSearch.create();
sc.setParameters("ipId", ipAddressId);
sc.setParameters("state", State.Revoke);

View File

@ -27,4 +27,8 @@ public interface BucketDao extends GenericDao<BucketVO, Long> {
List<BucketVO> listByObjectStoreIdAndAccountId(long objectStoreId, long accountId);
List<BucketVO> searchByIds(Long[] ids);
Long countBucketsForAccount(long accountId);
Long calculateObjectStorageAllocationForAccount(long accountId);
}

View File

@ -16,8 +16,10 @@
// under the License.
package com.cloud.storage.dao;
import com.cloud.configuration.Resource;
import com.cloud.storage.BucketVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import org.springframework.stereotype.Component;
@ -31,6 +33,8 @@ public class BucketDaoImpl extends GenericDaoBase<BucketVO, Long> implements Buc
private SearchBuilder<BucketVO> searchFilteringStoreId;
private SearchBuilder<BucketVO> bucketSearch;
private GenericSearchBuilder<BucketVO, Long> CountBucketsByAccount;
private GenericSearchBuilder<BucketVO, SumCount> CalculateBucketsQuotaByAccount;
private static final String STORE_ID = "store_id";
private static final String STATE = "state";
@ -54,6 +58,20 @@ public class BucketDaoImpl extends GenericDaoBase<BucketVO, Long> implements Buc
bucketSearch.and("idIN", bucketSearch.entity().getId(), SearchCriteria.Op.IN);
bucketSearch.done();
CountBucketsByAccount = createSearchBuilder(Long.class);
CountBucketsByAccount.select(null, SearchCriteria.Func.COUNT, null);
CountBucketsByAccount.and(ACCOUNT_ID, CountBucketsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
CountBucketsByAccount.and(STATE, CountBucketsByAccount.entity().getState(), SearchCriteria.Op.NIN);
CountBucketsByAccount.and("removed", CountBucketsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
CountBucketsByAccount.done();
CalculateBucketsQuotaByAccount = createSearchBuilder(SumCount.class);
CalculateBucketsQuotaByAccount.select("sum", SearchCriteria.Func.SUM, CalculateBucketsQuotaByAccount.entity().getQuota());
CalculateBucketsQuotaByAccount.and(ACCOUNT_ID, CalculateBucketsQuotaByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
CalculateBucketsQuotaByAccount.and(STATE, CalculateBucketsQuotaByAccount.entity().getState(), SearchCriteria.Op.NIN);
CalculateBucketsQuotaByAccount.and("removed", CalculateBucketsQuotaByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
CalculateBucketsQuotaByAccount.done();
return true;
}
@Override
@ -79,4 +97,21 @@ public class BucketDaoImpl extends GenericDaoBase<BucketVO, Long> implements Buc
sc.setParameters("idIN", ids);
return search(sc, null, null, false);
}
@Override
public Long countBucketsForAccount(long accountId) {
SearchCriteria<Long> sc = CountBucketsByAccount.create();
sc.setParameters(ACCOUNT_ID, accountId);
sc.setParameters(STATE, BucketVO.State.Destroyed);
return customSearch(sc, null).get(0);
}
@Override
public Long calculateObjectStorageAllocationForAccount(long accountId) {
SearchCriteria<SumCount> sc = CalculateBucketsQuotaByAccount.create();
sc.setParameters(ACCOUNT_ID, accountId);
sc.setParameters(STATE, BucketVO.State.Destroyed);
Long totalQuota = customSearch(sc, null).get(0).sum;
return (totalQuota * Resource.ResourceType.bytesToGiB);
}
}

View File

@ -30,6 +30,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.utils.Pair;
public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase<StoragePoolDetailVO> implements StoragePoolDetailsDao, ScopedConfigStorage {
@Inject
@ -46,7 +48,7 @@ public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase<StoragePoo
@Override
public String getConfigValue(long id, String key) {
StoragePoolDetailVO vo = findDetail(id, key);
return vo == null ? null : vo.getValue();
return vo == null ? null : getActualValue(vo);
}
@Override
@ -57,4 +59,17 @@ public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase<StoragePoo
}
super.addDetail(new StoragePoolDetailVO(resourceId, key, value, display));
}
@Override
public Pair<Scope, Long> getParentScope(long id) {
StoragePoolVO pool = _storagePoolDao.findById(id);
if (pool != null) {
if (pool.getClusterId() != null) {
return new Pair<>(getScope().getParent(), pool.getClusterId());
} else {
return new Pair<>(ConfigKey.Scope.Zone, pool.getDataCenterId());
}
}
return null;
}
}

View File

@ -54,7 +54,7 @@ public class ConfigurationGroupsAggregator {
public void updateConfigurationGroups() {
LOG.debug("Updating configuration groups");
List<ConfigurationVO> configs = configDao.listAllIncludingRemoved();
List<ConfigurationVO> configs = configDao.searchPartialConfigurations();
if (CollectionUtils.isEmpty(configs)) {
return;
}

View File

@ -334,7 +334,7 @@ public class SystemVmTemplateRegistration {
}
};
public static boolean validateIfSeeded(String url, String path, String nfsVersion) {
public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) {
String filePath = null;
try {
filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString();
@ -347,6 +347,9 @@ public class SystemVmTemplateRegistration {
String templatePath = filePath + File.separator + partialDirPath;
File templateProps = new File(templatePath + "/template.properties");
if (templateProps.exists()) {
Pair<Long, Long> templateSizes = readTemplatePropertiesSizes(templatePath + "/template.properties");
updateSeededTemplateDetails(templDataStoreVO.getTemplateId(), templDataStoreVO.getDataStoreId(),
templateSizes.first(), templateSizes.second());
LOGGER.info("SystemVM template already seeded, skipping registration");
return true;
}
@ -542,6 +545,21 @@ public class SystemVmTemplateRegistration {
}
}
public void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) {
VMTemplateVO template = vmTemplateDao.findById(templateId);
template.setSize(size);
vmTemplateDao.update(template.getId(), template);
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(storeId, template.getId());
templateDataStoreVO.setSize(size);
templateDataStoreVO.setPhysicalSize(physicalSize);
templateDataStoreVO.setLastUpdated(new Date(DateUtil.currentGMTTime().getTime()));
boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO);
if (!updated) {
throw new CloudRuntimeException("Failed to update template_store_ref entry for seeded systemVM template");
}
}
public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) {
vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType);
}
@ -555,7 +573,7 @@ public class SystemVmTemplateRegistration {
}
}
private static void readTemplateProperties(String path, SystemVMTemplateDetails details) {
private static Pair<Long, Long> readTemplatePropertiesSizes(String path) {
File tmpFile = new File(path);
Long size = null;
Long physicalSize = 0L;
@ -574,8 +592,13 @@ public class SystemVmTemplateRegistration {
} catch (IOException ex) {
LOGGER.warn("Failed to read from template.properties", ex);
}
details.setSize(size);
details.setPhysicalSize(physicalSize);
return new Pair<>(size, physicalSize);
}
public static void readTemplateProperties(String path, SystemVMTemplateDetails details) {
Pair<Long, Long> templateSizes = readTemplatePropertiesSizes(path);
details.setSize(templateSizes.first());
details.setPhysicalSize(templateSizes.second());
}
private void updateTemplateTablesOnFailure(long templateId) {
@ -799,7 +822,7 @@ public class SystemVmTemplateRegistration {
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(storeUrlAndId.second(), templateId);
if (templateDataStoreVO != null) {
String installPath = templateDataStoreVO.getInstallPath();
if (validateIfSeeded(storeUrlAndId.first(), installPath, nfsVersion)) {
if (validateIfSeeded(templateDataStoreVO, storeUrlAndId.first(), installPath, nfsVersion)) {
continue;
}
}

View File

@ -87,6 +87,36 @@ public class DatabaseAccessObject {
return columnExists;
}
public String getColumnType(Connection conn, String tableName, String columnName) {
try (PreparedStatement pstmt = conn.prepareStatement(String.format("DESCRIBE %s %s", tableName, columnName));){
ResultSet rs = pstmt.executeQuery();
if (rs.next()) {
return rs.getString("Type");
}
} catch (SQLException e) {
logger.warn("Type for column {} can not be retrieved in {} ignoring exception: {}", columnName, tableName, e.getMessage());
}
return null;
}
public void addColumn(Connection conn, String tableName, String columnName, String columnDefinition) {
try (PreparedStatement pstmt = conn.prepareStatement(String.format("ALTER TABLE %s ADD COLUMN %s %s", tableName, columnName, columnDefinition));){
pstmt.executeUpdate();
logger.debug("Column {} is added successfully from the table {}", columnName, tableName);
} catch (SQLException e) {
logger.warn("Unable to add column {} to table {} due to exception", columnName, tableName, e);
}
}
public void changeColumn(Connection conn, String tableName, String oldColumnName, String newColumnName, String columnDefinition) {
try (PreparedStatement pstmt = conn.prepareStatement(String.format("ALTER TABLE %s CHANGE COLUMN %s %s %s", tableName, oldColumnName, newColumnName, columnDefinition));){
pstmt.executeUpdate();
logger.debug("Column {} is changed successfully to {} from the table {}", oldColumnName, newColumnName, tableName);
} catch (SQLException e) {
logger.warn("Unable to add column {} to {} from the table {} due to exception", oldColumnName, newColumnName, tableName, e);
}
}
public String generateIndexName(String tableName, String... columnName) {
return String.format("i_%s__%s", tableName, StringUtils.join(columnName, "__"));
}

View File

@ -58,4 +58,20 @@ public class DbUpgradeUtils {
}
}
public static String getTableColumnType(Connection conn, String tableName, String columnName) {
return dao.getColumnType(conn, tableName, columnName);
}
public static void addTableColumnIfNotExist(Connection conn, String tableName, String columnName, String columnDefinition) {
if (!dao.columnExists(conn, tableName, columnName)) {
dao.addColumn(conn, tableName, columnName, columnDefinition);
}
}
public static void changeTableColumnIfNotExist(Connection conn, String tableName, String oldColumnName, String newColumnName, String columnDefinition) {
if (dao.columnExists(conn, tableName, oldColumnName)) {
dao.changeColumn(conn, tableName, oldColumnName, newColumnName, columnDefinition);
}
}
}

View File

@ -17,6 +17,7 @@
package com.cloud.upgrade.dao;
import com.cloud.upgrade.SystemVmTemplateRegistration;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import java.io.InputStream;
@ -29,6 +30,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.framework.config.ConfigKey;
public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {
private SystemVmTemplateRegistration systemVmTemplateRegistration;
@ -61,6 +64,7 @@ public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgr
@Override
public void performDataMigration(Connection conn) {
updateKubernetesClusterNodeVersions(conn);
migrateConfigurationScopeToBitmask(conn);
}
@Override
@ -177,4 +181,35 @@ public class Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgr
}
}
}
protected void migrateConfigurationScopeToBitmask(Connection conn) {
String scopeDataType = DbUpgradeUtils.getTableColumnType(conn, "configuration", "scope");
logger.info("Data type of the column scope of table configuration is {}", scopeDataType);
if (!"varchar(255)".equals(scopeDataType)) {
return;
}
DbUpgradeUtils.addTableColumnIfNotExist(conn, "configuration", "new_scope", "BIGINT DEFAULT 0");
migrateExistingConfigurationScopeValues(conn);
DbUpgradeUtils.dropTableColumnsIfExist(conn, "configuration", List.of("scope"));
DbUpgradeUtils.changeTableColumnIfNotExist(conn, "configuration", "new_scope", "scope", "BIGINT NOT NULL DEFAULT 0 COMMENT 'Bitmask for scope(s) of this parameter'");
}
protected void migrateExistingConfigurationScopeValues(Connection conn) {
StringBuilder sql = new StringBuilder("UPDATE configuration\n" +
"SET new_scope = " +
" CASE ");
for (ConfigKey.Scope scope : ConfigKey.Scope.values()) {
sql.append(" WHEN scope = '").append(scope.name()).append("' THEN ").append(scope.getBitValue()).append(" ");
}
sql.append(" ELSE 0 " +
" END " +
"WHERE scope IS NOT NULL;");
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) {
pstmt.executeUpdate();
} catch (SQLException e) {
logger.error("Failed to migrate existing configuration scope values to bitmask", e);
throw new CloudRuntimeException(String.format("Failed to migrate existing configuration scope values to bitmask due to: %s", e.getMessage()));
}
}
}

View File

@ -23,18 +23,18 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.api.ResourceDetail;
@Entity
@Table(name = "account_details")
public class AccountDetailVO implements InternalIdentity {
public class AccountDetailVO implements ResourceDetail {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "account_id")
private long accountId;
private long resourceId;
@Column(name = "name")
private String name;
@ -46,13 +46,14 @@ public class AccountDetailVO implements InternalIdentity {
}
public AccountDetailVO(long accountId, String name, String value) {
this.accountId = accountId;
this.resourceId = accountId;
this.name = name;
this.value = value;
}
public long getAccountId() {
return accountId;
@Override
public long getResourceId() {
return resourceId;
}
public String getName() {
@ -63,6 +64,11 @@ public class AccountDetailVO implements InternalIdentity {
return value;
}
@Override
public boolean isDisplay() {
return true;
}
public void setValue(String value) {
this.value = value;
}

View File

@ -19,8 +19,9 @@ package com.cloud.user;
import java.util.Map;
import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDao;
public interface AccountDetailsDao extends GenericDao<AccountDetailVO, Long> {
public interface AccountDetailsDao extends GenericDao<AccountDetailVO, Long>, ResourceDetailsDao<AccountDetailVO> {
Map<String, String> findDetails(long accountId);
void persist(long accountId, Map<String, String> details);
@ -34,6 +35,4 @@ public interface AccountDetailsDao extends GenericDao<AccountDetailVO, Long> {
* they will get created
*/
void update(long accountId, Map<String, String> details);
String getActualValue(AccountDetailVO accountDetailVO);
}

View File

@ -27,22 +27,21 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import com.cloud.domain.DomainDetailVO;
import com.cloud.domain.DomainVO;
import com.cloud.domain.dao.DomainDao;
import com.cloud.domain.dao.DomainDetailsDao;
import com.cloud.user.dao.AccountDao;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.Pair;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long> implements AccountDetailsDao, ScopedConfigStorage {
public class AccountDetailsDaoImpl extends ResourceDetailsDaoBase<AccountDetailVO> implements AccountDetailsDao, ScopedConfigStorage {
protected final SearchBuilder<AccountDetailVO> accountSearch;
@Inject
@ -56,16 +55,16 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
protected AccountDetailsDaoImpl() {
accountSearch = createSearchBuilder();
accountSearch.and("accountId", accountSearch.entity().getAccountId(), Op.EQ);
accountSearch.and("accountId", accountSearch.entity().getResourceId(), Op.EQ);
accountSearch.done();
}
@Override
public Map<String, String> findDetails(long accountId) {
QueryBuilder<AccountDetailVO> sc = QueryBuilder.create(AccountDetailVO.class);
sc.and(sc.entity().getAccountId(), Op.EQ, accountId);
sc.and(sc.entity().getResourceId(), Op.EQ, accountId);
List<AccountDetailVO> results = sc.list();
Map<String, String> details = new HashMap<String, String>(results.size());
Map<String, String> details = new HashMap<>(results.size());
for (AccountDetailVO r : results) {
details.put(r.getName(), r.getValue());
}
@ -89,11 +88,16 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
@Override
public AccountDetailVO findDetail(long accountId, String name) {
QueryBuilder<AccountDetailVO> sc = QueryBuilder.create(AccountDetailVO.class);
sc.and(sc.entity().getAccountId(), Op.EQ, accountId);
sc.and(sc.entity().getResourceId(), Op.EQ, accountId);
sc.and(sc.entity().getName(), Op.EQ, name);
return sc.find();
}
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
super.addDetail(new AccountDetailVO(resourceId, key, value));
}
@Override
public void deleteDetails(long accountId) {
SearchCriteria<AccountDetailVO> sc = accountSearch.create();
@ -155,11 +159,11 @@ public class AccountDetailsDaoImpl extends GenericDaoBase<AccountDetailVO, Long>
}
@Override
public String getActualValue(AccountDetailVO accountDetailVO) {
ConfigurationVO configurationVO = _configDao.findByName(accountDetailVO.getName());
if (configurationVO != null && configurationVO.isEncrypted()) {
return DBEncryptionUtil.decrypt(accountDetailVO.getValue());
public Pair<Scope, Long> getParentScope(long id) {
Account account = _accountDao.findById(id);
if (account == null) {
return null;
}
return accountDetailVO.getValue();
return new Pair<>(getScope().getParent(), account.getDomainId());
}
}

View File

@ -58,15 +58,19 @@ public class BackupScheduleVO implements BackupSchedule {
@Column(name = "async_job_id")
Long asyncJobId;
@Column(name = "max_backups")
Integer maxBackups = 0;
public BackupScheduleVO() {
}
public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp) {
public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp, Integer maxBackups) {
this.vmId = vmId;
this.scheduleType = (short) scheduleType.ordinal();
this.schedule = schedule;
this.timezone = timezone;
this.scheduledTimestamp = scheduledTimestamp;
this.maxBackups = maxBackups;
}
@Override
@ -128,4 +132,12 @@ public class BackupScheduleVO implements BackupSchedule {
public void setAsyncJobId(Long asyncJobId) {
this.asyncJobId = asyncJobId;
}
public Integer getMaxBackups() {
return maxBackups;
}
public void setMaxBackups(Integer maxBackups) {
this.maxBackups = maxBackups;
}
}

View File

@ -88,6 +88,9 @@ public class BackupVO implements Backup {
@Column(name = "zone_id")
private long zoneId;
@Column(name = "backup_interval_type")
private short backupIntervalType;
@Column(name = "backed_volumes", length = 65535)
protected String backedUpVolumes;
@ -208,6 +211,14 @@ public class BackupVO implements Backup {
this.zoneId = zoneId;
}
public short getBackupIntervalType() {
return backupIntervalType;
}
public void setBackupIntervalType(short backupIntervalType) {
this.backupIntervalType = backupIntervalType;
}
@Override
public Class<?> getEntityType() {
return Backup.class;

View File

@ -35,5 +35,10 @@ public interface BackupDao extends GenericDao<BackupVO, Long> {
List<Backup> syncBackups(Long zoneId, Long vmId, List<Backup> externalBackups);
BackupVO getBackupVO(Backup backup);
List<Backup> listByOfferingId(Long backupOfferingId);
List<BackupVO> listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType);
BackupResponse newBackupResponse(Backup backup);
public Long countBackupsForAccount(long accountId);
public Long calculateBackupStorageForAccount(long accountId);
}

View File

@ -24,6 +24,7 @@ import java.util.Objects;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import com.cloud.utils.db.GenericSearchBuilder;
import org.apache.cloudstack.api.response.BackupResponse;
import org.apache.cloudstack.backup.Backup;
import org.apache.cloudstack.backup.BackupOffering;
@ -60,6 +61,9 @@ public class BackupDaoImpl extends GenericDaoBase<BackupVO, Long> implements Bac
BackupOfferingDao backupOfferingDao;
private SearchBuilder<BackupVO> backupSearch;
private GenericSearchBuilder<BackupVO, Long> CountBackupsByAccount;
private GenericSearchBuilder<BackupVO, SumCount> CalculateBackupStorageByAccount;
private SearchBuilder<BackupVO> ListBackupsByVMandIntervalType;
public BackupDaoImpl() {
}
@ -72,6 +76,27 @@ public class BackupDaoImpl extends GenericDaoBase<BackupVO, Long> implements Bac
backupSearch.and("backup_offering_id", backupSearch.entity().getBackupOfferingId(), SearchCriteria.Op.EQ);
backupSearch.and("zone_id", backupSearch.entity().getZoneId(), SearchCriteria.Op.EQ);
backupSearch.done();
CountBackupsByAccount = createSearchBuilder(Long.class);
CountBackupsByAccount.select(null, SearchCriteria.Func.COUNT, null);
CountBackupsByAccount.and("account", CountBackupsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
CountBackupsByAccount.and("status", CountBackupsByAccount.entity().getStatus(), SearchCriteria.Op.NIN);
CountBackupsByAccount.and("removed", CountBackupsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
CountBackupsByAccount.done();
CalculateBackupStorageByAccount = createSearchBuilder(SumCount.class);
CalculateBackupStorageByAccount.select("sum", SearchCriteria.Func.SUM, CalculateBackupStorageByAccount.entity().getSize());
CalculateBackupStorageByAccount.and("account", CalculateBackupStorageByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
CalculateBackupStorageByAccount.and("status", CalculateBackupStorageByAccount.entity().getStatus(), SearchCriteria.Op.NIN);
CalculateBackupStorageByAccount.and("removed", CalculateBackupStorageByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
CalculateBackupStorageByAccount.done();
ListBackupsByVMandIntervalType = createSearchBuilder();
ListBackupsByVMandIntervalType.and("vmId", ListBackupsByVMandIntervalType.entity().getVmId(), SearchCriteria.Op.EQ);
ListBackupsByVMandIntervalType.and("intervalType", ListBackupsByVMandIntervalType.entity().getBackupIntervalType(), SearchCriteria.Op.EQ);
ListBackupsByVMandIntervalType.and("status", ListBackupsByVMandIntervalType.entity().getStatus(), SearchCriteria.Op.EQ);
ListBackupsByVMandIntervalType.and("removed", ListBackupsByVMandIntervalType.entity().getRemoved(), SearchCriteria.Op.NULL);
ListBackupsByVMandIntervalType.done();
}
@Override
@ -142,6 +167,31 @@ public class BackupDaoImpl extends GenericDaoBase<BackupVO, Long> implements Bac
return listByVmId(zoneId, vmId);
}
@Override
public Long countBackupsForAccount(long accountId) {
SearchCriteria<Long> sc = CountBackupsByAccount.create();
sc.setParameters("account", accountId);
sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged);
return customSearch(sc, null).get(0);
}
@Override
public Long calculateBackupStorageForAccount(long accountId) {
SearchCriteria<SumCount> sc = CalculateBackupStorageByAccount.create();
sc.setParameters("account", accountId);
sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged);
return customSearch(sc, null).get(0).sum;
}
@Override
public List<BackupVO> listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType) {
SearchCriteria<BackupVO> sc = ListBackupsByVMandIntervalType.create();
sc.setParameters("vmId", vmId);
sc.setParameters("type", backupType.ordinal());
sc.setParameters("status", Backup.Status.BackedUp);
return listBy(sc, null);
}
@Override
public BackupResponse newBackupResponse(Backup backup) {
VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId());

View File

@ -97,6 +97,7 @@ public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long
response.setIntervalType(schedule.getScheduleType());
response.setSchedule(schedule.getSchedule());
response.setTimezone(schedule.getTimezone());
response.setMaxBakups(schedule.getMaxBackups());
response.setObjectName("backupschedule");
return response;
}

View File

@ -53,7 +53,7 @@ public interface ResourceDetailsDao<R extends ResourceDetail> extends GenericDao
* Removes all details for the resource specified
* @param resourceId
*/
public void removeDetails(long resourceId);
void removeDetails(long resourceId);
/**
@ -76,7 +76,7 @@ public interface ResourceDetailsDao<R extends ResourceDetail> extends GenericDao
* @param resourceId
* @return list of details each implementing ResourceDetail interface
*/
public List<R> listDetails(long resourceId);
List<R> listDetails(long resourceId);
/**
* List details for resourceId having display field = forDisplay value passed in
@ -84,21 +84,23 @@ public interface ResourceDetailsDao<R extends ResourceDetail> extends GenericDao
* @param forDisplay
* @return
*/
public List<R> listDetails(long resourceId, boolean forDisplay);
List<R> listDetails(long resourceId, boolean forDisplay);
public Map<String, String> listDetailsKeyPairs(long resourceId);
Map<String, String> listDetailsKeyPairs(long resourceId);
Map<String, String> listDetailsKeyPairs(long resourceId, List<String> keys);
public Map<String, String> listDetailsKeyPairs(long resourceId, boolean forDisplay);
Map<String, String> listDetailsKeyPairs(long resourceId, boolean forDisplay);
Map<String, Boolean> listDetailsVisibility(long resourceId);
public void saveDetails(List<R> details);
void saveDetails(List<R> details);
public void addDetail(long resourceId, String key, String value, boolean display);
void addDetail(long resourceId, String key, String value, boolean display);
public List<Long> findResourceIdsByNameAndValueIn(String name, Object[] values);
List<Long> findResourceIdsByNameAndValueIn(String name, Object[] values);
public long batchExpungeForResources(List<Long> ids, Long batchSize);
long batchExpungeForResources(List<Long> ids, Long batchSize);
String getActualValue(ResourceDetail resourceDetail);
}

View File

@ -21,9 +21,9 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.cloudstack.api.ResourceDetail;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
import com.cloud.utils.db.SearchBuilder;
@ -31,7 +31,17 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.api.ResourceDetail;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import javax.inject.Inject;
public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends GenericDaoBase<R, Long> implements ResourceDetailsDao<R> {
@Inject
private ConfigurationDao configDao;
private SearchBuilder<R> AllFieldsSearch;
public ResourceDetailsDaoBase() {
@ -76,8 +86,7 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
sc.setParameters("value", value);
}
List<R> results = search(sc, null);
return results;
return search(sc, null);
}
public Map<String, String> listDetailsKeyPairs(long resourceId) {
@ -85,7 +94,7 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
sc.setParameters("resourceId", resourceId);
List<R> results = search(sc, null);
Map<String, String> details = new HashMap<String, String>(results.size());
Map<String, String> details = new HashMap<>(results.size());
for (R result : results) {
details.put(result.getName(), result.getValue());
}
@ -122,8 +131,7 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
SearchCriteria<R> sc = AllFieldsSearch.create();
sc.setParameters("resourceId", resourceId);
List<R> results = search(sc, null);
return results;
return search(sc, null);
}
public void removeDetails(long resourceId) {
@ -185,7 +193,7 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
sc.setParameters("display", forDisplay);
List<R> results = search(sc, null);
Map<String, String> details = new HashMap<String, String>(results.size());
Map<String, String> details = new HashMap<>(results.size());
for (R result : results) {
details.put(result.getName(), result.getValue());
}
@ -197,8 +205,7 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
sc.setParameters("resourceId", resourceId);
sc.setParameters("display", forDisplay);
List<R> results = search(sc, null);
return results;
return search(sc, null);
}
@Override
@ -230,4 +237,13 @@ public abstract class ResourceDetailsDaoBase<R extends ResourceDetail> extends G
sc.setParameters("ids", ids.toArray());
return batchExpunge(sc, batchSize);
}
@Override
public String getActualValue(ResourceDetail resourceDetail) {
ConfigurationVO configurationVO = configDao.findByName(resourceDetail.getName());
if (configurationVO != null && configurationVO.isEncrypted()) {
return DBEncryptionUtil.decrypt(resourceDetail.getValue());
}
return resourceDetail.getValue();
}
}

View File

@ -20,6 +20,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
@ -27,6 +29,8 @@ import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase;
import org.springframework.stereotype.Component;
import com.cloud.storage.ImageStore;
import com.cloud.utils.Pair;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
@ -36,6 +40,8 @@ import com.cloud.utils.db.TransactionLegacy;
@Component
public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase<ImageStoreDetailVO> implements ImageStoreDetailsDao, ScopedConfigStorage {
@Inject
ImageStoreDao imageStoreDao;
protected final SearchBuilder<ImageStoreDetailVO> storeSearch;
@ -67,7 +73,7 @@ public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase<ImageStoreD
sc.setParameters("store", storeId);
List<ImageStoreDetailVO> details = listBy(sc);
Map<String, String> detailsMap = new HashMap<String, String>();
Map<String, String> detailsMap = new HashMap<>();
for (ImageStoreDetailVO detail : details) {
String name = detail.getName();
String value = detail.getValue();
@ -110,9 +116,24 @@ public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase<ImageStoreD
return vo == null ? null : vo.getValue();
}
/**
 * Looks up the detail row matching the given config key for this image store
 * and returns its usable value (decrypted when the matching configuration is
 * marked encrypted), or null when the detail is not set.
 */
@Override
public String getConfigValue(long id, ConfigKey<?> key) {
    ImageStoreDetailVO vo = findDetail(id, key.key());
    return vo == null ? null : getActualValue(vo);
}
/**
 * Persists a key/value detail for the given image store with the requested
 * display visibility.
 */
@Override
public void addDetail(long resourceId, String key, String value, boolean display) {
    super.addDetail(new ImageStoreDetailVO(resourceId, key, value, display));
}
/**
 * Resolves the parent configuration scope for an image store, paired with the
 * scope-owner id (the store's zone id). Returns null when no image store with
 * the given id exists.
 */
@Override
public Pair<Scope, Long> getParentScope(long id) {
    ImageStore store = imageStoreDao.findById(id);
    if (store == null) {
        return null;
    }
    // getScope() presumably yields Scope.ImageStore, whose parent in the scope
    // hierarchy is Zone — hence the zone (data center) id as the owner id.
    return new Pair<>(getScope().getParent(), store.getDataCenterId());
}
}

View File

@ -755,7 +755,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (keyword != null) {
SearchCriteria<StoragePoolVO> ssc = createSearchCriteria();
ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%");
ssc.addOr("poolType", SearchCriteria.Op.LIKE, new Storage.StoragePoolType("%" + keyword + "%"));
ssc.addOr("poolType", SearchCriteria.Op.LIKE, "%" + keyword + "%");
sc.addAnd("name", SearchCriteria.Op.SC, ssc);
}

View File

@ -24,6 +24,14 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user', 'api_key_access', 'boolean DE
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" ');
CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" ');
-- Create a new group for Usage Server related configurations
INSERT INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9);
UPDATE `cloud`.`configuration_subgroup` set `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server'), `precedence` = 1 WHERE `name`='Usage';
UPDATE `cloud`.`configuration` SET `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server') where `subgroup_id` = (SELECT `id` FROM `cloud`.`configuration_subgroup` WHERE `name` = 'Usage');
-- Update the description to indicate this setting applies only to volume snapshots on running instances
UPDATE `cloud`.`configuration` SET `description`='whether volume snapshot is enabled on running instances on KVM hosts' WHERE `name`='kvm.snapshot.enabled';
-- Modify index for mshost_peer
DELETE FROM `cloud`.`mshost_peer`;
CALL `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`('cloud.mshost_peer','fk_mshost_peer__owner_mshost');

View File

@ -19,6 +19,10 @@
-- Schema upgrade from 4.20.1.0 to 4.21.0.0
--;
-- Add columns max_backup and backup_interval_type to backup table
ALTER TABLE `cloud`.`backup_schedule` ADD COLUMN `max_backups` int(8) default NULL COMMENT 'maximum number of backups to maintain';
ALTER TABLE `cloud`.`backups` ADD COLUMN `backup_interval_type` int(5) COMMENT 'type of backup, e.g. manual, recurring - hourly, daily, weekly or monthly';
-- Add console_endpoint_creator_address column to cloud.console_session table
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.console_session', 'console_endpoint_creator_address', 'VARCHAR(45)');

View File

@ -68,6 +68,14 @@ select
`primary_storage_count`.`count` AS `primaryStorageTotal`,
`secondary_storage_limit`.`max` AS `secondaryStorageLimit`,
`secondary_storage_count`.`count` AS `secondaryStorageTotal`,
`backup_limit`.`max` AS `backupLimit`,
`backup_count`.`count` AS `backupTotal`,
`backup_storage_limit`.`max` AS `backupStorageLimit`,
`backup_storage_count`.`count` AS `backupStorageTotal`,
`bucket_limit`.`max` AS `bucketLimit`,
`bucket_count`.`count` AS `bucketTotal`,
`object_storage_limit`.`max` AS `objectStorageLimit`,
`object_storage_count`.`count` AS `objectStorageTotal`,
`async_job`.`id` AS `job_id`,
`async_job`.`uuid` AS `job_uuid`,
`async_job`.`job_status` AS `job_status`,
@ -160,6 +168,30 @@ from
`cloud`.`resource_count` secondary_storage_count ON account.id = secondary_storage_count.account_id
and secondary_storage_count.type = 'secondary_storage'
left join
`cloud`.`resource_limit` backup_limit ON account.id = backup_limit.account_id
and backup_limit.type = 'backup'
left join
`cloud`.`resource_count` backup_count ON account.id = backup_count.account_id
and backup_count.type = 'backup'
left join
`cloud`.`resource_limit` backup_storage_limit ON account.id = backup_storage_limit.account_id
and backup_storage_limit.type = 'backup_storage'
left join
`cloud`.`resource_count` backup_storage_count ON account.id = backup_storage_count.account_id
and backup_storage_count.type = 'backup_storage'
left join
`cloud`.`resource_limit` bucket_limit ON account.id = bucket_limit.account_id
and bucket_limit.type = 'bucket'
left join
`cloud`.`resource_count` bucket_count ON account.id = bucket_count.account_id
and bucket_count.type = 'bucket'
left join
`cloud`.`resource_limit` object_storage_limit ON account.id = object_storage_limit.account_id
and object_storage_limit.type = 'object_storage'
left join
`cloud`.`resource_count` object_storage_count ON account.id = object_storage_count.account_id
and object_storage_count.type = 'object_storage'
left join
`cloud`.`async_job` ON async_job.instance_id = account.id
and async_job.instance_type = 'Account'
and async_job.job_status = 0;

View File

@ -58,7 +58,15 @@ select
`primary_storage_limit`.`max` AS `primaryStorageLimit`,
`primary_storage_count`.`count` AS `primaryStorageTotal`,
`secondary_storage_limit`.`max` AS `secondaryStorageLimit`,
`secondary_storage_count`.`count` AS `secondaryStorageTotal`
`secondary_storage_count`.`count` AS `secondaryStorageTotal`,
`backup_limit`.`max` AS `backupLimit`,
`backup_count`.`count` AS `backupTotal`,
`backup_storage_limit`.`max` AS `backupStorageLimit`,
`backup_storage_count`.`count` AS `backupStorageTotal`,
`bucket_limit`.`max` AS `bucketLimit`,
`bucket_count`.`count` AS `bucketTotal`,
`object_storage_limit`.`max` AS `objectStorageLimit`,
`object_storage_count`.`count` AS `objectStorageTotal`
from
`cloud`.`domain`
left join
@ -132,4 +140,28 @@ from
and secondary_storage_limit.type = 'secondary_storage'
left join
`cloud`.`resource_count` secondary_storage_count ON domain.id = secondary_storage_count.domain_id
and secondary_storage_count.type = 'secondary_storage';
and secondary_storage_count.type = 'secondary_storage'
left join
`cloud`.`resource_limit` backup_limit ON domain.id = backup_limit.domain_id
and backup_limit.type = 'backup'
left join
`cloud`.`resource_count` backup_count ON domain.id = backup_count.domain_id
and backup_count.type = 'backup'
left join
`cloud`.`resource_limit` backup_storage_limit ON domain.id = backup_storage_limit.domain_id
and backup_storage_limit.type = 'backup_storage'
left join
`cloud`.`resource_count` backup_storage_count ON domain.id = backup_storage_count.domain_id
and backup_storage_count.type = 'backup_storage'
left join
`cloud`.`resource_limit` bucket_limit ON domain.id = bucket_limit.domain_id
and bucket_limit.type = 'bucket'
left join
`cloud`.`resource_count` bucket_count ON domain.id = bucket_count.domain_id
and bucket_count.type = 'bucket'
left join
`cloud`.`resource_limit` object_storage_limit ON domain.id = object_storage_limit.domain_id
and object_storage_limit.type = 'object_storage'
left join
`cloud`.`resource_count` object_storage_count ON domain.id = object_storage_count.domain_id
and object_storage_count.type = 'object_storage';

View File

@ -0,0 +1,76 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade;
import static org.mockito.Mockito.when;
import java.util.Collections;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao;
import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao;
import org.apache.cloudstack.framework.config.impl.ConfigurationSubGroupVO;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
import org.apache.logging.log4j.Logger;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
/**
 * Unit tests for {@code ConfigurationGroupsAggregator}, which re-assigns
 * configuration rows to the group/subgroup derived from their names.
 */
@RunWith(MockitoJUnitRunner.class)
public class ConfigurationGroupsAggregatorTest {

    @InjectMocks
    private ConfigurationGroupsAggregator configurationGroupsAggregator = new ConfigurationGroupsAggregator();

    @Mock
    private ConfigurationDao configDao;

    @Mock
    private ConfigurationGroupDao configGroupDao;

    @Mock
    private ConfigurationSubGroupDao configSubGroupDao;

    @Mock
    private Logger logger;

    @Test
    public void testUpdateConfigurationGroups() {
        // A config starting in group 1 / subgroup 1; the aggregator should re-home it.
        ConfigurationVO config = new ConfigurationVO("Advanced", "DEFAULT", "management-server",
                "test.config.name", null, "description");
        config.setGroupId(1L);
        config.setSubGroupId(1L);
        when(configDao.searchPartialConfigurations()).thenReturn(Collections.singletonList(config));
        // Subgroup lookup is stubbed for "name" — presumably derived from a token of
        // the config name "test.config.name"; confirm against the aggregator's logic.
        ConfigurationSubGroupVO configSubGroup = Mockito.mock(ConfigurationSubGroupVO.class);
        when(configSubGroupDao.findByName("name")).thenReturn(configSubGroup);
        Mockito.when(configSubGroup.getId()).thenReturn(10L);
        Mockito.when(configSubGroup.getGroupId()).thenReturn(5L);
        configurationGroupsAggregator.updateConfigurationGroups();
        // The config must adopt the matched subgroup's group (5) and subgroup (10),
        // be persisted exactly once, and the start/finish of the pass must be logged.
        Assert.assertEquals(Long.valueOf(5), config.getGroupId());
        Assert.assertEquals(Long.valueOf(10), config.getSubGroupId());
        Mockito.verify(configDao, Mockito.times(1)).persist(config);
        Mockito.verify(logger, Mockito.times(1)).debug("Updating configuration groups");
        Mockito.verify(logger, Mockito.times(1)).debug("Successfully updated configuration groups.");
    }
}

View File

@ -511,4 +511,57 @@ public class DatabaseAccessObjectTest {
verify(loggerMock, times(1)).warn(anyString(), eq(sqlException));
}
@Test
public void testGetColumnType() throws Exception {
    // getColumnType() is expected to issue a DESCRIBE statement and read the
    // "Type" field of the first result row.
    when(connectionMock.prepareStatement(contains("DESCRIBE"))).thenReturn(preparedStatementMock);
    when(preparedStatementMock.executeQuery()).thenReturn(resultSetMock);
    when(resultSetMock.next()).thenReturn(true);
    when(resultSetMock.getString("Type")).thenReturn("type");
    Connection conn = connectionMock;
    String tableName = "tableName";
    String columnName = "columnName";
    Assert.assertEquals("type", dao.getColumnType(conn, tableName, columnName));
    // The statement must be prepared, executed and closed exactly once,
    // and the success path must not emit debug logging.
    verify(connectionMock, times(1)).prepareStatement(anyString());
    verify(preparedStatementMock, times(1)).executeQuery();
    verify(preparedStatementMock, times(1)).close();
    verify(loggerMock, times(0)).debug(anyString());
}
@Test
public void testAddColumn() throws Exception {
    // addColumn() is expected to issue an "ADD COLUMN" update and close the statement.
    when(connectionMock.prepareStatement(contains("ADD COLUMN"))).thenReturn(preparedStatementMock);
    when(preparedStatementMock.executeUpdate()).thenReturn(1);
    Connection conn = connectionMock;
    String tableName = "tableName";
    String columnName = "columnName";
    String columnType = "columnType";
    dao.addColumn(conn, tableName, columnName, columnType);
    verify(connectionMock, times(1)).prepareStatement(anyString());
    verify(preparedStatementMock, times(1)).executeUpdate();
    verify(preparedStatementMock, times(1)).close();
}
@Test
public void testChangeColumn() throws Exception {
    // changeColumn() is expected to issue a "CHANGE COLUMN" update (rename plus
    // new definition) and close the statement.
    when(connectionMock.prepareStatement(contains("CHANGE COLUMN"))).thenReturn(preparedStatementMock);
    when(preparedStatementMock.executeUpdate()).thenReturn(1);
    Connection conn = connectionMock;
    String tableName = "tableName";
    String columnName = "columnName";
    String newColumnName = "columnName2";
    String columnDefinition = "columnDefinition";
    dao.changeColumn(conn, tableName, columnName, newColumnName, columnDefinition);
    verify(connectionMock, times(1)).prepareStatement(anyString());
    verify(preparedStatementMock, times(1)).executeUpdate();
    verify(preparedStatementMock, times(1)).close();
}
}

View File

@ -159,4 +159,33 @@ public class DbUpgradeUtilsTest {
verify(daoMock, times(1)).columnExists(conn, tableName, column3);
verify(daoMock, times(1)).dropColumn(conn, tableName, column3);
}
@Test
public void testAddTableColumnIfNotExist() throws Exception {
    Connection conn = connectionMock;
    String tableName = "tableName";
    String columnName = "columnName";
    String columnDefinition = "columnDefinition";
    // Column is absent, so the helper must delegate to dao.addColumn().
    when(daoMock.columnExists(conn, tableName, columnName)).thenReturn(false);
    DbUpgradeUtils.addTableColumnIfNotExist(conn, tableName, columnName, columnDefinition);
    verify(daoMock, times(1)).columnExists(conn, tableName, columnName);
    verify(daoMock, times(1)).addColumn(conn, tableName, columnName, columnDefinition);
}
@Test
public void testChangeTableColumnIfNotExist() throws Exception {
    Connection conn = connectionMock;
    String tableName = "tableName";
    String oldColumnName = "oldColumnName";
    String newColumnName = "newColumnName";
    String columnDefinition = "columnDefinition";
    // NOTE(review): despite the "IfNotExist" name, the stub makes the OLD column
    // exist and expects changeColumn() to run — i.e. rename when the old column
    // is present; confirm the helper's intended semantics.
    when(daoMock.columnExists(conn, tableName, oldColumnName)).thenReturn(true);
    DbUpgradeUtils.changeTableColumnIfNotExist(conn, tableName, oldColumnName, newColumnName, columnDefinition);
    verify(daoMock, times(1)).columnExists(conn, tableName, oldColumnName);
    verify(daoMock, times(1)).changeColumn(conn, tableName, oldColumnName, newColumnName, columnDefinition);
}
}

View File

@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;
import static org.mockito.Mockito.when;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.utils.db.TransactionLegacy;
/**
 * Unit test for the 4.20.1.0 -> 4.21.0.0 schema upgrade step that migrates the
 * legacy varchar {@code configuration.scope} column to a bitmask column.
 */
@RunWith(MockitoJUnitRunner.class)
public class Upgrade42010to42100Test {

    @Spy
    Upgrade42010to42100 upgrade;

    @Mock
    private Connection conn;

    @Test
    public void testPerformDataMigration() throws SQLException {
        // Static-mock DbUpgradeUtils so the "scope" column reads as the legacy
        // varchar form, exercising the string -> bitmask migration path.
        try (MockedStatic<DbUpgradeUtils> ignored = Mockito.mockStatic(DbUpgradeUtils.class)) {
            DbUpgradeUtils dbUpgradeUtils = Mockito.mock(DbUpgradeUtils.class);
            when(dbUpgradeUtils.getTableColumnType(conn, "configuration", "scope")).thenReturn("varchar(255)");
            try (MockedStatic<TransactionLegacy> ignored2 = Mockito.mockStatic(TransactionLegacy.class)) {
                TransactionLegacy txn = Mockito.mock(TransactionLegacy.class);
                when(TransactionLegacy.currentTxn()).thenReturn(txn);
                PreparedStatement pstmt = Mockito.mock(PreparedStatement.class);
                // Expected UPDATE mapping each legacy scope string to its bit value
                // (Global=1, Zone=2, ..., Account=128); must match the upgrade verbatim,
                // since the stub only matches this exact SQL text.
                String sql = "UPDATE configuration\n" +
                        "SET new_scope =" +
                        " CASE" +
                        " WHEN scope = 'Global' THEN 1" +
                        " WHEN scope = 'Zone' THEN 2" +
                        " WHEN scope = 'Cluster' THEN 4" +
                        " WHEN scope = 'StoragePool' THEN 8" +
                        " WHEN scope = 'ManagementServer' THEN 16" +
                        " WHEN scope = 'ImageStore' THEN 32" +
                        " WHEN scope = 'Domain' THEN 64" +
                        " WHEN scope = 'Account' THEN 128" +
                        " ELSE 0" +
                        " END WHERE scope IS NOT NULL;";
                when(txn.prepareAutoCloseStatement(sql)).thenReturn(pstmt);
                upgrade.performDataMigration(conn);
                Mockito.verify(pstmt, Mockito.times(1)).executeUpdate();
            }
        }
    }
}

View File

@ -40,6 +40,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
@ -1534,6 +1535,16 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
verifyFormat(templateInfo.getFormat());
}
// Handle the case where the storage system can clone a volume directly from a template.
String canCloneVolumeFromTemplate = templateInfo.getDataStore().getDriver().getCapabilities().get("CAN_CLONE_VOLUME_FROM_TEMPLATE");
if (canCloneVolumeFromTemplate != null && canCloneVolumeFromTemplate.toLowerCase().equals("true")) {
DataStoreDriver driver = templateInfo.getDataStore().getDriver();
driver.createAsync(volumeInfo.getDataStore(), volumeInfo, null);
volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore());
driver.copyAsync(templateInfo, volumeInfo, null);
return;
}
HostVO hostVO = null;
final boolean computeClusterSupportsVolumeClone;
@ -1641,7 +1652,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
errMsg = "Create volume from template failed: " + ex.getMessage();
}
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (copyCmdAnswer == null) {
@ -2634,7 +2645,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy {
catch (Exception ex) {
errMsg = ex.getMessage();
throw new CloudRuntimeException(errMsg);
throw new CloudRuntimeException(errMsg, ex);
}
finally {
if (copyCmdAnswer == null) {

View File

@ -20,7 +20,6 @@ package org.apache.cloudstack.storage.image.manager;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -180,28 +179,14 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager,
@Override
public DataStore getImageStoreWithFreeCapacity(List<DataStore> imageStores) {
if (imageStores.size() > 1) {
imageStores.sort(new Comparator<DataStore>() { // Sort data stores based on free capacity
@Override
public int compare(DataStore store1, DataStore store2) {
return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1),
_statsCollector.imageStoreCurrentFreeCapacity(store2));
}
});
for (DataStore imageStore : imageStores) {
// Return image store if used percentage is less than threshold value i.e. 90%.
if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
return imageStore;
}
}
} else if (imageStores.size() == 1) {
if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) {
return imageStores.get(0);
imageStores.sort((store1, store2) -> Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store2),
_statsCollector.imageStoreCurrentFreeCapacity(store1)));
for (DataStore imageStore : imageStores) {
if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
return imageStore;
}
}
// No store with space found
logger.error(String.format("Can't find an image storage in zone with less than %d usage",
logger.error(String.format("Could not find an image storage in zone with less than %d usage",
Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100)));
return null;
}
@ -209,23 +194,11 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager,
@Override
public List<DataStore> orderImageStoresOnFreeCapacity(List<DataStore> imageStores) {
List<DataStore> stores = new ArrayList<>();
if (imageStores.size() > 1) {
imageStores.sort(new Comparator<DataStore>() { // Sort data stores based on free capacity
@Override
public int compare(DataStore store1, DataStore store2) {
return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1),
_statsCollector.imageStoreCurrentFreeCapacity(store2));
}
});
for (DataStore imageStore : imageStores) {
// Return image store if used percentage is less than threshold value i.e. 90%.
if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
stores.add(imageStore);
}
}
} else if (imageStores.size() == 1) {
if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) {
stores.add(imageStores.get(0));
imageStores.sort((store1, store2) -> Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store2),
_statsCollector.imageStoreCurrentFreeCapacity(store1)));
for (DataStore imageStore : imageStores) {
if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) {
stores.add(imageStore);
}
}
return stores;

View File

@ -16,6 +16,9 @@
// under the License.
package org.apache.cloudstack.storage.image.manager;
import com.cloud.server.StatsCollector;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.junit.Assert;
@ -26,14 +29,22 @@ import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@RunWith(MockitoJUnitRunner.class)
public class ImageStoreProviderManagerImplTest {
@Mock
ImageStoreDao imageStoreDao;
@Mock
StatsCollector statsCollectorMock;
@InjectMocks
ImageStoreProviderManagerImpl imageStoreProviderManager = new ImageStoreProviderManagerImpl();
@Test
public void testGetImageStoreZoneId() {
final long storeId = 1L;
@ -44,4 +55,56 @@ public class ImageStoreProviderManagerImplTest {
long value = imageStoreProviderManager.getImageStoreZoneId(storeId);
Assert.assertEquals(zoneId, value);
}
/**
 * Builds four mocked image stores with free capacities 100..400 where only
 * store2 (200) and store3 (300) can report "enough capacity" (and only when
 * {@code hasStoragesWithEnoughCapacity} is true). Returns the unordered input
 * list paired with the expected result: stores with enough capacity ordered by
 * most free space first (store3, then store2), or an empty list otherwise.
 */
private Pair<List<DataStore>, List<DataStore>> prepareUnorderedAndOrderedImageStoresForCapacityTests(boolean hasStoragesWithEnoughCapacity) {
    DataStore store1 = Mockito.mock(DataStore.class);
    Mockito.doReturn(100L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store1);
    Mockito.doReturn(false).when(statsCollectorMock).imageStoreHasEnoughCapacity(store1);
    DataStore store2 = Mockito.mock(DataStore.class);
    Mockito.doReturn(200L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store2);
    Mockito.doReturn(hasStoragesWithEnoughCapacity).when(statsCollectorMock).imageStoreHasEnoughCapacity(store2);
    DataStore store3 = Mockito.mock(DataStore.class);
    Mockito.doReturn(300L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store3);
    Mockito.doReturn(hasStoragesWithEnoughCapacity).when(statsCollectorMock).imageStoreHasEnoughCapacity(store3);
    DataStore store4 = Mockito.mock(DataStore.class);
    Mockito.doReturn(400L).when(statsCollectorMock).imageStoreCurrentFreeCapacity(store4);
    Mockito.doReturn(false).when(statsCollectorMock).imageStoreHasEnoughCapacity(store4);
    List<DataStore> unordered = Arrays.asList(store1, store2, store3, store4);
    List<DataStore> orderedAndEnoughCapacity = new ArrayList<>();
    if (hasStoragesWithEnoughCapacity) {
        orderedAndEnoughCapacity.add(store3);
        orderedAndEnoughCapacity.add(store2);
    }
    return new Pair<>(unordered, orderedAndEnoughCapacity);
}
@Test
public void getImageStoreWithFreeCapacityTestImageStoresWithEnoughCapacityExistReturnsImageStoreWithMostFreeCapacity() {
    // Expect the store with the most free capacity among those with enough capacity.
    Pair<List<DataStore>, List<DataStore>> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(true);
    DataStore result = imageStoreProviderManager.getImageStoreWithFreeCapacity(unorderedAndOrdered.first());
    Assert.assertEquals(unorderedAndOrdered.second().get(0), result);
}
@Test
public void getImageStoreWithFreeCapacityTestImageStoresWithEnoughCapacityDoNotExistReturnsNull() {
    // No store reports enough capacity, so the lookup must return null.
    Pair<List<DataStore>, List<DataStore>> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(false);
    DataStore result = imageStoreProviderManager.getImageStoreWithFreeCapacity(unorderedAndOrdered.first());
    Assert.assertNull(result);
}
@Test
public void orderImageStoresOnFreeCapacityTestReturnsImageStoresOrderedFromMostToLeast() {
    // The eligible stores must come back ordered from most to least free capacity.
    Pair<List<DataStore>, List<DataStore>> unorderedAndOrdered = prepareUnorderedAndOrderedImageStoresForCapacityTests(true);
    List<DataStore> result = imageStoreProviderManager.orderImageStoresOnFreeCapacity(unorderedAndOrdered.first());
    Assert.assertEquals(unorderedAndOrdered.second(), result);
}
}

View File

@ -622,7 +622,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
Thread.sleep(sleepTime * 1000);
} catch (InterruptedException e) {
logger.debug("waiting for template download been interrupted: " + e.toString());
logger.debug("waiting for template download been interrupted: " + e);
}
tries--;
}
@ -691,7 +691,6 @@ public class VolumeServiceImpl implements VolumeService {
}
_tmpltPoolDao.releaseFromLockTable(templatePoolRefId);
}
return;
}
protected Void managedCopyBaseImageCallback(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, ManagedCreateBaseImageContext<VolumeApiResult> context) {
@ -1039,7 +1038,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost));
throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary.getImage(), destHost), e);
}
templateOnPrimary.processEvent(Event.CopyingRequested);
@ -1161,7 +1160,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost));
throw new StorageAccessException(String.format("Unable to grant access to src template: %s on host: %s", srcTemplateOnPrimary, destHost), e);
}
_volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false);
@ -1408,7 +1407,7 @@ public class VolumeServiceImpl implements VolumeService {
try {
grantAccess(templateOnPrimary, destHost, destPrimaryDataStore);
} catch (Exception e) {
throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost));
throw new StorageAccessException(String.format("Unable to grant access to template: %s on host: %s", templateOnPrimary, destHost), e);
}
templateOnPrimary.processEvent(Event.CopyingRequested);

View File

@ -17,6 +17,9 @@
package org.apache.cloudstack.config;
import java.util.Date;
import java.util.List;
import org.apache.cloudstack.framework.config.ConfigKey;
/**
* Configuration represents one global configuration parameter for CloudStack.
@ -74,7 +77,9 @@ public interface Configuration {
* always global. A non-null value indicates that this parameter can be
* set at a certain organization level.
*/
String getScope();
int getScope();
List<ConfigKey.Scope> getScopes();
/**
* @return can the configuration parameter be changed without restarting the server.

View File

@ -18,6 +18,8 @@ package org.apache.cloudstack.framework.config;
import java.util.Set;
import com.cloud.utils.Pair;
/**
* ConfigDepot is a repository of configurations.
*
@ -34,4 +36,5 @@ public interface ConfigDepot {
boolean isNewConfig(ConfigKey<?> configKey);
String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId);
void invalidateConfigCache(String key, ConfigKey.Scope scope, Long scopeId);
Pair<ConfigKey.Scope, Long> getParentScope(ConfigKey.Scope scope, Long id);
}

View File

@ -17,8 +17,14 @@
package org.apache.cloudstack.framework.config;
import java.sql.Date;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
@ -30,6 +36,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
*
*/
public class ConfigKey<T> {
private static final Logger logger = LogManager.getLogger(ConfigKey.class);
public static final String CATEGORY_ADVANCED = "Advanced";
public static final String CATEGORY_ALERT = "Alert";
@ -37,7 +44,89 @@ public class ConfigKey<T> {
public static final String CATEGORY_SYSTEM = "System";
public enum Scope {
Global, Zone, Cluster, StoragePool, Account, ManagementServer, ImageStore, Domain
Global(null, 1),
Zone(Global, 1 << 1),
Cluster(Zone, 1 << 2),
StoragePool(Cluster, 1 << 3),
ManagementServer(Global, 1 << 4),
ImageStore(Zone, 1 << 5),
Domain(Global, 1 << 6),
Account(Domain, 1 << 7);
private final Scope parent;
private final int bitValue;
Scope(Scope parent, int bitValue) {
this.parent = parent;
this.bitValue = bitValue;
}
public Scope getParent() {
return parent;
}
public int getBitValue() {
return bitValue;
}
/**
 * Returns true when {@code other} is a (transitive) ancestor of this scope.
 * A scope is not considered a descendant of itself.
 */
public boolean isDescendantOf(Scope other) {
    // Walk up the ancestor chain; a for-loop also avoids shadowing the 'parent' field.
    for (Scope ancestor = getParent(); ancestor != null; ancestor = ancestor.getParent()) {
        if (ancestor == other) {
            return true;
        }
    }
    return false;
}
/**
 * Returns every scope that is a strict descendant of the scope named by
 * {@code str} (the named scope itself is excluded).
 *
 * @throws IllegalArgumentException if {@code str} is not a valid scope name
 */
public static List<Scope> getAllDescendants(String str) {
    final Scope target = Scope.valueOf(str);
    List<Scope> descendants = new ArrayList<>();
    for (Scope candidate : values()) {
        if (candidate.isDescendantOf(target)) {
            descendants.add(candidate);
        }
    }
    return descendants;
}
/**
 * Decodes a scope bitmask into the list of scopes whose bits are set,
 * in declaration order. A zero bitmask yields an empty (immutable) list.
 */
public static List<Scope> decode(int bitmask) {
    if (bitmask == 0) {
        return Collections.emptyList();
    }
    List<Scope> decoded = new ArrayList<>();
    for (Scope candidate : values()) {
        int bit = candidate.getBitValue();
        if ((bitmask & bit) == bit) {
            decoded.add(candidate);
        }
    }
    return decoded;
}
/**
 * Renders a scope bitmask as a comma-separated list of scope names in
 * declaration order (e.g. "Global, Zone"). Returns null for a zero bitmask.
 */
public static String decodeAsCsv(int bitmask) {
    if (bitmask == 0) {
        return null;
    }
    List<String> names = new ArrayList<>();
    for (Scope scope : values()) {
        if ((bitmask & scope.getBitValue()) != 0) {
            names.add(scope.name());
        }
    }
    // join() produces the same "a, b" form the original built by hand,
    // including the empty string when no declared scope bit matches.
    return String.join(", ", names);
}
/**
 * ORs together the bit values of the given scopes into a single bitmask.
 * An empty argument list yields 0.
 */
public static int getBitmask(Scope... scopes) {
    int combined = 0;
    for (int i = 0; i < scopes.length; i++) {
        combined |= scopes[i].getBitValue();
    }
    return combined;
}
}
public enum Kind {
@ -70,8 +159,8 @@ public class ConfigKey<T> {
return _displayText;
}
public Scope scope() {
return _scope;
public List<Scope> getScopes() {
return scopes;
}
public boolean isDynamic() {
@ -108,7 +197,7 @@ public class ConfigKey<T> {
private final String _defaultValue;
private final String _description;
private final String _displayText;
private final Scope _scope; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global
private final List<Scope> scopes; // Parameter can be at different levels (Zone/cluster/pool/account), by default every parameter is at global
private final boolean _isDynamic;
private final String _parent;
private final Ternary<String, String, Long> _group; // Group name, description with precedence
@ -128,6 +217,10 @@ public class ConfigKey<T> {
this(type, name, category, defaultValue, description, isDynamic, scope, null);
}
public ConfigKey(String category, Class<T> type, String name, String defaultValue, String description, boolean isDynamic, List<Scope> scopes) {
this(type, name, category, defaultValue, description, isDynamic, scopes, null);
}
public ConfigKey(String category, Class<T> type, String name, String defaultValue, String description, boolean isDynamic, Scope scope, String parent) {
this(type, name, category, defaultValue, description, isDynamic, scope, null, null, parent, null, null, null, null);
}
@ -148,6 +241,10 @@ public class ConfigKey<T> {
this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, null, null, null, null, null, null);
}
public ConfigKey(Class<T> type, String name, String category, String defaultValue, String description, boolean isDynamic, List<Scope> scopes, T multiplier) {
this(type, name, category, defaultValue, description, isDynamic, scopes, multiplier, null, null, null, null, null, null);
}
public ConfigKey(Class<T> type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier, String parent) {
this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, null, parent, null, null, null, null);
}
@ -159,13 +256,22 @@ public class ConfigKey<T> {
/**
 * Single-scope convenience constructor: wraps the scope in a singleton list and
 * delegates to the multi-scope constructor. A null scope yields a scope-less key.
 */
public ConfigKey(Class<T> type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier,
                 String displayText, String parent, Ternary<String, String, Long> group, Pair<String, Long> subGroup, Kind kind, String options) {
    this(type, name, category, defaultValue, description, isDynamic, scope == null ? null : List.of(scope), multiplier,
            displayText, parent, group, subGroup, kind, options);
}
public ConfigKey(Class<T> type, String name, String category, String defaultValue, String description, boolean isDynamic, List<Scope> scopes, T multiplier,
String displayText, String parent, Ternary<String, String, Long> group, Pair<String, Long> subGroup, Kind kind, String options) {
_category = category;
_type = type;
_name = name;
_defaultValue = defaultValue;
_description = description;
_displayText = displayText;
_scope = scope;
this.scopes = new ArrayList<>();
if (scopes != null) {
this.scopes.addAll(scopes);
}
_isDynamic = isDynamic;
_multiplier = multiplier;
_parent = parent;
@ -218,28 +324,45 @@ public class ConfigKey<T> {
String value = s_depot != null ? s_depot.getConfigStringValue(_name, Scope.Global, null) : null;
_value = valueOf((value == null) ? defaultValue() : value);
}
return _value;
}
protected T valueInScope(Scope scope, Long id) {
/**
 * Resolves the value of a multi-scoped key by walking up the parent scope chain
 * from (scope, id) until a scope this key supports is found; falls back to the
 * global value when no supported ancestor scope exists.
 *
 * @param scope scope the original lookup started at
 * @param id    database id of the entity at {@code scope}
 * @return the value from the nearest supported ancestor scope, or the global value
 */
protected T valueInGlobalOrAvailableParentScope(Scope scope, Long id) {
    // Keys with at most one scope have no parent chain worth consulting.
    if (scopes.size() <= 1) {
        return value();
    }
    Pair<Scope, Long> current = new Pair<>(scope, id);
    do {
        current = s_depot != null ? s_depot.getParentScope(current.first(), current.second()) : null;
        if (current != null && scopes.contains(current.first())) {
            return valueInScope(current.first(), current.second());
        }
    } while (current != null);
    // Fix: resolve the global value before logging it. The previous code logged
    // the cached _value field, which may still be null/stale when value() has
    // not been invoked yet.
    T globalValue = value();
    logger.trace("Global value for config ({}): {}", _name, globalValue);
    return globalValue;
}
/**
 * Looks up this key's value at the given scope/id; when no scoped override is
 * stored, falls back to the parent scope chain and ultimately the global value.
 *
 * @param scope scope to resolve the value in
 * @param id    database id of the entity in that scope; null means global
 * @return the effective value for the given scope
 */
public T valueInScope(Scope scope, Long id) {
    if (id == null) {
        return value();
    }
    String value = s_depot != null ? s_depot.getConfigStringValue(_name, scope, id) : null;
    if (value == null) {
        return valueInGlobalOrAvailableParentScope(scope, id);
    }
    // Fix: log the scoped value that was actually found; the previous code
    // logged the cached global _value field, which is unrelated to this lookup.
    logger.trace("Scope({}) value for config ({}): {}", scope, _name, value);
    return valueOf(value);
}
public T valueIn(Long id) {
return valueInScope(_scope, id);
/**
 * @return the first configured scope of this key, or null when the key declares no scopes
 */
protected Scope getPrimaryScope() {
    return CollectionUtils.isEmpty(scopes) ? null : scopes.get(0);
}
public T valueInDomain(Long domainId) {
return valueInScope(Scope.Domain, domainId);
/**
 * Resolves this key's value for the given entity id in the key's primary
 * (first-declared) scope.
 */
public T valueIn(Long id) {
    return valueInScope(getPrimaryScope(), id);
}
@SuppressWarnings("unchecked")
@ -277,4 +400,20 @@ public class ConfigKey<T> {
}
}
/**
 * @return true when this key is effectively global: it declares no scopes at all,
 *         or declares only {@link Scope#Global}
 */
public boolean isGlobalOrEmptyScope() {
    if (CollectionUtils.isEmpty(scopes)) {
        return true;
    }
    return scopes.size() == 1 && scopes.get(0) == Scope.Global;
}
/**
 * Encodes all scopes of this key into a single bitmask
 * (see {@link Scope#getBitValue()}).
 *
 * @return OR-ed bit values of the key's scopes; 0 when the key has no scopes
 */
public int getScopeBitmask() {
    if (CollectionUtils.isEmpty(scopes)) {
        return 0;
    }
    // Delegate to the canonical folding helper instead of duplicating the OR loop.
    return Scope.getBitmask(scopes.toArray(new Scope[0]));
}
}

View File

@ -18,6 +18,8 @@ package org.apache.cloudstack.framework.config;
import org.apache.cloudstack.framework.config.ConfigKey.Scope;
import com.cloud.utils.Pair;
/**
*
* This method is used by individual storage for configuration
@ -31,4 +33,7 @@ public interface ScopedConfigStorage {
/**
 * Convenience overload: resolves the stored value of {@code key} for the entity
 * with the given id by delegating to the name-based lookup.
 */
default String getConfigValue(long id, ConfigKey<?> key) {
    return getConfigValue(id, key.key());
}
/**
 * Returns the parent (scope, id) pair of the entity with the given id, used to
 * walk up the scope hierarchy when resolving config values. The default
 * implementation reports no parent; storages with hierarchical scopes override it.
 */
default Pair<Scope, Long> getParentScope(long id) {
    return null;
}
}

View File

@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.framework.config.dao;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
@ -67,4 +68,6 @@ public interface ConfigurationDao extends GenericDao<ConfigurationVO, String> {
boolean update(String name, String category, String value);
void invalidateCache();
List<ConfigurationVO> searchPartialConfigurations();
}

View File

@ -43,6 +43,7 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
final SearchBuilder<ConfigurationVO> InstanceSearch;
final SearchBuilder<ConfigurationVO> NameSearch;
final SearchBuilder<ConfigurationVO> PartialSearch;
public static final String UPDATE_CONFIGURATION_SQL = "UPDATE configuration SET value = ? WHERE name = ?";
@ -53,6 +54,11 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
NameSearch = createSearchBuilder();
NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ);
setRunLevel(ComponentLifecycle.RUN_LEVEL_SYSTEM_BOOTSTRAP);
PartialSearch = createSearchBuilder();
PartialSearch.select("name", SearchCriteria.Func.NATIVE, PartialSearch.entity().getName());
PartialSearch.select("groupId", SearchCriteria.Func.NATIVE, PartialSearch.entity().getGroupId());
PartialSearch.select("subGroupId", SearchCriteria.Func.NATIVE, PartialSearch.entity().getSubGroupId());
}
@Override
@ -207,4 +213,9 @@ public class ConfigurationDaoImpl extends GenericDaoBase<ConfigurationVO, String
return findOneIncludingRemovedBy(sc);
}
@Override
public List<ConfigurationVO> searchPartialConfigurations() {
    // PartialSearch projects only the name/groupId/subGroupId columns (see the
    // constructor), so the returned VOs are partially populated. Removed rows
    // are included on purpose.
    SearchCriteria<ConfigurationVO> sc = PartialSearch.create();
    return searchIncludingRemoved(sc, null, null, false);
}
}

View File

@ -144,9 +144,11 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin {
createOrupdateConfigObject(date, configurable.getConfigComponentName(), key, null);
if ((key.scope() != null) && (key.scope() != ConfigKey.Scope.Global)) {
Set<ConfigKey<?>> currentConfigs = _scopeLevelConfigsMap.get(key.scope());
currentConfigs.add(key);
if (!key.isGlobalOrEmptyScope()) {
for (ConfigKey.Scope scope : key.getScopes()) {
Set<ConfigKey<?>> currentConfigs = _scopeLevelConfigsMap.get(scope);
currentConfigs.add(key);
}
}
}
@ -204,12 +206,12 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin {
} else {
boolean configUpdated = false;
if (vo.isDynamic() != key.isDynamic() || !ObjectUtils.equals(vo.getDescription(), key.description()) || !ObjectUtils.equals(vo.getDefaultValue(), key.defaultValue()) ||
!ObjectUtils.equals(vo.getScope(), key.scope().toString()) ||
!ObjectUtils.equals(vo.getScope(), key.getScopeBitmask()) ||
!ObjectUtils.equals(vo.getComponent(), componentName)) {
vo.setDynamic(key.isDynamic());
vo.setDescription(key.description());
vo.setDefaultValue(key.defaultValue());
vo.setScope(key.scope().toString());
vo.setScope(key.getScopeBitmask());
vo.setComponent(componentName);
vo.setUpdated(date);
configUpdated = true;
@ -283,12 +285,7 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin {
scopeId = Long.valueOf(parts[2]);
} catch (IllegalArgumentException ignored) {}
if (!ConfigKey.Scope.Global.equals(scope) && scopeId != null) {
ScopedConfigStorage scopedConfigStorage = null;
for (ScopedConfigStorage storage : _scopedStorages) {
if (storage.getScope() == scope) {
scopedConfigStorage = storage;
}
}
ScopedConfigStorage scopedConfigStorage = getScopedStorage(scope);
if (scopedConfigStorage == null) {
throw new CloudRuntimeException("Unable to find config storage for this scope: " + scope + " for " + key);
}
@ -315,26 +312,6 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin {
configCache.invalidate(getConfigCacheKey(key, scope, scopeId));
}
public ScopedConfigStorage findScopedConfigStorage(ConfigKey<?> config) {
for (ScopedConfigStorage storage : _scopedStorages) {
if (storage.getScope() == config.scope()) {
return storage;
}
}
throw new CloudRuntimeException("Unable to find config storage for this scope: " + config.scope() + " for " + config.key());
}
public ScopedConfigStorage getDomainScope(ConfigKey<?> config) {
for (ScopedConfigStorage storage : _scopedStorages) {
if (storage.getScope() == ConfigKey.Scope.Domain) {
return storage;
}
}
throw new CloudRuntimeException("Unable to find config storage for this scope: " + ConfigKey.Scope.Domain + " for " + config.key());
}
public List<ScopedConfigStorage> getScopedStorages() {
return _scopedStorages;
}
@ -398,4 +375,27 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin {
public boolean isNewConfig(ConfigKey<?> configKey) {
    // True when the key was recorded in newConfigs — presumably keys first
    // persisted during this process' configuration population; confirm against
    // where newConfigs is filled.
    return newConfigs.contains(configKey.key());
}
/**
 * Finds the registered {@link ScopedConfigStorage} that handles the given scope.
 *
 * @param scope scope to find a storage for
 * @return the matching storage, or null when none is registered for that scope
 */
protected ScopedConfigStorage getScopedStorage(ConfigKey.Scope scope) {
    for (final ScopedConfigStorage candidate : _scopedStorages) {
        if (candidate.getScope() == scope) {
            return candidate;
        }
    }
    return null;
}
/**
 * Resolves the parent (scope, id) of the given entity by asking the storage
 * registered for its scope.
 *
 * @param scope scope of the entity
 * @param id    database id of the entity
 * @return the parent pair, or null when the scope has no parent or no storage
 *         is registered for it
 */
@Override
public Pair<ConfigKey.Scope, Long> getParentScope(ConfigKey.Scope scope, Long id) {
    // Scopes without a declared parent (e.g. Global) cannot be walked up.
    if (scope.getParent() == null) {
        return null;
    }
    final ScopedConfigStorage storage = getScopedStorage(scope);
    return storage == null ? null : storage.getParentScope(id);
}
}

View File

@ -17,6 +17,7 @@
package org.apache.cloudstack.framework.config.impl;
import java.util.Date;
import java.util.List;
import javax.persistence.Column;
import javax.persistence.Entity;
@ -60,7 +61,7 @@ public class ConfigurationVO implements Configuration {
private boolean dynamic;
@Column(name = "scope")
private String scope;
private Integer scope;
@Column(name = "updated")
@Temporal(value = TemporalType.TIMESTAMP)
@ -102,6 +103,7 @@ public class ConfigurationVO implements Configuration {
this.name = name;
this.description = description;
this.parent = parentConfigName;
this.scope = 0;
setValue(value);
setDisplayText(displayText);
setGroupId(groupId);
@ -112,7 +114,7 @@ public class ConfigurationVO implements Configuration {
this(key.category(), "DEFAULT", component, key.key(), key.defaultValue(), key.description(), key.displayText(), key.parent());
defaultValue = key.defaultValue();
dynamic = key.isDynamic();
scope = key.scope() != null ? key.scope().toString() : null;
scope = key.getScopeBitmask();
}
@Override
@ -183,10 +185,15 @@ public class ConfigurationVO implements Configuration {
}
@Override
public String getScope() {
public int getScope() {
return scope;
}
@Override
@Override
public List<ConfigKey.Scope> getScopes() {
    // Decodes the persisted integer bitmask back into the list of scopes.
    return ConfigKey.Scope.decode(scope);
}
@Override
public boolean isDynamic() {
return dynamic;
@ -205,7 +212,7 @@ public class ConfigurationVO implements Configuration {
this.defaultValue = defaultValue;
}
public void setScope(String scope) {
public void setScope(int scope) {
this.scope = scope;
}

View File

@ -16,6 +16,8 @@
// under the License.
package org.apache.cloudstack.framework.config;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
@ -47,4 +49,31 @@ public class ConfigKeyTest {
ConfigKey key = new ConfigKey("hond", Boolean.class, "naam", "truus", "thrown name", false);
Assert.assertFalse("zero and 0L should be considered the same address", key.isSameKeyAs(0L));
}
@Test
public void testDecode() {
    ConfigKey key = new ConfigKey("testcategoey", Boolean.class, "test", "true", "test descriptuin", false, List.of(Scope.Zone, Scope.StoragePool));
    final int bitmask = key.getScopeBitmask();
    final List<Scope> decoded = ConfigKey.Scope.decode(bitmask);
    // Round trip: re-encoding the decoded scopes must reproduce the bitmask.
    Assert.assertEquals(bitmask, ConfigKey.Scope.getBitmask(decoded.toArray(new Scope[0])));
    // Only the two scopes the key was created with may appear.
    for (final Scope decodedScope : decoded) {
        Assert.assertTrue(decodedScope == Scope.Zone || decodedScope == Scope.StoragePool);
    }
}
@Test
public void testDecodeAsCsv() {
    ConfigKey key = new ConfigKey("testcategoey", Boolean.class, "test", "true", "test descriptuin", false, List.of(Scope.Zone, Scope.StoragePool));
    int bitmask = key.getScopeBitmask();
    String scopes = ConfigKey.Scope.decodeAsCsv(bitmask);
    // assertEquals instead of assertTrue(equals) so a failure reports the actual value.
    Assert.assertEquals("Zone, StoragePool", scopes);
}
@Test
public void testGetDescendants() {
    List<Scope> descendants = ConfigKey.Scope.getAllDescendants(Scope.Zone.name());
    // Guard against a vacuously passing loop: Zone must have at least one descendant.
    Assert.assertFalse(descendants.isEmpty());
    for (Scope descendant : descendants) {
        Assert.assertTrue(descendant == Scope.Cluster || descendant == Scope.StoragePool || descendant == Scope.ImageStore);
    }
}
}

View File

@ -20,10 +20,14 @@ package org.apache.cloudstack.framework.config.impl;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.ScopedConfigStorage;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -33,11 +37,15 @@ import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.utils.Pair;
@RunWith(MockitoJUnitRunner.class)
public class ConfigDepotImplTest {
@Mock
ConfigurationDao _configDao;
@Mock
ConfigurationSubGroupDao configSubGroupDao;
@InjectMocks
private ConfigDepotImpl configDepotImpl = new ConfigDepotImpl();
@ -107,4 +115,76 @@ public class ConfigDepotImplTest {
runTestGetConfigStringValueExpiry(((ConfigDepotImpl.CONFIG_CACHE_EXPIRE_SECONDS) + 5) * 1000,
2);
}
@Test
public void testPopulateConfigurationNewVO() {
    // Use a parameterized ConfigKey<Double> and a lowerCamelCase local instead of
    // the raw, UpperCamelCase original; behavior is unchanged.
    final ConfigKey<Double> storageDisableThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, Double.class, "pool.storage.capacity.disablethreshold", "0.85",
            "Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.",
            true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
    Configurable configurable = new Configurable() {
        @Override
        public String getConfigComponentName() {
            return "test";
        }

        @Override
        public ConfigKey<?>[] getConfigKeys() {
            return new ConfigKey<?>[] { storageDisableThreshold };
        }
    };
    configDepotImpl.setConfigurables(List.of(configurable));
    configDepotImpl.populateConfigurations();
    // A multi-scoped key must be registered under each of its scopes, and nowhere else.
    Assert.assertEquals("pool.storage.capacity.disablethreshold",
            configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.Zone).iterator().next().key());
    Assert.assertEquals("pool.storage.capacity.disablethreshold",
            configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.StoragePool).iterator().next().key());
    Assert.assertEquals(0, configDepotImpl._scopeLevelConfigsMap.get(ConfigKey.Scope.Cluster).size());
}
@Test
public void testPopulateConfiguration() {
    // Parameterized key and lowerCamelCase local replace the raw, UpperCamelCase original.
    final ConfigKey<Double> storageDisableThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, Double.class, "pool.storage.capacity.disablethreshold", "0.85",
            "Percentage (as a value between 0 and 1) of storage utilization above which allocators will disable using the pool for low storage available.",
            true, List.of(ConfigKey.Scope.StoragePool, ConfigKey.Scope.Zone));
    Configurable configurable = new Configurable() {
        @Override
        public String getConfigComponentName() {
            return "test";
        }

        @Override
        public ConfigKey<?>[] getConfigKeys() {
            return new ConfigKey<?>[]{storageDisableThreshold};
        }
    };
    configDepotImpl.setConfigurables(List.of(configurable));
    ConfigurationVO configurationVO = new ConfigurationVO(storageDisableThreshold.category(), "DEFAULT", "component",
            storageDisableThreshold.key(), storageDisableThreshold.defaultValue(), storageDisableThreshold.description(),
            storageDisableThreshold.displayText(), storageDisableThreshold.parent(), 1L, 10L);
    Mockito.when(_configDao.findById("pool.storage.capacity.disablethreshold")).thenReturn(configurationVO);
    configDepotImpl.populateConfigurations();
    // The pre-existing VO differs from the key's metadata, so population must persist it exactly once.
    Mockito.verify(_configDao, Mockito.times(1)).persist(configurationVO);
}
@Test
public void getParentScopeWithValidScope() {
    // Given a storage registered for Cluster scope that maps id 1 to (Zone, 2) ...
    final ConfigKey.Scope childScope = ConfigKey.Scope.Cluster;
    final Long childId = 1L;
    final ConfigKey.Scope expectedParentScope = ConfigKey.Scope.Zone;
    final Long expectedParentId = 2L;
    final ScopedConfigStorage storage = Mockito.mock(ScopedConfigStorage.class);
    Mockito.when(storage.getScope()).thenReturn(childScope);
    Mockito.when(storage.getParentScope(childId)).thenReturn(new Pair<>(expectedParentScope, expectedParentId));
    configDepotImpl.setScopedStorages(Collections.singletonList(storage));

    // ... the depot must report exactly that parent pair.
    final Pair<ConfigKey.Scope, Long> parent = configDepotImpl.getParentScope(childScope, childId);
    Assert.assertNotNull(parent);
    Assert.assertEquals(expectedParentScope, parent.first());
    Assert.assertEquals(expectedParentId, parent.second());
}
}

View File

@ -484,6 +484,9 @@ public abstract class SearchBase<J extends SearchBase<?, T, K>, T, K> {
tableAlias = attr.table;
}
}
if (op == Op.BINARY_OR) {
sql.append("(");
}
sql.append(tableAlias).append(".").append(attr.columnName).append(op.toString());
if (op == Op.IN && params.length == 1) {

View File

@ -38,7 +38,7 @@ public class SearchCriteria<K> {
" NOT BETWEEN ? AND ? ",
2), IN(" IN () ", -1), NOTIN(" NOT IN () ", -1), LIKE(" LIKE ? ", 1), NLIKE(" NOT LIKE ? ", 1), NIN(" NOT IN () ", -1), NULL(" IS NULL ", 0), NNULL(
" IS NOT NULL ",
0), SC(" () ", 1), TEXT(" () ", 1), RP("", 0), AND(" AND ", 0), OR(" OR ", 0), NOT(" NOT ", 0), FIND_IN_SET(" ) ", 1);
0), SC(" () ", 1), TEXT(" () ", 1), RP("", 0), AND(" AND ", 0), OR(" OR ", 0), NOT(" NOT ", 0), FIND_IN_SET(" ) ", 1), BINARY_OR(" & ?) > 0", 1);
private final String op;
int params;

View File

@ -34,6 +34,7 @@ import javax.naming.ConfigurationException;
import com.cloud.user.Account;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.quota.activationrule.presetvariables.Configuration;
import org.apache.cloudstack.quota.activationrule.presetvariables.GenericPresetVariable;
import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariableHelper;
import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariables;
@ -467,6 +468,11 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager {
}
Configuration configuration = presetVariables.getConfiguration();
if (configuration != null) {
jsInterpreter.injectVariable("configuration", configuration.toString());
}
jsInterpreter.injectStringVariable("resourceType", presetVariables.getResourceType());
jsInterpreter.injectVariable("value", presetVariables.getValue().toString());
jsInterpreter.injectVariable("zone", presetVariables.getZone().toString());

View File

@ -17,10 +17,15 @@
package org.apache.cloudstack.quota.activationrule.presetvariables;
import org.apache.cloudstack.quota.constant.QuotaTypes;
public class ComputeOffering extends GenericPresetVariable {
@PresetVariableDefinition(description = "A boolean informing if the compute offering is customized or not.")
private boolean customized;
@PresetVariableDefinition(description = "A boolean informing if the compute offering offers HA or not.", supportedTypes = {QuotaTypes.RUNNING_VM})
private boolean offerHa;
/**
 * @return true when the compute offering has customized (dynamic) resources
 */
public boolean isCustomized() {
    return customized;
}
@ -30,4 +35,13 @@ public class ComputeOffering extends GenericPresetVariable {
fieldNamesToIncludeInToString.add("customized");
}
/**
 * @return true when the compute offering provides HA; only meaningful for RUNNING_VM records
 */
public boolean offerHa() {
    return offerHa;
}
public void setOfferHa(boolean offerHa) {
    this.offerHa = offerHa;
    // Register the field so it is exposed in this variable's toString output.
    fieldNamesToIncludeInToString.add("offerHa");
}
}

View File

@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.quota.activationrule.presetvariables;
import org.apache.cloudstack.quota.constant.QuotaTypes;
/**
 * Quota activation-rule preset variable exposing configuration values of the
 * rated resource; currently only the effective "force.ha" setting for RUNNING_VM
 * usage records.
 */
public class Configuration extends GenericPresetVariable {

    @PresetVariableDefinition(description = "A boolean informing if the cluster configuration force.ha is enabled or not.", supportedTypes = {QuotaTypes.RUNNING_VM})
    private boolean forceHa;

    public boolean getForceHa() {
        return forceHa;
    }

    public void setForceHa(boolean forceHa) {
        this.forceHa = forceHa;
        // Register the field so it is exposed in this variable's toString output.
        fieldNamesToIncludeInToString.add("forceHa");
    }
}

View File

@ -0,0 +1,165 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.quota.activationrule.presetvariables;
/**
 * Quota activation-rule preset variable exposing the I/O throttling limits of a
 * disk offering: byte and IOPS read/write rates, their burst values, and burst
 * lengths. Each setter also registers its field name so the value is exposed in
 * this variable's toString output (used by the activation-rule interpreter).
 */
public class DiskOfferingPresetVariables extends GenericPresetVariable {

    // --- bytes/second limits ---

    @PresetVariableDefinition(description = "A long informing the bytes read rate of the disk offering.")
    private Long bytesReadRate;

    @PresetVariableDefinition(description = "A long informing the burst bytes read rate of the disk offering.")
    private Long bytesReadBurst;

    @PresetVariableDefinition(description = "The length (in seconds) of the bytes read burst.")
    private Long bytesReadBurstLength;

    @PresetVariableDefinition(description = "A long informing the bytes write rate of the disk offering.")
    private Long bytesWriteRate;

    @PresetVariableDefinition(description = "A long informing the burst bytes write rate of the disk offering.")
    private Long bytesWriteBurst;

    @PresetVariableDefinition(description = "The length (in seconds) of the bytes write burst.")
    private Long bytesWriteBurstLength;

    // --- IOPS limits ---

    @PresetVariableDefinition(description = "A long informing the I/O requests read rate of the disk offering.")
    private Long iopsReadRate;

    @PresetVariableDefinition(description = "A long informing the burst I/O requests read rate of the disk offering.")
    private Long iopsReadBurst;

    @PresetVariableDefinition(description = "The length (in seconds) of the IOPS read burst.")
    private Long iopsReadBurstLength;

    @PresetVariableDefinition(description = "A long informing the I/O requests write rate of the disk offering.")
    private Long iopsWriteRate;

    @PresetVariableDefinition(description = "A long informing the burst I/O requests write rate of the disk offering.")
    private Long iopsWriteBurst;

    @PresetVariableDefinition(description = "The length (in seconds) of the IOPS write burst.")
    private Long iopsWriteBurstLength;

    public Long getBytesReadRate() {
        return bytesReadRate;
    }

    public void setBytesReadRate(Long bytesReadRate) {
        this.bytesReadRate = bytesReadRate;
        fieldNamesToIncludeInToString.add("bytesReadRate");
    }

    public Long getBytesReadBurst() {
        return bytesReadBurst;
    }

    public void setBytesReadBurst(Long bytesReadBurst) {
        this.bytesReadBurst = bytesReadBurst;
        fieldNamesToIncludeInToString.add("bytesReadBurst");
    }

    public Long getBytesReadBurstLength() {
        return bytesReadBurstLength;
    }

    public void setBytesReadBurstLength(Long bytesReadBurstLength) {
        this.bytesReadBurstLength = bytesReadBurstLength;
        fieldNamesToIncludeInToString.add("bytesReadBurstLength");
    }

    public Long getBytesWriteRate() {
        return bytesWriteRate;
    }

    public void setBytesWriteRate(Long bytesWriteRate) {
        this.bytesWriteRate = bytesWriteRate;
        fieldNamesToIncludeInToString.add("bytesWriteRate");
    }

    public Long getBytesWriteBurst() {
        return bytesWriteBurst;
    }

    public void setBytesWriteBurst(Long bytesWriteBurst) {
        this.bytesWriteBurst = bytesWriteBurst;
        fieldNamesToIncludeInToString.add("bytesWriteBurst");
    }

    public Long getBytesWriteBurstLength() {
        return bytesWriteBurstLength;
    }

    public void setBytesWriteBurstLength(Long bytesWriteBurstLength) {
        this.bytesWriteBurstLength = bytesWriteBurstLength;
        fieldNamesToIncludeInToString.add("bytesWriteBurstLength");
    }

    public Long getIopsReadRate() {
        return iopsReadRate;
    }

    public void setIopsReadRate(Long iopsReadRate) {
        this.iopsReadRate = iopsReadRate;
        fieldNamesToIncludeInToString.add("iopsReadRate");
    }

    public Long getIopsReadBurst() {
        return iopsReadBurst;
    }

    public void setIopsReadBurst(Long iopsReadBurst) {
        this.iopsReadBurst = iopsReadBurst;
        fieldNamesToIncludeInToString.add("iopsReadBurst");
    }

    public Long getIopsReadBurstLength() {
        return iopsReadBurstLength;
    }

    public void setIopsReadBurstLength(Long iopsReadBurstLength) {
        this.iopsReadBurstLength = iopsReadBurstLength;
        fieldNamesToIncludeInToString.add("iopsReadBurstLength");
    }

    public Long getIopsWriteRate() {
        return iopsWriteRate;
    }

    public void setIopsWriteRate(Long iopsWriteRate) {
        this.iopsWriteRate = iopsWriteRate;
        fieldNamesToIncludeInToString.add("iopsWriteRate");
    }

    public Long getIopsWriteBurst() {
        return iopsWriteBurst;
    }

    public void setIopsWriteBurst(Long iopsWriteBurst) {
        this.iopsWriteBurst = iopsWriteBurst;
        fieldNamesToIncludeInToString.add("iopsWriteBurst");
    }

    public Long getIopsWriteBurstLength() {
        return iopsWriteBurstLength;
    }

    public void setIopsWriteBurstLength(Long iopsWriteBurstLength) {
        this.iopsWriteBurstLength = iopsWriteBurstLength;
        fieldNamesToIncludeInToString.add("iopsWriteBurstLength");
    }
}

View File

@ -25,6 +25,8 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.host.HostTagVO;
import com.cloud.network.dao.NetworkVO;
import com.cloud.network.vpc.VpcVO;
@ -37,6 +39,7 @@ import org.apache.cloudstack.acl.dao.RoleDao;
import org.apache.cloudstack.backup.BackupOfferingVO;
import org.apache.cloudstack.backup.dao.BackupOfferingDao;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.quota.constant.QuotaTypes;
import org.apache.cloudstack.quota.dao.NetworkDao;
import org.apache.cloudstack.quota.dao.VmTemplateDao;
@ -51,6 +54,7 @@ import org.apache.cloudstack.usage.UsageTypes;
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.stereotype.Component;
@ -181,6 +185,11 @@ public class PresetVariableHelper {
@Inject
VpcDao vpcDao;
@Inject
ConfigurationDao configDao;
@Inject
ClusterDetailsDao clusterDetailsDao;
protected boolean backupSnapshotAfterTakingSnapshot = SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value();
@ -194,6 +203,7 @@ public class PresetVariableHelper {
presetVariables.setAccount(getPresetVariableAccount(usageRecord.getAccountId()));
setPresetVariableProject(presetVariables);
setPresetVariableConfiguration(presetVariables, usageRecord);
presetVariables.setDomain(getPresetVariableDomain(usageRecord.getDomainId()));
presetVariables.setResourceType(usageRecord.getType());
@ -272,6 +282,39 @@ public class PresetVariableHelper {
return zone;
}
/**
 * Injects the 'configuration' preset variable; only RUNNING_VM usage records
 * carry configuration data, all other record types are skipped.
 */
protected void setPresetVariableConfiguration(PresetVariables presetVariables, UsageVO usageRecord) {
    if (usageRecord.getUsageType() != UsageTypes.RUNNING_VM) {
        return;
    }
    final Configuration config = new Configuration();
    setForceHaInConfiguration(config, usageRecord);
    presetVariables.setConfiguration(config);
}
/**
 * Resolves the effective "force.ha" setting for the VM of the given usage record
 * and stores it in the configuration preset variable. The cluster-level detail of
 * the VM's current (or last) host takes precedence; otherwise the global
 * configuration value is used.
 */
protected void setForceHaInConfiguration(Configuration configuration, UsageVO usageRecord) {
    final Long vmId = usageRecord.getUsageId();
    final VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(vmId);
    validateIfObjectIsNull(vm, vmId, "VM");

    // Fall back to the last host for VMs that are not currently running on one.
    final Long hostId = ObjectUtils.defaultIfNull(vm.getHostId(), vm.getLastHostId());
    final HostVO host = hostDao.findByIdIncludingRemoved(hostId);
    validateIfObjectIsNull(host, hostId, "host");

    final ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(host.getClusterId(), "force.ha");
    final String forceHaValue = clusterDetail != null ? clusterDetail.getValue() : configDao.getValue("force.ha");
    configuration.setForceHa(Boolean.parseBoolean(forceHaValue));
}
protected Value getPresetVariableValue(UsageVO usageRecord) {
Long accountId = usageRecord.getAccountId();
int usageType = usageRecord.getUsageType();
@ -390,12 +433,16 @@ public class PresetVariableHelper {
return guestOsVo.getDisplayName();
}
protected ComputeOffering getPresetVariableValueComputeOffering(ServiceOfferingVO serviceOfferingVo) {
/**
 * Builds the 'computeOffering' preset variable from the given service offering.
 * The offerHa flag is only populated for RUNNING_VM usage records.
 */
protected ComputeOffering getPresetVariableValueComputeOffering(ServiceOfferingVO serviceOfferingVo, int usageType) {
    final ComputeOffering offering = new ComputeOffering();
    offering.setId(serviceOfferingVo.getUuid());
    offering.setName(serviceOfferingVo.getName());
    offering.setCustomized(serviceOfferingVo.isDynamic());
    if (usageType == UsageTypes.RUNNING_VM) {
        offering.setOfferHa(serviceOfferingVo.isOfferHA());
    }
    return offering;
}
@ -404,7 +451,7 @@ public class PresetVariableHelper {
long computeOfferingId = vmVo.getServiceOfferingId();
ServiceOfferingVO serviceOfferingVo = serviceOfferingDao.findByIdIncludingRemoved(computeOfferingId);
validateIfObjectIsNull(serviceOfferingVo, computeOfferingId, "compute offering");
value.setComputeOffering(getPresetVariableValueComputeOffering(serviceOfferingVo));
value.setComputeOffering(getPresetVariableValueComputeOffering(serviceOfferingVo, usageType));
if (usageType == UsageTypes.RUNNING_VM) {
value.setComputingResources(getPresetVariableValueComputingResource(vmVo, serviceOfferingVo));
@ -492,6 +539,7 @@ public class PresetVariableHelper {
value.setId(volumeVo.getUuid());
value.setName(volumeVo.getName());
value.setProvisioningType(volumeVo.getProvisioningType());
value.setVolumeType(volumeVo.getVolumeType());
Long poolId = volumeVo.getPoolId();
if (poolId == null) {
@ -510,13 +558,25 @@ public class PresetVariableHelper {
}
}
protected GenericPresetVariable getPresetVariableValueDiskOffering(Long diskOfferingId) {
protected DiskOfferingPresetVariables getPresetVariableValueDiskOffering(Long diskOfferingId) {
DiskOfferingVO diskOfferingVo = diskOfferingDao.findByIdIncludingRemoved(diskOfferingId);
validateIfObjectIsNull(diskOfferingVo, diskOfferingId, "disk offering");
GenericPresetVariable diskOffering = new GenericPresetVariable();
DiskOfferingPresetVariables diskOffering = new DiskOfferingPresetVariables();
diskOffering.setId(diskOfferingVo.getUuid());
diskOffering.setName(diskOfferingVo.getName());
diskOffering.setBytesReadRate(diskOfferingVo.getBytesReadRate());
diskOffering.setBytesReadBurst(diskOfferingVo.getBytesReadRateMax());
diskOffering.setBytesReadBurstLength(diskOfferingVo.getBytesReadRateMaxLength());
diskOffering.setBytesWriteRate(diskOfferingVo.getBytesWriteRate());
diskOffering.setBytesWriteBurst(diskOfferingVo.getBytesWriteRateMax());
diskOffering.setBytesWriteBurstLength(diskOfferingVo.getBytesWriteRateMaxLength());
diskOffering.setIopsReadRate(diskOfferingVo.getIopsReadRate());
diskOffering.setIopsReadBurst(diskOfferingVo.getIopsReadRateMax());
diskOffering.setIopsReadBurstLength(diskOfferingVo.getIopsReadRateMaxLength());
diskOffering.setIopsWriteRate(diskOfferingVo.getIopsWriteRate());
diskOffering.setIopsWriteBurst(diskOfferingVo.getIopsWriteRateMax());
diskOffering.setIopsWriteBurstLength(diskOfferingVo.getIopsWriteRateMaxLength());
return diskOffering;
}

View File

@ -39,6 +39,9 @@ public class PresetVariables {
@PresetVariableDefinition(description = "Zone where the resource is.")
private GenericPresetVariable zone;
@PresetVariableDefinition(description = "Configurations of the resource.")
private Configuration configuration;
@PresetVariableDefinition(description = "A list containing the tariffs ordered by the field 'position'.")
private List<Tariff> lastTariffs;
@ -90,6 +93,14 @@ public class PresetVariables {
this.zone = zone;
}
public Configuration getConfiguration() {
return configuration;
}
public void setConfiguration(Configuration configuration) {
this.configuration = configuration;
}
public List<Tariff> getLastTariffs() {
return lastTariffs;
}

View File

@ -22,6 +22,7 @@ import java.util.Map;
import com.cloud.storage.Snapshot;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.Volume;
import com.cloud.vm.snapshot.VMSnapshot;
import org.apache.cloudstack.quota.constant.QuotaTypes;
@ -75,7 +76,7 @@ public class Value extends GenericPresetVariable {
private GenericPresetVariable template;
@PresetVariableDefinition(description = "Disk offering of the volume.", supportedTypes = {QuotaTypes.VOLUME})
private GenericPresetVariable diskOffering;
private DiskOfferingPresetVariables diskOffering;
@PresetVariableDefinition(description = "Storage where the volume or snapshot is. While handling with snapshots, this value can be from the primary storage if the global " +
"setting 'snapshot.backup.to.secondary' is false, otherwise it will be from secondary storage.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.SNAPSHOT})
@ -93,6 +94,10 @@ public class Value extends GenericPresetVariable {
@PresetVariableDefinition(description = "The volume format. Values can be: RAW, VHD, VHDX, OVA and QCOW2.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.VOLUME_SECONDARY})
private String volumeFormat;
@PresetVariableDefinition(description = "The volume type. Values can be: UNKNOWN, ROOT, SWAP, DATADISK and ISO.", supportedTypes = {QuotaTypes.VOLUME})
private Volume.Type volumeType;
private String state;
public Host getHost() {
@ -194,11 +199,11 @@ public class Value extends GenericPresetVariable {
fieldNamesToIncludeInToString.add("template");
}
public GenericPresetVariable getDiskOffering() {
public DiskOfferingPresetVariables getDiskOffering() {
return diskOffering;
}
public void setDiskOffering(GenericPresetVariable diskOffering) {
public void setDiskOffering(DiskOfferingPresetVariables diskOffering) {
this.diskOffering = diskOffering;
fieldNamesToIncludeInToString.add("diskOffering");
}
@ -257,6 +262,15 @@ public class Value extends GenericPresetVariable {
return volumeFormat;
}
public Volume.Type getVolumeType() {
return volumeType;
}
public void setVolumeType(Volume.Type volumeType) {
this.volumeType = volumeType;
fieldNamesToIncludeInToString.add("volumeType");
}
public String getState() {
return state;
}

View File

@ -270,6 +270,7 @@ public class QuotaManagerImplTest {
Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("account"), Mockito.anyString());
Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("domain"), Mockito.anyString());
Mockito.verify(jsInterpreterMock, Mockito.never()).injectVariable(Mockito.eq("project"), Mockito.anyString());
Mockito.verify(jsInterpreterMock, Mockito.never()).injectVariable(Mockito.eq("configuration"), Mockito.anyString());
Mockito.verify(jsInterpreterMock).injectStringVariable(Mockito.eq("resourceType"), Mockito.anyString());
Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("value"), Mockito.anyString());
Mockito.verify(jsInterpreterMock).injectVariable(Mockito.eq("zone"), Mockito.anyString());

View File

@ -27,6 +27,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.host.HostTagVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.StoragePoolTagVO;
@ -76,6 +78,7 @@ import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSDao;
@ -122,6 +125,9 @@ public class PresetVariableHelperTest {
@Mock
HostTagsDao hostTagsDaoMock;
@Mock
ClusterDetailsDao clusterDetailsDaoMock;
@Mock
ImageStoreDao imageStoreDaoMock;
@ -208,7 +214,7 @@ public class PresetVariableHelperTest {
value.setComputeOffering(getComputeOfferingForTests());
value.setTags(Collections.singletonMap("tag1", "value1"));
value.setTemplate(getGenericPresetVariableForTests());
value.setDiskOffering(getGenericPresetVariableForTests());
value.setDiskOffering(getDiskOfferingForTests());
value.setProvisioningType(ProvisioningType.THIN);
value.setStorage(getStorageForTests());
value.setSize(ByteScaleUtils.GiB);
@ -216,6 +222,7 @@ public class PresetVariableHelperTest {
value.setTag("tag_test");
value.setVmSnapshotType(VMSnapshot.Type.Disk);
value.setComputingResources(getComputingResourcesForTests());
value.setVolumeType(Volume.Type.DATADISK);
return value;
}
@ -232,6 +239,7 @@ public class PresetVariableHelperTest {
computeOffering.setId("compute_offering_id");
computeOffering.setName("compute_offering_name");
computeOffering.setCustomized(false);
computeOffering.setOfferHa(false);
return computeOffering;
}
@ -243,6 +251,14 @@ public class PresetVariableHelperTest {
return host;
}
private Configuration getConfigurationForTests() {
Configuration configuration = new Configuration();
configuration.setId("config_id");
configuration.setName("config_name");
configuration.setForceHa(false);
return configuration;
}
private List<HostTagVO> getHostTagsForTests() {
return Arrays.asList(new HostTagVO(1, "tag1", false), new HostTagVO(1, "tag2", false));
}
@ -308,6 +324,13 @@ public class PresetVariableHelperTest {
return backupOffering;
}
private DiskOfferingPresetVariables getDiskOfferingForTests() {
DiskOfferingPresetVariables diskOffering = new DiskOfferingPresetVariables();
diskOffering.setId("disk_offering_id");
diskOffering.setName("disk_offering_name");
return diskOffering;
}
private void mockMethodValidateIfObjectIsNull() {
Mockito.doNothing().when(presetVariableHelperSpy).validateIfObjectIsNull(Mockito.any(), Mockito.anyLong(), Mockito.anyString());
}
@ -329,6 +352,7 @@ public class PresetVariableHelperTest {
Mockito.doReturn(expected.getAccount()).when(presetVariableHelperSpy).getPresetVariableAccount(Mockito.anyLong());
Mockito.doNothing().when(presetVariableHelperSpy).setPresetVariableProject(Mockito.any());
Mockito.doNothing().when(presetVariableHelperSpy).setPresetVariableConfiguration(Mockito.any(), Mockito.any());
Mockito.doReturn(expected.getDomain()).when(presetVariableHelperSpy).getPresetVariableDomain(Mockito.anyLong());
Mockito.doReturn(expected.getValue()).when(presetVariableHelperSpy).getPresetVariableValue(Mockito.any(UsageVO.class));
Mockito.doReturn(expected.getZone()).when(presetVariableHelperSpy).getPresetVariableZone(Mockito.anyLong());
@ -352,6 +376,35 @@ public class PresetVariableHelperTest {
Assert.assertNull(result.getProject());
}
@Test
public void setPresetVariableConfigurationTestQuotaTypeDifferentFromRunningVmDoNothing() {
getQuotaTypesForTests(UsageTypes.RUNNING_VM).forEach(type -> {
PresetVariables result = new PresetVariables();
Mockito.doReturn(type.getKey()).when(usageVoMock).getUsageType();
presetVariableHelperSpy.setPresetVariableConfiguration(result, usageVoMock);
Assert.assertNull(result.getConfiguration());
});
}
@Test
public void setPresetVariableConfigurationTestQuotaTypeIsRunningVmSetConfiguration() {
PresetVariables result = new PresetVariables();
Configuration expectedConfig = getConfigurationForTests();
HostVO hostVoMock = Mockito.mock(HostVO.class);
ClusterDetailsVO clusterDetailsVoMock = Mockito.mock(ClusterDetailsVO.class);
Mockito.doReturn(vmInstanceVoMock).when(vmInstanceDaoMock).findByIdIncludingRemoved(Mockito.anyLong());
Mockito.doReturn(hostVoMock).when(hostDaoMock).findByIdIncludingRemoved(Mockito.anyLong());
Mockito.doReturn(1L).when(vmInstanceVoMock).getHostId();
Mockito.doReturn(1).when(usageVoMock).getUsageType();
Mockito.doReturn(clusterDetailsVoMock).when(clusterDetailsDaoMock).findDetail(Mockito.anyLong(), Mockito.anyString());
presetVariableHelperSpy.setPresetVariableConfiguration(result, usageVoMock);
Assert.assertNotNull(result.getConfiguration());
Assert.assertEquals(expectedConfig.getForceHa(), result.getConfiguration().getForceHa());
}
@Test
public void setPresetVariableProjectTestAccountWithoutRoleSetAsProject() {
PresetVariables result = new PresetVariables();
@ -627,19 +680,36 @@ public class PresetVariableHelperTest {
}
@Test
public void getPresetVariableValueComputeOfferingTestSetFieldsAndReturnObject() {
public void getPresetVariableValueComputeOfferingForTestSetFieldsAndReturnObjectForRunningVm() {
ComputeOffering expected = getComputeOfferingForTests();
Mockito.doReturn(expected.getId()).when(serviceOfferingVoMock).getUuid();
Mockito.doReturn(expected.getName()).when(serviceOfferingVoMock).getName();
Mockito.doReturn(expected.isCustomized()).when(serviceOfferingVoMock).isDynamic();
Mockito.doReturn(expected.offerHa()).when(serviceOfferingVoMock).isOfferHA();
ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock, UsageTypes.RUNNING_VM);
assertPresetVariableIdAndName(expected, result);
Assert.assertEquals(expected.isCustomized(), result.isCustomized());
Assert.assertEquals(expected.offerHa(), result.offerHa());
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "customized", "offerHa"), result);
}
@Test
public void getPresetVariableValueComputeOfferingForTestSetFieldsAndReturnObjectForAllocatedVm() {
ComputeOffering expected = getComputeOfferingForTests();
Mockito.doReturn(expected.getId()).when(serviceOfferingVoMock).getUuid();
Mockito.doReturn(expected.getName()).when(serviceOfferingVoMock).getName();
Mockito.doReturn(expected.isCustomized()).when(serviceOfferingVoMock).isDynamic();
ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock);
ComputeOffering result = presetVariableHelperSpy.getPresetVariableValueComputeOffering(serviceOfferingVoMock, UsageTypes.ALLOCATED_VM);
assertPresetVariableIdAndName(expected, result);
Assert.assertEquals(expected.isCustomized(), result.isCustomized());
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "customized"), result);
}
@Test
public void getPresetVariableValueTemplateTestSetValuesAndReturnObject() {
VMTemplateVO vmTemplateVoMock = Mockito.mock(VMTemplateVO.class);
@ -698,6 +768,7 @@ public class PresetVariableHelperTest {
Mockito.doReturn(expected.getName()).when(volumeVoMock).getName();
Mockito.doReturn(expected.getDiskOffering()).when(presetVariableHelperSpy).getPresetVariableValueDiskOffering(Mockito.anyLong());
Mockito.doReturn(expected.getProvisioningType()).when(volumeVoMock).getProvisioningType();
Mockito.doReturn(expected.getVolumeType()).when(volumeVoMock).getVolumeType();
Mockito.doReturn(expected.getStorage()).when(presetVariableHelperSpy).getPresetVariableValueStorage(Mockito.anyLong(), Mockito.anyInt());
Mockito.doReturn(expected.getTags()).when(presetVariableHelperSpy).getPresetVariableValueResourceTags(Mockito.anyLong(), Mockito.any(ResourceObjectType.class));
Mockito.doReturn(expected.getSize()).when(volumeVoMock).getSize();
@ -713,12 +784,13 @@ public class PresetVariableHelperTest {
assertPresetVariableIdAndName(expected, result);
Assert.assertEquals(expected.getDiskOffering(), result.getDiskOffering());
Assert.assertEquals(expected.getProvisioningType(), result.getProvisioningType());
Assert.assertEquals(expected.getVolumeType(), result.getVolumeType());
Assert.assertEquals(expected.getStorage(), result.getStorage());
Assert.assertEquals(expected.getTags(), result.getTags());
Assert.assertEquals(expectedSize, result.getSize());
Assert.assertEquals(imageFormat.name(), result.getVolumeFormat());
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "storage", "tags", "size", "volumeFormat"), result);
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "volumeType", "storage", "tags", "size", "volumeFormat"), result);
}
Mockito.verify(presetVariableHelperSpy, Mockito.times(ImageFormat.values().length)).getPresetVariableValueResourceTags(Mockito.anyLong(),
@ -740,6 +812,7 @@ public class PresetVariableHelperTest {
Mockito.doReturn(expected.getName()).when(volumeVoMock).getName();
Mockito.doReturn(expected.getDiskOffering()).when(presetVariableHelperSpy).getPresetVariableValueDiskOffering(Mockito.anyLong());
Mockito.doReturn(expected.getProvisioningType()).when(volumeVoMock).getProvisioningType();
Mockito.doReturn(expected.getVolumeType()).when(volumeVoMock).getVolumeType();
Mockito.doReturn(expected.getTags()).when(presetVariableHelperSpy).getPresetVariableValueResourceTags(Mockito.anyLong(), Mockito.any(ResourceObjectType.class));
Mockito.doReturn(expected.getSize()).when(volumeVoMock).getSize();
Mockito.doReturn(imageFormat).when(volumeVoMock).getFormat();
@ -754,12 +827,13 @@ public class PresetVariableHelperTest {
assertPresetVariableIdAndName(expected, result);
Assert.assertEquals(expected.getDiskOffering(), result.getDiskOffering());
Assert.assertEquals(expected.getProvisioningType(), result.getProvisioningType());
Assert.assertEquals(expected.getVolumeType(), result.getVolumeType());
Assert.assertNull(result.getStorage());
Assert.assertEquals(expected.getTags(), result.getTags());
Assert.assertEquals(expectedSize, result.getSize());
Assert.assertEquals(imageFormat.name(), result.getVolumeFormat());
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "tags", "size", "volumeFormat"), result);
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name", "diskOffering", "provisioningType", "volumeType", "tags", "size", "volumeFormat"), result);
}
Mockito.verify(presetVariableHelperSpy, Mockito.times(ImageFormat.values().length)).getPresetVariableValueResourceTags(Mockito.anyLong(),
@ -772,14 +846,15 @@ public class PresetVariableHelperTest {
Mockito.doReturn(diskOfferingVoMock).when(diskOfferingDaoMock).findByIdIncludingRemoved(Mockito.anyLong());
mockMethodValidateIfObjectIsNull();
GenericPresetVariable expected = getGenericPresetVariableForTests();
DiskOfferingPresetVariables expected = getDiskOfferingForTests();
Mockito.doReturn(expected.getId()).when(diskOfferingVoMock).getUuid();
Mockito.doReturn(expected.getName()).when(diskOfferingVoMock).getName();
GenericPresetVariable result = presetVariableHelperSpy.getPresetVariableValueDiskOffering(1l);
assertPresetVariableIdAndName(expected, result);
validateFieldNamesToIncludeInToString(Arrays.asList("id", "name"), result);
validateFieldNamesToIncludeInToString(Arrays.asList("bytesReadBurst", "bytesReadBurstLength", "bytesReadRate", "bytesWriteBurst", "bytesWriteBurstLength", "bytesWriteRate",
"id", "iopsReadBurst", "iopsReadBurstLength", "iopsReadRate", "iopsWriteBurst", "iopsWriteBurstLength", "iopsWriteRate", "name"), result);
}
@Test
@ -1113,7 +1188,7 @@ public class PresetVariableHelperTest {
Mockito.doReturn(serviceOfferingVoMock).when(serviceOfferingDaoMock).findByIdIncludingRemoved(Mockito.anyLong());
mockMethodValidateIfObjectIsNull();
Mockito.doReturn(expected.getComputeOffering()).when(presetVariableHelperSpy).getPresetVariableValueComputeOffering(Mockito.any());
Mockito.doReturn(expected.getComputeOffering()).when(presetVariableHelperSpy).getPresetVariableValueComputeOffering(Mockito.any(), Mockito.anyInt());
Mockito.doReturn(expected.getComputingResources()).when(presetVariableHelperSpy).getPresetVariableValueComputingResource(Mockito.any(), Mockito.any());
QuotaTypes.listQuotaTypes().forEach((typeInt, value) -> {

View File

@ -39,7 +39,7 @@ import com.cloud.utils.mgmt.ManagementBean;
public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
Map<Integer, Set<ComponentLifecycle>> sorted = new TreeMap<Integer, Set<ComponentLifecycle>>();
Map<Integer, Set<ComponentLifecycle>> sorted = new TreeMap<>();
public CloudStackExtendedLifeCycle() {
super();
@ -80,13 +80,8 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
ManagementBean mbean = (ManagementBean)lifecycle;
try {
JmxUtil.registerMBean(mbean);
} catch (MalformedObjectNameException e) {
logger.warn("Unable to register MBean: " + mbean.getName(), e);
} catch (InstanceAlreadyExistsException e) {
logger.warn("Unable to register MBean: " + mbean.getName(), e);
} catch (MBeanRegistrationException e) {
logger.warn("Unable to register MBean: " + mbean.getName(), e);
} catch (NotCompliantMBeanException e) {
} catch (MalformedObjectNameException | InstanceAlreadyExistsException |
MBeanRegistrationException | NotCompliantMBeanException e) {
logger.warn("Unable to register MBean: " + mbean.getName(), e);
}
logger.info("Registered MBean: " + mbean.getName());
@ -129,6 +124,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
throw new CloudRuntimeException(e);
} catch (Exception e) {
logger.error("Error on configuring bean {} - {}", lifecycle.getName(), e.getMessage(), e);
throw new CloudRuntimeException(e);
}
}
});
@ -141,7 +137,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
Set<ComponentLifecycle> set = sorted.get(lifecycle.getRunLevel());
if (set == null) {
set = new HashSet<ComponentLifecycle>();
set = new HashSet<>();
sorted.put(lifecycle.getRunLevel(), set);
}
@ -169,12 +165,7 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector {
}
}
@Override
public int getPhase() {
return 2000;
}
private static interface WithComponentLifeCycle {
private interface WithComponentLifeCycle {
public void with(ComponentLifecycle lifecycle);
}
}

View File

@ -48,7 +48,7 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App
* can use this.
*/
String registryBeanName;
Set<Object> beans = new HashSet<Object>();
Set<Object> beans = new HashSet<>();
Class<?> typeClass;
ApplicationContext applicationContext;
Set<String> excludes = null;
@ -79,7 +79,7 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App
protected synchronized void loadExcluded() {
Properties props = applicationContext.getBean("DefaultConfigProperties", Properties.class);
excludes = new HashSet<String>();
excludes = new HashSet<>();
for (String exclude : props.getProperty(EXTENSION_EXCLUDE, "").trim().split("\\s*,\\s*")) {
if (StringUtils.hasText(exclude)) {
excludes.add(exclude);
@ -109,10 +109,15 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, App
while (iter.hasNext()) {
Object next = iter.next();
if (registry.register(next)) {
logger.debug("Registered " + next);
} else {
iter.remove();
try {
if (registry.register(next)) {
logger.debug("Registered " + next);
} else {
logger.warn("Bean registration failed for " + next.toString());
iter.remove();
}
} catch (Throwable e) {
logger.warn("Bean registration attempt resulted in an exception for " + next.toString(), e);
}
}
}

View File

@ -47,7 +47,7 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API
private RoleService roleService;
private List<PluggableService> services;
private Map<RoleType, Set<String>> annotationRoleBasedApisMap = new HashMap<RoleType, Set<String>>();
private Map<RoleType, Set<String>> annotationRoleBasedApisMap = new HashMap<>();
private LazyCache<Long, Account> accountCache;
private LazyCache<Long, Pair<Role, List<RolePermission>>> rolePermissionsCache;
@ -56,7 +56,7 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API
protected DynamicRoleBasedAPIAccessChecker() {
super();
for (RoleType roleType : RoleType.values()) {
annotationRoleBasedApisMap.put(roleType, new HashSet<String>());
annotationRoleBasedApisMap.put(roleType, new HashSet<>());
}
}

View File

@ -24,6 +24,7 @@ import java.util.Map;
import javax.inject.Inject;
import com.cloud.configuration.Resource;
import com.cloud.storage.dao.VolumeDao;
import org.apache.cloudstack.backup.dao.BackupDao;
@ -99,6 +100,16 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider {
return metrics;
}
@Override
public List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm) {
return null;
}
@Override
public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) {
return null;
}
@Override
public boolean removeVMFromBackupOffering(VirtualMachine vm) {
logger.debug(String.format("Removing VM %s from backup offering by the Dummy Backup Provider", vm));
@ -111,7 +122,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider {
}
@Override
public boolean takeBackup(VirtualMachine vm) {
public Pair<Boolean, Backup> takeBackup(VirtualMachine vm) {
logger.debug(String.format("Starting backup for VM %s on Dummy provider", vm));
BackupVO backup = new BackupVO();
@ -119,23 +130,20 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider {
backup.setExternalId("dummy-external-id");
backup.setType("FULL");
backup.setDate(new Date());
backup.setSize(1024L);
backup.setProtectedSize(1024000L);
backup.setSize(1024000L);
backup.setProtectedSize(1 * Resource.ResourceType.bytesToGiB);
backup.setStatus(Backup.Status.BackedUp);
backup.setBackupOfferingId(vm.getBackupOfferingId());
backup.setAccountId(vm.getAccountId());
backup.setDomainId(vm.getDomainId());
backup.setZoneId(vm.getDataCenterId());
backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId())));
return backupDao.persist(backup) != null;
backup = backupDao.persist(backup);
return new Pair<>(true, backup);
}
@Override
public boolean deleteBackup(Backup backup, boolean forced) {
return true;
}
@Override
public void syncBackups(VirtualMachine vm, Backup.Metric metric) {
}
}

View File

@ -46,6 +46,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import javax.inject.Inject;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
@ -141,7 +142,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
}
@Override
public boolean takeBackup(final VirtualMachine vm) {
public Pair<Boolean, Backup> takeBackup(final VirtualMachine vm) {
final Host host = getVMHypervisorHost(vm);
final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId());
@ -179,12 +180,16 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
backupVO.setSize(answer.getSize());
backupVO.setStatus(Backup.Status.BackedUp);
backupVO.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId())));
return backupDao.update(backupVO.getId(), backupVO);
if (backupDao.update(backupVO.getId(), backupVO)) {
return new Pair<>(true, backupVO);
} else {
throw new CloudRuntimeException("Failed to update backup");
}
} else {
backupVO.setStatus(Backup.Status.Failed);
backupDao.remove(backupVO.getId());
return new Pair<>(false, null);
}
return Objects.nonNull(answer) && answer.getResult();
}
private BackupVO createBackupObject(VirtualMachine vm, String backupPath) {
@ -358,6 +363,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
return backupDao.remove(backup.getId());
}
logger.debug("There was an error removing the backup with id " + backup.getId());
return false;
}
@ -383,6 +389,16 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
return metrics;
}
@Override
public List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm) {
return null;
}
@Override
public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) {
return null;
}
@Override
public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) {
return Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType());
@ -398,11 +414,6 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
return false;
}
@Override
public void syncBackups(VirtualMachine vm, Backup.Metric metric) {
// TODO: check and sum/return backups metrics on per VM basis
}
@Override
public List<BackupOffering> listBackupOfferings(Long zoneId) {
final List<BackupRepository> repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName());

View File

@ -29,15 +29,11 @@ import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.AdapterBase;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.backup.dao.BackupDao;
import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl;
import org.apache.cloudstack.backup.networker.NetworkerClient;
@ -462,7 +458,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid
}
@Override
public boolean takeBackup(VirtualMachine vm) {
public Pair<Boolean, Backup> takeBackup(VirtualMachine vm) {
String networkerServer;
String clusterName;
@ -514,11 +510,11 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid
if (backup != null) {
backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId())));
backupDao.persist(backup);
return true;
return new Pair<>(true, backup);
} else {
LOG.error("Could not register backup for vm {} with saveset Time: {}", vm, saveTime);
// We need to handle this rare situation where backup is successful but can't be registered properly.
return false;
return new Pair<>(false, null);
}
}
@ -532,7 +528,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid
LOG.debug("EMC Networker successfully deleted backup with id " + externalBackupId);
return true;
} else {
LOG.debug("There was an error removing the backup with id " + externalBackupId + " from EMC NEtworker");
LOG.debug("There was an error removing the backup with id " + externalBackupId + " from EMC Networker");
}
return false;
}
@ -550,12 +546,12 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid
for (final VirtualMachine vm : vms) {
for ( Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) {
vmBackupSize += (thisVMVol.getSize() / 1024L / 1024L);
vmBackupProtectedSize += (thisVMVol.getSize() / 1024L / 1024L);
}
final ArrayList<String> vmBackups = getClient(zoneId).getBackupsForVm(vm);
for ( String vmBackup : vmBackups ) {
NetworkerBackup vmNwBackup = getClient(zoneId).getNetworkerBackupInfo(vmBackup);
vmBackupProtectedSize+= vmNwBackup.getSize().getValue() / 1024L;
vmBackupSize += vmNwBackup.getSize().getValue() / 1024L;
}
Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize);
LOG.debug(String.format("Metrics for VM [%s] is [backup size: %s, data size: %s].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize()));
@ -565,83 +561,53 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid
}
@Override
/**
 * Reconciles the backup rows in the CloudStack database with the backups that
 * actually exist on the EMC Networker server for the given VM. All work runs
 * inside a single DB transaction and does three things:
 *   1. updates size/protected-size of DB entries matching a Networker backup
 *      (only when a metric is supplied),
 *   2. registers "stray" completed backups that exist only on the Networker
 *      side (e.g. taken manually via the KVM scripts),
 *   3. removes DB entries whose backup no longer exists in Networker.
 *
 * @param vm     the VM whose backup entries are being synchronized
 * @param metric latest backup/data size metrics for the VM; may be null,
 *               in which case existing entries are left untouched
 */
public void syncBackups(VirtualMachine vm, Backup.Metric metric) {
final Long zoneId = vm.getDataCenterId();
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
final List<Backup> backupsInDb = backupDao.listByVmId(null, vm.getId());
final ArrayList<String> backupsInNetworker = getClient(zoneId).getBackupsForVm(vm);
// Seed the delete list with every DB id; ids matched against a Networker
// backup are removed below, so whatever remains afterwards is stale.
final List<Long> removeList = backupsInDb.stream().map(InternalIdentity::getId).collect(Collectors.toList());
for (final String networkerBackupId : backupsInNetworker ) {
Long vmBackupSize=0L;
boolean backupExists = false;
for (final Backup backupInDb : backupsInDb) {
// NOTE(review): the format arguments look swapped here — the whole
// backupsInDb list is printed as the backup and getName() as the
// external ID; confirm the intended argument order.
LOG.debug(String.format("Checking if Backup %s with external ID %s for VM %s is valid", backupsInDb, backupInDb.getName(), vm));
if ( networkerBackupId.equals(backupInDb.getExternalId()) ) {
LOG.debug(String.format("Found Backup %s in both Database and Networker", backupInDb));
backupExists = true;
removeList.remove(backupInDb.getId());
if (metric != null) {
// Refresh stored sizes from the latest metric for matched entries.
LOG.debug(String.format("Update backup [%s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].",
backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(),
metric.getBackupSize(), metric.getDataSize()));
((BackupVO) backupInDb).setSize(metric.getBackupSize());
((BackupVO) backupInDb).setProtectedSize(metric.getDataSize());
backupDao.update(backupInDb.getId(), ((BackupVO) backupInDb));
}
break;
}
}
if (backupExists) {
continue;
}
// An administrator can manually create a backup for a VM by invoking the KVM scripts
// with the proper parameters, so register any backups taken on the Networker side from
// outside CloudStack. Should Networker ever support KVM out of the box, this also
// ensures that SLA-style backups are found and registered.
NetworkerBackup strayNetworkerBackup = getClient(vm.getDataCenterId()).getNetworkerBackupInfo(networkerBackupId);
// Running backups already exist on the Networker server before completion,
// so skip any backup that is still in progress (no completion time yet).
if ( strayNetworkerBackup.getCompletionTime() != null) {
BackupVO strayBackup = new BackupVO();
strayBackup.setVmId(vm.getId());
strayBackup.setExternalId(strayNetworkerBackup.getId());
strayBackup.setType(strayNetworkerBackup.getType());
SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
try {
strayBackup.setDate(formatterDateTime.parse(strayNetworkerBackup.getSaveTime()));
} catch (ParseException e) {
String msg = String.format("Unable to parse date [%s].", strayNetworkerBackup.getSaveTime());
LOG.error(msg, e);
throw new CloudRuntimeException(msg, e);
}
strayBackup.setStatus(Backup.Status.BackedUp);
// Sum the VM's volume sizes, converted from bytes to MiB.
for ( Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) {
vmBackupSize += (thisVMVol.getSize() / 1024L /1024L);
}
strayBackup.setSize(vmBackupSize);
// Networker-reported size divided by 1024 — presumably KiB to MiB; TODO confirm units.
strayBackup.setProtectedSize(strayNetworkerBackup.getSize().getValue() / 1024L );
strayBackup.setBackupOfferingId(vm.getBackupOfferingId());
strayBackup.setAccountId(vm.getAccountId());
strayBackup.setDomainId(vm.getDomainId());
strayBackup.setZoneId(vm.getDataCenterId());
LOG.debug(String.format("Creating a new entry in backups: [id: %s, uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, "
+ "domain_id: %s, zone_id: %s].", strayBackup.getId(), strayBackup.getUuid(), strayBackup.getVmId(), strayBackup.getExternalId(),
strayBackup.getType(), strayBackup.getDate(), strayBackup.getBackupOfferingId(), strayBackup.getAccountId(),
strayBackup.getDomainId(), strayBackup.getZoneId()));
backupDao.persist(strayBackup);
LOG.warn("Added backup found in provider [" + strayBackup + "]");
} else {
LOG.debug ("Backup is in progress, skipping addition for this run");
}
}
// Anything left in removeList has no counterpart in Networker: purge it.
for (final Long backupIdToRemove : removeList) {
LOG.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove));
backupDao.remove(backupIdToRemove);
}
// NOTE(review): the closing braces of doInTransactionWithoutResult,
// Transaction.execute and syncBackups are not visible in this span of the
// diff view — verify brace balance against the merged file.
/**
 * Creates and persists a backup DB entry for a restore point discovered on
 * the Networker server, mirroring the stray-backup registration performed by
 * syncBackups.
 *
 * @param restorePoint the Networker restore point (its id is the Networker backup id)
 * @param vm           the VM the backup belongs to
 * @param metric       unused in this implementation
 * @return the persisted Backup, or null when the Networker backup has no
 *         completion time yet (i.e. it is still in progress)
 */
public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) {
// An administrator can manually create a backup for a VM by invoking the KVM scripts
// with the proper parameters, so register any backups taken on the Networker side from
// outside CloudStack. Should Networker ever support KVM out of the box, this also
// ensures that SLA-style backups are found and registered.
NetworkerBackup strayNetworkerBackup = getClient(vm.getDataCenterId()).getNetworkerBackupInfo(restorePoint.getId());
// Running backups already exist on the Networker server before completion,
// so only register a backup that has a completion time.
if (strayNetworkerBackup.getCompletionTime() != null) {
BackupVO backup = new BackupVO();
backup.setVmId(vm.getId());
backup.setExternalId(strayNetworkerBackup.getId());
backup.setType(strayNetworkerBackup.getType());
SimpleDateFormat formatterDateTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
try {
backup.setDate(formatterDateTime.parse(strayNetworkerBackup.getSaveTime()));
} catch (ParseException e) {
String msg = String.format("Unable to parse date [%s].", strayNetworkerBackup.getSaveTime());
LOG.error(msg, e);
throw new CloudRuntimeException(msg, e);
}
// NOTE(review): this "});" matches no open construct in this method and
// looks like a merge/diff artifact (it resembles the Transaction.execute
// closer from syncBackups above) — verify brace balance in the merged file.
});
backup.setStatus(Backup.Status.BackedUp);
// Sum the VM's volume sizes, converted from bytes to MiB.
Long vmBackupProtectedSize=0L;
for (Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) {
vmBackupProtectedSize += (thisVMVol.getSize() / 1024L / 1024L);
}
// NOTE(review): size/protectedSize are assigned the opposite way round
// compared to the stray-backup path in syncBackups (there the summed
// volume sizes become size and the Networker-reported size becomes
// protectedSize) — confirm which orientation is intended.
backup.setSize(strayNetworkerBackup.getSize().getValue() / 1024L);
backup.setProtectedSize(vmBackupProtectedSize);
backup.setBackupOfferingId(vm.getBackupOfferingId());
backup.setAccountId(vm.getAccountId());
backup.setDomainId(vm.getDomainId());
backup.setZoneId(vm.getDataCenterId());
backupDao.persist(backup);
return backup;
}
// Backup exists on the server but has not completed yet: register nothing.
LOG.debug ("Backup is in progress, skipping addition for this run");
return null;
}
@Override
public List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm) {
    // Ask the zone-scoped Networker client for every backup id recorded for
    // this VM and wrap each id in a RestorePoint. Networker exposes no type
    // or creation timestamp at this stage, hence the null placeholders.
    final List<Backup.RestorePoint> points = new ArrayList<>();
    for (final String backupId : getClient(vm.getDataCenterId()).getBackupsForVm(vm)) {
        points.add(new Backup.RestorePoint(backupId, null, null));
    }
    return points;
}
@Override

View File

@ -29,8 +29,6 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.backup.Backup.Metric;
import org.apache.cloudstack.backup.dao.BackupDao;
import org.apache.cloudstack.backup.veeam.VeeamClient;
@ -42,20 +40,13 @@ import org.apache.commons.lang3.BooleanUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.event.ActionEventUtils;
import com.cloud.event.EventTypes;
import com.cloud.event.EventVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.dc.VmwareDatacenter;
import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMap;
import com.cloud.dc.dao.VmwareDatacenterDao;
import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao;
import com.cloud.user.User;
import com.cloud.utils.Pair;
import com.cloud.utils.component.AdapterBase;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
@ -220,9 +211,10 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider,
}
@Override
public boolean takeBackup(final VirtualMachine vm) {
public Pair<Boolean, Backup> takeBackup(final VirtualMachine vm) {
final VeeamClient client = getClient(vm.getDataCenterId());
return client.startBackupJob(vm.getBackupExternalId());
Boolean result = client.startBackupJob(vm.getBackupExternalId());
return new Pair<>(result, null);
}
@Override
@ -322,78 +314,30 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider,
return metrics;
}
private List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm) {
    // The Veeam backup name is derived from the instance name and UUID; the
    // zone-scoped client filters restore points by that name plus the
    // instance name.
    final VeeamClient client = getClient(vm.getDataCenterId());
    return client.listRestorePoints(getGuestBackupName(vm.getInstanceName(), vm.getUuid()), vm.getInstanceName());
}
/**
 * Looks for an existing DB backup entry matching the restore point's external
 * id; when found (and a metric is supplied) refreshes its sizes and returns
 * the entry.
 *
 * NOTE(review): this span interleaves two methods from the merge — the loop
 * and method closers of this helper (and its terminal "return null;") are
 * missing here, while a displaced "return null;" appears mid-body of the
 * following method. Verify both against the merged file.
 */
private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List<Backup> backupsInDb, Backup.RestorePoint restorePoint, Backup.Metric metric) {
for (final Backup backup : backupsInDb) {
if (restorePoint.getId().equals(backup.getExternalId())) {
if (metric != null) {
// Refresh stored sizes from the latest Veeam metric.
logger.debug("Update backup with [id: {}, uuid: {}, name: {}, external id: {}] from [size: {}, protected size: {}] to [size: {}, protected size: {}].",
backup.getId(), backup.getUuid(), backup.getName(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize());
((BackupVO) backup).setSize(metric.getBackupSize());
((BackupVO) backup).setProtectedSize(metric.getDataSize());
backupDao.update(backup.getId(), ((BackupVO) backup));
}
return backup;
}
/**
 * Creates and persists a backup DB entry for the given Veeam restore point,
 * copying type/date from the restore point and sizes from the metric when
 * one is supplied.
 */
@Override
public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) {
BackupVO backup = new BackupVO();
backup.setVmId(vm.getId());
backup.setExternalId(restorePoint.getId());
backup.setType(restorePoint.getType());
backup.setDate(restorePoint.getCreated());
backup.setStatus(Backup.Status.BackedUp);
if (metric != null) {
backup.setSize(metric.getBackupSize());
backup.setProtectedSize(metric.getDataSize());
}
// NOTE(review): this "return null;" would make the remainder of the method
// unreachable (a compile error in Java); it appears to be the displaced
// terminal statement of the helper above — verify in the merged file.
return null;
backup.setBackupOfferingId(vm.getBackupOfferingId());
backup.setAccountId(vm.getAccountId());
backup.setDomainId(vm.getDomainId());
backup.setZoneId(vm.getDataCenterId());
backupDao.persist(backup);
return backup;
}
/**
 * Synchronizes Veeam restore points with the CloudStack backup table for the
 * given VM, inside one DB transaction: matched entries get their sizes
 * refreshed, unmatched restore points (with id, type and creation date all
 * present) become new DB entries (with a completion event emitted), and DB
 * entries with no corresponding restore point are removed.
 *
 * @param vm     the VM whose backups are being synchronized
 * @param metric latest backup/data size metrics; may be null
 */
@Override
public void syncBackups(VirtualMachine vm, Backup.Metric metric) {
List<Backup.RestorePoint> restorePoints = listRestorePoints(vm);
if (CollectionUtils.isEmpty(restorePoints)) {
logger.debug("Can't find any restore point to VM: {}", vm);
return;
}
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
final List<Backup> backupsInDb = backupDao.listByVmId(null, vm.getId());
// Seed the delete list with every DB id; ids matched to a restore point
// are removed below, so whatever remains afterwards is stale.
final List<Long> removeList = backupsInDb.stream().map(InternalIdentity::getId).collect(Collectors.toList());
for (final Backup.RestorePoint restorePoint : restorePoints) {
// Ignore malformed restore points missing id, type or creation date.
if (!(restorePoint.getId() == null || restorePoint.getType() == null || restorePoint.getCreated() == null)) {
Backup existingBackupEntry = checkAndUpdateIfBackupEntryExistsForRestorePoint(backupsInDb, restorePoint, metric);
if (existingBackupEntry != null) {
removeList.remove(existingBackupEntry.getId());
continue;
}
// No DB entry for this restore point yet: create and persist one.
BackupVO backup = new BackupVO();
backup.setVmId(vm.getId());
backup.setExternalId(restorePoint.getId());
backup.setType(restorePoint.getType());
backup.setDate(restorePoint.getCreated());
backup.setStatus(Backup.Status.BackedUp);
if (metric != null) {
backup.setSize(metric.getBackupSize());
backup.setProtectedSize(metric.getDataSize());
}
backup.setBackupOfferingId(vm.getBackupOfferingId());
backup.setAccountId(vm.getAccountId());
backup.setDomainId(vm.getDomainId());
backup.setZoneId(vm.getDataCenterId());
logger.debug("Creating a new entry in backups: [id: {}, uuid: {}, name: {}, vm_id: {}, external_id: {}, type: {}, date: {}, backup_offering_id: {}, account_id: {}, "
+ "domain_id: {}, zone_id: {}].", backup.getId(), backup.getUuid(), backup.getName(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId());
backupDao.persist(backup);
// Record a completed-action event so the new backup shows up in the event log.
ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_BACKUP_CREATE,
String.format("Created backup %s for VM ID: %s", backup.getUuid(), vm.getUuid()),
vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),0);
}
}
// Anything left in removeList has no matching restore point: purge it.
for (final Long backupIdToRemove : removeList) {
logger.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove));
backupDao.remove(backupIdToRemove);
}
}
});
// NOTE(review): the method's closing brace is not visible in this span of
// the diff view — verify brace balance against the merged file.
public List<Backup.RestorePoint> listRestorePoints(VirtualMachine vm) {
    final String instanceName = vm.getInstanceName();
    final String backupName = getGuestBackupName(instanceName, vm.getUuid());
    // Delegate to the zone-scoped Veeam client, matching on both the derived
    // backup name and the instance name.
    return getClient(vm.getDataCenterId()).listRestorePoints(backupName, instanceName);
}
@Override

View File

@ -844,11 +844,11 @@ public class VeeamClient {
"if ($restore) { $restore ^| Format-List } }"
);
Pair<Boolean, String> response = executePowerShellCommands(cmds);
final List<Backup.RestorePoint> restorePoints = new ArrayList<>();
if (response == null || !response.first()) {
return restorePoints;
return null;
}
final List<Backup.RestorePoint> restorePoints = new ArrayList<>();
for (final String block : response.second().split("\r\n\r\n")) {
if (block.isEmpty()) {
continue;

View File

@ -3155,7 +3155,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.setLogicalBlockIOSize(pool.getSupportedLogicalBlockSize());
disk.setPhysicalBlockIOSize(pool.getSupportedPhysicalBlockSize());
if (diskBusType == DiskDef.DiskBus.SCSI ) {
if (diskBusType == DiskDef.DiskBus.SCSI || diskBusType == DiskDef.DiskBus.VIRTIOBLK) {
disk.setQemuDriver(true);
disk.setDiscard(DiscardType.UNMAP);
}
@ -3226,7 +3226,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase()));
}
if (volumeObjectTO.requiresEncryption()) {
if (volumeObjectTO.requiresEncryption() &&
pool.getType().encryptionSupportMode() == Storage.EncryptionSupport.Hypervisor ) {
String secretUuid = createLibvirtVolumeSecret(conn, volumeObjectTO.getPath(), volumeObjectTO.getPassphrase());
DiskDef.LibvirtDiskEncryptDetails encryptDetails = new DiskDef.LibvirtDiskEncryptDetails(secretUuid, QemuObject.EncryptFormat.enumValue(volumeObjectTO.getEncryptFormat()));
disk.setLibvirtDiskEncryptDetails(encryptDetails);

View File

@ -248,7 +248,7 @@ public class LibvirtVMDef {
guestDef.append("<boot dev='" + bo + "'/>\n");
}
}
if (!(_arch != null && _arch.equals("s390x"))) {
if (_arch == null || ! (_arch.equals("aarch64") || _arch.equals("s390x"))) { // simplification of (as ref.) (!(_arch != null && _arch.equals("s390x")) || (_arch == null || !_arch.equals("aarch64")))
guestDef.append("<smbios mode='sysinfo'/>\n");
}
guestDef.append("</os>\n");
@ -680,7 +680,7 @@ public class LibvirtVMDef {
}
public enum DiskBus {
IDE("ide"), SCSI("scsi"), VIRTIO("virtio"), XEN("xen"), USB("usb"), UML("uml"), FDC("fdc"), SATA("sata");
IDE("ide"), SCSI("scsi"), VIRTIO("virtio"), XEN("xen"), USB("usb"), UML("uml"), FDC("fdc"), SATA("sata"), VIRTIOBLK("virtio-blk");
String _bus;
DiskBus(String bus) {

Some files were not shown because too many files have changed in this diff Show More