diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index e70acee229d..cd31b0db56d 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -447,3 +447,6 @@ iscsi.session.cleanup.enabled=false # Timeout (in seconds) to wait for the incremental snapshot to complete. # incremental.snapshot.timeout=10800 + +# If set to true, creates VMs as full clones of their templates on KVM hypervisor. Creates as linked clones otherwise. +# create.full.clone=false diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index b7c24e5126c..e86557e9681 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -97,7 +97,6 @@ import com.cloud.utils.nio.Link; import com.cloud.utils.nio.NioClient; import com.cloud.utils.nio.NioConnection; import com.cloud.utils.nio.Task; -import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; /** @@ -476,7 +475,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater return; } - logger.info("Scheduling a recurring preferred host checker task with lb algorithm '{}' and host.lb.interval={} ms", lbAlgorithm, checkInterval); + logger.info("Scheduling a recurring preferred host checker task with host.lb.interval={} ms", checkInterval); hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, TimeUnit.MILLISECONDS); @@ -614,9 +613,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater } protected String getAgentArch() { - final Script command = new Script("/usr/bin/arch", 500, logger); - final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - return command.execute(parser); + String arch = 
Script.runSimpleBashScript(Script.getExecutableAbsolutePath("arch"), 1000); + logger.debug("Arch for agent: {} found: {}", _name, arch); + return arch; } @Override @@ -968,9 +967,11 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater if (CollectionUtils.isNotEmpty(cmd.getMsList())) { processManagementServerList(cmd.getMsList(), cmd.getAvoidMsList(), cmd.getLbAlgorithm(), cmd.getLbCheckInterval(), false); } - Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("MigrateAgentConnection-Job")).schedule(() -> { + ScheduledExecutorService migrateAgentConnectionService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("MigrateAgentConnection-Job")); + migrateAgentConnectionService.schedule(() -> { migrateAgentConnection(cmd.getAvoidMsList()); }, 3, TimeUnit.SECONDS); + migrateAgentConnectionService.shutdown(); } catch (Exception e) { String errMsg = "Migrate agent connection failed, due to " + e.getMessage(); logger.debug(errMsg, e); diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 47255762a05..847d1bb2396 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -863,6 +863,14 @@ public class AgentProperties{ * */ public static final Property REVERT_SNAPSHOT_TIMEOUT = new Property<>("revert.snapshot.timeout", 10800); + /** + * If set to true, creates VMs as full clones of their templates on KVM hypervisor. Creates as linked clones otherwise.
+ * Data type: Boolean.
+ * Default value: false + */ + public static final Property CREATE_FULL_CLONE = new Property<>("create.full.clone", false); + + public static class Property { private String name; private T defaultValue; diff --git a/api/src/main/java/com/cloud/capacity/Capacity.java b/api/src/main/java/com/cloud/capacity/Capacity.java index a4e2c2a7f05..4e584b18fee 100644 --- a/api/src/main/java/com/cloud/capacity/Capacity.java +++ b/api/src/main/java/com/cloud/capacity/Capacity.java @@ -34,13 +34,17 @@ public interface Capacity extends InternalIdentity, Identity { public static final short CAPACITY_TYPE_LOCAL_STORAGE = 9; public static final short CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET = 10; public static final short CAPACITY_TYPE_GPU = 19; + public static final short CAPACITY_TYPE_OBJECT_STORAGE = 20; + public static final short CAPACITY_TYPE_BACKUP_STORAGE = 21; public static final short CAPACITY_TYPE_CPU_CORE = 90; public static final List STORAGE_CAPACITY_TYPES = List.of(CAPACITY_TYPE_STORAGE, CAPACITY_TYPE_STORAGE_ALLOCATED, CAPACITY_TYPE_SECONDARY_STORAGE, - CAPACITY_TYPE_LOCAL_STORAGE); + CAPACITY_TYPE_LOCAL_STORAGE, + CAPACITY_TYPE_BACKUP_STORAGE, + CAPACITY_TYPE_OBJECT_STORAGE); public Long getHostOrPoolId(); diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index a8777d5c75b..be21f13267b 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -632,11 +632,13 @@ public class EventTypes { public static final String EVENT_VM_BACKUP_CREATE = "BACKUP.CREATE"; public static final String EVENT_VM_BACKUP_RESTORE = "BACKUP.RESTORE"; public static final String EVENT_VM_BACKUP_DELETE = "BACKUP.DELETE"; + public static final String EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED = "BACKUP.OFFERING.BACKUPS.DEL"; public static final String EVENT_VM_BACKUP_RESTORE_VOLUME_TO_VM = "BACKUP.RESTORE.VOLUME.TO.VM"; public static final String 
EVENT_VM_BACKUP_SCHEDULE_CONFIGURE = "BACKUP.SCHEDULE.CONFIGURE"; public static final String EVENT_VM_BACKUP_SCHEDULE_DELETE = "BACKUP.SCHEDULE.DELETE"; public static final String EVENT_VM_BACKUP_USAGE_METRIC = "BACKUP.USAGE.METRIC"; public static final String EVENT_VM_BACKUP_EDIT = "BACKUP.OFFERING.EDIT"; + public static final String EVENT_VM_CREATE_FROM_BACKUP = "VM.CREATE.FROM.BACKUP"; // external network device events public static final String EVENT_EXTERNAL_NVP_CONTROLLER_ADD = "PHYSICAL.NVPCONTROLLER.ADD"; diff --git a/api/src/main/java/com/cloud/network/NetworkService.java b/api/src/main/java/com/cloud/network/NetworkService.java index fd51cbfa774..196e1f9aab8 100644 --- a/api/src/main/java/com/cloud/network/NetworkService.java +++ b/api/src/main/java/com/cloud/network/NetworkService.java @@ -272,5 +272,7 @@ public interface NetworkService { boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) throws ResourceUnavailableException; + IpAddresses getIpAddressesFromIps(String ipAddress, String ip6Address, String macAddress); + String getNicVlanValueForExternalVm(NicTO nic); } diff --git a/api/src/main/java/com/cloud/offering/DiskOfferingInfo.java b/api/src/main/java/com/cloud/offering/DiskOfferingInfo.java index d83039e15c2..12dcf423e34 100644 --- a/api/src/main/java/com/cloud/offering/DiskOfferingInfo.java +++ b/api/src/main/java/com/cloud/offering/DiskOfferingInfo.java @@ -31,6 +31,13 @@ public class DiskOfferingInfo { _diskOffering = diskOffering; } + public DiskOfferingInfo(DiskOffering diskOffering, Long size, Long minIops, Long maxIops) { + _diskOffering = diskOffering; + _size = size; + _minIops = minIops; + _maxIops = maxIops; + } + public void setDiskOffering(DiskOffering diskOffering) { _diskOffering = diskOffering; } diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index 6f7b62911b6..a29c8f6aece 100644 --- 
a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -134,7 +134,7 @@ public interface StorageService { void removeSecondaryStorageHeuristic(RemoveSecondaryStorageSelectorCmd cmd); - ObjectStore discoverObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; + ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd); diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index dd7341da1b5..4140d51a800 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -113,10 +113,10 @@ public interface VolumeApiService { Volume detachVolumeFromVM(DetachVolumeCmd cmd); - Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, Map tags, List zoneIds) + Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, Map tags, List zoneIds, List poolIds, Boolean useStorageReplication) throws ResourceAllocationException; - Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType, List zoneIds) throws ResourceAllocationException; + Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType, List zoneIds, List storagePoolIds, Boolean useStorageReplication) throws ResourceAllocationException; Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean 
displayVolume, Boolean deleteProtection, diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index a8ed62fb6b9..6f1aba4613d 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; +import org.apache.cloudstack.api.command.user.vm.CreateVMFromBackupCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; import org.apache.cloudstack.api.command.user.vm.RebootVMCmd; @@ -220,7 +221,7 @@ public interface UserVmService { * available. */ UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List securityGroupIdList, - Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, + Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIp, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameter, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, @@ -297,7 +298,7 @@ public interface UserVmService { * available. 
*/ UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, - List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, + List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Map userVmOVFProperties, boolean dynamicScalingEnabled, Long overrideDiskOfferingId, String vmType, Volume volume, Snapshot snapshot) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; @@ -369,7 +370,7 @@ public interface UserVmService { * available. 
*/ UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, Account owner, - String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, + String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Map templateOvfPropertiesMap, boolean dynamicScalingEnabled, String vmType, Long overrideDiskOfferingId, Volume volume, Snapshot snapshot) @@ -516,4 +517,8 @@ public interface UserVmService { * @return true if the VM is successfully unmanaged, false if not. 
*/ boolean unmanageUserVM(Long vmId); + + UserVm allocateVMFromBackup(CreateVMFromBackupCmd cmd) throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException; + + UserVm restoreVMFromBackup(CreateVMFromBackupCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; } diff --git a/api/src/main/java/com/cloud/vm/VirtualMachine.java b/api/src/main/java/com/cloud/vm/VirtualMachine.java index e2ea408e7b8..d244de7115e 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachine.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachine.java @@ -128,7 +128,6 @@ public interface VirtualMachine extends RunningOn, ControlledEntity, Partition, s_fsm.addTransition(new Transition(State.Error, VirtualMachine.Event.DestroyRequested, State.Expunging, null)); s_fsm.addTransition(new Transition(State.Error, VirtualMachine.Event.ExpungeOperation, State.Expunging, null)); s_fsm.addTransition(new Transition(State.Stopped, Event.RestoringRequested, State.Restoring, null)); - s_fsm.addTransition(new Transition(State.Expunging, Event.RestoringRequested, State.Restoring, null)); s_fsm.addTransition(new Transition(State.Destroyed, Event.RestoringRequested, State.Restoring, null)); s_fsm.addTransition(new Transition(State.Restoring, Event.RestoringSuccess, State.Stopped, null)); s_fsm.addTransition(new Transition(State.Restoring, Event.RestoringFailed, State.Stopped, null)); diff --git a/api/src/main/java/com/cloud/vm/VmDiskInfo.java b/api/src/main/java/com/cloud/vm/VmDiskInfo.java new file mode 100644 index 00000000000..b8779a8d77c --- /dev/null +++ b/api/src/main/java/com/cloud/vm/VmDiskInfo.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.vm; + +import com.cloud.offering.DiskOffering; +import com.cloud.offering.DiskOfferingInfo; + +public class VmDiskInfo extends DiskOfferingInfo { + private Long _deviceId; + + public VmDiskInfo(DiskOffering diskOffering, Long size, Long minIops, Long maxIops) { + super(diskOffering, size, minIops, maxIops); + } + + public VmDiskInfo(DiskOffering diskOffering, Long size, Long minIops, Long maxIops, Long deviceId) { + super(diskOffering, size, minIops, maxIops); + _deviceId = deviceId; + } + + public Long getDeviceId() { + return _deviceId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index 5146e5c38e8..d8e471756a0 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ -74,6 +74,8 @@ public interface AlertService { public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PUBLIC.IFACE.MTU", true); public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PRIVATE.IFACE.MTU", true); public static final AlertType ALERT_TYPE_EXTENSION_PATH_NOT_READY = new AlertType((short)33, "ALERT.TYPE.EXTENSION.PATH.NOT.READY", true); + public static final AlertType ALERT_TYPE_BACKUP_STORAGE = new 
AlertType(Capacity.CAPACITY_TYPE_BACKUP_STORAGE, "ALERT.STORAGE.BACKUP", true); + public static final AlertType ALERT_TYPE_OBJECT_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, "ALERT.STORAGE.OBJECT", true); public short getType() { return type; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 4fef598d311..489d737b5bb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -23,12 +23,14 @@ public class ApiConstants { public static final String ACCOUNT_ID = "accountid"; public static final String ACCOUNT_IDS = "accountids"; public static final String ACCUMULATE = "accumulate"; + public static final String ACQUIRED = "acquired"; public static final String ACTIVATION_RULE = "activationrule"; public static final String ACTIVITY = "activity"; public static final String ADAPTER_TYPE = "adaptertype"; public static final String ADDRESS = "address"; public static final String ALGORITHM = "algorithm"; public static final String ALIAS = "alias"; + public static final String ALLOCATED = "allocated"; public static final String ALLOCATED_DATE = "allocateddate"; public static final String ALLOCATED_ONLY = "allocatedonly"; public static final String ALLOCATED_TIME = "allocated"; @@ -60,6 +62,7 @@ public class ApiConstants { public static final String BACKUP_STORAGE_AVAILABLE = "backupstorageavailable"; public static final String BACKUP_STORAGE_LIMIT = "backupstoragelimit"; public static final String BACKUP_STORAGE_TOTAL = "backupstoragetotal"; + public static final String BACKUP_VM_OFFERING_REMOVED = "vmbackupofferingremoved"; public static final String BACKUP_TOTAL = "backuptotal"; public static final String BASE64_IMAGE = "base64image"; public static final String BGP_PEERS = "bgppeers"; @@ -92,9 +95,11 @@ public class ApiConstants { public static final String 
CONVERT_INSTANCE_HOST_ID = "convertinstancehostid"; public static final String CONVERT_INSTANCE_STORAGE_POOL_ID = "convertinstancepoolid"; public static final String ENABLED_REVOCATION_CHECK = "enabledrevocationcheck"; + public static final String CLIENT_ADDRESS = "clientaddress"; public static final String COMBINED_CAPACITY_ORDERING = "COMBINED"; public static final String CONTROLLER = "controller"; public static final String CONTROLLER_UNIT = "controllerunit"; + public static final String CONSOLE_ENDPOINT_CREATOR_ADDRESS = "consoleendpointcreatoraddress"; public static final String COPY_IMAGE_TAGS = "copyimagetags"; public static final String CPU_OVERCOMMIT_RATIO = "cpuOvercommitRatio"; public static final String CSR = "csr"; @@ -155,6 +160,7 @@ public class ApiConstants { public static final String MAX_IOPS = "maxiops"; public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve"; public static final String DATACENTER_NAME = "datacentername"; + public static final String DATADISKS_DETAILS = "datadisksdetails"; public static final String DATADISK_OFFERING_LIST = "datadiskofferinglist"; public static final String DEFAULT_VALUE = "defaultvalue"; public static final String DELETE_PROTECTION = "deleteprotection"; @@ -163,6 +169,7 @@ public class ApiConstants { public static final String DESTINATION_ZONE_ID = "destzoneid"; public static final String DETAILS = "details"; public static final String DEVICE_ID = "deviceid"; + public static final String DEVICE_IDS = "deviceids"; public static final String DEVICE_NAME = "devicename"; public static final String DIRECT_DOWNLOAD = "directdownload"; public static final String DISK = "disk"; @@ -305,6 +312,7 @@ public class ApiConstants { public static final String IP_ADDRESS = "ipaddress"; public static final String IP_ADDRESSES = "ipaddresses"; public static final String IP6_ADDRESS = "ip6address"; + public static final String IP6_ADDRESSES = "ip6addresses"; public static final String IP_ADDRESS_ID = 
"ipaddressid"; public static final String IS_2FA_ENABLED = "is2faenabled"; public static final String IS_2FA_VERIFIED = "is2faverified"; @@ -356,6 +364,7 @@ public class ApiConstants { public static final String LBID = "lbruleid"; public static final String LB_PROVIDER = "lbprovider"; public static final String MAC_ADDRESS = "macaddress"; + public static final String MAC_ADDRESSES = "macaddresses"; public static final String MANUAL_UPGRADE = "manualupgrade"; public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; @@ -383,6 +392,7 @@ public class ApiConstants { public static final String NETMASK = "netmask"; public static final String NEW_NAME = "newname"; public static final String NIC = "nic"; + public static final String NICS = "nics"; public static final String NIC_NETWORK_LIST = "nicnetworklist"; public static final String NIC_IP_ADDRESS_LIST = "nicipaddresslist"; public static final String NIC_MULTIQUEUE_NUMBER = "nicmultiqueuenumber"; @@ -459,6 +469,7 @@ public class ApiConstants { public static final String POWER_STATE = "powerstate"; public static final String PRECEDENCE = "precedence"; public static final String PREPARE_VM = "preparevm"; + public static final String PRESERVE_IP = "preserveip"; public static final String PRIVATE_INTERFACE = "privateinterface"; public static final String PRIVATE_IP = "privateip"; public static final String PRIVATE_PORT = "privateport"; @@ -494,6 +505,7 @@ public class ApiConstants { public static final String REGISTERED = "registered"; public static final String QUALIFIERS = "qualifiers"; public static final String QUERY_FILTER = "queryfilter"; + public static final String QUIESCE_VM = "quiescevm"; public static final String SCHEDULE = "schedule"; public static final String SCHEDULE_ID = "scheduleid"; public static final String SCOPE = "scope"; @@ -509,6 +521,7 @@ public class ApiConstants { public static final String SERIAL = "serial"; public static final String SERVICE_IP = "serviceip"; 
public static final String SERVICE_OFFERING_ID = "serviceofferingid"; + public static final String SERVICE_OFFERING_NAME = "serviceofferingname"; public static final String SESSIONKEY = "sessionkey"; public static final String SHOW_CAPACITIES = "showcapacities"; public static final String SHOW_REMOVED = "showremoved"; @@ -526,6 +539,9 @@ public class ApiConstants { public static final String SNAPSHOT_POLICY_ID = "snapshotpolicyid"; public static final String SNAPSHOT_TYPE = "snapshottype"; public static final String SNAPSHOT_QUIESCEVM = "quiescevm"; + + public static final String USE_STORAGE_REPLICATION = "usestoragereplication"; + public static final String SOURCE_CIDR_LIST = "sourcecidrlist"; public static final String SOURCE_ZONE_ID = "sourcezoneid"; public static final String SSL_VERIFICATION = "sslverification"; @@ -576,6 +592,7 @@ public class ApiConstants { public static final String TRUST_STORE_PASSWORD = "truststorepass"; public static final String URL = "url"; public static final String USAGE_INTERFACE = "usageinterface"; + public static final String USED = "used"; public static final String USED_SUBNETS = "usedsubnets"; public static final String USED_IOPS = "usediops"; public static final String USER_DATA = "userdata"; @@ -634,8 +651,10 @@ public class ApiConstants { public static final String IS_DEDICATED = "isdedicated"; public static final String TAKEN = "taken"; public static final String VM_AVAILABLE = "vmavailable"; + public static final String VM_DETAILS = "vmdetails"; public static final String VM_LIMIT = "vmlimit"; public static final String VM_TOTAL = "vmtotal"; + public static final String VM_SETTINGS = "vmsettings"; public static final String VM_TYPE = "vmtype"; public static final String VNET = "vnet"; public static final String IS_VOLATILE = "isvolatile"; @@ -817,6 +836,7 @@ public class ApiConstants { public static final String LIST_ALL = "listall"; public static final String LIST_ONLY_REMOVED = "listonlyremoved"; public static final 
String LIST_SYSTEM_VMS = "listsystemvms"; + public static final String LIST_VM_DETAILS = "listvmdetails"; public static final String IP_RANGES = "ipranges"; public static final String IPV4_ROUTING = "ip4routing"; public static final String IPV4_ROUTES = "ip4routes"; @@ -1129,6 +1149,7 @@ public class ApiConstants { public static final String NETWORK_SPANNED_ZONES = "zonesnetworkspans"; public static final String METADATA = "metadata"; public static final String PHYSICAL_SIZE = "physicalsize"; + public static final String CHAIN_SIZE = "chainsize"; public static final String OVM3_POOL = "ovm3pool"; public static final String OVM3_CLUSTER = "ovm3cluster"; public static final String OVM3_VIP = "ovm3vip"; @@ -1141,6 +1162,7 @@ public class ApiConstants { public static final String ZONE_ID_LIST = "zoneids"; public static final String DESTINATION_ZONE_ID_LIST = "destzoneids"; + public static final String STORAGE_ID_LIST = "storageids"; public static final String ADMIN = "admin"; public static final String CHECKSUM_PARAMETER_PREFIX_DESCRIPTION = "The parameter containing the checksum will be considered a MD5sum if it is not prefixed\n" + " and just a plain ascii/utf8 representation of a hexadecimal string. If it is required to\n" @@ -1254,6 +1276,7 @@ public class ApiConstants { public static final String NETRIS_DETAIL_KEY = "forNetris"; public static final String NETRIS_TAG = "netristag"; public static final String NETRIS_VXLAN_ID = "netrisvxlanid"; + public static final String NETRIS_URL = "netrisurl"; public static final String DISK_PATH = "diskpath"; public static final String IMPORT_SOURCE = "importsource"; public static final String TEMP_PATH = "temppath"; @@ -1307,6 +1330,10 @@ public class ApiConstants { "however, the following formats are also accepted: \"yyyy-MM-dd HH:mm:ss\" (e.g.: \"2023-01-01 12:00:00\") and \"yyyy-MM-dd\" (e.g.: \"2023-01-01\" - if the time is not " + "added, it will be interpreted as \"23:59:59\"). 
If the recommended format is not used, the date will be considered in the server timezone."; + public static final String PARAMETER_DESCRIPTION_MAX_BACKUPS = "The maximum number of backups to keep for a VM. " + + "If \"0\", no retention policy will be applied and, thus, no backups from the schedule will be automatically deleted. " + + "This parameter is only supported for the Dummy, NAS and EMC Networker backup provider."; + public static final String VMWARE_DC = "vmwaredc"; public static final String CSS = "css"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java index d0683299e73..8e92e877f5c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; +import org.apache.cloudstack.consoleproxy.ConsoleSession; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ApiConstants.HostDetails; @@ -38,7 +40,6 @@ import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.api.response.BackupRepositoryResponse; -import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.CapacityResponse; @@ -142,7 +143,6 @@ import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.VpnUsersResponse; import 
org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupRepository; import org.apache.cloudstack.backup.BackupSchedule; @@ -534,8 +534,6 @@ public interface ResponseGenerator { UserDataResponse createUserDataResponse(UserData userData); - BackupResponse createBackupResponse(Backup backup); - BackupScheduleResponse createBackupScheduleResponse(BackupSchedule backup); BackupOfferingResponse createBackupOfferingResponse(BackupOffering policy); @@ -583,4 +581,6 @@ public interface ResponseGenerator { void updateTemplateIsoResponsesForIcons(List responses, ResourceTag.ResourceObjectType type); GuiThemeResponse createGuiThemeResponse(GuiThemeJoin guiThemeJoin); + + ConsoleSessionResponse createConsoleSessionResponse(ConsoleSession consoleSession, ResponseView responseView); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/project/ListProjectRolesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/project/ListProjectRolesCmd.java index e876dbc9b58..dedbb410ea5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/project/ListProjectRolesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/project/ListProjectRolesCmd.java @@ -72,7 +72,7 @@ public class ListProjectRolesCmd extends BaseListCmd { @Override public void execute() { - List projectRoles; + List projectRoles = new ArrayList<>(); if (getProjectId() != null && getProjectRoleId() != null) { projectRoles = Collections.singletonList(projRoleService.findProjectRole(getProjectRoleId(), getProjectId())); } else if (StringUtils.isNotBlank(getRoleName())) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java index 6b31c4cc43c..f46cecdef50 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java @@ -132,11 +132,12 @@ public class ListCapacityCmd extends BaseListCmd { Collections.sort(capacityResponses, new Comparator() { public int compare(CapacityResponse resp1, CapacityResponse resp2) { int res = resp1.getZoneName().compareTo(resp2.getZoneName()); + // Group by zone if (res != 0) { return res; - } else { - return resp1.getCapacityType().compareTo(resp2.getCapacityType()); } + // Sort by capacity type only if not already sorted by usage + return (getSortBy() != null) ? 0 : resp1.getCapacityType().compareTo(resp2.getCapacityType()); } }); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java index b779ba2a2b4..460b8d642e9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java @@ -56,6 +56,9 @@ public class AddObjectStoragePoolCmd extends BaseCmd { @Parameter(name = ApiConstants.TAGS, type = CommandType.STRING, description = "the tags for the storage pool") private String tags; + @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, description = "the total size of the object store in GiB. 
Used for tracking capacity and sending alerts", since = "4.21") + private Long size; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -68,6 +71,10 @@ public class AddObjectStoragePoolCmd extends BaseCmd { return name; } + public Long getTotalSize() { + return size; + } + public Map getDetails() { Map detailsMap = null; if (details != null && !details.isEmpty()) { @@ -112,7 +119,7 @@ public class AddObjectStoragePoolCmd extends BaseCmd { @Override public void execute(){ try{ - ObjectStore result = _storageService.discoverObjectStore(getName(), getUrl(), getProviderName(), getDetails()); + ObjectStore result = _storageService.discoverObjectStore(getName(), getUrl(), getTotalSize(), getProviderName(), getDetails()); ObjectStoreResponse storeResponse = null; if (result != null) { storeResponse = _responseGenerator.createObjectStoreResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmd.java index 497179d25ef..ac007137ef1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmd.java @@ -44,6 +44,8 @@ public class UpdateObjectStoragePoolCmd extends BaseCmd { @Parameter(name = ApiConstants.URL, type = CommandType.STRING, description = "the url for the object store") private String url; + @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, description = "the total size of the object store in GiB. Used for tracking capacity and sending alerts. 
Set to 0 to stop tracking.", since = "4.21") + private Long size; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -61,6 +63,10 @@ public class UpdateObjectStoragePoolCmd extends BaseCmd { return url; } + public Long getSize() { + return size; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/CreateVMFromBackupCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/CreateVMFromBackupCmdByAdmin.java new file mode 100644 index 00000000000..d95f17ef304 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/CreateVMFromBackupCmdByAdmin.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.vm; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.command.user.vm.CreateVMFromBackupCmd; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.UserVmResponse; + +import com.cloud.vm.VirtualMachine; + +@APICommand(name = "createVMFromBackup", + description = "Creates and automatically starts a VM from a backup.", + responseObject = UserVmResponse.class, + responseView = ResponseObject.ResponseView.Full, + entityType = {VirtualMachine.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = true, + since = "4.21.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class CreateVMFromBackupCmdByAdmin extends CreateVMFromBackupCmd implements AdminCmd { + + @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "destination Pod ID to deploy the VM to - parameter available for root admin only", since = "4.21") + private Long podId; + + @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "destination Cluster ID to deploy the VM to - parameter available for root admin only", since = "4.21") + private Long clusterId; + + public Long getPodId() { + return podId; + } + + public Long getClusterId() { + return clusterId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java index 2d387788243..4e9028f5217 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.api.command.user.backup; import javax.inject.Inject; -import com.cloud.storage.Snapshot; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -28,7 +27,6 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.backup.BackupManager; @@ -62,12 +60,26 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { description = "ID of the VM") private Long vmId; - @Parameter(name = ApiConstants.SCHEDULE_ID, - type = CommandType.LONG, - entityType = BackupScheduleResponse.class, - description = "backup schedule ID of the VM, if this is null, it indicates that it is a manual backup.", + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + description = "the name of the backup", since = "4.21.0") - private Long scheduleId; + private String name; + + @Parameter(name = ApiConstants.DESCRIPTION, + type = CommandType.STRING, + description = "the description for the backup", + since = "4.21.0") + private String description; + + @Parameter(name = ApiConstants.QUIESCE_VM, + type = CommandType.BOOLEAN, + required = false, + description = "Quiesce the instance before checkpointing the disks for backup. Applicable only to NAS backup provider. " + + "The filesystem is frozen before the backup starts and thawed immediately after. 
" + + "Requires the instance to have the QEMU Guest Agent installed and running.", + since = "4.21.0") + private Boolean quiesceVM; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -77,12 +89,16 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { return vmId; } - public Long getScheduleId() { - if (scheduleId != null) { - return scheduleId; - } else { - return Snapshot.MANUAL_POLICY_ID; - } + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public Boolean getQuiesceVM() { + return quiesceVM; } ///////////////////////////////////////////////////// @@ -92,7 +108,7 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - boolean result = backupManager.createBackup(getVmId(), getScheduleId()); + boolean result = backupManager.createBackup(this, getJob()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java index 1d0741e6217..a352c688f12 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java @@ -75,12 +75,19 @@ public class CreateBackupScheduleCmd extends BaseCmd { description = "Specifies a timezone for this command. 
For more information on the timezone parameter, see TimeZone Format.") private String timezone; - @Parameter(name = ApiConstants.MAX_BACKUPS, - type = CommandType.INTEGER, - description = "maximum number of backups to retain", - since = "4.21.0") + @Parameter(name = ApiConstants.MAX_BACKUPS, type = CommandType.INTEGER, + since = "4.21.0", description = ApiConstants.PARAMETER_DESCRIPTION_MAX_BACKUPS) private Integer maxBackups; + @Parameter(name = ApiConstants.QUIESCE_VM, + type = CommandType.BOOLEAN, + required = false, + description = "Quiesce the instance before checkpointing the disks for backup. Applicable only to NAS backup provider. " + + "The filesystem is frozen before the backup starts and thawed immediately after. " + + "Requires the instance to have the QEMU Guest Agent installed and running.", + since = "4.21.0") + private Boolean quiesceVM; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -105,6 +112,10 @@ public class CreateBackupScheduleCmd extends BaseCmd { return maxBackups; } + public Boolean getQuiesceVM() { + return quiesceVM; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java index 7d87cc37e6c..0c2b4f9b91e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import 
org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -75,6 +76,25 @@ public class ListBackupsCmd extends BaseListProjectAndAccountResourcesCmd { description = "list backups by zone id") private Long zoneId; + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + since = "4.21.0", + description = "list backups by name") + private String name; + + @Parameter(name = ApiConstants.BACKUP_OFFERING_ID, + type = CommandType.UUID, + entityType = BackupOfferingResponse.class, + since = "4.21.0", + description = "list backups by backup offering") + private Long backupOfferingId; + + @Parameter(name = ApiConstants.LIST_VM_DETAILS, + type = CommandType.BOOLEAN, + since = "4.21.0", + description = "list backups with VM details") + private Boolean listVmDetails; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -87,10 +107,22 @@ public class ListBackupsCmd extends BaseListProjectAndAccountResourcesCmd { return vmId; } + public String getName() { + return name; + } + + public Long getBackupOfferingId() { + return backupOfferingId; + } + public Long getZoneId() { return zoneId; } + public Boolean getListVmDetails() { + return listVmDetails; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -101,7 +133,7 @@ public class ListBackupsCmd extends BaseListProjectAndAccountResourcesCmd { if (backup == null) { continue; } - BackupResponse backupResponse = _responseGenerator.createBackupResponse(backup); + BackupResponse backupResponse = backupManager.createBackupResponse(backup, this.getListVmDetails()); responses.add(backupResponse); } final ListResponse response 
= new ListResponse<>(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmd.java new file mode 100644 index 00000000000..774cd9d59fe --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmd.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.consoleproxy; + +import org.apache.cloudstack.consoleproxy.ConsoleSession; + +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.user.UserAccount; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.AccountResponse; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.UserResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; + +import javax.inject.Inject; +import java.util.Date; + +@APICommand(name = "listConsoleSessions", description = "Lists console sessions.", responseObject = ConsoleSessionResponse.class, + entityType = {ConsoleSession.class}, since = "4.21.0", + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}) +public class ListConsoleSessionsCmd extends BaseListCmd { + @Inject + private AccountService accountService; + + @Inject + private ConsoleAccessManager consoleAccessManager; + + @ACL + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ConsoleSessionResponse.class, description = "The ID of the console session.") + private Long id; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = 
DomainResponse.class, description = "The domain ID of the account that created the console endpoint.") + private Long domainId; + + @ACL + @Parameter(name = ApiConstants.ACCOUNT_ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "The ID of the account that created the console endpoint.") + private Long accountId; + + @ACL + @Parameter(name = ApiConstants.USER_ID, type = CommandType.UUID, entityType = UserResponse.class, description = "The ID of the user that created the console endpoint.") + private Long userId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, authorized = {RoleType.Admin}, description = "Lists console sessions from the specified host.") + private Long hostId; + + @Parameter(name = ApiConstants.START_DATE, type = CommandType.DATE, description = "Lists console sessions generated from this date onwards. " + + ApiConstants.PARAMETER_DESCRIPTION_START_DATE_POSSIBLE_FORMATS) + private Date startDate; + + @Parameter(name = ApiConstants.END_DATE, type = CommandType.DATE, description = "Lists console sessions generated up until this date. " + + ApiConstants.PARAMETER_DESCRIPTION_END_DATE_POSSIBLE_FORMATS) + private Date endDate; + + @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, type = CommandType.UUID, entityType = UserVmResponse.class, description = "The ID of the virtual machine.") + private Long vmId; + + @Parameter(name = ApiConstants.CONSOLE_ENDPOINT_CREATOR_ADDRESS, type = CommandType.STRING, description = "IP address of the creator of the console endpoint.") + private String consoleEndpointCreatorAddress; + + @Parameter(name = ApiConstants.CLIENT_ADDRESS, type = CommandType.STRING, description = "IP address of the client that accessed the console session.") + private String clientAddress; + + @Parameter(name = ApiConstants.ACTIVE_ONLY, type = CommandType.BOOLEAN, + description = "Lists only active console sessions, defaults to true. 
Active sessions are the ones that have been acquired and have not been removed.") + private boolean activeOnly = true; + + @Parameter(name = ApiConstants.ACQUIRED, type = CommandType.BOOLEAN, + description = "Lists acquired console sessions, defaults to false. Acquired console sessions are the ones that have been accessed. " + + "The 'activeonly' parameter has precedence over the 'acquired' parameter, i.e., when the 'activeonly' parameter is 'true', the 'acquired' parameter value will be ignored.") + private boolean acquired = false; + + @Parameter(name = ApiConstants.IS_RECURSIVE, type = CommandType.BOOLEAN, + description = "Lists console sessions recursively per domain. If an account ID is informed, only the account's console sessions will be listed. Defaults to false.") + private boolean recursive = false; + + public Long getId() { + return id; + } + + public Long getDomainId() { + return domainId; + } + + public Long getAccountId() { + return accountId; + } + + public Long getUserId() { + return userId; + } + + public Long getHostId() { + return hostId; + } + + public Date getStartDate() { + return startDate; + } + + public Date getEndDate() { + return endDate; + } + + public Long getVmId() { + return vmId; + } + + public String getConsoleEndpointCreatorAddress() { + return consoleEndpointCreatorAddress; + } + + public String getClientAddress() { + return clientAddress; + } + + public boolean isActiveOnly() { + return activeOnly; + } + + public boolean getAcquired() { + return acquired; + } + + public boolean isRecursive() { + return recursive; + } + + @Override + public void execute() { + ListResponse response = consoleAccessManager.listConsoleSessions(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + if (getId() != null) { + ConsoleSession consoleSession = consoleAccessManager.listConsoleSessionById(getId()); + if (consoleSession != null) { + return 
consoleSession.getAccountId(); + } + } + + if (getAccountId() != null) { + return getAccountId(); + } + + if (getUserId() != null) { + UserAccount userAccount = accountService.getUserAccountById(getUserId()); + if (userAccount != null) { + return userAccount.getAccountId(); + } + } + + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java index b5273c64922..cb935c13e97 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java @@ -73,6 +73,9 @@ public class ListEventsCmd extends BaseListProjectAndAccountResourcesCmd { @Parameter(name = ApiConstants.ARCHIVED, type = CommandType.BOOLEAN, description = "true to list archived events otherwise false", since="4.19.0") private Boolean archived; + @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "The state of the events", since="4.21.0") + private String state; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -121,6 +124,10 @@ public class ListEventsCmd extends BaseListProjectAndAccountResourcesCmd { return archived != null && archived; } + public String getState() { + return state; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java index 760a531e899..346eca8cff0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java @@ -57,7 +57,7 @@ public class ListIsosCmd extends BaseListTaggedResourcesCmd implements UserCmd { @Parameter(name = ApiConstants.IS_PUBLIC, type = CommandType.BOOLEAN, description = "true if the ISO is publicly available to all users, false otherwise.") private Boolean publicIso; - @Parameter(name = ApiConstants.IS_READY, type = CommandType.BOOLEAN, description = "true if this ISO is ready to be deployed") + @Parameter(name = ApiConstants.IS_READY, type = CommandType.BOOLEAN, description = "list ISOs that are ready to be deployed") private Boolean ready; @Parameter(name = ApiConstants.ISO_FILTER, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java index 07973fcbfca..ac54ebbd8f8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java @@ -17,9 +17,13 @@ package org.apache.cloudstack.api.command.user.snapshot; -import java.util.ArrayList; -import java.util.List; - +import com.cloud.dc.DataCenter; +import com.cloud.event.EventTypes; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.storage.Snapshot; +import com.cloud.user.Account; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -31,26 +35,24 @@ import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import 
org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.CollectionUtils; - -import com.cloud.dc.DataCenter; -import com.cloud.event.EventTypes; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.storage.Snapshot; -import com.cloud.user.Account; +import org.apache.commons.lang3.BooleanUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.util.ArrayList; +import java.util.List; + @APICommand(name = "copySnapshot", description = "Copies a snapshot from one zone to another.", responseObject = SnapshotResponse.class, responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { public static final Logger logger = LogManager.getLogger(CopySnapshotCmd.class.getName()); + private Snapshot snapshot; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -84,6 +86,20 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { "Do not specify destzoneid and destzoneids together, however one of them is required.") protected List destZoneIds; + @Parameter(name = ApiConstants.STORAGE_ID_LIST, + type=CommandType.LIST, + collectionType = CommandType.UUID, + entityType = StoragePoolResponse.class, + required = false, + authorized = RoleType.Admin, + since = "4.21.0", + description = "A comma-separated list of IDs of the storage pools in other zones in which the snapshot will be made available. " + + "The snapshot will always be made available in the zone in which the volume is present. 
Currently supported for StorPool only") + protected List storagePoolIds; + + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, since = "4.21.0", description = "This parameter enables the option the snapshot to be copied to supported primary storage") + protected Boolean useStorageReplication; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -106,7 +122,15 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { destIds.add(destZoneId); return destIds; } - return null; + return new ArrayList<>(); + } + + public List getStoragePoolIds() { + return storagePoolIds; + } + + public Boolean useStorageReplication() { + return BooleanUtils.toBoolean(useStorageReplication); } @Override @@ -152,7 +176,7 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { @Override public void execute() throws ResourceUnavailableException { try { - if (destZoneId == null && CollectionUtils.isEmpty(destZoneIds)) + if (destZoneId == null && CollectionUtils.isEmpty(destZoneIds) && useStorageReplication()) throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Either destzoneid or destzoneids parameters have to be specified."); @@ -161,7 +185,7 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { "Both destzoneid and destzoneids cannot be specified at the same time."); CallContext.current().setEventDetails(getEventDescription()); - Snapshot snapshot = _snapshotService.copySnapshot(this); + snapshot = _snapshotService.copySnapshot(this); if (snapshot != null) { SnapshotResponse response = _queryService.listSnapshot(this); @@ -177,6 +201,13 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } + } + public Snapshot getSnapshot() { + 
return snapshot; + } + + public void setSnapshot(Snapshot snapshot) { + this.snapshot = snapshot; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index 3289ac2fe10..60f3bbda858 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -16,11 +16,13 @@ // under the License. package org.apache.cloudstack.api.command.user.snapshot; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; @@ -32,6 +34,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.SnapshotPolicyResponse; import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.MapUtils; @@ -99,6 +102,19 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { since = "4.19.0") protected List zoneIds; + @Parameter(name = ApiConstants.STORAGE_ID_LIST, + type=CommandType.LIST, + collectionType = CommandType.UUID, + entityType = StoragePoolResponse.class, + authorized = RoleType.Admin, + description = "A comma-separated list of IDs of the storage pools in other zones in which the snapshot will be made available. 
" + + "The snapshot will always be made available in the zone in which the volume is present.", + since = "4.21.0") + protected List storagePoolIds; + + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, description = "This parameter enables the option the snapshot to be copied to supported primary storage") + protected Boolean useStorageReplication; + private String syncObjectType = BaseAsyncCmd.snapshotHostSyncObject; // /////////////////////////////////////////////////// @@ -161,6 +177,17 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { return zoneIds; } + public List getStoragePoolIds() { + return storagePoolIds == null ? new ArrayList<>() : storagePoolIds; + } + + public Boolean useStorageReplication() { + if (useStorageReplication == null) { + return false; + } + return useStorageReplication; + } + // /////////////////////////////////////////////////// // ///////////// API Implementation/////////////////// // /////////////////////////////////////////////////// @@ -209,7 +236,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { @Override public void create() throws ResourceAllocationException { - Snapshot snapshot = _volumeService.allocSnapshot(getVolumeId(), getPolicyId(), getSnapshotName(), getLocationType(), getZoneIds()); + Snapshot snapshot = _volumeService.allocSnapshot(getVolumeId(), getPolicyId(), getSnapshotName(), getLocationType(), getZoneIds(), getStoragePoolIds(), useStorageReplication()); if (snapshot != null) { setEntityId(snapshot.getId()); setEntityUuid(snapshot.getUuid()); @@ -223,7 +250,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { Snapshot snapshot; try { snapshot = - _volumeService.takeSnapshot(getVolumeId(), getPolicyId(), getEntityId(), _accountService.getAccount(getEntityOwnerId()), getQuiescevm(), getLocationType(), getAsyncBackup(), getTags(), getZoneIds()); + _volumeService.takeSnapshot(getVolumeId(), getPolicyId(), getEntityId(), 
_accountService.getAccount(getEntityOwnerId()), getQuiescevm(), getLocationType(), getAsyncBackup(), getTags(), getZoneIds(), getStoragePoolIds(), useStorageReplication()); if (snapshot != null) { SnapshotResponse response = _responseGenerator.createSnapshotResponse(snapshot); @@ -243,7 +270,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { } } - private Snapshot.LocationType getLocationType() { + public Snapshot.LocationType getLocationType() { if (Snapshot.LocationType.values() == null || Snapshot.LocationType.values().length == 0 || locationType == null) { return null; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java index e30b897db2e..66089894737 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java @@ -16,11 +16,13 @@ // under the License. 
package org.apache.cloudstack.api.command.user.snapshot; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.projects.Project; +import com.cloud.storage.Volume; +import com.cloud.storage.snapshot.SnapshotPolicy; +import com.cloud.user.Account; +import java.util.ArrayList; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -30,16 +32,16 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SnapshotPolicyResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.MapUtils; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.projects.Project; -import com.cloud.storage.Volume; -import com.cloud.storage.snapshot.SnapshotPolicy; -import com.cloud.user.Account; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.apache.commons.lang3.BooleanUtils; @APICommand(name = "createSnapshotPolicy", description = "Creates a snapshot policy for the account.", responseObject = SnapshotPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -83,6 +85,17 @@ public class CreateSnapshotPolicyCmd extends BaseCmd { "The snapshots will always be made available in the zone in which the volume is present.") protected List zoneIds; + @Parameter(name = ApiConstants.STORAGE_ID_LIST, + type=CommandType.LIST, + collectionType = CommandType.UUID, + 
entityType = StoragePoolResponse.class, + description = "A comma-separated list of IDs of the storage pools in other zones in which the snapshot will be made available. " + + "The snapshot will always be made available in the zone in which the volume is present.", + since = "4.21.0") + protected List storagePoolIds; + + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, since = "4.21.0", description = "This parameter enables the option the snapshot to be copied to supported primary storage") + protected Boolean useStorageReplication; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -119,6 +132,14 @@ public class CreateSnapshotPolicyCmd extends BaseCmd { return zoneIds; } + public List getStoragePoolIds() { + return storagePoolIds == null ? new ArrayList<>() : storagePoolIds; + } + + public Boolean useStorageReplication() { + return BooleanUtils.toBoolean(useStorageReplication); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index 4727e395c41..223ac57b11f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -126,6 +126,9 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User since = "4.21.0") private Long extensionId; + @Parameter(name = ApiConstants.IS_READY, type = CommandType.BOOLEAN, description = "list templates that are ready to be deployed", since = "4.21.0") + private Boolean ready; + 
///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -195,6 +198,13 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User boolean onlyReady = (templateFilter == TemplateFilter.featured) || (templateFilter == TemplateFilter.selfexecutable) || (templateFilter == TemplateFilter.sharedexecutable) || (templateFilter == TemplateFilter.executable && isAccountSpecific) || (templateFilter == TemplateFilter.community); + + if (!onlyReady) { + if (isReady() != null && isReady().booleanValue() != onlyReady) { + onlyReady = isReady().booleanValue(); + } + } + return onlyReady; } @@ -230,6 +240,10 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User return extensionId; } + public Boolean isReady() { + return ready; + } + @Override public String getCommandName() { return s_name; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java new file mode 100644 index 00000000000..ecbde47692f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java @@ -0,0 +1,848 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.vm; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import javax.annotation.Nonnull; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.affinity.AffinityGroupResponse; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; +import org.apache.cloudstack.api.BaseAsyncCreateCustomIdCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.SecurityGroupResponse; +import org.apache.cloudstack.api.response.UserDataResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.vm.lease.VMLeaseManager; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.EnumUtils; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.agent.api.LogLevel; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import 
com.cloud.network.Network; +import com.cloud.network.Network.IpAddresses; +import com.cloud.offering.DiskOffering; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.VmDetailConstants; +import com.cloud.vm.VmDiskInfo; +import com.cloud.utils.net.Dhcp; + +public abstract class BaseDeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityGroupAction, UserCmd { + + private static final String s_name = "deployvirtualmachineresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, description = "availability zone for the virtual machine") + private Long zoneId; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "host name for the virtual machine", validations = {ApiArgValidator.RFCComplianceDomainName}) + private String name; + + @Parameter(name = ApiConstants.DISPLAY_NAME, type = CommandType.STRING, description = "an optional user generated name for the virtual machine") + private String displayName; + + @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, description="The password of the virtual machine. If null, a random password will be generated for the VM.", + since="4.19.0.0") + protected String password; + + //Owner information + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the virtual machine. Must be used with domainId.") + private String accountName; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used. 
If account is NOT provided then virtual machine will be assigned to the caller account and domain.") + private Long domainId; + + //Network information + //@ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.NETWORK_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = NetworkResponse.class, description = "list of network ids used by virtual machine. Can't be specified with ipToNetworkList parameter") + private List networkIds; + + @Parameter(name = ApiConstants.BOOT_TYPE, type = CommandType.STRING, required = false, description = "Guest VM Boot option either custom[UEFI] or default boot [BIOS]. Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") + private String bootType; + + @Parameter(name = ApiConstants.BOOT_MODE, type = CommandType.STRING, required = false, description = "Boot Mode [Legacy] or [Secure] Applicable when Boot Type Selected is UEFI, otherwise Legacy only for BIOS. Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") + private String bootMode; + + @Parameter(name = ApiConstants.BOOT_INTO_SETUP, type = CommandType.BOOLEAN, required = false, description = "Boot into hardware setup or not (ignored if startVm = false, only valid for vmware)", since = "4.15.0.0") + private Boolean bootIntoSetup; + + //DataDisk information + @ACL + @Parameter(name = ApiConstants.DISK_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, description = "the ID of the disk offering for the virtual machine. If the template is of ISO format," + + " the diskOfferingId is for the root disk volume. Otherwise this parameter is used to indicate the " + + "offering for the data disk volume. If the templateId parameter passed is from a Template object," + + " the diskOfferingId refers to a DATA Disk Volume created. 
If the templateId parameter passed is " + + "from an ISO object, the diskOfferingId refers to a ROOT Disk Volume created.") + private Long diskOfferingId; + + @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, description = "the arbitrary size for the DATADISK volume. Mutually exclusive with diskOfferingId") + private Long size; + + @Parameter(name = ApiConstants.ROOT_DISK_SIZE, + type = CommandType.LONG, + description = "Optional field to resize root disk on deploy. Value is in GB. Only applies to template-based deployments. Analogous to details[0].rootdisksize, which takes precedence over this parameter if both are provided", + since = "4.4") + private Long rootdisksize; + + @Parameter(name = ApiConstants.DATADISKS_DETAILS, + type = CommandType.MAP, + since = "4.21.0", + description = "Disk offering details for creating multiple data volumes. Mutually exclusive with diskOfferingId." + + " Example: datadisksdetails[0].diskofferingid=a2a73a84-19db-4852-8930-dfddef053341&datadisksdetails[0].size=10&datadisksdetails[0].miniops=100&datadisksdetails[0].maxiops=200") + private Map dataDisksDetails; + + @Parameter(name = ApiConstants.GROUP, type = CommandType.STRING, description = "an optional group for the virtual machine") + private String group; + + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor on which to deploy the virtual machine. " + + "The parameter is required and respected only when hypervisor info is not set on the ISO/Template passed to the call") + private String hypervisor; + + @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, + description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + + "This binary data must be base64 encoded before adding it to the request. " + + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. 
" + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + + "You also need to change vm.userdata.max.length value", + length = 1048576) + private String userData; + + @Parameter(name = ApiConstants.USER_DATA_ID, type = CommandType.UUID, entityType = UserDataResponse.class, description = "the ID of the Userdata", since = "4.18") + private Long userdataId; + + @Parameter(name = ApiConstants.USER_DATA_DETAILS, type = CommandType.MAP, description = "used to specify the parameters values for the variables in userdata.", since = "4.18") + private Map userdataDetails; + + @Deprecated + @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING, description = "name of the ssh key pair used to login to the virtual machine") + private String sshKeyPairName; + + @Parameter(name = ApiConstants.SSH_KEYPAIRS, type = CommandType.LIST, collectionType = CommandType.STRING, since="4.17", description = "names of the ssh key pairs used to login to the virtual machine") + private List sshKeyPairNames; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "destination Host ID to deploy the VM to - parameter available for root admin only") + private Long hostId; + + @ACL + @Parameter(name = ApiConstants.SECURITY_GROUP_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = SecurityGroupResponse.class, description = "comma separated list of security groups id that going to be applied to the virtual machine. " + + "Should be passed only when vm is created from a zone with Basic Network support." 
+ " Mutually exclusive with securitygroupnames parameter") + private List securityGroupIdList; + + @ACL + @Parameter(name = ApiConstants.SECURITY_GROUP_NAMES, type = CommandType.LIST, collectionType = CommandType.STRING, entityType = SecurityGroupResponse.class, description = "comma separated list of security groups names that going to be applied to the virtual machine." + + " Should be passed only when vm is created from a zone with Basic Network support. " + "Mutually exclusive with securitygroupids parameter") + private List securityGroupNameList; + + @Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, description = "ip to network mapping. Can't be specified with networkIds parameter." + + " Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].ipv6=fc00:1234:5678::abcd&iptonetworklist[0].networkid=uuid&iptonetworklist[0].mac=aa:bb:cc:dd:ee::ff - requests to use ip 10.10.10.11 in network id=uuid") + private Map ipToNetworkList; + + @Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the ip address for default vm's network") + private String ipAddress; + + @Parameter(name = ApiConstants.IP6_ADDRESS, type = CommandType.STRING, description = "the ipv6 address for default vm's network") + private String ip6Address; + + @Parameter(name = ApiConstants.MAC_ADDRESS, type = CommandType.STRING, description = "the mac address for default vm's network") + private String macAddress; + + @Parameter(name = ApiConstants.KEYBOARD, type = CommandType.STRING, description = "an optional keyboard device type for the virtual machine. 
valid value can be one of de,de-ch,es,fi,fr,fr-be,fr-ch,is,it,jp,nl-be,no,pt,uk,us") + private String keyboard; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "Deploy vm for the project") + private Long projectId; + + @Parameter(name = ApiConstants.START_VM, type = CommandType.BOOLEAN, description = "true if start vm after creating; defaulted to true if not specified") + private Boolean startVm; + + @ACL + @Parameter(name = ApiConstants.AFFINITY_GROUP_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups id that are going to be applied to the virtual machine." + + " Mutually exclusive with affinitygroupnames parameter") + private List affinityGroupIdList; + + @ACL + @Parameter(name = ApiConstants.AFFINITY_GROUP_NAMES, type = CommandType.LIST, collectionType = CommandType.STRING, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups names that are going to be applied to the virtual machine." + + "Mutually exclusive with affinitygroupids parameter") + private List affinityGroupNameList; + + @Parameter(name = ApiConstants.DISPLAY_VM, type = CommandType.BOOLEAN, since = "4.2", description = "an optional field, whether to the display the vm to the end user or not.", authorized = {RoleType.Admin}) + private Boolean displayVm; + + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, since = "4.3", description = "used to specify the custom parameters. 'extraconfig' is not allowed to be passed in details") + private Map details; + + @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "Deployment planner to use for vm allocation. 
Available to ROOT admin only", since = "4.4", authorized = { RoleType.Admin }) + private String deploymentPlanner; + + @Parameter(name = ApiConstants.DHCP_OPTIONS_NETWORK_LIST, type = CommandType.MAP, description = "DHCP options which are passed to the VM on start up" + + " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com") + private Map dhcpOptionsNetworkList; + + @Parameter(name = ApiConstants.DATADISK_OFFERING_LIST, type = CommandType.MAP, since = "4.11", description = "datadisk template to disk-offering mapping;" + + " an optional parameter used to create additional data disks from datadisk templates; can't be specified with diskOfferingId parameter") + private Map dataDiskTemplateToDiskOfferingList; + + @Parameter(name = ApiConstants.EXTRA_CONFIG, type = CommandType.STRING, since = "4.12", description = "an optional URL encoded string that can be passed to the virtual machine upon successful deployment", length = 5120) + private String extraConfig; + + @Parameter(name = ApiConstants.COPY_IMAGE_TAGS, type = CommandType.BOOLEAN, since = "4.13", description = "if true the image tags (if any) will be copied to the VM, default value is false") + private Boolean copyImageTags; + + @Parameter(name = ApiConstants.PROPERTIES, type = CommandType.MAP, since = "4.15", + description = "used to specify the vApp properties.") + @LogLevel(LogLevel.Log4jLevel.Off) + private Map vAppProperties; + + @Parameter(name = ApiConstants.NIC_NETWORK_LIST, type = CommandType.MAP, since = "4.15", + description = "VMware only: used to specify network mapping of a vApp VMware template registered \"as-is\"." 
+ + " Example nicnetworklist[0].ip=Nic-101&nicnetworklist[0].network=uuid") + @LogLevel(LogLevel.Log4jLevel.Off) + private Map vAppNetworks; + + @Parameter(name = ApiConstants.DYNAMIC_SCALING_ENABLED, type = CommandType.BOOLEAN, since = "4.16", + description = "true if virtual machine needs to be dynamically scalable") + protected Boolean dynamicScalingEnabled; + + @Parameter(name = ApiConstants.OVERRIDE_DISK_OFFERING_ID, type = CommandType.UUID, since = "4.17", entityType = DiskOfferingResponse.class, description = "the ID of the disk offering for the virtual machine to be used for root volume instead of the disk offering mapped in service offering." + + "In case of virtual machine deploying from ISO, then the diskofferingid specified for root volume is ignored and uses this override disk offering id") + private Long overrideDiskOfferingId; + + @Parameter(name = ApiConstants.IOTHREADS_ENABLED, type = CommandType.BOOLEAN, required = false, + description = "IOThreads are dedicated event loop threads for supported disk devices to perform block I/O requests in order to improve scalability especially on an SMP host/guest with many LUNs.") + private Boolean iothreadsEnabled; + + @Parameter(name = ApiConstants.IO_DRIVER_POLICY, type = CommandType.STRING, description = "Controls specific policies on IO") + private String ioDriverPolicy; + + @Parameter(name = ApiConstants.NIC_MULTIQUEUE_NUMBER, type = CommandType.INTEGER, since = "4.18", + description = "The number of queues for multiqueue NICs.") + private Integer nicMultiqueueNumber; + + @Parameter(name = ApiConstants.NIC_PACKED_VIRTQUEUES_ENABLED, type = CommandType.BOOLEAN, since = "4.18", + description = "Enable packed virtqueues or not.") + private Boolean nicPackedVirtQueues; + + @Parameter(name = ApiConstants.INSTANCE_LEASE_DURATION, type = CommandType.INTEGER, since = "4.21.0", + description = "Number of days instance is leased for.") + private Integer leaseDuration; + + @Parameter(name = 
ApiConstants.INSTANCE_LEASE_EXPIRY_ACTION, type = CommandType.STRING, since = "4.21.0", + description = "Lease expiry action, valid values are STOP and DESTROY") + private String leaseExpiryAction; + + @Parameter(name = ApiConstants.EXTERNAL_DETAILS, + type = CommandType.MAP, + description = "Details in key/value pairs using format externaldetails[i].keyname=keyvalue. Example: externaldetails[0].server.type=typevalue", + since = "4.21.0") + protected Map externalDetails; + + private List dataDiskInfoList; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + public String getAccountName() { + if (accountName == null) { + return CallContext.current().getCallingAccount().getAccountName(); + } + return accountName; + } + + public Long getDiskOfferingId() { + return diskOfferingId; + } + + public String getDeploymentPlanner() { + return deploymentPlanner; + } + + public String getDisplayName() { + return displayName; + } + + public Long getDomainId() { + if (domainId == null) { + return CallContext.current().getCallingAccount().getDomainId(); + } + return domainId; + } + + public ApiConstants.BootType getBootType() { + if (StringUtils.isNotBlank(bootType)) { + try { + String type = bootType.trim().toUpperCase(); + return ApiConstants.BootType.valueOf(type); + } catch (IllegalArgumentException e) { + String errMesg = "Invalid bootType " + bootType + "Specified for vm " + getName() + + " Valid values are: " + Arrays.toString(ApiConstants.BootType.values()); + logger.warn(errMesg); + throw new InvalidParameterValueException(errMesg); + } + } + return null; + } + + public Map getDetails() { + Map customparameterMap = convertDetailsToMap(details); + + if (getBootType() != null) { + customparameterMap.put(getBootType().toString(), getBootMode().toString()); + } + + if (rootdisksize != 
null && !customparameterMap.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { + customparameterMap.put(VmDetailConstants.ROOT_DISK_SIZE, rootdisksize.toString()); + } + + IoDriverPolicy ioPolicy = getIoDriverPolicy(); + if (ioPolicy != null) { + customparameterMap.put(VmDetailConstants.IO_POLICY, ioPolicy.toString()); + } + + if (BooleanUtils.toBoolean(iothreadsEnabled)) { + customparameterMap.put(VmDetailConstants.IOTHREADS, BooleanUtils.toStringTrueFalse(iothreadsEnabled)); + } + + if (nicMultiqueueNumber != null) { + customparameterMap.put(VmDetailConstants.NIC_MULTIQUEUE_NUMBER, nicMultiqueueNumber.toString()); + } + + if (BooleanUtils.toBoolean(nicPackedVirtQueues)) { + customparameterMap.put(VmDetailConstants.NIC_PACKED_VIRTQUEUES_ENABLED, BooleanUtils.toStringTrueFalse(nicPackedVirtQueues)); + } + + if (MapUtils.isNotEmpty(externalDetails)) { + customparameterMap.putAll(getExternalDetails()); + } + return customparameterMap; + } + + public Map getExternalDetails() { + return convertExternalDetailsToMap(externalDetails); + } + + public ApiConstants.BootMode getBootMode() { + if (StringUtils.isNotBlank(bootMode)) { + try { + String mode = bootMode.trim().toUpperCase(); + return ApiConstants.BootMode.valueOf(mode); + } catch (IllegalArgumentException e) { + String msg = String.format("Invalid %s: %s specified for VM: %s. Valid values are: %s", + ApiConstants.BOOT_MODE, bootMode, getName(), Arrays.toString(ApiConstants.BootMode.values())); + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + } + if (ApiConstants.BootType.UEFI.equals(getBootType())) { + String msg = String.format("%s must be specified for the VM with boot type: %s. 
Valid values are: %s", + ApiConstants.BOOT_MODE, getBootType(), Arrays.toString(ApiConstants.BootMode.values())); + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + return null; + } + + public Map getVmProperties() { + Map map = new HashMap<>(); + if (MapUtils.isNotEmpty(vAppProperties)) { + Collection parameterCollection = vAppProperties.values(); + Iterator iterator = parameterCollection.iterator(); + while (iterator.hasNext()) { + HashMap entry = (HashMap)iterator.next(); + map.put(entry.get("key"), entry.get("value")); + } + } + return map; + } + + public Map getVmNetworkMap() { + Map map = new HashMap<>(); + if (MapUtils.isNotEmpty(vAppNetworks)) { + Collection parameterCollection = vAppNetworks.values(); + Iterator iterator = parameterCollection.iterator(); + while (iterator.hasNext()) { + HashMap entry = (HashMap) iterator.next(); + Integer nic; + try { + nic = Integer.valueOf(entry.get(VmDetailConstants.NIC)); + } catch (NumberFormatException nfe) { + nic = null; + } + String networkUuid = entry.get(VmDetailConstants.NETWORK); + if (logger.isTraceEnabled()) { + logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); + } + if (nic == null || StringUtils.isEmpty(networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { + throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); + } + map.put(nic, _entityMgr.findByUuid(Network.class, networkUuid).getId()); + } + } + return map; + } + + public String getGroup() { + return group; + } + + public HypervisorType getHypervisor() { + return HypervisorType.getType(hypervisor); + } + + public Boolean isDisplayVm() { + return displayVm; + } + + @Override + public boolean isDisplay() { + if(displayVm == null) + return true; + else + return displayVm; + } + + public List getSecurityGroupNameList() { + return securityGroupNameList; + } + + public List getSecurityGroupIdList() { + return 
securityGroupIdList; + } + + public Long getSize() { + return size; + } + + public String getUserData() { + return userData; + } + + public Long getUserdataId() { + return userdataId; + } + + public Map getUserdataDetails() { + return convertDetailsToMap(userdataDetails); + } + + public Long getZoneId() { + return zoneId; + } + + public String getPassword() { + return password; + } + + public Integer getLeaseDuration() { + return leaseDuration; + } + + public VMLeaseManager.ExpiryAction getLeaseExpiryAction() { + if (StringUtils.isBlank(leaseExpiryAction)) { + return null; + } + VMLeaseManager.ExpiryAction action = EnumUtils.getEnumIgnoreCase(VMLeaseManager.ExpiryAction.class, leaseExpiryAction); + if (action == null) { + throw new InvalidParameterValueException("Invalid value configured for leaseexpiryaction, valid values are: " + + com.cloud.utils.EnumUtils.listValues(VMLeaseManager.ExpiryAction.values())); + } + return action; + } + + public List getNetworkIds() { + if (MapUtils.isNotEmpty(vAppNetworks)) { + if (CollectionUtils.isNotEmpty(networkIds) || ipAddress != null || getIp6Address() != null || MapUtils.isNotEmpty(ipToNetworkList)) { + throw new InvalidParameterValueException(String.format("%s can't be specified along with %s, %s, %s", ApiConstants.NIC_NETWORK_LIST, ApiConstants.NETWORK_IDS, ApiConstants.IP_ADDRESS, ApiConstants.IP_NETWORK_LIST)); + } else { + return new ArrayList<>(); + } + } + if (ipToNetworkList != null && !ipToNetworkList.isEmpty()) { + if ((networkIds != null && !networkIds.isEmpty()) || ipAddress != null || getIp6Address() != null) { + throw new InvalidParameterValueException("ipToNetworkMap can't be specified along with networkIds or ipAddress"); + } else { + List networks = new ArrayList(); + networks.addAll(getIpToNetworkMap().keySet()); + return networks; + } + } + return networkIds; + } + + public String getName() { + return name; + } + + public List getSSHKeyPairNames() { + List sshKeyPairs = new ArrayList(); + 
if(sshKeyPairNames != null) { + sshKeyPairs = sshKeyPairNames; + } + if(sshKeyPairName != null && !sshKeyPairName.isEmpty()) { + sshKeyPairs.add(sshKeyPairName); + } + return sshKeyPairs; + } + + public List getDataDiskInfoList() { + if (this.dataDiskInfoList != null) { + return this.dataDiskInfoList; + } + if (dataDisksDetails == null || dataDisksDetails.isEmpty()) { + return null; + } + if (dataDiskTemplateToDiskOfferingList != null) { + throw new InvalidParameterValueException("datadisktemplatetodiskofferinglist parameter can't be specified along with datadisksdetails parameter"); + } + List vmDiskInfoList = new ArrayList<>(); + Collection dataDisksCollection = dataDisksDetails.values(); + Iterator iter = dataDisksCollection.iterator(); + while (iter.hasNext()) { + HashMap dataDisk = (HashMap)iter.next(); + String diskOfferingUuid = dataDisk.get(ApiConstants.DISK_OFFERING_ID); + if (diskOfferingUuid == null) { + throw new InvalidParameterValueException("diskofferingid parameter is required for datadiskdetails"); + } + DiskOffering diskOffering = _entityMgr.findByUuid(DiskOffering.class, diskOfferingUuid); + if (diskOffering == null) { + throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingUuid); + } + if (diskOffering.isComputeOnly()) { + throw new InvalidParameterValueException(String.format("The disk offering id %d provided is directly mapped to a service offering, please provide an individual disk offering", diskOffering.getUuid())); + } + + Long size = null; + Long minIops = null; + Long maxIops = null; + if (dataDisk.get(ApiConstants.DEVICE_ID) == null) { + throw new InvalidParameterValueException("deviceid parameter is required for datadiskdetails"); + } + Long deviceId = Long.parseLong(dataDisk.get(ApiConstants.DEVICE_ID)); + if (diskOffering.isCustomized()) { + if (dataDisk.get(ApiConstants.SIZE) == null) { + throw new InvalidParameterValueException("Size is required for custom disk offering"); + } + size = 
Long.parseLong(dataDisk.get(ApiConstants.SIZE)); + } else { + size = diskOffering.getDiskSize() / (1024 * 1024 * 1024); + } + if (diskOffering.isCustomizedIops() != null && diskOffering.isCustomizedIops()) { + if (dataDisk.get(ApiConstants.MIN_IOPS) == null) { + throw new InvalidParameterValueException("Min IOPS is required for custom disk offering"); + } + if (dataDisk.get(ApiConstants.MAX_IOPS) == null) { + throw new InvalidParameterValueException("Max IOPS is required for custom disk offering"); + } + minIops = Long.parseLong(dataDisk.get(ApiConstants.MIN_IOPS)); + maxIops = Long.parseLong(dataDisk.get(ApiConstants.MAX_IOPS)); + } + VmDiskInfo vmDiskInfo = new VmDiskInfo(diskOffering, size, minIops, maxIops, deviceId); + vmDiskInfoList.add(vmDiskInfo); + } + this.dataDiskInfoList = vmDiskInfoList; + return dataDiskInfoList; + } + + public Long getHostId() { + return hostId; + } + + public boolean getStartVm() { + return startVm == null ? true : startVm; + } + + public Map getIpToNetworkMap() { + if ((networkIds != null || ipAddress != null || getIp6Address() != null) && ipToNetworkList != null) { + throw new InvalidParameterValueException("NetworkIds and ipAddress can't be specified along with ipToNetworkMap parameter"); + } + LinkedHashMap ipToNetworkMap = null; + if (ipToNetworkList != null && !ipToNetworkList.isEmpty()) { + ipToNetworkMap = new LinkedHashMap(); + Collection ipsCollection = ipToNetworkList.values(); + Iterator iter = ipsCollection.iterator(); + while (iter.hasNext()) { + HashMap ips = (HashMap)iter.next(); + Long networkId = getNetworkIdFomIpMap(ips); + IpAddresses addrs = getIpAddressesFromIpMap(ips); + ipToNetworkMap.put(networkId, addrs); + } + } + + return ipToNetworkMap; + } + + @Nonnull + private IpAddresses getIpAddressesFromIpMap(HashMap ips) { + String requestedIp = ips.get("ip"); + String requestedIpv6 = ips.get("ipv6"); + String requestedMac = ips.get("mac"); + if (requestedIpv6 != null) { + requestedIpv6 = 
NetUtils.standardizeIp6Address(requestedIpv6); + } + if (requestedMac != null) { + if(!NetUtils.isValidMac(requestedMac)) { + throw new InvalidParameterValueException("Mac address is not valid: " + requestedMac); + } else if(!NetUtils.isUnicastMac(requestedMac)) { + throw new InvalidParameterValueException("Mac address is not unicast: " + requestedMac); + } + requestedMac = NetUtils.standardizeMacAddress(requestedMac); + } + return new IpAddresses(requestedIp, requestedIpv6, requestedMac); + } + + @Nonnull + private Long getNetworkIdFomIpMap(HashMap ips) { + Long networkId; + final String networkid = ips.get("networkid"); + Network network = _networkService.getNetwork(networkid); + if (network != null) { + networkId = network.getId(); + } else { + try { + networkId = Long.parseLong(networkid); + } catch (NumberFormatException e) { + throw new InvalidParameterValueException("Unable to translate and find entity with networkId: " + networkid); + } + } + return networkId; + } + + public String getIpAddress() { + return ipAddress; + } + + public String getIp6Address() { + if (ip6Address == null) { + return null; + } + return NetUtils.standardizeIp6Address(ip6Address); + } + + + public String getMacAddress() { + if (macAddress == null) { + return null; + } + if(!NetUtils.isValidMac(macAddress)) { + throw new InvalidParameterValueException("Mac address is not valid: " + macAddress); + } else if(!NetUtils.isUnicastMac(macAddress)) { + throw new InvalidParameterValueException("Mac address is not unicast: " + macAddress); + } + return NetUtils.standardizeMacAddress(macAddress); + } + + public List getAffinityGroupIdList() { + if (affinityGroupNameList != null && affinityGroupIdList != null) { + throw new InvalidParameterValueException("affinitygroupids parameter is mutually exclusive with affinitygroupnames parameter"); + } + + // transform group names to ids here + if (affinityGroupNameList != null) { + List affinityGroupIds = new ArrayList(); + for (String groupName : 
affinityGroupNameList) { + Long groupId = _responseGenerator.getAffinityGroupId(groupName, getEntityOwnerId()); + if (groupId == null) { + throw new InvalidParameterValueException("Unable to find affinity group by name " + groupName); + } else { + affinityGroupIds.add(groupId); + } + } + return affinityGroupIds; + } else { + return affinityGroupIdList; + } + } + + public String getKeyboard() { + // TODO Auto-generated method stub + return keyboard; + } + + public Map> getDhcpOptionsMap() { + Map> dhcpOptionsMap = new HashMap<>(); + if (dhcpOptionsNetworkList != null && !dhcpOptionsNetworkList.isEmpty()) { + + Collection> paramsCollection = this.dhcpOptionsNetworkList.values(); + for (Map dhcpNetworkOptions : paramsCollection) { + String networkId = dhcpNetworkOptions.get(ApiConstants.NETWORK_ID); + + if (networkId == null) { + throw new IllegalArgumentException("No networkid specified when providing extra dhcp options."); + } + + Map dhcpOptionsForNetwork = new HashMap<>(); + dhcpOptionsMap.put(networkId, dhcpOptionsForNetwork); + + for (String key : dhcpNetworkOptions.keySet()) { + if (key.startsWith(ApiConstants.DHCP_PREFIX)) { + int dhcpOptionValue = Integer.parseInt(key.replaceFirst(ApiConstants.DHCP_PREFIX, "")); + dhcpOptionsForNetwork.put(dhcpOptionValue, dhcpNetworkOptions.get(key)); + } else if (!key.equals(ApiConstants.NETWORK_ID)) { + Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key); + dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key)); + } + } + + } + } + + return dhcpOptionsMap; + } + + public Map getDataDiskTemplateToDiskOfferingMap() { + if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) { + throw new InvalidParameterValueException("diskofferingid parameter can't be specified along with datadisktemplatetodiskofferinglist parameter"); + } + if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) { + return new HashMap(); + } + + HashMap dataDiskTemplateToDiskOfferingMap 
= new HashMap(); + for (Object objDataDiskTemplates : dataDiskTemplateToDiskOfferingList.values()) { + HashMap dataDiskTemplates = (HashMap) objDataDiskTemplates; + Long dataDiskTemplateId; + DiskOffering dataDiskOffering = null; + VirtualMachineTemplate dataDiskTemplate= _entityMgr.findByUuid(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); + if (dataDiskTemplate == null) { + dataDiskTemplate = _entityMgr.findById(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); + if (dataDiskTemplate == null) + throw new InvalidParameterValueException("Unable to translate and find entity with datadisktemplateid " + dataDiskTemplates.get("datadisktemplateid")); + } + dataDiskTemplateId = dataDiskTemplate.getId(); + dataDiskOffering = _entityMgr.findByUuid(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); + if (dataDiskOffering == null) { + dataDiskOffering = _entityMgr.findById(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); + if (dataDiskOffering == null) + throw new InvalidParameterValueException("Unable to translate and find entity with diskofferingId " + dataDiskTemplates.get("diskofferingid")); + } + dataDiskTemplateToDiskOfferingMap.put(dataDiskTemplateId, dataDiskOffering); + } + return dataDiskTemplateToDiskOfferingMap; + } + + public String getExtraConfig() { + return extraConfig; + } + + public boolean getCopyImageTags() { + return copyImageTags == null ? false : copyImageTags; + } + + public Boolean getBootIntoSetup() { + return bootIntoSetup; + } + + public boolean isDynamicScalingEnabled() { + return dynamicScalingEnabled == null ? 
true : dynamicScalingEnabled; + } + + public Long getOverrideDiskOfferingId() { + return overrideDiskOfferingId; + } + + public IoDriverPolicy getIoDriverPolicy() { + if (StringUtils.isNotBlank(ioDriverPolicy)) { + try { + String policyType = ioDriverPolicy.trim().toUpperCase(); + return IoDriverPolicy.valueOf(policyType); + } catch (IllegalArgumentException e) { + String errMesg = String.format("Invalid io policy %s specified for vm %s. Valid values are: %s", ioDriverPolicy, getName(), Arrays.toString(IoDriverPolicy.values())); + logger.warn(errMesg); + throw new InvalidParameterValueException(errMesg); + } + } + return null; + } + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public static String getResultObjectName() { + return "virtualmachine"; + } + + @Override + public long getEntityOwnerId() { + Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + if (accountId == null) { + return CallContext.current().getCallingAccount().getId(); + } + + return accountId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_CREATE; + } + + @Override + public String getCreateEventType() { + return EventTypes.EVENT_VM_CREATE; + } + + @Override + public String getCreateEventDescription() { + return "creating Vm"; + } + + @Override + public String getEventDescription() { + if(getStartVm()) { + return "starting Vm. Vm Id: " + getEntityUuid(); + } + return "deploying Vm. 
Vm Id: " + getEntityUuid(); + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.VirtualMachine; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java new file mode 100644 index 00000000000..04e413ed67a --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.vm; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.TemplateResponse; +import org.apache.cloudstack.api.response.UserVmResponse; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.uservm.UserVm; +import com.cloud.vm.VirtualMachine; + +@APICommand(name = "createVMFromBackup", + description = "Creates and automatically starts a VM from a backup.", + responseObject = UserVmResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {VirtualMachine.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = true, + since = "4.21.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class CreateVMFromBackupCmd extends BaseDeployVMCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.BACKUP_ID, + type = CommandType.UUID, + entityType = BackupResponse.class, + required = true, + description = "backup ID to create the VM from") + private Long backupId; + + @ACL + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = 
CommandType.UUID, entityType = ServiceOfferingResponse.class, description = "the ID of the service offering for the virtual machine") + private Long serviceOfferingId; + + @ACL + @Parameter(name = ApiConstants.TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "the ID of the template for the virtual machine") + private Long templateId; + + @Parameter(name = ApiConstants.PRESERVE_IP, type = CommandType.BOOLEAN, description = "Use the same IP/MAC addresses as stored in the backup metadata. Works only if the original Instance is deleted and the IP/MAC address is available.") + private Boolean preserveIp; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getBackupId() { + return backupId; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + public Long getTemplateId() { + return templateId; + } + + public boolean getPreserveIp() { + return (preserveIp != null) ? 
preserveIp : false; + } + + @Override + public void create() { + UserVm vm; + try { + vm = _userVmService.allocateVMFromBackup(this); + if (vm != null) { + setEntityId(vm.getId()); + setEntityUuid(vm.getUuid()); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to deploy vm"); + } + } catch (InsufficientCapacityException ex) { + logger.info(ex); + logger.trace(ex.getMessage(), ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); + } catch (ResourceUnavailableException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (ConcurrentOperationException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (ResourceAllocationException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); + } + } + + @Override + public void execute () { + UserVm vm = null; + try { + vm = _userVmService.restoreVMFromBackup(this); + } catch (ResourceUnavailableException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (ResourceAllocationException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); + } catch (ConcurrentOperationException ex) { + logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (InsufficientCapacityException ex) { + StringBuilder message = new StringBuilder(ex.getMessage()); + if (ex instanceof InsufficientServerCapacityException) { + if (((InsufficientServerCapacityException)ex).isAffinityApplied()) { + message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); + } + } + 
logger.info(String.format("%s: %s", message.toString(), ex.getLocalizedMessage())); + logger.debug(message.toString(), ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); + } + + if (vm != null) { + UserVmResponse response = _responseGenerator.createUserVmResponse(getResponseView(), "virtualmachine", vm).get(0); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to deploy vm uuid:"+getEntityUuid()); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index afd23cfd871..dc1ca3583cc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -16,87 +16,40 @@ // under the License. package org.apache.cloudstack.api.command.user.vm; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.stream.Stream; -import javax.annotation.Nonnull; - -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.ApiArgValidator; -import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; import org.apache.cloudstack.api.ApiErrorCode; -import org.apache.cloudstack.api.BaseAsyncCreateCustomIdCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import 
org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.command.user.UserCmd; -import org.apache.cloudstack.api.response.DiskOfferingResponse; -import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.cloudstack.api.response.HostResponse; -import org.apache.cloudstack.api.response.NetworkResponse; -import org.apache.cloudstack.api.response.ProjectResponse; -import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.SnapshotResponse; import org.apache.cloudstack.api.response.TemplateResponse; -import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; -import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.vm.lease.VMLeaseManager; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.EnumUtils; -import org.apache.commons.lang3.StringUtils; -import com.cloud.agent.api.LogLevel; -import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.network.Network.IpAddresses; -import com.cloud.offering.DiskOffering; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; import com.cloud.utils.exception.CloudRuntimeException; -import 
com.cloud.utils.net.Dhcp; -import com.cloud.utils.net.NetUtils; import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VmDetailConstants; @APICommand(name = "deployVirtualMachine", description = "Creates and automatically starts a virtual machine based on a service offering, disk offering, and template.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) -public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityGroupAction, UserCmd { - - private static final String s_name = "deployvirtualmachineresponse"; +public class DeployVMCmd extends BaseDeployVMCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, description = "availability zone for the virtual machine") - private Long zoneId; - @ACL @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, required = true, description = "the ID of the service offering for the virtual machine") private Long serviceOfferingId; @@ -105,672 +58,24 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG @Parameter(name = ApiConstants.TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "the ID of the template for the virtual machine") private Long templateId; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "host name for the virtual machine", validations = {ApiArgValidator.RFCComplianceDomainName}) - private String name; - - @Parameter(name = ApiConstants.DISPLAY_NAME, type = CommandType.STRING, description = "an optional user generated name for the virtual machine") - private String 
displayName; - - @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, description="The password of the virtual machine. If null, a random password will be generated for the VM.", - since="4.19.0.0") - protected String password; - - //Owner information - @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the virtual machine. Must be used with domainId.") - private String accountName; - - @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used. If account is NOT provided then virtual machine will be assigned to the caller account and domain.") - private Long domainId; - - //Network information - //@ACL(accessType = AccessType.UseEntry) - @Parameter(name = ApiConstants.NETWORK_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = NetworkResponse.class, description = "list of network ids used by virtual machine. Can't be specified with ipToNetworkList parameter") - private List networkIds; - - @Parameter(name = ApiConstants.BOOT_TYPE, type = CommandType.STRING, required = false, description = "Guest VM Boot option either custom[UEFI] or default boot [BIOS]. Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") - private String bootType; - - @Parameter(name = ApiConstants.BOOT_MODE, type = CommandType.STRING, required = false, description = "Boot Mode [Legacy] or [Secure] Applicable when Boot Type Selected is UEFI, otherwise Legacy only for BIOS. 
Not applicable with VMware if the template is marked as deploy-as-is, as we honour what is defined in the template.", since = "4.14.0.0") - private String bootMode; - - @Parameter(name = ApiConstants.BOOT_INTO_SETUP, type = CommandType.BOOLEAN, required = false, description = "Boot into hardware setup or not (ignored if startVm = false, only valid for vmware)", since = "4.15.0.0") - private Boolean bootIntoSetup; - - //DataDisk information - @ACL - @Parameter(name = ApiConstants.DISK_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, description = "the ID of the disk offering for the virtual machine. If the template is of ISO format," - + " the diskOfferingId is for the root disk volume. Otherwise this parameter is used to indicate the " - + "offering for the data disk volume. If the templateId parameter passed is from a Template object," - + " the diskOfferingId refers to a DATA Disk Volume created. If the templateId parameter passed is " - + "from an ISO object, the diskOfferingId refers to a ROOT Disk Volume created.") - private Long diskOfferingId; - - @Parameter(name = ApiConstants.SIZE, type = CommandType.LONG, description = "the arbitrary size for the DATADISK volume. Mutually exclusive with diskOfferingId") - private Long size; - - @Parameter(name = ApiConstants.ROOT_DISK_SIZE, - type = CommandType.LONG, - description = "Optional field to resize root disk on deploy. Value is in GB. Only applies to template-based deployments. Analogous to details[0].rootdisksize, which takes precedence over this parameter if both are provided", - since = "4.4") - private Long rootdisksize; - - @Parameter(name = ApiConstants.GROUP, type = CommandType.STRING, description = "an optional group for the virtual machine") - private String group; - - @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor on which to deploy the virtual machine. 
" - + "The parameter is required and respected only when hypervisor info is not set on the ISO/Template passed to the call") - private String hypervisor; - - @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, - description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + - "This binary data must be base64 encoded before adding it to the request. " + - "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + - "You also need to change vm.userdata.max.length value", - length = 1048576) - private String userData; - - @Parameter(name = ApiConstants.USER_DATA_ID, type = CommandType.UUID, entityType = UserDataResponse.class, description = "the ID of the Userdata", since = "4.18") - private Long userdataId; - - @Parameter(name = ApiConstants.USER_DATA_DETAILS, type = CommandType.MAP, description = "used to specify the parameters values for the variables in userdata.", since = "4.18") - private Map userdataDetails; - - @Deprecated - @Parameter(name = ApiConstants.SSH_KEYPAIR, type = CommandType.STRING, description = "name of the ssh key pair used to login to the virtual machine") - private String sshKeyPairName; - - @Parameter(name = ApiConstants.SSH_KEYPAIRS, type = CommandType.LIST, collectionType = CommandType.STRING, since="4.17", description = "names of the ssh key pairs used to login to the virtual machine") - private List sshKeyPairNames; - - @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "destination Host ID to deploy the VM to - parameter available for root admin only") - private Long hostId; - - @ACL - @Parameter(name = ApiConstants.SECURITY_GROUP_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = SecurityGroupResponse.class, description = "comma separated list of security 
groups id that going to be applied to the virtual machine. " - + "Should be passed only when vm is created from a zone with Basic Network support." + " Mutually exclusive with securitygroupnames parameter") - private List securityGroupIdList; - - @ACL - @Parameter(name = ApiConstants.SECURITY_GROUP_NAMES, type = CommandType.LIST, collectionType = CommandType.STRING, entityType = SecurityGroupResponse.class, description = "comma separated list of security groups names that going to be applied to the virtual machine." - + " Should be passed only when vm is created from a zone with Basic Network support. " + "Mutually exclusive with securitygroupids parameter") - private List securityGroupNameList; - - @Parameter(name = ApiConstants.IP_NETWORK_LIST, type = CommandType.MAP, description = "ip to network mapping. Can't be specified with networkIds parameter." - + " Example: iptonetworklist[0].ip=10.10.10.11&iptonetworklist[0].ipv6=fc00:1234:5678::abcd&iptonetworklist[0].networkid=uuid&iptonetworklist[0].mac=aa:bb:cc:dd:ee::ff - requests to use ip 10.10.10.11 in network id=uuid") - private Map ipToNetworkList; - - @Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, description = "the ip address for default vm's network") - private String ipAddress; - - @Parameter(name = ApiConstants.IP6_ADDRESS, type = CommandType.STRING, description = "the ipv6 address for default vm's network") - private String ip6Address; - - @Parameter(name = ApiConstants.MAC_ADDRESS, type = CommandType.STRING, description = "the mac address for default vm's network") - private String macAddress; - - @Parameter(name = ApiConstants.KEYBOARD, type = CommandType.STRING, description = "an optional keyboard device type for the virtual machine. 
valid value can be one of de,de-ch,es,fi,fr,fr-be,fr-ch,is,it,jp,nl-be,no,pt,uk,us") - private String keyboard; - - @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "Deploy vm for the project") - private Long projectId; - - @Parameter(name = ApiConstants.START_VM, type = CommandType.BOOLEAN, description = "true if start vm after creating; defaulted to true if not specified") - private Boolean startVm; - - @ACL - @Parameter(name = ApiConstants.AFFINITY_GROUP_IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups id that are going to be applied to the virtual machine." - + " Mutually exclusive with affinitygroupnames parameter") - private List affinityGroupIdList; - - @ACL - @Parameter(name = ApiConstants.AFFINITY_GROUP_NAMES, type = CommandType.LIST, collectionType = CommandType.STRING, entityType = AffinityGroupResponse.class, description = "comma separated list of affinity groups names that are going to be applied to the virtual machine." - + "Mutually exclusive with affinitygroupids parameter") - private List affinityGroupNameList; - - @Parameter(name = ApiConstants.DISPLAY_VM, type = CommandType.BOOLEAN, since = "4.2", description = "an optional field, whether to the display the vm to the end user or not.", authorized = {RoleType.Admin}) - private Boolean displayVm; - - @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, since = "4.3", description = "used to specify the custom parameters. 'extraconfig' is not allowed to be passed in details") - private Map details; - - @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "Deployment planner to use for vm allocation. 
Available to ROOT admin only", since = "4.4", authorized = { RoleType.Admin }) - private String deploymentPlanner; - - @Parameter(name = ApiConstants.DHCP_OPTIONS_NETWORK_LIST, type = CommandType.MAP, description = "DHCP options which are passed to the VM on start up" - + " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com") - private Map dhcpOptionsNetworkList; - - @Parameter(name = ApiConstants.DATADISK_OFFERING_LIST, type = CommandType.MAP, since = "4.11", description = "datadisk template to disk-offering mapping;" + - " an optional parameter used to create additional data disks from datadisk templates; can't be specified with diskOfferingId parameter") - private Map dataDiskTemplateToDiskOfferingList; - - @Parameter(name = ApiConstants.EXTRA_CONFIG, type = CommandType.STRING, since = "4.12", description = "an optional URL encoded string that can be passed to the virtual machine upon successful deployment", length = 5120) - private String extraConfig; - - @Parameter(name = ApiConstants.COPY_IMAGE_TAGS, type = CommandType.BOOLEAN, since = "4.13", description = "if true the image tags (if any) will be copied to the VM, default value is false") - private Boolean copyImageTags; - - @Parameter(name = ApiConstants.PROPERTIES, type = CommandType.MAP, since = "4.15", - description = "used to specify the vApp properties.") - @LogLevel(LogLevel.Log4jLevel.Off) - private Map vAppProperties; - - @Parameter(name = ApiConstants.NIC_NETWORK_LIST, type = CommandType.MAP, since = "4.15", - description = "VMware only: used to specify network mapping of a vApp VMware template registered \"as-is\"." 
+ - " Example nicnetworklist[0].ip=Nic-101&nicnetworklist[0].network=uuid") - @LogLevel(LogLevel.Log4jLevel.Off) - private Map vAppNetworks; - - @Parameter(name = ApiConstants.DYNAMIC_SCALING_ENABLED, type = CommandType.BOOLEAN, since = "4.16", - description = "true if virtual machine needs to be dynamically scalable") - protected Boolean dynamicScalingEnabled; - - @Parameter(name = ApiConstants.OVERRIDE_DISK_OFFERING_ID, type = CommandType.UUID, since = "4.17", entityType = DiskOfferingResponse.class, description = "the ID of the disk offering for the virtual machine to be used for root volume instead of the disk offering mapped in service offering." + - "In case of virtual machine deploying from ISO, then the diskofferingid specified for root volume is ignored and uses this override disk offering id") - private Long overrideDiskOfferingId; - - @Parameter(name = ApiConstants.IOTHREADS_ENABLED, type = CommandType.BOOLEAN, required = false, - description = "IOThreads are dedicated event loop threads for supported disk devices to perform block I/O requests in order to improve scalability especially on an SMP host/guest with many LUNs.") - private Boolean iothreadsEnabled; - - @Parameter(name = ApiConstants.IO_DRIVER_POLICY, type = CommandType.STRING, description = "Controls specific policies on IO") - private String ioDriverPolicy; - - @Parameter(name = ApiConstants.NIC_MULTIQUEUE_NUMBER, type = CommandType.INTEGER, since = "4.18", - description = "The number of queues for multiqueue NICs.") - private Integer nicMultiqueueNumber; - - @Parameter(name = ApiConstants.NIC_PACKED_VIRTQUEUES_ENABLED, type = CommandType.BOOLEAN, since = "4.18", - description = "Enable packed virtqueues or not.") - private Boolean nicPackedVirtQueues; - - @Parameter(name = ApiConstants.INSTANCE_LEASE_DURATION, type = CommandType.INTEGER, since = "4.21.0", - description = "Number of days instance is leased for.") - private Integer leaseDuration; - - @Parameter(name = 
ApiConstants.INSTANCE_LEASE_EXPIRY_ACTION, type = CommandType.STRING, since = "4.21.0", - description = "Lease expiry action, valid values are STOP and DESTROY") - private String leaseExpiryAction; - @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.UUID, entityType = VolumeResponse.class, since = "4.21") private Long volumeId; @Parameter(name = ApiConstants.SNAPSHOT_ID, type = CommandType.UUID, entityType = SnapshotResponse.class, since = "4.21") private Long snapshotId; - @Parameter(name = ApiConstants.EXTERNAL_DETAILS, - type = CommandType.MAP, - description = "Details in key/value pairs using format externaldetails[i].keyname=keyvalue. Example: externaldetails[0].server.type=typevalue", - since = "4.21.0") - protected Map externalDetails; - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public String getAccountName() { - if (accountName == null) { - return CallContext.current().getCallingAccount().getAccountName(); - } - return accountName; - } - - public Long getDiskOfferingId() { - return diskOfferingId; - } - - public String getDeploymentPlanner() { - return deploymentPlanner; - } - - public String getDisplayName() { - return displayName; - } - - public Long getDomainId() { - if (domainId == null) { - return CallContext.current().getCallingAccount().getDomainId(); - } - return domainId; - } - - public ApiConstants.BootType getBootType() { - if (StringUtils.isNotBlank(bootType)) { - try { - String type = bootType.trim().toUpperCase(); - return ApiConstants.BootType.valueOf(type); - } catch (IllegalArgumentException e) { - String errMesg = "Invalid bootType " + bootType + "Specified for vm " + getName() - + " Valid values are: " + Arrays.toString(ApiConstants.BootType.values()); - logger.warn(errMesg); - throw new InvalidParameterValueException(errMesg); - } - } - return null; - } - - public Map getDetails() { - Map customparameterMap 
= convertDetailsToMap(details); - - if (getBootType() != null) { - customparameterMap.put(getBootType().toString(), getBootMode().toString()); - } - - if (rootdisksize != null && !customparameterMap.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - customparameterMap.put(VmDetailConstants.ROOT_DISK_SIZE, rootdisksize.toString()); - } - - IoDriverPolicy ioPolicy = getIoDriverPolicy(); - if (ioPolicy != null) { - customparameterMap.put(VmDetailConstants.IO_POLICY, ioPolicy.toString()); - } - - if (BooleanUtils.toBoolean(iothreadsEnabled)) { - customparameterMap.put(VmDetailConstants.IOTHREADS, BooleanUtils.toStringTrueFalse(iothreadsEnabled)); - } - - if (nicMultiqueueNumber != null) { - customparameterMap.put(VmDetailConstants.NIC_MULTIQUEUE_NUMBER, nicMultiqueueNumber.toString()); - } - - if (BooleanUtils.toBoolean(nicPackedVirtQueues)) { - customparameterMap.put(VmDetailConstants.NIC_PACKED_VIRTQUEUES_ENABLED, BooleanUtils.toStringTrueFalse(nicPackedVirtQueues)); - } - - if (MapUtils.isNotEmpty(externalDetails)) { - customparameterMap.putAll(getExternalDetails()); - } - - return customparameterMap; - } - - public Map getExternalDetails() { - return convertExternalDetailsToMap(externalDetails); - } - - public ApiConstants.BootMode getBootMode() { - if (StringUtils.isNotBlank(bootMode)) { - try { - String mode = bootMode.trim().toUpperCase(); - return ApiConstants.BootMode.valueOf(mode); - } catch (IllegalArgumentException e) { - String msg = String.format("Invalid %s: %s specified for VM: %s. Valid values are: %s", - ApiConstants.BOOT_MODE, bootMode, getName(), Arrays.toString(ApiConstants.BootMode.values())); - logger.error(msg); - throw new InvalidParameterValueException(msg); - } - } - if (ApiConstants.BootType.UEFI.equals(getBootType())) { - String msg = String.format("%s must be specified for the VM with boot type: %s. 
Valid values are: %s", - ApiConstants.BOOT_MODE, getBootType(), Arrays.toString(ApiConstants.BootMode.values())); - logger.error(msg); - throw new InvalidParameterValueException(msg); - } - return null; - } - - public Map getVmProperties() { - Map map = new HashMap<>(); - if (MapUtils.isNotEmpty(vAppProperties)) { - Collection parameterCollection = vAppProperties.values(); - Iterator iterator = parameterCollection.iterator(); - while (iterator.hasNext()) { - HashMap entry = (HashMap)iterator.next(); - map.put(entry.get("key"), entry.get("value")); - } - } - return map; - } - - public Map getVmNetworkMap() { - Map map = new HashMap<>(); - if (MapUtils.isNotEmpty(vAppNetworks)) { - Collection parameterCollection = vAppNetworks.values(); - Iterator iterator = parameterCollection.iterator(); - while (iterator.hasNext()) { - HashMap entry = (HashMap) iterator.next(); - Integer nic; - try { - nic = Integer.valueOf(entry.get(VmDetailConstants.NIC)); - } catch (NumberFormatException nfe) { - nic = null; - } - String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); - } - if (nic == null || StringUtils.isEmpty(networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { - throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); - } - map.put(nic, _entityMgr.findByUuid(Network.class, networkUuid).getId()); - } - } - return map; - } - - public String getGroup() { - return group; - } - - public HypervisorType getHypervisor() { - return HypervisorType.getType(hypervisor); - } - - public Boolean isDisplayVm() { - return displayVm; - } - - @Override - public boolean isDisplay() { - if(displayVm == null) - return true; - else - return displayVm; - } - - public List getSecurityGroupNameList() { - return securityGroupNameList; - } - - public List getSecurityGroupIdList() { - return 
securityGroupIdList; - } - public Long getServiceOfferingId() { return serviceOfferingId; } - public Long getSize() { - return size; - } - public Long getTemplateId() { return templateId; } - public String getUserData() { - return userData; - } - - public Long getUserdataId() { - return userdataId; - } - - public Map getUserdataDetails() { - return convertDetailsToMap(userdataDetails); - } - - public Long getZoneId() { - return zoneId; - } - - public String getPassword() { - return password; - } - - public Integer getLeaseDuration() { - return leaseDuration; - } - - public VMLeaseManager.ExpiryAction getLeaseExpiryAction() { - if (StringUtils.isBlank(leaseExpiryAction)) { - return null; - } - VMLeaseManager.ExpiryAction action = EnumUtils.getEnumIgnoreCase(VMLeaseManager.ExpiryAction.class, leaseExpiryAction); - if (action == null) { - throw new InvalidParameterValueException("Invalid value configured for leaseexpiryaction, valid values are: " + - com.cloud.utils.EnumUtils.listValues(VMLeaseManager.ExpiryAction.values())); - } - return action; - } - - public List getNetworkIds() { - if (MapUtils.isNotEmpty(vAppNetworks)) { - if (CollectionUtils.isNotEmpty(networkIds) || ipAddress != null || getIp6Address() != null || MapUtils.isNotEmpty(ipToNetworkList)) { - throw new InvalidParameterValueException(String.format("%s can't be specified along with %s, %s, %s", ApiConstants.NIC_NETWORK_LIST, ApiConstants.NETWORK_IDS, ApiConstants.IP_ADDRESS, ApiConstants.IP_NETWORK_LIST)); - } else { - return new ArrayList<>(); - } - } - if (ipToNetworkList != null && !ipToNetworkList.isEmpty()) { - if ((networkIds != null && !networkIds.isEmpty()) || ipAddress != null || getIp6Address() != null) { - throw new InvalidParameterValueException("ipToNetworkMap can't be specified along with networkIds or ipAddress"); - } else { - List networks = new ArrayList(); - networks.addAll(getIpToNetworkMap().keySet()); - return networks; - } - } - return networkIds; - } - - public String getName() 
{ - return name; - } - - public List getSSHKeyPairNames() { - List sshKeyPairs = new ArrayList(); - if(sshKeyPairNames != null) { - sshKeyPairs = sshKeyPairNames; - } - if(sshKeyPairName != null && !sshKeyPairName.isEmpty()) { - sshKeyPairs.add(sshKeyPairName); - } - return sshKeyPairs; - } - - public Long getHostId() { - return hostId; - } - - public boolean getStartVm() { - return startVm == null ? true : startVm; - } - - public Map getIpToNetworkMap() { - if ((networkIds != null || ipAddress != null || getIp6Address() != null) && ipToNetworkList != null) { - throw new InvalidParameterValueException("NetworkIds and ipAddress can't be specified along with ipToNetworkMap parameter"); - } - LinkedHashMap ipToNetworkMap = null; - if (ipToNetworkList != null && !ipToNetworkList.isEmpty()) { - ipToNetworkMap = new LinkedHashMap(); - Collection ipsCollection = ipToNetworkList.values(); - Iterator iter = ipsCollection.iterator(); - while (iter.hasNext()) { - HashMap ips = (HashMap)iter.next(); - Long networkId = getNetworkIdFomIpMap(ips); - IpAddresses addrs = getIpAddressesFromIpMap(ips); - ipToNetworkMap.put(networkId, addrs); - } - } - - return ipToNetworkMap; - } - - @Nonnull - private IpAddresses getIpAddressesFromIpMap(HashMap ips) { - String requestedIp = ips.get("ip"); - String requestedIpv6 = ips.get("ipv6"); - String requestedMac = ips.get("mac"); - if (requestedIpv6 != null) { - requestedIpv6 = NetUtils.standardizeIp6Address(requestedIpv6); - } - if (requestedMac != null) { - if(!NetUtils.isValidMac(requestedMac)) { - throw new InvalidParameterValueException("Mac address is not valid: " + requestedMac); - } else if(!NetUtils.isUnicastMac(requestedMac)) { - throw new InvalidParameterValueException("Mac address is not unicast: " + requestedMac); - } - requestedMac = NetUtils.standardizeMacAddress(requestedMac); - } - return new IpAddresses(requestedIp, requestedIpv6, requestedMac); - } - - @Nonnull - private Long getNetworkIdFomIpMap(HashMap ips) { - Long 
networkId; - final String networkid = ips.get("networkid"); - Network network = _networkService.getNetwork(networkid); - if (network != null) { - networkId = network.getId(); - } else { - try { - networkId = Long.parseLong(networkid); - } catch (NumberFormatException e) { - throw new InvalidParameterValueException("Unable to translate and find entity with networkId: " + networkid); - } - } - return networkId; - } - - public String getIpAddress() { - return ipAddress; - } - - public String getIp6Address() { - if (ip6Address == null) { - return null; - } - return NetUtils.standardizeIp6Address(ip6Address); - } - - - public String getMacAddress() { - if (macAddress == null) { - return null; - } - if(!NetUtils.isValidMac(macAddress)) { - throw new InvalidParameterValueException("Mac address is not valid: " + macAddress); - } else if(!NetUtils.isUnicastMac(macAddress)) { - throw new InvalidParameterValueException("Mac address is not unicast: " + macAddress); - } - return NetUtils.standardizeMacAddress(macAddress); - } - - public List getAffinityGroupIdList() { - if (affinityGroupNameList != null && affinityGroupIdList != null) { - throw new InvalidParameterValueException("affinitygroupids parameter is mutually exclusive with affinitygroupnames parameter"); - } - - // transform group names to ids here - if (affinityGroupNameList != null) { - List affinityGroupIds = new ArrayList(); - for (String groupName : affinityGroupNameList) { - Long groupId = _responseGenerator.getAffinityGroupId(groupName, getEntityOwnerId()); - if (groupId == null) { - throw new InvalidParameterValueException("Unable to find affinity group by name " + groupName); - } else { - affinityGroupIds.add(groupId); - } - } - return affinityGroupIds; - } else { - return affinityGroupIdList; - } - } - - public String getKeyboard() { - // TODO Auto-generated method stub - return keyboard; - } - - public Map> getDhcpOptionsMap() { - Map> dhcpOptionsMap = new HashMap<>(); - if (dhcpOptionsNetworkList != null 
&& !dhcpOptionsNetworkList.isEmpty()) { - - Collection> paramsCollection = this.dhcpOptionsNetworkList.values(); - for (Map dhcpNetworkOptions : paramsCollection) { - String networkId = dhcpNetworkOptions.get(ApiConstants.NETWORK_ID); - - if (networkId == null) { - throw new IllegalArgumentException("No networkid specified when providing extra dhcp options."); - } - - Map dhcpOptionsForNetwork = new HashMap<>(); - dhcpOptionsMap.put(networkId, dhcpOptionsForNetwork); - - for (String key : dhcpNetworkOptions.keySet()) { - if (key.startsWith(ApiConstants.DHCP_PREFIX)) { - int dhcpOptionValue = Integer.parseInt(key.replaceFirst(ApiConstants.DHCP_PREFIX, "")); - dhcpOptionsForNetwork.put(dhcpOptionValue, dhcpNetworkOptions.get(key)); - } else if (!key.equals(ApiConstants.NETWORK_ID)) { - Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key); - dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key)); - } - } - - } - } - - return dhcpOptionsMap; - } - - public Map getDataDiskTemplateToDiskOfferingMap() { - if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) { - throw new InvalidParameterValueException("diskofferingid parameter can't be specified along with datadisktemplatetodiskofferinglist parameter"); - } - if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) { - return new HashMap(); - } - - HashMap dataDiskTemplateToDiskOfferingMap = new HashMap(); - for (Object objDataDiskTemplates : dataDiskTemplateToDiskOfferingList.values()) { - HashMap dataDiskTemplates = (HashMap) objDataDiskTemplates; - Long dataDiskTemplateId; - DiskOffering dataDiskOffering = null; - VirtualMachineTemplate dataDiskTemplate= _entityMgr.findByUuid(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); - if (dataDiskTemplate == null) { - dataDiskTemplate = _entityMgr.findById(VirtualMachineTemplate.class, dataDiskTemplates.get("datadisktemplateid")); - if (dataDiskTemplate == null) - throw new 
InvalidParameterValueException("Unable to translate and find entity with datadisktemplateid " + dataDiskTemplates.get("datadisktemplateid")); - } - dataDiskTemplateId = dataDiskTemplate.getId(); - dataDiskOffering = _entityMgr.findByUuid(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); - if (dataDiskOffering == null) { - dataDiskOffering = _entityMgr.findById(DiskOffering.class, dataDiskTemplates.get("diskofferingid")); - if (dataDiskOffering == null) - throw new InvalidParameterValueException("Unable to translate and find entity with diskofferingId " + dataDiskTemplates.get("diskofferingid")); - } - dataDiskTemplateToDiskOfferingMap.put(dataDiskTemplateId, dataDiskOffering); - } - return dataDiskTemplateToDiskOfferingMap; - } - - public String getExtraConfig() { - return extraConfig; - } - - public boolean getCopyImageTags() { - return copyImageTags == null ? false : copyImageTags; - } - - public Boolean getBootIntoSetup() { - return bootIntoSetup; - } - - public boolean isDynamicScalingEnabled() { - return dynamicScalingEnabled == null ? true : dynamicScalingEnabled; - } - - public Long getOverrideDiskOfferingId() { - return overrideDiskOfferingId; - } - - public ApiConstants.IoDriverPolicy getIoDriverPolicy() { - if (StringUtils.isNotBlank(ioDriverPolicy)) { - try { - String policyType = ioDriverPolicy.trim().toUpperCase(); - return ApiConstants.IoDriverPolicy.valueOf(policyType); - } catch (IllegalArgumentException e) { - String errMesg = String.format("Invalid io policy %s specified for vm %s. 
Valid values are: %s", ioDriverPolicy, getName(), Arrays.toString(ApiConstants.IoDriverPolicy.values())); - logger.warn(errMesg); - throw new InvalidParameterValueException(errMesg); - } - } - return null; - } - public Long getVolumeId() { return volumeId; } @@ -782,57 +87,6 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG public boolean isVolumeOrSnapshotProvided() { return volumeId != null || snapshotId != null; } - ///////////////////////////////////////////////////// - /////////////// API Implementation/////////////////// - ///////////////////////////////////////////////////// - - @Override - public String getCommandName() { - return s_name; - } - - public static String getResultObjectName() { - return "virtualmachine"; - } - - @Override - public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); - if (accountId == null) { - return CallContext.current().getCallingAccount().getId(); - } - - return accountId; - } - - @Override - public String getEventType() { - return EventTypes.EVENT_VM_CREATE; - } - - @Override - public String getCreateEventType() { - return EventTypes.EVENT_VM_CREATE; - } - - @Override - public String getCreateEventDescription() { - return "creating Vm"; - } - - @Override - public String getEventDescription() { - if(getStartVm()) { - return "starting Vm. Vm Id: " + getEntityUuid(); - } - return "deploying Vm. 
Vm Id: " + getEntityUuid(); - } - - @Override - public ApiCommandResourceType getApiResourceType() { - return ApiCommandResourceType.VirtualMachine; - } - @Override public void execute() { UserVm result; @@ -875,7 +129,6 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG } } - @Override public void create() throws ResourceAllocationException { if (Stream.of(templateId, snapshotId, volumeId).filter(Objects::nonNull).count() != 1) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java index aa121162cb4..18a9d2058a6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java @@ -140,7 +140,8 @@ public class DestroyVMCmd extends BaseAsyncCmd implements UserCmd { if (responses != null && !responses.isEmpty()) { response = responses.get(0); } - response.setResponseName("virtualmachine"); + response.setResponseName(getCommandName()); + response.setObjectName("virtualmachine"); setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to destroy vm"); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java index 480ebcfb13d..0e895fa4e96 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java @@ -41,6 +41,10 @@ public class BackupOfferingResponse extends BaseResponse { @Param(description = "description for the backup offering") private String description; + @SerializedName(ApiConstants.PROVIDER) + @Param(description = "provider name", since = "4.21.0") + private String provider; + @SerializedName(ApiConstants.EXTERNAL_ID) 
@Param(description = "external ID on the provider side") private String externalId; @@ -69,6 +73,10 @@ public class BackupOfferingResponse extends BaseResponse { this.externalId = externalId; } + public void setProvider(String provider) { + this.provider = provider; + } + public void setName(String name) { this.name = name; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java index 63419680fea..0ae558ac803 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java @@ -26,6 +26,7 @@ import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; import java.util.Date; +import java.util.Map; @EntityReference(value = Backup.class) public class BackupResponse extends BaseResponse { @@ -34,6 +35,14 @@ public class BackupResponse extends BaseResponse { @Param(description = "ID of the VM backup") private String id; + @SerializedName(ApiConstants.NAME) + @Param(description = "name of the backup", since = "4.21.0") + private String name; + + @SerializedName(ApiConstants.DESCRIPTION) + @Param(description = "description for the backup", since = "4.21.0") + private String description; + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) @Param(description = "ID of the VM") private String vmId; @@ -102,6 +111,18 @@ public class BackupResponse extends BaseResponse { @Param(description = "zone name") private String zone; + @SerializedName(ApiConstants.VM_DETAILS) + @Param(description = "Lists the vm specific details for the backup", since = "4.21.0") + private Map vmDetails; + + @SerializedName(ApiConstants.INTERVAL_TYPE) + @Param(description = "Interval type of the backup", since = "4.21.0") + private String intervalType; + + @SerializedName(ApiConstants.BACKUP_VM_OFFERING_REMOVED) + @Param(description = "The backup offering corresponding to this 
backup was removed from the VM", since = "4.21.0") + private Boolean vmOfferingRemoved; + public String getId() { return id; } @@ -110,6 +131,22 @@ public class BackupResponse extends BaseResponse { this.id = id; } + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + public String getVmId() { return vmId; } @@ -245,4 +282,28 @@ public class BackupResponse extends BaseResponse { public void setZone(String zone) { this.zone = zone; } + + public Map getVmDetails() { + return vmDetails; + } + + public void setVmDetails(Map vmDetails) { + this.vmDetails = vmDetails; + } + + public String getIntervalType() { + return this.intervalType; + } + + public void setIntervalType(String intervalType) { + this.intervalType = intervalType; + } + + public Boolean getVmOfferingRemoved() { + return this.vmOfferingRemoved; + } + + public void setVmOfferingRemoved(Boolean vmOfferingRemoved) { + this.vmOfferingRemoved = vmOfferingRemoved; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java index 9584223563f..d8c2980774d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java @@ -60,6 +60,10 @@ public class BackupScheduleResponse extends BaseResponse { this.id = id; } + @SerializedName(ApiConstants.QUIESCE_VM) + @Param(description = "quiesce the instance before checkpointing the disks for backup") + private Boolean quiesceVM; + public String getVmName() { return vmName; } @@ -103,4 +107,8 @@ public class BackupScheduleResponse extends BaseResponse { public void setMaxBackups(Integer maxBackups) { this.maxBackups = maxBackups; } 
+ + public void setQuiesceVM(Boolean quiesceVM) { + this.quiesceVM = quiesceVM; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ConsoleSessionResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ConsoleSessionResponse.java new file mode 100644 index 00000000000..85747d7c2a8 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/ConsoleSessionResponse.java @@ -0,0 +1,236 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.google.gson.annotations.SerializedName; + +import com.cloud.serializer.Param; +import org.apache.cloudstack.consoleproxy.ConsoleSession; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import java.util.Date; + +@EntityReference(value = ConsoleSession.class) +public class ConsoleSessionResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "ID of the console session.") + private String id; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "Date when the console session's endpoint was created.") + private Date created; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "Domain of the account that created the console endpoint.") + private String domain; + + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "Domain path of the account that created the console endpoint.") + private String domainPath; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "Domain ID of the account that created the console endpoint.") + private String domainId; + + @SerializedName(ApiConstants.ACCOUNT) + @Param(description = "Account that created the console endpoint.") + private String account; + + @SerializedName(ApiConstants.ACCOUNT_ID) + @Param(description = "ID of the account that created the console endpoint.") + private String accountId; + + @SerializedName(ApiConstants.USER) + @Param(description = "User that created the console endpoint.") + private String user; + + @SerializedName(ApiConstants.USER_ID) + @Param(description = "ID of the user that created the console endpoint.") + private String userId; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) + @Param(description = 
"ID of the virtual machine.") + private String vmId; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_NAME) + @Param(description = "Name of the virtual machine.") + private String vmName; + + @SerializedName(ApiConstants.HOST_ID) + @Param(description = "ID of the host.") + private String hostId; + + @SerializedName(ApiConstants.HOST_NAME) + @Param(description = "Name of the host.") + private String hostName; + + @SerializedName(ApiConstants.ACQUIRED) + @Param(description = "Date when the console session was acquired.") + private Date acquired; + + @SerializedName(ApiConstants.REMOVED) + @Param(description = "Date when the console session was removed.") + private Date removed; + + @SerializedName(ApiConstants.CONSOLE_ENDPOINT_CREATOR_ADDRESS) + @Param(description = "IP address of the creator of the console endpoint.") + private String consoleEndpointCreatorAddress; + + @SerializedName(ApiConstants.CLIENT_ADDRESS) + @Param(description = "IP address of the client that created the console session.") + private String clientAddress; + + public void setId(String id) { + this.id = id; + } + + public void setCreated(Date created) { + this.created = created; + } + + public void setDomain(String domain) { + this.domain = domain; + } + + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public void setAccount(String account) { + this.account = account; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } + + public void setUser(String user) { + this.user = user; + } + + public void setUserId(String userId) { + this.userId = userId; + } + + public void setVmId(String vmId) { + this.vmId = vmId; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public void 
setAcquired(Date acquired) { + this.acquired = acquired; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } + + public void setConsoleEndpointCreatorAddress(String consoleEndpointCreatorAddress) { + this.consoleEndpointCreatorAddress = consoleEndpointCreatorAddress; + } + + public void setClientAddress(String clientAddress) { + this.clientAddress = clientAddress; + } + + public String getId() { + return id; + } + + public Date getCreated() { + return created; + } + + public String getDomain() { + return domain; + } + + public String getDomainPath() { + return domainPath; + } + + public String getDomainId() { + return domainId; + } + + public String getAccount() { + return account; + } + + public String getAccountId() { + return accountId; + } + + public String getUser() { + return user; + } + + public String getUserId() { + return userId; + } + + public String getVmId() { + return vmId; + } + + public String getVmName() { + return vmName; + } + + public String getHostId() { + return hostId; + } + + public String getHostName() { + return hostName; + } + + public Date getAcquired() { + return acquired; + } + + public Date getRemoved() { + return removed; + } + + public String getConsoleEndpointCreatorAddress() { + return consoleEndpointCreatorAddress; + } + + public String getClientAddress() { + return clientAddress; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ObjectStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ObjectStoreResponse.java index e4030799aa7..dcb93aaaf1d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ObjectStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ObjectStoreResponse.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.api.response; import com.cloud.serializer.Param; + +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.storage.object.ObjectStore; import 
com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.BaseResponseWithAnnotations; @@ -24,15 +26,15 @@ import org.apache.cloudstack.api.EntityReference; @EntityReference(value = ObjectStore.class) public class ObjectStoreResponse extends BaseResponseWithAnnotations { - @SerializedName("id") + @SerializedName(ApiConstants.ID) @Param(description = "the ID of the object store") private String id; - @SerializedName("name") + @SerializedName(ApiConstants.NAME) @Param(description = "the name of the object store") private String name; - @SerializedName("url") + @SerializedName(ApiConstants.URL) @Param(description = "the url of the object store") private String url; @@ -44,6 +46,10 @@ public class ObjectStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the total size of the object store") private Long storageTotal; + @SerializedName("storageallocated") + @Param(description = "the allocated size of the object store") + private Long storageAllocated; + @SerializedName("storageused") @Param(description = "the object store currently used size") private Long storageUsed; @@ -96,6 +102,14 @@ public class ObjectStoreResponse extends BaseResponseWithAnnotations { this.storageTotal = storageTotal; } + public Long getStorageAllocated() { + return storageAllocated; + } + + public void setStorageAllocated(Long storageAllocated) { + this.storageAllocated = storageAllocated; + } + public Long getStorageUsed() { return storageUsed; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotPolicyResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotPolicyResponse.java index bfa1cca1ca0..4ce77cfdf6e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotPolicyResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotPolicyResponse.java @@ -16,17 +16,16 @@ // under the License. 
package org.apache.cloudstack.api.response; -import java.util.LinkedHashSet; -import java.util.Set; - +import com.cloud.serializer.Param; +import com.cloud.storage.snapshot.SnapshotPolicy; +import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponseWithTagInformation; import org.apache.cloudstack.api.EntityReference; -import com.cloud.serializer.Param; -import com.cloud.storage.snapshot.SnapshotPolicy; -import com.google.gson.annotations.SerializedName; +import java.util.LinkedHashSet; +import java.util.Set; @EntityReference(value = SnapshotPolicy.class) public class SnapshotPolicyResponse extends BaseResponseWithTagInformation { @@ -62,9 +61,14 @@ public class SnapshotPolicyResponse extends BaseResponseWithTagInformation { @Param(description = "The list of zones in which snapshot backup is scheduled", responseObject = ZoneResponse.class, since = "4.19.0") protected Set zones; + @SerializedName(ApiConstants.STORAGE) + @Param(description = "The list of pools in which snapshot backup is scheduled", responseObject = StoragePoolResponse.class, since = "4.21.0") + protected Set storagePools; + public SnapshotPolicyResponse() { tags = new LinkedHashSet(); zones = new LinkedHashSet<>(); + storagePools = new LinkedHashSet<>(); } public String getId() { @@ -130,4 +134,6 @@ public class SnapshotPolicyResponse extends BaseResponseWithTagInformation { public void setZones(Set zones) { this.zones = zones; } + + public void setStoragePools(Set pools) { this.storagePools = pools; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java index 9f7a7f42dec..5d6756c950d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java @@ -107,6 
+107,10 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements @Param(description = "physical size of backedup snapshot on image store") private long physicalSize; + @SerializedName(ApiConstants.CHAIN_SIZE) + @Param(description = "chain size of snapshot including all parent snapshots. Shown only for incremental snapshots if snapshot.show.chain.size setting is set to true", since = "4.21.0") + private Long chainSize; + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "id of the availability zone") private String zoneId; @@ -244,6 +248,10 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements this.physicalSize = physicalSize; } + public void setChainSize(long chainSize) { + this.chainSize = chainSize; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/backup/Backup.java b/api/src/main/java/org/apache/cloudstack/backup/Backup.java index dffe8a03213..951af9180e7 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/Backup.java +++ b/api/src/main/java/org/apache/cloudstack/backup/Backup.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup; import java.util.Date; import java.util.List; +import java.util.Map; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; @@ -33,28 +34,6 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { Allocated, Queued, BackingUp, BackedUp, Error, Failed, Restoring, Removed, Expunged } - public enum Type { - MANUAL, HOURLY, DAILY, WEEKLY, MONTHLY; - private int max = 8; - - public void setMax(int max) { - this.max = max; - } - - public int getMax() { - return max; - } - - @Override - public String toString() { - return this.name(); - } - - public boolean equals(String snapshotType) { - return this.toString().equalsIgnoreCase(snapshotType); - } - } - class Metric { private Long backupSize = 0L; private Long 
dataSize = 0L; @@ -85,6 +64,8 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { private String id; private Date created; private String type; + private Long backupSize = 0L; + private Long dataSize = 0L; public RestorePoint(String id, Date created, String type) { this.id = id; @@ -92,6 +73,12 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { this.type = type; } + public RestorePoint(String id, Date created, String type, Long backupSize, Long dataSize) { + this(id, created, type); + this.backupSize = backupSize; + this.dataSize = dataSize; + } + public String getId() { return id; } @@ -115,6 +102,22 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { public void setType(String type) { this.type = type; } + + public Long getBackupSize() { + return backupSize; + } + + public void setBackupSize(Long backupSize) { + this.backupSize = backupSize; + } + + public Long getDataSize() { + return dataSize; + } + + public void setDataSize(Long dataSize) { + this.dataSize = dataSize; + } } class VolumeInfo { @@ -122,12 +125,20 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { private Volume.Type type; private Long size; private String path; + private Long deviceId; + private String diskOfferingId; + private Long minIops; + private Long maxIops; - public VolumeInfo(String uuid, String path, Volume.Type type, Long size) { + public VolumeInfo(String uuid, String path, Volume.Type type, Long size, Long deviceId, String diskOfferingId, Long minIops, Long maxIops) { this.uuid = uuid; this.type = type; this.size = size; this.path = path; + this.deviceId = deviceId; + this.diskOfferingId = diskOfferingId; + this.minIops = minIops; + this.maxIops = maxIops; } public String getUuid() { @@ -150,13 +161,29 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { return size; } + public Long getDeviceId() { + return deviceId; + } + + public 
String getDiskOfferingId() { + return diskOfferingId; + } + + public Long getMinIops() { + return minIops; + } + + public Long getMaxIops() { + return maxIops; + } + @Override public String toString() { - return StringUtils.join(":", uuid, path, type, size); + return StringUtils.join(":", uuid, path, type, size, deviceId, diskOfferingId, minIops, maxIops); } } - long getVmId(); + Long getVmId(); long getBackupOfferingId(); String getExternalId(); String getType(); @@ -164,6 +191,12 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { Backup.Status getStatus(); Long getSize(); Long getProtectedSize(); + void setName(String name); + String getDescription(); + void setDescription(String description); List getBackedUpVolumes(); long getZoneId(); + Map getDetails(); + String getDetail(String name); + Long getBackupScheduleId(); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index eebad3af067..c4b92fc9e05 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -18,20 +18,29 @@ package org.apache.cloudstack.backup; import java.util.List; +import java.util.Map; +import com.cloud.capacity.Capacity; import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd; import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.CreateBackupCmd; import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.DeleteBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.ListBackupOfferingsCmd; import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd; +import org.apache.cloudstack.api.response.BackupResponse; import 
org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.storage.Volume; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.PluggableService; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VmDiskInfo; /** * Backup and Recover Manager Interface @@ -58,38 +67,6 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Enable volume attach/detach operations for VMs that are assigned to Backup Offerings.", true); - ConfigKey BackupHourlyMax = new ConfigKey("Advanced", Integer.class, - "backup.max.hourly", - "8", - "Maximum recurring hourly backups to be retained for an instance. If the limit is reached, early backups from the start of the hour are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring hourly backups can not be scheduled.", - false, - ConfigKey.Scope.Global, - null); - - ConfigKey BackupDailyMax = new ConfigKey("Advanced", Integer.class, - "backup.max.daily", - "8", - "Maximum recurring daily backups to be retained for an instance. If the limit is reached, backups from the start of the day are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring daily backups can not be scheduled.", - false, - ConfigKey.Scope.Global, - null); - - ConfigKey BackupWeeklyMax = new ConfigKey("Advanced", Integer.class, - "backup.max.weekly", - "8", - "Maximum recurring weekly backups to be retained for an instance. If the limit is reached, backups from the beginning of the week are deleted so that newer ones can be saved. This limit does not apply to manual backups. 
If set to 0, recurring weekly backups can not be scheduled.", - false, - ConfigKey.Scope.Global, - null); - - ConfigKey BackupMonthlyMax = new ConfigKey("Advanced", Integer.class, - "backup.max.monthly", - "8", - "Maximum recurring monthly backups to be retained for an instance. If the limit is reached, backups from the beginning of the month are deleted so that newer ones can be saved. This limit does not apply to manual backups. If set to 0, recurring monthly backups can not be scheduled.", - false, - ConfigKey.Scope.Global, - null); - ConfigKey DefaultMaxAccountBackups = new ConfigKey("Account Defaults", Long.class, "max.account.backups", "20", @@ -138,6 +115,14 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer ConfigKey.Scope.Global, null); + ConfigKey BackupStorageCapacityThreshold = new ConfigKey<>("Alert", Float.class, + "zone.backupStorage.capacity.notificationthreshold", + "0.75", + "Percentage (as a value between 0 and 1) of backup storage utilization above which alerts will be sent about low storage available.", + true, + ConfigKey.Scope.Zone, + null); + /** * List backup provider offerings * @param zoneId zone id @@ -200,11 +185,11 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer /** * Creates backup of a VM - * @param vmId Virtual Machine ID - * @param scheduleId Virtual Machine Backup Schedule ID + * @param cmd CreateBackupCmd + * @param job The async job associated with the backup retention * @return returns operation success */ - boolean createBackup(final Long vmId, final Long scheduleId) throws ResourceAllocationException; + boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllocationException; /** * List existing backups for a VM @@ -216,6 +201,15 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer */ boolean restoreBackup(final Long backupId); + Map getIpToNetworkMapFromBackup(Backup backup, boolean preserveIps, List 
networkIds); + + Boolean canCreateInstanceFromBackup(Long backupId); + + /** + * Restore a backup to a new Instance + */ + boolean restoreBackupToVM(Long backupId, Long vmId) throws ResourceUnavailableException; + /** * Restore a backed up volume and attach it to a VM */ @@ -229,5 +223,25 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer */ boolean deleteBackup(final Long backupId, final Boolean forced); + void validateBackupForZone(Long zoneId); + BackupOffering updateBackupOffering(UpdateBackupOfferingCmd updateBackupOfferingCmd); + + VmDiskInfo getRootDiskInfoFromBackup(Backup backup); + + List getDataDiskInfoListFromBackup(Backup backup); + + void checkVmDisksSizeAgainstBackup(List vmDiskInfoList, Backup backup); + + Map getBackupDetailsFromVM(VirtualMachine vm); + + String createVolumeInfoFromVolumes(List vmVolumes); + + String getBackupNameFromVM(VirtualMachine vm); + + BackupResponse createBackupResponse(Backup backup, Boolean listVmDetails); + + Capacity getBackupStorageUsedStats(Long zoneId); + + void checkAndRemoveBackupOfferingBeforeExpunge(VirtualMachine vm); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index 39582b0e423..1eb36f89556 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.backup; import java.util.List; -import java.util.Map; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -71,10 +70,12 @@ public interface BackupProvider { /** * Starts and creates an adhoc backup process * for a previously registered VM backup - * @param vm the machine to make a backup of + * + * @param vm the machine to make a backup of + * @param quiesceVM instance will be quiesced for checkpointing for backup. Applicable only to NAS plugin. 
* @return the result and {code}Backup{code} {code}Object{code} */ - Pair takeBackup(VirtualMachine vm); + Pair takeBackup(VirtualMachine vm, Boolean quiesceVM); /** * Delete an existing backup @@ -84,6 +85,8 @@ public interface BackupProvider { */ boolean deleteBackup(Backup backup, boolean forced); + boolean restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid); + /** * Restore VM from backup */ @@ -92,27 +95,44 @@ public interface BackupProvider { /** * Restore a volume from a backup */ - Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState); + Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState); /** - * Returns backup metrics for a list of VMs in a zone + * Syncs backup metrics (backup size, protected size) from the plugin and stores it within the provider * @param zoneId the zone for which to return metrics - * @param vms a list of machines to get measurements for - * @return a map of machine -> backup metrics */ - Map getBackupMetrics(Long zoneId, List vms); + void syncBackupMetrics(Long zoneId); /** - * This method should TODO - * @param vm the machine to get restore point for + * Returns a list of Backup.RestorePoint + * @param vm the machine to get the restore points for */ List listRestorePoints(VirtualMachine vm); /** - * This method should TODO + * Creates and returns an entry in the backups table by getting the information from restorePoint and vm. 
+ * * @param restorePoint the restore point to create a backup for - * @param vm The machine for which to create a backup - * @param metric the metric object to update with the new backup data + * @param vm The machine for which to create a backup */ - Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric); + Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm); + + /** + * Returns if the backup provider supports creating new instance from backup + */ + boolean supportsInstanceFromBackup(); + + /** + * Returns the backup storage usage (Used, Total) for a backup provider + * @param zoneId the zone for which to return metrics + * @return a pair of Used size and Total size for the backup storage + */ + Pair getBackupStorageStats(Long zoneId); + + /** + * Gets the backup storage usage (Used, Total) from the plugin and stores it in db + * @param zoneId the zone for which to return metrics + */ + void syncBackupStorageStats(Long zoneId); + } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java b/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java index 8e5c9740e69..be539a0eb04 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java @@ -28,7 +28,9 @@ public interface BackupRepository extends InternalIdentity, Identity { String getType(); String getAddress(); String getMountOptions(); + void setUsedBytes(Long usedBytes); Long getCapacityBytes(); Long getUsedBytes(); + void setCapacityBytes(Long capacityBytes); Date getCreated(); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java index f439b3a9139..b5138d34de1 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java +++ 
b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java @@ -30,6 +30,7 @@ public interface BackupSchedule extends InternalIdentity { String getTimezone(); Date getScheduledTimestamp(); Long getAsyncJobId(); - Integer getMaxBackups(); + Boolean getQuiesceVM(); + int getMaxBackups(); String getUuid(); } diff --git a/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java index 23b571e7fae..655b8faf443 100644 --- a/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java +++ b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java @@ -18,6 +18,9 @@ package org.apache.cloudstack.consoleproxy; import com.cloud.utils.component.Manager; import org.apache.cloudstack.api.command.user.consoleproxy.ConsoleEndpoint; +import org.apache.cloudstack.api.command.user.consoleproxy.ListConsoleSessionsCmd; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; +import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import java.util.Date; @@ -48,4 +51,8 @@ public interface ConsoleAccessManager extends Manager, Configurable { String genAccessTicket(String host, String port, String sid, String tag, String sessionUuid); String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime, String sessionUuid); + + ListResponse listConsoleSessions(ListConsoleSessionsCmd cmd); + + ConsoleSession listConsoleSessionById(long id); } diff --git a/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleSession.java b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleSession.java new file mode 100644 index 00000000000..6cbdd31fd94 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleSession.java @@ -0,0 +1,45 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.consoleproxy; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface ConsoleSession extends InternalIdentity, Identity { + + Date getCreated(); + + long getDomainId(); + + long getAccountId(); + + long getUserId(); + + long getInstanceId(); + + long getHostId(); + + Date getRemoved(); + + Date getAcquired(); + + String getConsoleEndpointCreatorAddress(); + + String getClientAddress(); +} diff --git a/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java b/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java index 221a550ad63..9285331f41a 100644 --- a/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java +++ b/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java @@ -158,7 +158,7 @@ public interface RoutedIpv4Manager extends PluggableService, Configurable { boolean isRoutedVpc(Vpc vpc); - boolean isVpcVirtualRouterGateway(VpcOffering vpcOffering); + boolean isValidGateway(VpcOffering vpcOffering); BgpPeer createBgpPeer(CreateBgpPeerCmd createBgpPeerCmd); diff --git 
a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java index a69a7a858ce..c7aeb8ba99b 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java @@ -61,6 +61,8 @@ public class AddObjectStoragePoolCmdTest { String provider = "Simulator"; + Long size = 10L; + Map details; private AutoCloseable closeable; @@ -74,6 +76,7 @@ public class AddObjectStoragePoolCmdTest { ReflectionTestUtils.setField(addObjectStoragePoolCmdSpy, "url", url); ReflectionTestUtils.setField(addObjectStoragePoolCmdSpy, "providerName", provider); ReflectionTestUtils.setField(addObjectStoragePoolCmdSpy, "details", details); + ReflectionTestUtils.setField(addObjectStoragePoolCmdSpy, "size", size); addObjectStoragePoolCmdSpy._storageService = storageService; addObjectStoragePoolCmdSpy._responseGenerator = responseGenerator; } @@ -87,12 +90,12 @@ public class AddObjectStoragePoolCmdTest { @Test public void testAddObjectStore() throws DiscoveryException { Mockito.doReturn(objectStore).when(storageService).discoverObjectStore(Mockito.anyString(), - Mockito.anyString(), Mockito.anyString(), any()); + Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), any()); ObjectStoreResponse objectStoreResponse = new ObjectStoreResponse(); Mockito.doReturn(objectStoreResponse).when(responseGenerator).createObjectStoreResponse(any()); addObjectStoragePoolCmdSpy.execute(); Mockito.verify(storageService, Mockito.times(1)) - .discoverObjectStore(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); + .discoverObjectStore(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java index 34baebe5257..5fa46ec97e5 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateSnapshotCmdTest.java @@ -93,7 +93,7 @@ public class CreateSnapshotCmdTest extends TestCase { Snapshot snapshot = Mockito.mock(Snapshot.class); try { Mockito.when(volumeApiService.takeSnapshot(nullable(Long.class), nullable(Long.class), isNull(), - nullable(Account.class), nullable(Boolean.class), nullable(Snapshot.LocationType.class), nullable(Boolean.class), nullable(Map.class), nullable(List.class))).thenReturn(snapshot); + nullable(Account.class), nullable(Boolean.class), nullable(Snapshot.LocationType.class), nullable(Boolean.class), nullable(Map.class), nullable(List.class), nullable(List.class), Mockito.anyBoolean())).thenReturn(snapshot); } catch (Exception e) { Assert.fail("Received exception when success expected " + e.getMessage()); @@ -126,7 +126,7 @@ public class CreateSnapshotCmdTest extends TestCase { try { Mockito.when(volumeApiService.takeSnapshot(nullable(Long.class), nullable(Long.class), nullable(Long.class), - nullable(Account.class), nullable(Boolean.class), nullable(Snapshot.LocationType.class), nullable(Boolean.class), any(), Mockito.anyList())).thenReturn(null); + nullable(Account.class), nullable(Boolean.class), nullable(Snapshot.LocationType.class), nullable(Boolean.class), any(), Mockito.anyList(), Mockito.anyList(), Mockito.anyBoolean())).thenReturn(null); } catch (Exception e) { Assert.fail("Received exception when success expected " + e.getMessage()); } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmdTest.java new file mode 100644 index 00000000000..47bef14bb61 --- /dev/null +++ 
b/api/src/test/java/org/apache/cloudstack/api/command/user/consoleproxy/ListConsoleSessionsCmdTest.java @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.import org.apache.cloudstack.context.CallContext; +package org.apache.cloudstack.api.command.user.consoleproxy; + +import org.apache.cloudstack.consoleproxy.ConsoleSession; +import com.cloud.user.AccountService; + +import com.cloud.user.UserAccount; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; + +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ListConsoleSessionsCmdTest { + @Mock + private AccountService accountServiceMock; + + @Mock + private ConsoleAccessManager consoleAccessManagerMock; + + @Spy + @InjectMocks + private ListConsoleSessionsCmd listConsoleSessionsCmdSpy; + + @Test + public void executeTestApiExecutionShouldCallServiceLayer() { + 
Mockito.when(consoleAccessManagerMock.listConsoleSessions(listConsoleSessionsCmdSpy)).thenReturn(new ListResponse<>()); + listConsoleSessionsCmdSpy.execute(); + Mockito.verify(consoleAccessManagerMock).listConsoleSessions(listConsoleSessionsCmdSpy); + } + + @Test + public void getEntityOwnerIdTestReturnConsoleSessionIdIfProvided() { + ConsoleSession consoleSessionMock = Mockito.mock(ConsoleSession.class); + long consoleSessionId = 2L; + long accountId = 2L; + + Mockito.when(listConsoleSessionsCmdSpy.getId()).thenReturn(consoleSessionId); + Mockito.when(consoleAccessManagerMock.listConsoleSessionById(consoleSessionId)).thenReturn(consoleSessionMock); + Mockito.when(consoleSessionMock.getAccountId()).thenReturn(accountId); + + Assert.assertEquals(accountId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } + + @Test + public void getEntityOwnerIdTestReturnAccountIdWhenNoConsoleSessionIdIsProvided() { + long accountId = 2L; + + Mockito.when(listConsoleSessionsCmdSpy.getId()).thenReturn(null); + Mockito.when(listConsoleSessionsCmdSpy.getAccountId()).thenReturn(accountId); + + Assert.assertEquals(accountId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } + + @Test + public void getEntityOwnerIdTestReturnUserIdWhenNoConsoleSessionIdAndAccountIdAreProvided() { + UserAccount userAccountMock = Mockito.mock(UserAccount.class); + long userId = 2L; + + Mockito.when(listConsoleSessionsCmdSpy.getId()).thenReturn(null); + Mockito.when(listConsoleSessionsCmdSpy.getAccountId()).thenReturn(null); + Mockito.when(listConsoleSessionsCmdSpy.getUserId()).thenReturn(userId); + Mockito.when(accountServiceMock.getUserAccountById(userId)).thenReturn(userAccountMock); + Mockito.when(userAccountMock.getAccountId()).thenReturn(userId); + + Assert.assertEquals(userId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } + + @Test + public void getEntityOwnerIdTestReturnSystemAccountIdWhenNoConsoleSessionIdAndAccountIdAndUserIdAreProvided() { + long systemAccountId = 1L; + + 
Mockito.when(listConsoleSessionsCmdSpy.getId()).thenReturn(null); + Mockito.when(listConsoleSessionsCmdSpy.getAccountId()).thenReturn(null); + Mockito.when(listConsoleSessionsCmdSpy.getUserId()).thenReturn(null); + + Assert.assertEquals(systemAccountId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } + + @Test + public void getEntityOwnerIdTestReturnSystemAccountIdWhenConsoleSessionDoesNotExist() { + long consoleSessionId = 2L; + long systemAccountId = 1L; + + Mockito.when(listConsoleSessionsCmdSpy.getId()).thenReturn(consoleSessionId); + Mockito.when(consoleAccessManagerMock.listConsoleSessionById(consoleSessionId)).thenReturn(null); + + Assert.assertEquals(systemAccountId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } + + @Test + public void getEntityOwnerIdTestReturnSystemAccountIdWhenUserAccountDoesNotExist() { + long userId = 2L; + long systemAccountId = 1L; + + Mockito.when(listConsoleSessionsCmdSpy.getUserId()).thenReturn(userId); + Mockito.when(accountServiceMock.getUserAccountById(userId)).thenReturn(null); + + Assert.assertEquals(systemAccountId, listConsoleSessionsCmdSpy.getEntityOwnerId()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmdTest.java index 632496ad215..db27cc76ec9 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmdTest.java @@ -87,7 +87,12 @@ public class CopySnapshotCmdTest { @Test (expected = ServerApiException.class) public void testExecuteWrongNoParams() { + UUIDManager uuidManager = Mockito.mock(UUIDManager.class); + SnapshotApiService snapshotApiService = Mockito.mock(SnapshotApiService.class); final CopySnapshotCmd cmd = new CopySnapshotCmd(); + cmd._uuidMgr = uuidManager; + cmd._snapshotService = snapshotApiService; + try { cmd.execute(); } 
catch (ResourceUnavailableException e) { diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmdTest.java new file mode 100644 index 00000000000..f7e3e38d9c3 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmdTest.java @@ -0,0 +1,483 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.vm; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiConstants.BootMode; +import org.apache.cloudstack.api.ApiConstants.BootType; +import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; +import org.apache.cloudstack.vm.lease.VMLeaseManager; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.NetworkService; +import com.cloud.utils.db.EntityManager; +import com.cloud.vm.VmDetailConstants; +import com.cloud.network.Network; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.offering.DiskOffering; +import com.cloud.network.Network.IpAddresses; +import com.cloud.vm.VmDiskInfo; + +@RunWith(MockitoJUnitRunner.class) +public class DeployVMCmdTest { + + @Spy + private DeployVMCmd cmd = new DeployVMCmd(); + + @Test + public void testGetBootType_ValidUEFI() { + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + + BootType result = cmd.getBootType(); + + assertEquals(BootType.UEFI, result); + } + + @Test + public void testGetBootTypeValidBIOS() { + ReflectionTestUtils.setField(cmd, "bootType", "BIOS"); + + BootType result = cmd.getBootType(); + + assertEquals(BootType.BIOS, result); + } + + @Test + public void testGetBootTypeInvalidValue() { + ReflectionTestUtils.setField(cmd, "bootType", "INVALID"); + + InvalidParameterValueException 
thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getBootType(); + }); + assertTrue(thrownException.getMessage().contains("Invalid bootType INVALID")); + } + + @Test + public void testGetBootTypeNullValue() { + ReflectionTestUtils.setField(cmd, "bootType", null); + + BootType result = cmd.getBootType(); + + assertNull(result); + } + + @Test + public void testGetBootModeValidSecure() { + ReflectionTestUtils.setField(cmd, "bootMode", "SECURE"); + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + + BootMode result = cmd.getBootMode(); + + assertEquals(BootMode.SECURE, result); + } + + @Test + public void testGetBootModeValidLegacy() { + ReflectionTestUtils.setField(cmd, "bootMode", "LEGACY"); + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + + BootMode result = cmd.getBootMode(); + + assertEquals(BootMode.LEGACY, result); + } + + @Test + public void testGetBootModeInvalidValue() { + ReflectionTestUtils.setField(cmd, "bootMode", "INVALID"); + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getBootMode(); + }); + assertTrue(thrownException.getMessage().contains("Invalid bootmode: INVALID specified for VM: null. Valid values are: [LEGACY, SECURE]")); + } + + @Test + public void testGetBootModeUEFIWithoutBootMode() { + ReflectionTestUtils.setField(cmd, "bootMode", null); + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getBootMode(); + }); + assertTrue(thrownException.getMessage().contains("bootmode must be specified for the VM with boot type: UEFI. 
Valid values are: [LEGACY, SECURE]")); + } + + @Test + public void testGetDetails() { + ReflectionTestUtils.setField(cmd, "bootType", "UEFI"); + ReflectionTestUtils.setField(cmd, "bootMode", "SECURE"); + ReflectionTestUtils.setField(cmd, "rootdisksize", 100L); + ReflectionTestUtils.setField(cmd, "ioDriverPolicy", "native"); + ReflectionTestUtils.setField(cmd, "iothreadsEnabled", true); + ReflectionTestUtils.setField(cmd, "nicMultiqueueNumber", null); + ReflectionTestUtils.setField(cmd, "nicPackedVirtQueues", null); + ReflectionTestUtils.setField(cmd, "details", new HashMap<>()); + + Map result = cmd.getDetails(); + + assertEquals("SECURE", result.get("UEFI")); + assertEquals("100", result.get(VmDetailConstants.ROOT_DISK_SIZE)); + assertEquals("native", result.get(VmDetailConstants.IO_POLICY)); + assertEquals("true", result.get(VmDetailConstants.IOTHREADS)); + } + + @Test + public void testGetLeaseExpiryActionValidStop() { + ReflectionTestUtils.setField(cmd, "leaseExpiryAction", "STOP"); + + VMLeaseManager.ExpiryAction result = cmd.getLeaseExpiryAction(); + + assertEquals(VMLeaseManager.ExpiryAction.STOP, result); + } + + @Test + public void testGetLeaseExpiryActionValidDestroy() { + ReflectionTestUtils.setField(cmd, "leaseExpiryAction", "DESTROY"); + + VMLeaseManager.ExpiryAction result = cmd.getLeaseExpiryAction(); + + assertEquals(VMLeaseManager.ExpiryAction.DESTROY, result); + } + + @Test + public void testGetLeaseExpiryActionInvalidValue() { + ReflectionTestUtils.setField(cmd, "leaseExpiryAction", "INVALID"); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getLeaseExpiryAction(); + }); + assertTrue(thrownException.getMessage().contains("Invalid value configured for leaseexpiryaction")); + } + + @Test + public void testGetLeaseExpiryActionNullValue() { + ReflectionTestUtils.setField(cmd, "leaseExpiryAction", null); + + VMLeaseManager.ExpiryAction result = cmd.getLeaseExpiryAction(); + + 
assertNull(result); + } + + @Test + public void testGetIoDriverPolicyValidThrottle() { + ReflectionTestUtils.setField(cmd, "ioDriverPolicy", "native"); + + IoDriverPolicy result = cmd.getIoDriverPolicy(); + + assertEquals(IoDriverPolicy.valueOf("NATIVE"), result); + } + + @Test + public void testGetIoDriverPolicyInvalidValue() { + ReflectionTestUtils.setField(cmd, "ioDriverPolicy", "INVALID"); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getIoDriverPolicy(); + }); + assertTrue(thrownException.getMessage().contains("Invalid io policy INVALID")); + } + + @Test + public void testGetNetworkIds() { + List networkIds = Arrays.asList(1L, 2L, 3L); + ReflectionTestUtils.setField(cmd, "networkIds", networkIds); + ReflectionTestUtils.setField(cmd, "vAppNetworks", null); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", null); + + List result = cmd.getNetworkIds(); + + assertEquals(networkIds, result); + } + + @Test + public void testGetNetworkIdsVAppNetworks() { + Map vAppNetworks = new HashMap<>(); + vAppNetworks.put("network1", new HashMap()); + ReflectionTestUtils.setField(cmd, "vAppNetworks", vAppNetworks); + ReflectionTestUtils.setField(cmd, "networkIds", null); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", null); + ReflectionTestUtils.setField(cmd, "ipAddress", null); + ReflectionTestUtils.setField(cmd, "ip6Address", null); + + List result = cmd.getNetworkIds(); + + assertTrue(result.isEmpty()); + } + + @Test + public void testGetNetworkIdsVAppNetworksAndNetworkIds() { + Map vAppNetworks = new HashMap<>(); + vAppNetworks.put("network1", new HashMap()); + ReflectionTestUtils.setField(cmd, "vAppNetworks", vAppNetworks); + ReflectionTestUtils.setField(cmd, "networkIds", Arrays.asList(1L, 2L)); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getNetworkIds(); + }); + 
assertTrue(thrownException.getMessage().contains("nicnetworklist can't be specified along with networkids")); + } + + @Test + public void testGetNetworkIdsIpToNetworkListAndNetworkIds() { + Map ipToNetworkList = new HashMap<>(); + ipToNetworkList.put("0", new HashMap()); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", ipToNetworkList); + ReflectionTestUtils.setField(cmd, "networkIds", Arrays.asList(1L, 2L)); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getNetworkIds(); + }); + assertTrue(thrownException.getMessage().contains("ipToNetworkMap can't be specified along with networkIds or ipAddress")); + } + + @Test + public void testGetIpToNetworkMap_WithNetworkIds() { + ReflectionTestUtils.setField(cmd, "networkIds", Arrays.asList(1L, 2L)); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", new HashMap<>()); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getIpToNetworkMap(); + }); + assertTrue(thrownException.getMessage().contains("NetworkIds and ipAddress can't be specified along with ipToNetworkMap parameter")); + } + + @Test + public void testGetIpToNetworkMap_WithIpAddress() { + ReflectionTestUtils.setField(cmd, "ipAddress", "192.168.1.1"); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", new HashMap<>()); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getIpToNetworkMap(); + }); + assertTrue(thrownException.getMessage().contains("NetworkIds and ipAddress can't be specified along with ipToNetworkMap parameter")); + } + + @Test + public void testGetIpToNetworkMap_WithEmptyIpToNetworkList() { + ReflectionTestUtils.setField(cmd, "networkIds", null); + ReflectionTestUtils.setField(cmd, "ipAddress", null); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", new HashMap<>()); + + Map result = cmd.getIpToNetworkMap(); + + assertNull(result); 
+ } + + @Test + public void testGetIpToNetworkMap_WithNullIpToNetworkList() { + ReflectionTestUtils.setField(cmd, "networkIds", null); + ReflectionTestUtils.setField(cmd, "ipAddress", null); + ReflectionTestUtils.setField(cmd, "ipToNetworkList", null); + + Map result = cmd.getIpToNetworkMap(); + + assertNull(result); + } + + @Test + public void testGetDataDiskInfoList() { + Map dataDisksDetails = new HashMap<>(); + Map dataDisk = new HashMap<>(); + dataDisk.put(ApiConstants.DISK_OFFERING_ID, "offering-uuid"); + dataDisk.put(ApiConstants.DEVICE_ID, "0"); + dataDisk.put(ApiConstants.MIN_IOPS, "1000"); + dataDisk.put(ApiConstants.MAX_IOPS, "2000"); + dataDisksDetails.put("0", dataDisk); + + ReflectionTestUtils.setField(cmd, "dataDisksDetails", dataDisksDetails); + + EntityManager entityMgr = mock(EntityManager.class); + ReflectionTestUtils.setField(cmd, "_entityMgr", entityMgr); + DiskOffering diskOffering = mock(DiskOffering.class); + when(diskOffering.getDiskSize()).thenReturn(1024 * 1024 * 1024L); + when(diskOffering.isCustomizedIops()).thenReturn(true); + when(entityMgr.findByUuid(DiskOffering.class, "offering-uuid")).thenReturn(diskOffering); + + List result = cmd.getDataDiskInfoList(); + + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals(diskOffering, result.get(0).getDiskOffering()); + assertEquals(1L, result.get(0).getSize().longValue()); + assertEquals(1000L, result.get(0).getMinIops().longValue()); + assertEquals(2000L, result.get(0).getMaxIops().longValue()); + } + + @Test + public void testGetIpAddressesFromIpMap() { + Map ipToNetworkList = new HashMap<>(); + Map ipMap = new HashMap<>(); + ipMap.put("ip", "192.168.1.100"); + ipMap.put("mac", "00:11:22:33:44:55"); + ipMap.put("networkid", "1"); + ipToNetworkList.put("0", ipMap); + + ReflectionTestUtils.setField(cmd, "ipToNetworkList", ipToNetworkList); + ReflectionTestUtils.setField(cmd, "networkIds", null); + ReflectionTestUtils.setField(cmd, "ipAddress", null); + + Network 
mockNetwork = mock(Network.class); + NetworkService networkServiceMock = mock(NetworkService.class); + ReflectionTestUtils.setField(cmd, "_networkService", networkServiceMock); + + Map result = cmd.getIpToNetworkMap(); + + assertNotNull(result); + assertTrue(result.containsKey(1L)); + assertEquals(result.get(1L).getIp4Address(), "192.168.1.100"); + assertEquals(result.get(1L).getMacAddress(), "00:11:22:33:44:55"); + } + + @Test + public void testGetIpAddressesFromIpMapInvalidMac() { + Map ipToNetworkList = new HashMap<>(); + Map ipMap = new HashMap<>(); + ipMap.put("ip", "192.168.1.100"); + ipMap.put("mac", "invalid-mac"); + ipMap.put("networkid", "1"); + ipToNetworkList.put("0", ipMap); + + ReflectionTestUtils.setField(cmd, "ipToNetworkList", ipToNetworkList); + ReflectionTestUtils.setField(cmd, "networkIds", null); + ReflectionTestUtils.setField(cmd, "ipAddress", null); + + Network mockNetwork = mock(Network.class); + NetworkService networkServiceMock = mock(NetworkService.class); + ReflectionTestUtils.setField(cmd, "_networkService", networkServiceMock); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getIpToNetworkMap(); + }); + assertTrue(thrownException.getMessage().contains("Mac address is not valid")); + } + + @Test + public void testGetDhcpOptionsMap() { + Map dhcpOptionsNetworkList = new HashMap<>(); + Map dhcpOptions = new HashMap<>(); + dhcpOptions.put("networkid", "network-1"); + dhcpOptions.put("dhcp:114", "url-value"); + dhcpOptions.put("dhcp:66", "www.test.com"); + dhcpOptionsNetworkList.put("0", dhcpOptions); + + ReflectionTestUtils.setField(cmd, "dhcpOptionsNetworkList", dhcpOptionsNetworkList); + + Map> result = cmd.getDhcpOptionsMap(); + + assertNotNull(result); + assertTrue(result.containsKey("network-1")); + Map networkOptions = result.get("network-1"); + assertEquals("url-value", networkOptions.get(114)); + assertEquals("www.test.com", networkOptions.get(66)); + } + + @Test 
+ public void testGetDhcpOptionsMap_WithMissingNetworkId() { + Map dhcpOptionsNetworkList = new HashMap<>(); + Map dhcpOptions = new HashMap<>(); + dhcpOptions.put("dhcp:114", "url-value"); + dhcpOptionsNetworkList.put("0", dhcpOptions); + + ReflectionTestUtils.setField(cmd, "dhcpOptionsNetworkList", dhcpOptionsNetworkList); + + IllegalArgumentException thrownException = assertThrows(IllegalArgumentException.class, () -> { + cmd.getDhcpOptionsMap(); + }); + assertTrue(thrownException.getMessage().contains("No networkid specified when providing extra dhcp options")); + } + + @Test + public void testGetDataDiskTemplateToDiskOfferingMap() { + ReflectionTestUtils.setField(cmd, "diskOfferingId", null); + + Map dataDiskTemplateToDiskOfferingList = new HashMap<>(); + Map dataDiskTemplate = new HashMap<>(); + dataDiskTemplate.put("datadisktemplateid", "template-uuid"); + dataDiskTemplate.put("diskofferingid", "offering-uuid"); + dataDiskTemplateToDiskOfferingList.put("0", dataDiskTemplate); + + ReflectionTestUtils.setField(cmd, "dataDiskTemplateToDiskOfferingList", dataDiskTemplateToDiskOfferingList); + + VirtualMachineTemplate mockTemplate = mock(VirtualMachineTemplate.class); + when(mockTemplate.getId()).thenReturn(1L); + + DiskOffering mockOffering = mock(DiskOffering.class); + + EntityManager entityMgr = mock(EntityManager.class); + ReflectionTestUtils.setField(cmd, "_entityMgr", entityMgr); + when(entityMgr.findByUuid(VirtualMachineTemplate.class, "template-uuid")).thenReturn(mockTemplate); + when(entityMgr.findByUuid(DiskOffering.class, "offering-uuid")).thenReturn(mockOffering); + + Map result = cmd.getDataDiskTemplateToDiskOfferingMap(); + + assertNotNull(result); + assertEquals(mockOffering, result.get(1L)); + } + + @Test + public void testGetDataDiskTemplateToDiskOfferingMapWithDiskOfferingId() { + ReflectionTestUtils.setField(cmd, "diskOfferingId", 1L); + ReflectionTestUtils.setField(cmd, "dataDiskTemplateToDiskOfferingList", new HashMap<>()); + + 
InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getDataDiskTemplateToDiskOfferingMap(); + }); + assertTrue(thrownException.getMessage().contains("diskofferingid parameter can't be specified along with datadisktemplatetodiskofferinglist parameter")); + } + + @Test + public void testGetDataDiskTemplateToDiskOfferingMapInvalidTemplateId() { + ReflectionTestUtils.setField(cmd, "diskOfferingId", null); + + Map dataDiskTemplateToDiskOfferingList = new HashMap<>(); + Map dataDiskTemplate = new HashMap<>(); + dataDiskTemplate.put("datadisktemplateid", "invalid-template"); + dataDiskTemplate.put("diskofferingid", "offering-uuid"); + dataDiskTemplateToDiskOfferingList.put("0", dataDiskTemplate); + + ReflectionTestUtils.setField(cmd, "dataDiskTemplateToDiskOfferingList", dataDiskTemplateToDiskOfferingList); + + EntityManager entityMgr = mock(EntityManager.class); + ReflectionTestUtils.setField(cmd, "_entityMgr", entityMgr); + when(entityMgr.findByUuid(VirtualMachineTemplate.class, "invalid-template")).thenReturn(null); + when(entityMgr.findById(VirtualMachineTemplate.class, "invalid-template")).thenReturn(null); + + InvalidParameterValueException thrownException = assertThrows(InvalidParameterValueException.class, () -> { + cmd.getDataDiskTemplateToDiskOfferingMap(); + }); + assertTrue(thrownException.getMessage().contains("Unable to translate and find entity with datadisktemplateid")); + } +} diff --git a/client/src/main/java/org/apache/cloudstack/ACSRequestLog.java b/client/src/main/java/org/apache/cloudstack/ACSRequestLog.java new file mode 100644 index 00000000000..123d2761e00 --- /dev/null +++ b/client/src/main/java/org/apache/cloudstack/ACSRequestLog.java @@ -0,0 +1,84 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package org.apache.cloudstack; + +import com.cloud.utils.StringUtils; +import org.eclipse.jetty.server.NCSARequestLog; +import org.eclipse.jetty.server.Request; +import org.eclipse.jetty.server.Response; +import org.eclipse.jetty.util.DateCache; +import org.eclipse.jetty.util.component.LifeCycle; + +import java.util.Locale; +import java.util.TimeZone; + +import static org.apache.commons.configuration.DataConfiguration.DEFAULT_DATE_FORMAT; + +public class ACSRequestLog extends NCSARequestLog { + private static final ThreadLocal buffers = + ThreadLocal.withInitial(() -> new StringBuilder(256)); + + private final DateCache dateCache; + + public ACSRequestLog() { + super(); + + TimeZone timeZone = TimeZone.getTimeZone("GMT"); + Locale locale = Locale.getDefault(); + dateCache = new DateCache(DEFAULT_DATE_FORMAT, locale, timeZone); + } + + @Override + public void log(Request request, Response response) { + String requestURI = StringUtils.cleanString(request.getOriginalURI()); + try { + StringBuilder sb = buffers.get(); + sb.setLength(0); + + sb.append(request.getHttpChannel().getEndPoint() + .getRemoteAddress().getAddress() + .getHostAddress()) + .append(" - - [") + .append(dateCache.format(request.getTimeStamp())) + .append("] \"") + .append(request.getMethod()) + .append(" ") + .append(requestURI) + .append(" ") + .append(request.getProtocol()) + .append("\" ") + 
.append(response.getStatus()) + .append(" ") + .append(response.getHttpChannel().getBytesWritten()) // apply filter here? + .append(" \"-\" \"") + .append(request.getHeader("User-Agent")) + .append("\""); + + write(sb.toString()); + } catch (Exception e) { + LOG.warn("Unable to log request", e); + } + } + + @Override + protected void stop(LifeCycle lifeCycle) throws Exception { + buffers.remove(); + super.stop(lifeCycle); + } +} diff --git a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java index c6fd2ff24dc..e5ad3d43b2f 100644 --- a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java +++ b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java @@ -24,15 +24,17 @@ import java.io.IOException; import java.io.InputStream; import java.lang.management.ManagementFactory; import java.net.URL; +import java.util.Arrays; import java.util.Properties; +import com.cloud.api.ApiServer; import org.apache.commons.daemon.Daemon; import org.apache.commons.daemon.DaemonContext; import org.apache.commons.lang3.StringUtils; import org.eclipse.jetty.jmx.MBeanContainer; +import org.eclipse.jetty.server.ForwardedRequestCustomizer; import org.eclipse.jetty.server.HttpConfiguration; import org.eclipse.jetty.server.HttpConnectionFactory; -import org.eclipse.jetty.server.NCSARequestLog; import org.eclipse.jetty.server.RequestLog; import org.eclipse.jetty.server.SecureRequestCustomizer; import org.eclipse.jetty.server.Server; @@ -185,6 +187,7 @@ public class ServerDaemon implements Daemon { httpConfig.setResponseHeaderSize(8192); httpConfig.setSendServerVersion(false); httpConfig.setSendDateHeader(false); + addForwardingCustomiser(httpConfig); // HTTP Connector createHttpConnector(httpConfig); @@ -207,6 +210,21 @@ public class ServerDaemon implements Daemon { server.join(); } + /** + * Adds a ForwardedRequestCustomizer to the HTTP configuration to handle forwarded headers. 
+ * The header used for forwarding is determined by the ApiServer.listOfForwardHeaders property. + * Only non empty headers are considered and only the first of the comma-separated list is used. + * @param httpConfig the HTTP configuration to which the customizer will be added + */ + private static void addForwardingCustomiser(HttpConfiguration httpConfig) { + ForwardedRequestCustomizer customiser = new ForwardedRequestCustomizer(); + String header = Arrays.stream(ApiServer.listOfForwardHeaders.value().split(",")).findFirst().orElse(null); + if (com.cloud.utils.StringUtils.isNotEmpty(header)) { + customiser.setForwardedForHeader(header); + } + httpConfig.addCustomizer(customiser); + } + @Override public void stop() throws Exception { server.stop(); @@ -299,7 +317,7 @@ public class ServerDaemon implements Daemon { } private RequestLog createRequestLog() { - final NCSARequestLog log = new NCSARequestLog(); + final ACSRequestLog log = new ACSRequestLog(); final File logPath = new File(accessLogFile); final File parentFile = logPath.getParentFile(); if (parentFile != null) { diff --git a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java index 49768297ad5..ee61fee66c6 100644 --- a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java @@ -26,10 +26,6 @@ import java.util.List; public class ReadyCommand extends Command { private String _details; - public ReadyCommand() { - super(); - } - private Long dcId; private Long hostId; private String hostUuid; @@ -41,6 +37,10 @@ public class ReadyCommand extends Command { private Boolean enableHumanReadableSizes; private String arch; + public ReadyCommand() { + super(); + } + public ReadyCommand(Long dcId) { super(); this.dcId = dcId; @@ -95,7 +95,7 @@ public class ReadyCommand extends Command { return avoidMsHostList; } - public void setAvoidMsHostList(List msHostList) { + public void 
setAvoidMsHostList(List avoidMsHostList) { this.avoidMsHostList = avoidMsHostList; } diff --git a/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java index 09f9c562150..ffc67b628a7 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java +++ b/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java @@ -28,6 +28,7 @@ public class BackupAnswer extends Answer { private Long size; private Long virtualSize; private Map volumes; + Boolean needsCleanup; public BackupAnswer(final Command command, final boolean success, final String details) { super(command, success, details); @@ -56,4 +57,15 @@ public class BackupAnswer extends Answer { public void setVolumes(Map volumes) { this.volumes = volumes; } + + public Boolean getNeedsCleanup() { + if (needsCleanup == null) { + return false; + } + return needsCleanup; + } + + public void setNeedsCleanup(Boolean needsCleanup) { + this.needsCleanup = needsCleanup; + } } diff --git a/core/src/main/java/org/apache/cloudstack/backup/BackupStorageStatsAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/BackupStorageStatsAnswer.java new file mode 100644 index 00000000000..eabf6877ba6 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/BackupStorageStatsAnswer.java @@ -0,0 +1,50 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +public class BackupStorageStatsAnswer extends Answer { + private Long totalSize; + private Long usedSize; + + public BackupStorageStatsAnswer(final Command command, final boolean success, final String details) { + super(command, success, details); + this.totalSize = 0L; + this.usedSize = 0L; + } + + public Long getTotalSize() { + return totalSize; + } + + public void setTotalSize(Long totalSize) { + this.totalSize = totalSize; + } + + public Long getUsedSize() { + return usedSize; + } + + public void setUsedSize(Long usedSize) { + this.usedSize = usedSize; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/GetBackupStorageStatsCommand.java b/core/src/main/java/org/apache/cloudstack/backup/GetBackupStorageStatsCommand.java new file mode 100644 index 00000000000..1ceeac17e52 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/GetBackupStorageStatsCommand.java @@ -0,0 +1,66 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; + +public class GetBackupStorageStatsCommand extends Command { + private String backupRepoType; + private String backupRepoAddress; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public GetBackupStorageStatsCommand(String backupRepoType, String backupRepoAddress, String mountOptions) { + super(); + this.backupRepoType = backupRepoType; + this.backupRepoAddress = backupRepoAddress; + this.mountOptions = mountOptions; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getMountOptions() { + return mountOptions == null ? 
"" : mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java index 7228e35147a..f447fbe3d00 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java +++ b/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java @@ -30,7 +30,8 @@ public class RestoreBackupCommand extends Command { private String backupPath; private String backupRepoType; private String backupRepoAddress; - private List volumePaths; + private List backupVolumesUUIDs; + private List restoreVolumePaths; private String diskType; private Boolean vmExists; private String restoreVolumeUUID; @@ -72,12 +73,12 @@ public class RestoreBackupCommand extends Command { this.backupRepoAddress = backupRepoAddress; } - public List getVolumePaths() { - return volumePaths; + public List getRestoreVolumePaths() { + return restoreVolumePaths; } - public void setVolumePaths(List volumePaths) { - this.volumePaths = volumePaths; + public void setRestoreVolumePaths(List restoreVolumePaths) { + this.restoreVolumePaths = restoreVolumePaths; } public Boolean isVmExists() { @@ -127,4 +128,12 @@ public class RestoreBackupCommand extends Command { public boolean executeInSequence() { return true; } + + public List getBackupVolumesUUIDs() { + return backupVolumesUUIDs; + } + + public void setBackupVolumesUUIDs(List backupVolumesUUIDs) { + this.backupVolumesUUIDs = backupVolumesUUIDs; + } } diff --git a/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java index 93855ea1721..ecebd57a178 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java +++ 
b/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java @@ -30,6 +30,7 @@ public class TakeBackupCommand extends Command { private String backupRepoType; private String backupRepoAddress; private List volumePaths; + private Boolean quiesce; @LogLevel(LogLevel.Log4jLevel.Off) private String mountOptions; @@ -87,6 +88,14 @@ public class TakeBackupCommand extends Command { this.volumePaths = volumePaths; } + public Boolean getQuiesce() { + return quiesce; + } + + public void setQuiesce(Boolean quiesce) { + this.quiesce = quiesce; + } + @Override public boolean executeInSequence() { return true; diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index 7841eba524a..c05c29add55 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -124,6 +124,7 @@ public interface VirtualMachineManager extends Manager { * @param defaultNetwork The default network for the VM. * @param rootDiskOffering For created VMs not based on templates, root disk offering specifies the root disk. * @param dataDiskOfferings Data disks to attach to the VM. + * @param dataDiskDeviceIds Device Ids to assign the data disks to. * @param auxiliaryNetworks additional networks to attach the VMs to. * @param plan How to deploy the VM. * @param hyperType Hypervisor type @@ -131,7 +132,7 @@ public interface VirtualMachineManager extends Manager { * @throws InsufficientCapacityException If there are insufficient capacity to deploy this vm. 
*/ void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, DiskOfferingInfo rootDiskOfferingInfo, - List dataDiskOfferings, LinkedHashMap> auxiliaryNetworks, DeploymentPlan plan, + List dataDiskOfferings, List dataDiskDeviceIds, LinkedHashMap> auxiliaryNetworks, DeploymentPlan plan, HypervisorType hyperType, Map> extraDhcpOptions, Map datadiskTemplateToDiskOfferingMap, Volume volume, Snapshot snapshot) throws InsufficientCapacityException; void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OrchestrationService.java index ffe85818fc4..6be71b3cb25 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/service/api/OrchestrationService.java @@ -40,6 +40,7 @@ import com.cloud.exception.InsufficientCapacityException; import com.cloud.hypervisor.Hypervisor; import com.cloud.offering.DiskOffering; import com.cloud.vm.NicProfile; +import com.cloud.vm.VmDiskInfo; @Path("orchestration") @Produces({"application/json", "application/xml"}) @@ -69,15 +70,17 @@ public interface OrchestrationService { @QueryParam("compute-tags") List computeTags, @QueryParam("root-disk-tags") List rootDiskTags, @QueryParam("network-nic-map") Map> networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan, @QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map> extraDhcpOptionMap, - @QueryParam("datadisktemplate-diskoffering-map") Map datadiskTemplateToDiskOfferingMap, @QueryParam("disk-offering-id") Long diskOfferingId, @QueryParam("root-disk-offering-id") Long rootDiskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException; + 
@QueryParam("datadisktemplate-diskoffering-map") Map datadiskTemplateToDiskOfferingMap, @QueryParam("disk-offering-id") Long diskOfferingId, + @QueryParam("root-disk-offering-id") Long rootDiskOfferingId, List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException; @POST VirtualMachineEntity createVirtualMachineFromScratch(@QueryParam("id") String id, @QueryParam("owner") String owner, @QueryParam("iso-id") String isoId, - @QueryParam("host-name") String hostName, @QueryParam("display-name") String displayName, @QueryParam("hypervisor") String hypervisor, - @QueryParam("os") String os, @QueryParam("cpu") int cpu, @QueryParam("speed") int speed, @QueryParam("ram") long memory, @QueryParam("disk-size") Long diskSize, - @QueryParam("compute-tags") List computeTags, @QueryParam("root-disk-tags") List rootDiskTags, - @QueryParam("network-nic-map") Map> networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan, - @QueryParam("extra-dhcp-option-map") Map> extraDhcpOptionMap, @QueryParam("disk-offering-id") Long diskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException; + @QueryParam("host-name") String hostName, @QueryParam("display-name") String displayName, @QueryParam("hypervisor") String hypervisor, + @QueryParam("os") String os, @QueryParam("cpu") int cpu, @QueryParam("speed") int speed, @QueryParam("ram") long memory, @QueryParam("disk-size") Long diskSize, + @QueryParam("compute-tags") List computeTags, @QueryParam("root-disk-tags") List rootDiskTags, + @QueryParam("network-nic-map") Map> networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan, + @QueryParam("extra-dhcp-option-map") Map> extraDhcpOptionMap, @QueryParam("disk-offering-id") Long diskOfferingId, + @QueryParam("data-disks-offering-info") List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException; @POST NetworkEntity createNetwork(String id, String name, String domainName, String cidr, 
String gateway); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java index f537d8f5202..2494cc7c5fc 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreCapabilities.java @@ -40,5 +40,13 @@ public enum DataStoreCapabilities { /** * indicates that this driver supports reverting a volume to a snapshot state */ - CAN_REVERT_VOLUME_TO_SNAPSHOT + CAN_REVERT_VOLUME_TO_SNAPSHOT, + /** + * indicates that the driver supports copying snapshot between zones on pools of the same type + */ + CAN_COPY_SNAPSHOT_BETWEEN_ZONES_AND_SAME_POOL_TYPE, + /** + * indicates that this driver supports the option to create a template from the back-end snapshot + */ + CAN_CREATE_TEMPLATE_FROM_SNAPSHOT } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java index d7b6b2ec75b..18c924167e0 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java @@ -46,4 +46,6 @@ public interface SnapshotService { AsyncCallFuture copySnapshot(SnapshotInfo snapshot, String copyUrl, DataStore dataStore) throws ResourceUnavailableException; AsyncCallFuture queryCopySnapshot(SnapshotInfo snapshot) throws ResourceUnavailableException; + + AsyncCallFuture copySnapshot(SnapshotInfo sourceSnapshot, SnapshotInfo destSnapshot, SnapshotStrategy strategy); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java 
b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java index f3aa8f52c93..43f411f7553 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java @@ -16,12 +16,14 @@ // under the License. package org.apache.cloudstack.engine.subsystem.api.storage; + import com.cloud.storage.Snapshot; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; public interface SnapshotStrategy { enum SnapshotOperation { - TAKE, BACKUP, DELETE, REVERT + TAKE, BACKUP, DELETE, REVERT, COPY } SnapshotInfo takeSnapshot(SnapshotInfo snapshot); @@ -35,4 +37,7 @@ public interface SnapshotStrategy { StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op); void postSnapshotCreation(SnapshotInfo snapshot); + + default void copySnapshot(DataObject snapshotSource, DataObject snapshotDest, AsyncCompletionCallback caller) { + } } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 3fc6d80befe..d3a6d4525a5 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -220,6 +220,14 @@ public interface StorageManager extends StorageService { "storage.pool.host.connect.workers", "1", "Number of worker threads to be used to connect hosts to a primary storage", true); + ConfigKey ObjectStorageCapacityThreshold = new ConfigKey<>("Alert", Float.class, + "objectStorage.capacity.notificationthreshold", + "0.75", + "Percentage (as a value between 0 and 1) of object storage utilization above which alerts will be sent about low storage available.", + true, + ConfigKey.Scope.Global, + null); + /** * should we execute in sequence not involving any storages? 
* @return tru if commands should execute in sequence @@ -415,4 +423,6 @@ public interface StorageManager extends StorageService { Pair checkIfReadyVolumeFitsInStoragePoolWithStorageAccessGroups(StoragePool destPool, Volume volume); String[] getStorageAccessGroups(Long zoneId, Long podId, Long clusterId, Long hostId); + + CapacityVO getObjectStorageUsedStats(Long zoneId); } diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeVolumeSnapshot.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeVolumeSnapshot.java index 8474052be20..88d25441e0a 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeVolumeSnapshot.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeVolumeSnapshot.java @@ -30,12 +30,12 @@ public class VmWorkTakeVolumeSnapshot extends VmWork { private boolean quiesceVm; private Snapshot.LocationType locationType; private boolean asyncBackup; - + private List poolIds; private List zoneIds; public VmWorkTakeVolumeSnapshot(long userId, long accountId, long vmId, String handlerName, Long volumeId, Long policyId, Long snapshotId, boolean quiesceVm, Snapshot.LocationType locationType, - boolean asyncBackup, List zoneIds) { + boolean asyncBackup, List zoneIds, List poolIds) { super(userId, accountId, vmId, handlerName); this.volumeId = volumeId; this.policyId = policyId; @@ -44,6 +44,7 @@ public class VmWorkTakeVolumeSnapshot extends VmWork { this.locationType = locationType; this.asyncBackup = asyncBackup; this.zoneIds = zoneIds; + this.poolIds = poolIds; } public Long getVolumeId() { @@ -71,4 +72,8 @@ public class VmWorkTakeVolumeSnapshot extends VmWork { public List getZoneIds() { return zoneIds; } + + public List getPoolIds() { + return poolIds; + } } diff --git a/engine/components-api/src/test/java/com/cloud/vm/VmWorkTakeVolumeSnapshotTest.java b/engine/components-api/src/test/java/com/cloud/vm/VmWorkTakeVolumeSnapshotTest.java index feb7ee46aec..f80ba9580d5 100644 --- 
a/engine/components-api/src/test/java/com/cloud/vm/VmWorkTakeVolumeSnapshotTest.java +++ b/engine/components-api/src/test/java/com/cloud/vm/VmWorkTakeVolumeSnapshotTest.java @@ -26,8 +26,9 @@ public class VmWorkTakeVolumeSnapshotTest { @Test public void testVmWorkTakeVolumeSnapshotZoneIds() { List zoneIds = List.of(10L, 20L); + List poolIds = List.of(10L, 20L); VmWorkTakeVolumeSnapshot work = new VmWorkTakeVolumeSnapshot(1L, 1L, 1L, "handler", - 1L, 1L, 1L, false, null, false, zoneIds); + 1L, 1L, 1L, false, null, false, zoneIds, poolIds); Assert.assertNotNull(work.getZoneIds()); Assert.assertEquals(zoneIds.size(), work.getZoneIds().size()); Assert.assertEquals(zoneIds.get(0), work.getZoneIds().get(0)); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 75e9fb20e5a..5e10312741f 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -27,6 +27,7 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; @@ -758,15 +759,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmd, final boolean forRebalance) throws ConnectionException { + protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, final StartupCommand[] cmds, final boolean forRebalance) throws ConnectionException { final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { logger.debug("Sending Connect to listener: {}, for rebalance: {}", 
monitor.second().getClass().getSimpleName(), forRebalance); - for (int i = 0; i < cmd.length; i++) { + for (StartupCommand cmd : cmds) { try { - logger.debug("process connection to issue: {} for host: {}, forRebalance: {}, connection transferred: {}", ReflectionToStringBuilderUtils.reflectCollection(cmd[i]), hostId, forRebalance, cmd[i].isConnectionTransferred()); - monitor.second().processConnect(host, cmd[i], forRebalance); + logger.debug("process connection to issue: {} for host: {}, forRebalance: {}", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(cmd, "id", "type", "msHostList", "connectionTransferred"), hostId, forRebalance); + monitor.second().processConnect(host, cmd, forRebalance); } catch (final ConnectionException ce) { if (ce.isSetupError()) { logger.warn("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, ce.getMessage()); @@ -1040,39 +1041,50 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attache, final Status.Event event, final boolean transitState, final boolean removeAgent) { final long hostId = attache.getId(); - + final HostVO host = _hostDao.findById(hostId); boolean result = false; GlobalLock joinLock = getHostJoinLock(hostId); - if (joinLock.lock(60)) { - try { - logger.info("Host {} is disconnecting with event {}", - attache, event); - Status nextStatus; - final HostVO host = _hostDao.findById(hostId); - if (host == null) { - logger.warn("Can't find host with {} ({})", hostId, attache); - nextStatus = Status.Removed; - } else { - nextStatus = getNextStatusOnDisconnection(host, event); - caService.purgeHostCertificate(host); - } - logger.debug("Deregistering link for {} with state {}", attache, nextStatus); - - removeAgent(attache, nextStatus); - - if (host != null && transitState) { - // update the state for host in DB as per the event - 
disconnectAgent(host, event, _nodeId); - } - } finally { - joinLock.unlock(); + try { + if (!joinLock.lock(60)) { + logger.debug("Unable to acquire lock on host {} to process agent disconnection", Objects.toString(host, String.valueOf(hostId))); + return result; } + + logger.debug("Acquired lock on host {}, to process agent disconnection", Objects.toString(host, String.valueOf(hostId))); + disconnectHostAgent(attache, event, host, transitState, joinLock); result = true; + } finally { + joinLock.releaseRef(); } - joinLock.releaseRef(); + return result; } + private void disconnectHostAgent(final AgentAttache attache, final Status.Event event, final HostVO host, final boolean transitState, final GlobalLock joinLock) { + try { + logger.info("Host {} is disconnecting with event {}", attache, event); + final long hostId = attache.getId(); + Status nextStatus; + if (host == null) { + logger.warn("Can't find host with {} ({})", hostId, attache); + nextStatus = Status.Removed; + } else { + nextStatus = getNextStatusOnDisconnection(host, event); + caService.purgeHostCertificate(host); + } + logger.debug("Deregistering link for {} with state {}", attache, nextStatus); + + removeAgent(attache, nextStatus); + + if (host != null && transitState) { + // update the state for host in DB as per the event + disconnectAgent(host, event, _nodeId); + } + } finally { + joinLock.unlock(); + } + } + protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, Status.Event event) { final long hostId = attache.getId(); HostVO host = _hostDao.findById(hostId); @@ -1341,45 +1353,58 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return attache; } - private AgentAttache sendReadyAndGetAttache(HostVO host, ReadyCommand ready, Link link, StartupCommand[] startup) throws ConnectionException { - final List agentMSHostList = new ArrayList<>(); - String lbAlgorithm = null; - if (startup != null && startup.length > 0) { - final String agentMSHosts 
= startup[0].getMsHostList(); - if (StringUtils.isNotEmpty(agentMSHosts)) { - String[] msHosts = agentMSHosts.split("@"); - if (msHosts.length > 1) { - lbAlgorithm = msHosts[1]; - } - agentMSHostList.addAll(Arrays.asList(msHosts[0].split(","))); - } - } - ready.setArch(host.getArch().getType()); + private AgentAttache sendReadyAndGetAttache(HostVO host, ReadyCommand ready, Link link, StartupCommand[] startupCmds) throws ConnectionException { AgentAttache attache; GlobalLock joinLock = getHostJoinLock(host.getId()); - if (joinLock.lock(60)) { - try { - - if (!indirectAgentLB.compareManagementServerList(host.getId(), host.getDataCenterId(), agentMSHostList, lbAlgorithm)) { - final List newMSList = indirectAgentLB.getManagementServerList(host.getId(), host.getDataCenterId(), null); - ready.setMsHostList(newMSList); - final List avoidMsList = _mshostDao.listNonUpStateMsIPs(); - ready.setAvoidMsHostList(avoidMsList); - ready.setLbAlgorithm(indirectAgentLB.getLBAlgorithmName()); - ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId())); - logger.debug("Agent's management server host list is not up to date, sending list update: {}", newMSList); - } - - attache = createAttacheForConnect(host, link); - attache = notifyMonitorsOfConnection(attache, startup, false); - } finally { - joinLock.unlock(); + try { + if (!joinLock.lock(60)) { + throw new ConnectionException(true, String.format("Unable to acquire lock on host %s, to process agent connection", host)); } - } else { - throw new ConnectionException(true, - String.format("Unable to acquire lock on host %s", host)); + + logger.debug("Acquired lock on host {}, to process agent connection", host); + attache = connectHostAgent(host, ready, link, startupCmds, joinLock); + } finally { + joinLock.releaseRef(); } - joinLock.releaseRef(); + + return attache; + } + + private AgentAttache connectHostAgent(HostVO host, ReadyCommand ready, Link link, StartupCommand[] startupCmds, GlobalLock 
joinLock) throws ConnectionException { + AgentAttache attache; + try { + final List agentMSHostList = new ArrayList<>(); + String lbAlgorithm = null; + if (startupCmds != null && startupCmds.length > 0) { + final String agentMSHosts = startupCmds[0].getMsHostList(); + if (StringUtils.isNotEmpty(agentMSHosts)) { + String[] msHosts = agentMSHosts.split("@"); + if (msHosts.length > 1) { + lbAlgorithm = msHosts[1]; + } + agentMSHostList.addAll(Arrays.asList(msHosts[0].split(","))); + } + } + + if (!indirectAgentLB.compareManagementServerListAndLBAlgorithm(host.getId(), host.getDataCenterId(), agentMSHostList, lbAlgorithm)) { + final List newMSList = indirectAgentLB.getManagementServerList(host.getId(), host.getDataCenterId(), null); + ready.setMsHostList(newMSList); + String newLBAlgorithm = indirectAgentLB.getLBAlgorithmName(); + ready.setLbAlgorithm(newLBAlgorithm); + logger.debug("Agent's management server host list or lb algorithm is not up to date, sending list and algorithm update: {}, {}", newMSList, newLBAlgorithm); + } + + final List avoidMsList = _mshostDao.listNonUpStateMsIPs(); + ready.setAvoidMsHostList(avoidMsList); + ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId())); + ready.setArch(host.getArch().getType()); + + attache = createAttacheForConnect(host, link); + attache = notifyMonitorsOfConnection(attache, startupCmds, false); + } finally { + joinLock.unlock(); + } + return attache; } @@ -1666,7 +1691,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl logger.debug("Not processing {} for agent id={}; can't find the host in the DB", PingRoutingCommand.class.getSimpleName(), cmdHostId); } } - if (host!= null && host.getStatus() != Status.Up && gatewayAccessible) { + if (host != null && host.getStatus() != Status.Up && gatewayAccessible) { requestStartupCommand = true; } final List avoidMsList = _mshostDao.listNonUpStateMsIPs(); @@ -1821,11 +1846,11 @@ public class 
AgentManagerImpl extends ManagerBase implements AgentManager, Handl return false; } - private void disconnectInternal(final long hostId, final Status.Event event, final boolean invstigate) { + private void disconnectInternal(final long hostId, final Status.Event event, final boolean investigate) { final AgentAttache attache = findAttache(hostId); if (attache != null) { - if (!invstigate) { + if (!investigate) { disconnectWithoutInvestigation(attache, event); } else { disconnectWithInvestigation(attache, event); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index f208a81b422..f4efaaa34a4 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -54,8 +54,10 @@ public class ConnectedAgentAttache extends AgentAttache { @Override public void disconnect(final Status state) { synchronized (this) { - logger.debug("Processing Disconnect."); + logger.debug("Processing disconnect [id: {}, uuid: {}, name: {}]", _id, _uuid, _name); + if (_link != null) { + logger.debug("Disconnecting from {}, Socket Address: {}", _link.getIpAddress(), _link.getSocketAddress()); _link.close(); _link.terminated(); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 3cd8ec0aae3..3a6e1b62277 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -58,6 +58,8 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; import 
org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -437,6 +439,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VmWorkJobDao vmWorkJobDao; @Inject DataStoreProviderManager dataStoreProviderManager; + @Inject + BackupManager backupManager; + @Inject + BackupDao backupDao; private SingleCache> vmIdsInProgressCache; @@ -525,8 +531,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override @DB public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, - final DiskOfferingInfo rootDiskOfferingInfo, final List dataDiskOfferings, - final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap, Volume volume, Snapshot snapshot) + final DiskOfferingInfo rootDiskOfferingInfo, final List dataDiskOfferings, List dataDiskDeviceIds, + final LinkedHashMap> auxiliaryNetworks,final DeploymentPlan plan, final HypervisorType hyperType, + final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { logger.info("allocating virtual machine from template: {} with hostname: {} and {} networks", template, vmInstanceName, auxiliaryNetworks.size()); @@ -570,19 +577,22 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac CallContext volumeContext = CallContext.register(CallContext.current(), ApiCommandResourceType.Volume); try { if (dataDiskOfferings != null) { + int index = 0; for (final DiskOfferingInfo dataDiskOfferingInfo : 
dataDiskOfferings) { - volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + persistedVm.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(), - dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, null); + Long deviceId = dataDiskDeviceIds.get(index++); + String volumeName = deviceId == null ? "DATA-" + persistedVm.getId() : "DATA-" + persistedVm.getId() + "-" + String.valueOf(deviceId); + volumeMgr.allocateRawVolume(Type.DATADISK, volumeName, dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(), + dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, deviceId); } } if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) { - int diskNumber = 1; + Long diskNumber = 1L; for (Entry dataDiskTemplateToDiskOfferingMap : datadiskTemplateToDiskOfferingMap.entrySet()) { DiskOffering diskOffering = dataDiskTemplateToDiskOfferingMap.getValue(); long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024); VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey()); - volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + persistedVm.getId() + "-" + String.valueOf(diskNumber), diskOffering, diskOfferingSize, null, null, - persistedVm, dataDiskTemplate, owner, Long.valueOf(diskNumber)); + volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + persistedVm.getId() + "-" + String.valueOf( diskNumber), diskOffering, diskOfferingSize, null, null, + persistedVm, dataDiskTemplate, owner, diskNumber); diskNumber++; } } @@ -629,7 +639,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering, final LinkedHashMap> networks, final DeploymentPlan plan, final HypervisorType hyperType, Volume volume, Snapshot snapshot) 
throws InsufficientCapacityException { DiskOffering diskOffering = _diskOfferingDao.findById(serviceOffering.getDiskOfferingId()); - allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(diskOffering), new ArrayList<>(), networks, plan, hyperType, null, null, volume, snapshot); + allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(diskOffering), new ArrayList<>(), new ArrayList<>(), networks, plan, hyperType, null, null, volume, snapshot); } VirtualMachineGuru getVmGuru(final VirtualMachine vm) { @@ -2589,6 +2599,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException("Unable to destroy " + vm); } else { if (expunge) { + backupManager.checkAndRemoveBackupOfferingBeforeExpunge(vm); if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) { logger.debug("Unable to expunge the vm because it is not in the correct state: {}", vm); throw new CloudRuntimeException("Unable to expunge " + vm); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java index c9af76fbddd..8639f006383 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java @@ -60,6 +60,7 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.vm.NicProfile; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VmDiskInfo; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDetailsDao; import com.cloud.vm.dao.VMInstanceDao; @@ -162,7 +163,8 @@ public class CloudOrchestrator implements OrchestrationService { @Override public VirtualMachineEntity createVirtualMachine(String id, String owner, String 
templateId, String hostName, String displayName, String hypervisor, int cpu, int speed, long memory, Long diskSize, List computeTags, List rootDiskTags, Map> networkNicMap, DeploymentPlan plan, - Long rootDiskSize, Map> extraDhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Long dataDiskOfferingId, Long rootDiskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { + Long rootDiskSize, Map> extraDhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Long dataDiskOfferingId, Long rootDiskOfferingId, + List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, // vmEntityManager); @@ -187,7 +189,6 @@ public class CloudOrchestrator implements OrchestrationService { // Else, a disk offering is optional, and if present will be used to create the data disk DiskOfferingInfo rootDiskOfferingInfo = new DiskOfferingInfo(); - List dataDiskOfferings = new ArrayList(); ServiceOfferingVO computeOffering = _serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId()); @@ -210,6 +211,8 @@ public class CloudOrchestrator implements OrchestrationService { } } + List dataDiskOfferings = new ArrayList(); + List dataDiskDeviceIds = new ArrayList<>(); if (dataDiskOfferingId != null) { DiskOfferingVO diskOffering = _diskOfferingDao.findById(dataDiskOfferingId); if (diskOffering == null) { @@ -243,6 +246,12 @@ public class CloudOrchestrator implements OrchestrationService { } dataDiskOfferings.add(dataDiskOfferingInfo); + dataDiskDeviceIds.add(null); + } + } else if (dataDiskInfoList != null){ + dataDiskOfferings.addAll(dataDiskInfoList); + for (VmDiskInfo dataDiskInfo : dataDiskInfoList) { + dataDiskDeviceIds.add(dataDiskInfo.getDeviceId()); } } @@ -262,8 +271,8 @@ public class CloudOrchestrator implements OrchestrationService { template = 
_templateDao.findByIdIncludingRemoved(new Long(templateId)); } else template = _templateDao.findById(new Long(templateId)); - _itMgr.allocate(vm.getInstanceName(), template, computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan, - hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, volume, snapshot); + _itMgr.allocate(vm.getInstanceName(), template, computeOffering, rootDiskOfferingInfo, dataDiskOfferings, dataDiskDeviceIds, + networkIpMap, plan, hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, volume, snapshot); return vmEntity; } @@ -271,7 +280,7 @@ public class CloudOrchestrator implements OrchestrationService { @Override public VirtualMachineEntity createVirtualMachineFromScratch(String id, String owner, String isoId, String hostName, String displayName, String hypervisor, String os, int cpu, int speed, long memory, Long diskSize, List computeTags, List rootDiskTags, Map> networkNicMap, DeploymentPlan plan, - Map> extraDhcpOptionMap, Long diskOfferingId, Volume volume, Snapshot snapshot) + Map> extraDhcpOptionMap, Long diskOfferingId, List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks, vmEntityManager); @@ -317,6 +326,14 @@ public class CloudOrchestrator implements OrchestrationService { rootDiskOfferingInfo.setMaxIops(maxIops != null && maxIops.trim().length() > 0 ? 
Long.parseLong(maxIops) : null); } } + List dataDiskOfferings = new ArrayList<>(); + List dataDiskDeviceIds = new ArrayList<>(); + if (dataDiskInfoList != null) { + dataDiskOfferings.addAll(dataDiskInfoList); + for (VmDiskInfo dataDiskInfo : dataDiskInfoList) { + dataDiskDeviceIds.add(dataDiskInfo.getDeviceId()); + } + } LinkedHashMap> networkIpMap = new LinkedHashMap>(); for (String uuid : networkNicMap.keySet()) { @@ -328,7 +345,8 @@ public class CloudOrchestrator implements OrchestrationService { HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor); - _itMgr.allocate(vm.getInstanceName(), _templateDao.findByIdIncludingRemoved(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap, null, volume, snapshot); + _itMgr.allocate(vm.getInstanceName(), _templateDao.findByIdIncludingRemoved(new Long(isoId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, dataDiskDeviceIds, + networkIpMap, plan, hypervisorType, extraDhcpOptionMap, null, volume, snapshot); return vmEntity; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index d9a79f9885b..e98a5b78a94 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -577,14 +577,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } VolumeInfo vol = volFactory.getVolume(volume.getId()); + long zoneId = volume.getDataCenterId(); DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); - DataStoreRole dataStoreRole = snapshotHelper.getDataStoreRole(snapshot); - SnapshotInfo snapInfo = snapshotFactory.getSnapshotWithRoleAndZone(snapshot.getId(), 
dataStoreRole, volume.getDataCenterId()); + DataStoreRole dataStoreRole = snapshotHelper.getDataStoreRole(snapshot, zoneId); + SnapshotInfo snapInfo = snapshotFactory.getSnapshotWithRoleAndZone(snapshot.getId(), dataStoreRole, zoneId); - boolean kvmSnapshotOnlyInPrimaryStorage = snapshotHelper.isKvmSnapshotOnlyInPrimaryStorage(snapshot, dataStoreRole); + boolean kvmSnapshotOnlyInPrimaryStorage = snapshotHelper.isKvmSnapshotOnlyInPrimaryStorage(snapshot, dataStoreRole, volume.getDataCenterId()); + boolean storageSupportSnapshotToTemplateEnabled = snapshotHelper.isStorageSupportSnapshotToTemplate(snapInfo); try { - snapInfo = snapshotHelper.backupSnapshotToSecondaryStorageIfNotExists(snapInfo, dataStoreRole, snapshot, kvmSnapshotOnlyInPrimaryStorage); + if (!storageSupportSnapshotToTemplateEnabled) { + snapInfo = snapshotHelper.backupSnapshotToSecondaryStorageIfNotExists(snapInfo, dataStoreRole, snapshot, kvmSnapshotOnlyInPrimaryStorage); + } } catch (CloudRuntimeException e) { snapshotHelper.expungeTemporarySnapshot(kvmSnapshotOnlyInPrimaryStorage, snapInfo); throw e; @@ -596,7 +600,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } // don't try to perform a sync if the DataStoreRole of the snapshot is equal to DataStoreRole.Primary - if (!DataStoreRole.Primary.equals(dataStoreRole) || kvmSnapshotOnlyInPrimaryStorage) { + if (!DataStoreRole.Primary.equals(dataStoreRole) || !storageSupportSnapshotToTemplateEnabled) { try { // sync snapshot to region store if necessary DataStore snapStore = snapInfo.getDataStore(); diff --git a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java index cd62935f17e..fb2d61d8e11 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java +++ b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java @@ -254,6 +254,8 @@ public class CapacityVO implements Capacity { capacityNames.put(CAPACITY_TYPE_GPU, 
"GPU"); capacityNames.put(CAPACITY_TYPE_CPU_CORE, "CPU_CORE"); capacityNames.put(CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET, "VIRTUAL_NETWORK_IPV6_SUBNET"); + capacityNames.put(CAPACITY_TYPE_BACKUP_STORAGE, "BACKUP_STORAGE"); + capacityNames.put(CAPACITY_TYPE_OBJECT_STORAGE, "OBJECT_STORAGE"); } public static String getCapacityName (Short capacityType) { diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index d70eeb87653..6785c365329 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -74,7 +74,7 @@ public interface VMTemplateDao extends GenericDao, StateDao< VMTemplateVO findSystemVMTemplate(long zoneId); - VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType); + VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType, String preferredArch); List findSystemVMReadyTemplates(long zoneId, HypervisorType hypervisorType, String preferredArch); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 267cef2169a..08b82cbb45b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -24,6 +24,7 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -621,11 +622,19 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } @Override - public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType, String preferredArch) { 
List templates = listAllReadySystemVMTemplates(zoneId); if (CollectionUtils.isEmpty(templates)) { return null; } + if (StringUtils.isNotBlank(preferredArch)) { + // Sort the templates by preferred architecture first + templates = templates.stream() + .sorted(Comparator.comparing( + x -> !x.getArch().getType().equalsIgnoreCase(preferredArch) + )) + .collect(Collectors.toList()); + } if (hypervisorType == HypervisorType.Any) { return templates.get(0); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index e711c564015..26b033c8d79 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -64,11 +64,14 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.GuestOSVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.GuestOSDaoImpl; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateDaoImpl; import com.cloud.storage.dao.VMTemplateZoneDao; @@ -102,15 +105,13 @@ public class SystemVmTemplateRegistration { private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); private static final String storageScriptsDir = "scripts/storage/secondary"; private static final Integer OTHER_LINUX_ID = 99; - private static final Integer LINUX_5_ID = 15; - private static final Integer LINUX_7_ID = 183; + private static Integer LINUX_12_ID = 363; private static final Integer 
SCRIPT_TIMEOUT = 1800000; private static final Integer LOCK_WAIT_TIMEOUT = 1200; protected static final List DOWNLOADABLE_TEMPLATE_ARCH_TYPES = Arrays.asList( CPU.CPUArch.arm64 ); - public static String CS_MAJOR_VERSION = null; public static String CS_TINY_VERSION = null; @@ -132,6 +133,8 @@ public class SystemVmTemplateRegistration { ClusterDao clusterDao; @Inject ConfigurationDao configurationDao; + @Inject + private GuestOSDao guestOSDao; private String systemVmTemplateVersion; @@ -147,6 +150,7 @@ public class SystemVmTemplateRegistration { imageStoreDetailsDao = new ImageStoreDetailsDaoImpl(); clusterDao = new ClusterDaoImpl(); configurationDao = new ConfigurationDaoImpl(); + guestOSDao = new GuestOSDaoImpl(); tempDownloadDir = new File(System.getProperty("java.io.tmpdir")); } @@ -320,7 +324,7 @@ public class SystemVmTemplateRegistration { public static final Map NewTemplateMap = new HashMap<>(); - public static final Map RouterTemplateConfigurationNames = new HashMap() { + public static final Map RouterTemplateConfigurationNames = new HashMap<>() { { put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); @@ -331,14 +335,14 @@ public class SystemVmTemplateRegistration { } }; - public static final Map hypervisorGuestOsMap = new HashMap() { + public static Map hypervisorGuestOsMap = new HashMap<>() { { - put(Hypervisor.HypervisorType.KVM, LINUX_5_ID); + put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID); put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID); - put(Hypervisor.HypervisorType.Hyperv, LINUX_5_ID); - put(Hypervisor.HypervisorType.LXC, LINUX_5_ID); - put(Hypervisor.HypervisorType.Ovm3, LINUX_7_ID); + put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID); + put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); + put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID); } }; @@ -595,6 +599,23 @@ public class SystemVmTemplateRegistration { 
vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType); } + private void updateSystemVmTemplateGuestOsId() { + String systemVmGuestOsName = "Debian GNU/Linux 12 (64-bit)"; // default + try { + GuestOSVO guestOS = guestOSDao.findOneByDisplayName(systemVmGuestOsName); + if (guestOS != null) { + LOGGER.debug("Updating SystemVM Template Guest OS [{}] id", systemVmGuestOsName); + SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId()); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID); + } + } catch (Exception e) { + LOGGER.warn("Couldn't update SystemVM Template Guest OS id, due to {}", e.getMessage()); + } + } + public void updateConfigurationParams(Map configParams) { for (Map.Entry config : configParams.entrySet()) { boolean updated = configurationDao.update(config.getKey(), config.getValue()); @@ -813,7 +834,8 @@ public class SystemVmTemplateRegistration { section.get("filename"), section.get("downloadurl"), section.get("checksum"), - hypervisorType.second())); + hypervisorType.second(), + section.get("guestos"))); } Ini.Section defaultSection = ini.get("default"); return defaultSection.get("version").trim(); @@ -965,6 +987,10 @@ public class SystemVmTemplateRegistration { private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails) { VMTemplateVO templateVO = vmTemplateDao.findById(templateId); templateVO.setTemplateType(Storage.TemplateType.SYSTEM); + GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs()); + if (guestOS != null) { + templateVO.setGuestOSId(guestOS.getId()); + } boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); if (!updated) { String errMsg = String.format("updateSystemVmTemplates:Exception 
while updating template with id %s to be marked as 'system'", templateId); @@ -980,9 +1006,13 @@ public class SystemVmTemplateRegistration { updateConfigurationParams(configParams); } - private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) { + private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) { templateVO.setUrl(templateDetails.getUrl()); templateVO.setChecksum(templateDetails.getChecksum()); + GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs()); + if (guestOS != null) { + templateVO.setGuestOSId(guestOS.getId()); + } boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); if (!updated) { String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", templateDetails.getHypervisorType()); @@ -1020,7 +1050,7 @@ public class SystemVmTemplateRegistration { VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch( templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM); if (templateVO != null) { - updateTemplateUrlAndChecksum(templateVO, templateDetails); + updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails); } } } @@ -1029,6 +1059,7 @@ public class SystemVmTemplateRegistration { public void updateSystemVmTemplates(final Connection conn) { LOGGER.debug("Updating System Vm template IDs"); + updateSystemVmTemplateGuestOsId(); Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { @@ -1076,15 +1107,17 @@ public class SystemVmTemplateRegistration { private final String checksum; private final CPU.CPUArch arch; private String downloadedFilePath; + private final String guestOs; MetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType, String name, String filename, String url, - 
String checksum, CPU.CPUArch arch) { + String checksum, CPU.CPUArch arch, String guestOs) { this.hypervisorType = hypervisorType; this.name = name; this.filename = filename; this.url = url; this.checksum = checksum; this.arch = arch; + this.guestOs = guestOs; } public Hypervisor.HypervisorType getHypervisorType() { @@ -1111,6 +1144,10 @@ public class SystemVmTemplateRegistration { return arch; } + public String getGuestOs() { + return guestOs; + } + public String getDownloadedFilePath() { return downloadedFilePath; } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDao.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDao.java index 8a72182ec67..2a24016653d 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDao.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDao.java @@ -24,7 +24,7 @@ import com.cloud.usage.UsageBackupVO; import com.cloud.utils.db.GenericDao; public interface UsageBackupDao extends GenericDao { - void updateMetrics(Long vmId, Long size, Long virtualSize); - void removeUsage(Long accountId, Long vmId, Date eventDate); + void updateMetrics(Long vmId, Long backupOfferingId, Long size, Long virtualSize); + void removeUsage(Long accountId, Long vmId, Long backupOfferingId, Date eventDate); List getUsageRecords(Long accountId, Date startDate, Date endDate); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java index 3403a8dfe5b..e5b46b02a59 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java @@ -36,16 +36,17 @@ import com.cloud.utils.db.TransactionLegacy; @Component public class UsageBackupDaoImpl extends GenericDaoBase implements UsageBackupDao { - protected static final String UPDATE_DELETED = "UPDATE usage_backup SET removed = ? WHERE account_id = ? 
AND vm_id = ? and removed IS NULL"; + protected static final String UPDATE_DELETED = "UPDATE usage_backup SET removed = ? WHERE account_id = ? AND vm_id = ? and backup_offering_id = ? and removed IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT id, zone_id, account_id, domain_id, vm_id, backup_offering_id, size, protected_size, created, removed FROM usage_backup WHERE " + " account_id = ? AND ((removed IS NULL AND created <= ?) OR (created BETWEEN ? AND ?) OR (removed BETWEEN ? AND ?) " + " OR ((created <= ?) AND (removed >= ?)))"; @Override - public void updateMetrics(final Long vmId, final Long size, final Long virtualSize) { + public void updateMetrics(final Long vmId, Long backupOfferingId, final Long size, final Long virtualSize) { try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB)) { SearchCriteria sc = this.createSearchCriteria(); sc.addAnd("vmId", SearchCriteria.Op.EQ, vmId); + sc.addAnd("backupOfferingId", SearchCriteria.Op.EQ, backupOfferingId); UsageBackupVO vo = findOneBy(sc); if (vo != null) { vo.setSize(size); @@ -58,7 +59,7 @@ public class UsageBackupDaoImpl extends GenericDaoBase impl } @Override - public void removeUsage(Long accountId, Long vmId, Date eventDate) { + public void removeUsage(Long accountId, Long vmId, Long backupOfferingId, Date eventDate) { TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { txn.start(); @@ -67,6 +68,7 @@ public class UsageBackupDaoImpl extends GenericDaoBase impl pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), eventDate)); pstmt.setLong(2, accountId); pstmt.setLong(3, vmId); + pstmt.setLong(4, backupOfferingId); pstmt.executeUpdate(); } } catch (SQLException e) { diff --git a/engine/schema/src/main/java/com/cloud/vm/ConsoleSessionVO.java b/engine/schema/src/main/java/com/cloud/vm/ConsoleSessionVO.java index ef777be2de9..d8f2838dd47 100644 ---
a/engine/schema/src/main/java/com/cloud/vm/ConsoleSessionVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/ConsoleSessionVO.java @@ -19,6 +19,8 @@ package com.cloud.vm; +import org.apache.cloudstack.consoleproxy.ConsoleSession; + import java.util.Date; import javax.persistence.Column; @@ -32,7 +34,7 @@ import javax.persistence.TemporalType; @Entity @Table(name = "console_session") -public class ConsoleSessionVO { +public class ConsoleSessionVO implements ConsoleSession { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @@ -45,6 +47,9 @@ public class ConsoleSessionVO { @Column(name = "created") private Date created; + @Column(name = "domain_id") + private long domainId; + @Column(name = "account_id") private long accountId; @@ -86,6 +91,7 @@ public class ConsoleSessionVO { this.uuid = uuid; } + @Override public Date getCreated() { return created; } @@ -94,6 +100,16 @@ public class ConsoleSessionVO { this.created = created; } + @Override + public long getDomainId() { + return domainId; + } + + public void setDomainId(long domainId) { + this.domainId = domainId; + } + + @Override public long getAccountId() { return accountId; } @@ -102,6 +118,7 @@ public class ConsoleSessionVO { this.accountId = accountId; } + @Override public long getUserId() { return userId; } @@ -110,6 +127,7 @@ public class ConsoleSessionVO { this.userId = userId; } + @Override public long getInstanceId() { return instanceId; } @@ -118,6 +136,7 @@ public class ConsoleSessionVO { this.instanceId = instanceId; } + @Override public long getHostId() { return hostId; } @@ -126,6 +145,7 @@ public class ConsoleSessionVO { this.hostId = hostId; } + @Override public Date getRemoved() { return removed; } @@ -134,6 +154,7 @@ public class ConsoleSessionVO { this.removed = removed; } + @Override public Date getAcquired() { return acquired; } @@ -142,6 +163,7 @@ public class ConsoleSessionVO { this.acquired = acquired; } + @Override public String getConsoleEndpointCreatorAddress() { return 
consoleEndpointCreatorAddress; } @@ -150,6 +172,7 @@ public class ConsoleSessionVO { this.consoleEndpointCreatorAddress = consoleEndpointCreatorAddress; } + @Override public String getClientAddress() { return clientAddress; } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java index 95ced889b3d..b8fb9557a35 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java @@ -19,6 +19,7 @@ package com.cloud.vm.dao; +import com.cloud.utils.Pair; import com.cloud.vm.ConsoleSessionVO; import com.cloud.utils.db.GenericDao; @@ -36,4 +37,9 @@ public interface ConsoleSessionDao extends GenericDao { void acquireSession(String sessionUuid, String clientAddress); int expungeByVmList(List vmIds, Long batchSize); + + Pair, Integer> listConsoleSessions(Long id, List domainIds, Long accountId, Long userId, Long hostId, + Date startDate, Date endDate, Long instanceId, + String consoleEndpointCreatorAddress, String clientAddress, + boolean activeOnly, boolean acquired, Long pageSizeVal, Long startIndex); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java index 3d117894670..562142eecc8 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java @@ -22,6 +22,8 @@ package com.cloud.vm.dao; import java.util.Date; import java.util.List; +import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.db.GenericDaoBase; @@ -30,13 +32,28 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.ConsoleSessionVO; public class ConsoleSessionDaoImpl extends GenericDaoBase implements ConsoleSessionDao { + private 
static final String ID = "id"; + private static final String DOMAIN_IDS = "domainIds"; + private static final String ACCOUNT_ID = "accountId"; + private static final String USER_ID = "userId"; + private static final String HOST_ID = "hostId"; + private static final String INSTANCE_ID = "instanceId"; + private static final String VM_IDS = "vmIds"; + private static final String START_DATE = "startDate"; + private static final String END_DATE = "endDate"; + private static final String CREATOR_ADDRESS = "creatorAddress"; + private static final String CLIENT_ADDRESS = "clientAddress"; + private static final String ACQUIRED = "acquired"; + private static final String CREATED = "created"; + private static final String REMOVED = "removed"; + private static final String REMOVED_NOT_NULL = "removedNotNull"; private final SearchBuilder searchByRemovedDate; public ConsoleSessionDaoImpl() { searchByRemovedDate = createSearchBuilder(); - searchByRemovedDate.and("removedNotNull", searchByRemovedDate.entity().getRemoved(), SearchCriteria.Op.NNULL); - searchByRemovedDate.and("removed", searchByRemovedDate.entity().getRemoved(), SearchCriteria.Op.LTEQ); + searchByRemovedDate.and(REMOVED_NOT_NULL, searchByRemovedDate.entity().getRemoved(), SearchCriteria.Op.NNULL); + searchByRemovedDate.and(REMOVED, searchByRemovedDate.entity().getRemoved(), SearchCriteria.Op.LTEQ); } @Override @@ -57,7 +74,7 @@ public class ConsoleSessionDaoImpl extends GenericDaoBase searchCriteria = searchByRemovedDate.create(); - searchCriteria.setParameters("removed", date); + searchCriteria.setParameters(REMOVED, date); return expunge(searchCriteria); } @@ -75,9 +92,66 @@ public class ConsoleSessionDaoImpl extends GenericDaoBase sb = createSearchBuilder(); - sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + sb.and(VM_IDS, sb.entity().getInstanceId(), SearchCriteria.Op.IN); SearchCriteria sc = sb.create(); - sc.setParameters("vmIds", vmIds.toArray()); + sc.setParameters(VM_IDS, 
vmIds.toArray()); return batchExpunge(sc, batchSize); } + + @Override + public Pair, Integer> listConsoleSessions(Long id, List domainIds, Long accountId, Long userId, Long hostId, + Date startDate, Date endDate, Long instanceId, + String consoleEndpointCreatorAddress, String clientAddress, + boolean activeOnly, boolean acquired, Long pageSizeVal, Long startIndex) { + Filter filter = new Filter(ConsoleSessionVO.class, CREATED, false, startIndex, pageSizeVal); + SearchCriteria searchCriteria = createListConsoleSessionsSearchCriteria(id, domainIds, accountId, userId, hostId, + startDate, endDate, instanceId, consoleEndpointCreatorAddress, clientAddress, activeOnly, acquired); + + return searchAndCount(searchCriteria, filter, true); + } + + private SearchCriteria createListConsoleSessionsSearchCriteria(Long id, List domainIds, Long accountId, Long userId, Long hostId, + Date startDate, Date endDate, Long instanceId, + String consoleEndpointCreatorAddress, String clientAddress, + boolean activeOnly, boolean acquired) { + SearchCriteria searchCriteria = createListConsoleSessionsSearchBuilder(activeOnly, acquired).create(); + + searchCriteria.setParametersIfNotNull(ID, id); + searchCriteria.setParametersIfNotNull(DOMAIN_IDS, domainIds == null ? null : domainIds.toArray()); + searchCriteria.setParametersIfNotNull(ACCOUNT_ID, accountId); + searchCriteria.setParametersIfNotNull(USER_ID, userId); + searchCriteria.setParametersIfNotNull(HOST_ID, hostId); + searchCriteria.setParametersIfNotNull(INSTANCE_ID, instanceId); + searchCriteria.setParametersIfNotNull(START_DATE, startDate); + searchCriteria.setParametersIfNotNull(END_DATE, endDate); + searchCriteria.setParametersIfNotNull(CREATOR_ADDRESS, consoleEndpointCreatorAddress); + searchCriteria.setParametersIfNotNull(CLIENT_ADDRESS, clientAddress); + + return searchCriteria; + } + + private SearchBuilder createListConsoleSessionsSearchBuilder(boolean activeOnly, boolean acquired) { + SearchBuilder searchBuilder = createSearchBuilder(); + 
searchBuilder.and(ID, searchBuilder.entity().getId(), SearchCriteria.Op.EQ); + searchBuilder.and(DOMAIN_IDS, searchBuilder.entity().getDomainId(), SearchCriteria.Op.IN); + searchBuilder.and(ACCOUNT_ID, searchBuilder.entity().getAccountId(), SearchCriteria.Op.EQ); + searchBuilder.and(USER_ID, searchBuilder.entity().getUserId(), SearchCriteria.Op.EQ); + searchBuilder.and(HOST_ID, searchBuilder.entity().getHostId(), SearchCriteria.Op.EQ); + searchBuilder.and(INSTANCE_ID, searchBuilder.entity().getInstanceId(), SearchCriteria.Op.EQ); + searchBuilder.and(START_DATE, searchBuilder.entity().getCreated(), SearchCriteria.Op.GTEQ); + searchBuilder.and(END_DATE, searchBuilder.entity().getCreated(), SearchCriteria.Op.LTEQ); + searchBuilder.and(CREATOR_ADDRESS, searchBuilder.entity().getConsoleEndpointCreatorAddress(), SearchCriteria.Op.EQ); + searchBuilder.and(CLIENT_ADDRESS, searchBuilder.entity().getClientAddress(), SearchCriteria.Op.EQ); + + if (activeOnly) { + searchBuilder.and(ACQUIRED, searchBuilder.entity().getAcquired(), SearchCriteria.Op.NNULL); + searchBuilder.and(REMOVED, searchBuilder.entity().getRemoved(), SearchCriteria.Op.NULL); + } else if (acquired) { + searchBuilder.and(ACQUIRED, searchBuilder.entity().getAcquired(), SearchCriteria.Op.NNULL); + } + + searchBuilder.done(); + return searchBuilder; + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 823642d8c3d..f722b4c54e4 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -118,7 +118,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listVmsMigratingFromHost(Long hostId); - List listByZoneWithBackups(Long zoneId, Long backupOfferingId); + List listByZoneAndBackupOffering(Long zoneId, Long backupOfferingId); public Long countActiveByHostId(long hostId); @@ -187,4 +187,5 @@ public interface 
VMInstanceDao extends GenericDao, StateDao< Map getNameIdMapForVmIds(Collection ids); + List listByIdsIncludingRemoved(List ids); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index dc0391f71fd..2f19d36c37a 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -661,7 +661,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } @Override - public List listByZoneWithBackups(Long zoneId, Long backupOfferingId) { + public List listByZoneAndBackupOffering(Long zoneId, Long backupOfferingId) { SearchCriteria sc = BackupSearch.create(); sc.setParameters("zone_id", zoneId); if (backupOfferingId != null) { @@ -1246,4 +1246,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return vms.stream() .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); } + + @Override + public List listByIdsIncludingRemoved(List ids) { + SearchBuilder idsSearch = createSearchBuilder(); + idsSearch.and("ids", idsSearch.entity().getId(), SearchCriteria.Op.IN); + idsSearch.done(); + SearchCriteria sc = idsSearch.create(); + sc.setParameters("ids", ids.toArray()); + return listIncludingRemovedBy(sc); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupDetailVO.java new file mode 100644 index 00000000000..aaf63518708 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupDetailVO.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name = "backup_details") +public class BackupDetailVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "backup_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value", length = 65536) + private String value; + + @Column(name = "display") + private boolean display = true; + + public BackupDetailVO() { + } + + public BackupDetailVO(long backupId, String name, String value, boolean display) { + this.resourceId = backupId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setId(long id) { + this.id = id; + } + + public void setResourceId(long resourceId) { + 
this.resourceId = resourceId; + } + + public void setName(String name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java index e8364520ed0..98efa94ceca 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java @@ -144,11 +144,21 @@ public class BackupRepositoryVO implements BackupRepository { return usedBytes; } + @Override + public void setUsedBytes(Long usedBytes) { + this.usedBytes = usedBytes; + } + @Override public Long getCapacityBytes() { return capacityBytes; } + @Override + public void setCapacityBytes(Long capacityBytes) { + this.capacityBytes = capacityBytes; + } + public Date getCreated() { return created; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index 75c7a8be55c..37e8105e3d5 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -63,18 +63,22 @@ public class BackupScheduleVO implements BackupSchedule { Long asyncJobId; @Column(name = "max_backups") - Integer maxBackups = 0; + private int maxBackups = 0; + + @Column(name = "quiescevm") + Boolean quiesceVM = false; public BackupScheduleVO() { } - public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp, Integer maxBackups) { + public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp, int maxBackups, Boolean quiesceVM) { this.vmId = vmId; this.scheduleType = (short) 
scheduleType.ordinal(); this.schedule = schedule; this.timezone = timezone; this.scheduledTimestamp = scheduledTimestamp; this.maxBackups = maxBackups; + this.quiesceVM = quiesceVM; } @Override @@ -142,11 +146,19 @@ public class BackupScheduleVO implements BackupSchedule { this.asyncJobId = asyncJobId; } - public Integer getMaxBackups() { + public int getMaxBackups() { return maxBackups; } - public void setMaxBackups(Integer maxBackups) { + public void setMaxBackups(int maxBackups) { this.maxBackups = maxBackups; } + + public void setQuiesceVM(Boolean quiesceVM) { + this.quiesceVM = quiesceVM; + } + + public Boolean getQuiesceVM() { + return quiesceVM; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 9ef442baff9..0f8a10fb7be 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup; import com.cloud.utils.db.GenericDao; import com.google.gson.Gson; + import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; @@ -26,6 +27,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.Map; import java.util.UUID; import javax.persistence.Column; @@ -38,6 +40,7 @@ import javax.persistence.Id; import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import javax.persistence.Transient; @Entity @Table(name = "backups") @@ -47,11 +50,17 @@ public class BackupVO implements Backup { @Column(name = "id") private long id; + @Column(name = "name") + private String name; + + @Column(name = "description") + private String description; + @Column(name = "uuid") private String uuid; @Column(name = "vm_id") - private 
long vmId; + private Long vmId; @Column(name = "external_id") private String externalId; @@ -88,12 +97,15 @@ public class BackupVO implements Backup { @Column(name = "zone_id") private long zoneId; - @Column(name = "backup_interval_type") - private short backupIntervalType; - @Column(name = "backed_volumes", length = 65535) protected String backedUpVolumes; + @Column(name = "backup_schedule_id") + private Long backupScheduleId; + + @Transient + Map details; + public BackupVO() { this.uuid = UUID.randomUUID().toString(); } @@ -115,11 +127,11 @@ public class BackupVO implements Backup { } @Override - public long getVmId() { + public Long getVmId() { return vmId; } - public void setVmId(long vmId) { + public void setVmId(Long vmId) { this.vmId = vmId; } @@ -211,14 +223,6 @@ public class BackupVO implements Backup { this.zoneId = zoneId; } - public short getBackupIntervalType() { - return backupIntervalType; - } - - public void setBackupIntervalType(short backupIntervalType) { - this.backupIntervalType = backupIntervalType; - } - @Override public Class getEntityType() { return Backup.class; @@ -226,7 +230,22 @@ public class BackupVO implements Backup { @Override public String getName() { - return null; + return name; + } + + @Override + public void setName(String name) { + this.name = name; + } + + @Override + public String getDescription() { + return description; + } + + @Override + public void setDescription(String description) { + this.description = description; } public List getBackedUpVolumes() { @@ -240,11 +259,33 @@ public class BackupVO implements Backup { this.backedUpVolumes = backedUpVolumes; } + @Override + public Map getDetails() { + return details; + } + + @Override + public String getDetail(String name) { + return this.details.get(name); + } + + public void setDetails(Map details) { + this.details = details; + } + public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { this.removed = removed; } + + @Override + public Long 
getBackupScheduleId() { + return backupScheduleId; + } + + public void setBackupScheduleId(Long backupScheduleId) { + this.backupScheduleId = backupScheduleId; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java index ffd5e5a4a66..e60e49e1a0c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.backup.dao; import java.util.List; -import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupVO; @@ -33,12 +32,14 @@ public interface BackupDao extends GenericDao { List listByVmId(Long zoneId, Long vmId); List listByAccountId(Long accountId); List syncBackups(Long zoneId, Long vmId, List externalBackups); + List listByVmIdAndOffering(Long zoneId, Long vmId, Long offeringId); + List searchByVmIds(List vmIds); BackupVO getBackupVO(Backup backup); List listByOfferingId(Long backupOfferingId); - - List listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType); - - BackupResponse newBackupResponse(Backup backup); + List listVmIdsWithBackupsInZone(Long zoneId); public Long countBackupsForAccount(long accountId); public Long calculateBackupStorageForAccount(long accountId); + void loadDetails(BackupVO backup); + void saveDetails(BackupVO backup); + List listBySchedule(Long backupScheduleId); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java index b4e1a760282..fd29da72c71 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java @@ -19,29 +19,31 @@ package 
org.apache.cloudstack.backup.dao; import java.util.ArrayList; import java.util.List; -import java.util.Objects; +import java.util.Map; import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericSearchBuilder; -import org.apache.cloudstack.api.response.BackupResponse; -import org.apache.cloudstack.backup.Backup; -import org.apache.cloudstack.backup.BackupOffering; -import org.apache.cloudstack.backup.BackupVO; -import com.cloud.dc.DataCenterVO; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupDetailVO; +import org.apache.cloudstack.backup.BackupVO; +import org.apache.commons.collections.CollectionUtils; + import com.cloud.dc.dao.DataCenterDao; -import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; -import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.vm.VMInstanceVO; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import com.cloud.vm.dao.VMInstanceDao; -import com.google.gson.Gson; +import com.cloud.network.dao.NetworkDao; public class BackupDaoImpl extends GenericDaoBase implements BackupDao { @@ -57,13 +59,26 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac @Inject VMInstanceDao vmInstanceDao; + @Inject + private VMTemplateDao templateDao; + @Inject BackupOfferingDao backupOfferingDao; + @Inject + BackupDetailsDao backupDetailsDao; + + @Inject + ServiceOfferingDao serviceOfferingDao; + + @Inject + NetworkDao networkDao; + private SearchBuilder backupSearch; private GenericSearchBuilder CountBackupsByAccount; private GenericSearchBuilder CalculateBackupStorageByAccount; - private SearchBuilder ListBackupsByVMandIntervalType; 
+ private SearchBuilder listBackupsBySchedule; + private GenericSearchBuilder backupVmSearchInZone; public BackupDaoImpl() { } @@ -77,6 +92,11 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac backupSearch.and("zone_id", backupSearch.entity().getZoneId(), SearchCriteria.Op.EQ); backupSearch.done(); + backupVmSearchInZone = createSearchBuilder(Long.class); + backupVmSearchInZone.select(null, SearchCriteria.Func.DISTINCT, backupVmSearchInZone.entity().getVmId()); + backupVmSearchInZone.and("zone_id", backupVmSearchInZone.entity().getZoneId(), SearchCriteria.Op.EQ); + backupVmSearchInZone.done(); + CountBackupsByAccount = createSearchBuilder(Long.class); CountBackupsByAccount.select(null, SearchCriteria.Func.COUNT, null); CountBackupsByAccount.and("account", CountBackupsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); @@ -91,12 +111,11 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac CalculateBackupStorageByAccount.and("removed", CalculateBackupStorageByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); CalculateBackupStorageByAccount.done(); - ListBackupsByVMandIntervalType = createSearchBuilder(); - ListBackupsByVMandIntervalType.and("vmId", ListBackupsByVMandIntervalType.entity().getVmId(), SearchCriteria.Op.EQ); - ListBackupsByVMandIntervalType.and("intervalType", ListBackupsByVMandIntervalType.entity().getBackupIntervalType(), SearchCriteria.Op.EQ); - ListBackupsByVMandIntervalType.and("status", ListBackupsByVMandIntervalType.entity().getStatus(), SearchCriteria.Op.EQ); - ListBackupsByVMandIntervalType.and("removed", ListBackupsByVMandIntervalType.entity().getRemoved(), SearchCriteria.Op.NULL); - ListBackupsByVMandIntervalType.done(); + listBackupsBySchedule = createSearchBuilder(); + listBackupsBySchedule.and("backup_schedule_id", listBackupsBySchedule.entity().getBackupScheduleId(), SearchCriteria.Op.EQ); + listBackupsBySchedule.and("status", listBackupsBySchedule.entity().getStatus(), SearchCriteria.Op.EQ); + 
listBackupsBySchedule.and("removed", listBackupsBySchedule.entity().getRemoved(), SearchCriteria.Op.NULL); + listBackupsBySchedule.done(); } @Override @@ -130,6 +149,17 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac return new ArrayList<>(listBy(sc)); } + @Override + public List listByVmIdAndOffering(Long zoneId, Long vmId, Long offeringId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters("vm_id", vmId); + if (zoneId != null) { + sc.setParameters("zone_id", zoneId); + } + sc.setParameters("backup_offering_id", offeringId); + return new ArrayList<>(listBy(sc)); + } + private Backup findByExternalId(Long zoneId, String externalId) { SearchCriteria sc = backupSearch.create(); sc.setParameters("external_id", externalId); @@ -137,6 +167,18 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac return findOneBy(sc); } + @Override + public List searchByVmIds(List vmIds) { + if (CollectionUtils.isEmpty(vmIds)) { + return new ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVmId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return search(sc, null); + } + public BackupVO getBackupVO(Backup backup) { BackupVO backupVO = new BackupVO(); backupVO.setExternalId(backup.getExternalId()); @@ -158,6 +200,27 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac expunge(sc); } + @Override + public BackupVO persist(BackupVO backup) { + return Transaction.execute((TransactionCallback) status -> { + BackupVO backupDb = super.persist(backup); + saveDetails(backup); + loadDetails(backupDb); + return backupDb; + }); + } + + @Override + public boolean update(Long id, BackupVO backup) { + return Transaction.execute((TransactionCallback) status -> { + boolean result = super.update(id, backup); + if (result) { + saveDetails(backup); + } + return result; + }); + } + @Override public List syncBackups(Long zoneId, Long vmId, 
List externalBackups) { for (Backup backup : externalBackups) { @@ -171,7 +234,7 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac public Long countBackupsForAccount(long accountId) { SearchCriteria sc = CountBackupsByAccount.create(); sc.setParameters("account", accountId); - sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); + sc.setParameters("status", Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); return customSearch(sc, null).get(0); } @@ -179,58 +242,42 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac public Long calculateBackupStorageForAccount(long accountId) { SearchCriteria sc = CalculateBackupStorageByAccount.create(); sc.setParameters("account", accountId); - sc.setParameters("status", Backup.Status.Error, Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); + sc.setParameters("status", Backup.Status.Failed, Backup.Status.Removed, Backup.Status.Expunged); return customSearch(sc, null).get(0).sum; } @Override - public List listBackupsByVMandIntervalType(Long vmId, Backup.Type backupType) { - SearchCriteria sc = ListBackupsByVMandIntervalType.create(); - sc.setParameters("vmId", vmId); - sc.setParameters("type", backupType.ordinal()); + public List listBySchedule(Long backupScheduleId) { + SearchCriteria sc = listBackupsBySchedule.create(); + sc.setParameters("backup_schedule_id", backupScheduleId); sc.setParameters("status", Backup.Status.BackedUp); - return listBy(sc, null); + return listBy(sc, new Filter(BackupVO.class, "date", true)); } @Override - public BackupResponse newBackupResponse(Backup backup) { - VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); - AccountVO account = accountDao.findByIdIncludingRemoved(vm.getAccountId()); - DomainVO domain = domainDao.findByIdIncludingRemoved(vm.getDomainId()); - DataCenterVO zone = 
dataCenterDao.findByIdIncludingRemoved(vm.getDataCenterId()); - Long offeringId = backup.getBackupOfferingId(); - if (offeringId == null) { - offeringId = vm.getBackupOfferingId(); - } - BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(offeringId); + public void loadDetails(BackupVO backup) { + Map details = backupDetailsDao.listDetailsKeyPairs(backup.getId()); + backup.setDetails(details); + } - BackupResponse response = new BackupResponse(); - response.setId(backup.getUuid()); - response.setVmId(vm.getUuid()); - response.setVmName(vm.getHostName()); - response.setExternalId(backup.getExternalId()); - response.setType(backup.getType()); - response.setDate(backup.getDate()); - response.setSize(backup.getSize()); - response.setProtectedSize(backup.getProtectedSize()); - response.setStatus(backup.getStatus()); - // ACS 4.20: For backups taken prior this release the backup.backed_volumes column would be empty hence use vm_instance.backup_volumes - String backedUpVolumes; - if (Objects.isNull(backup.getBackedUpVolumes())) { - backedUpVolumes = new Gson().toJson(vm.getBackupVolumeList().toArray(), Backup.VolumeInfo[].class); - } else { - backedUpVolumes = new Gson().toJson(backup.getBackedUpVolumes().toArray(), Backup.VolumeInfo[].class); + @Override + public void saveDetails(BackupVO backup) { + Map detailsStr = backup.getDetails(); + if (detailsStr == null) { + return; } - response.setVolumes(backedUpVolumes); - response.setBackupOfferingId(offering.getUuid()); - response.setBackupOffering(offering.getName()); - response.setAccountId(account.getUuid()); - response.setAccount(account.getAccountName()); - response.setDomainId(domain.getUuid()); - response.setDomain(domain.getName()); - response.setZoneId(zone.getUuid()); - response.setZone(zone.getName()); - response.setObjectName("backup"); - return response; + List details = new ArrayList(); + for (String key : detailsStr.keySet()) { + BackupDetailVO detail = new BackupDetailVO(backup.getId(), key, 
detailsStr.get(key), true); + details.add(detail); + } + backupDetailsDao.saveDetails(details); + } + + @Override + public List listVmIdsWithBackupsInZone(Long zoneId) { + SearchCriteria sc = backupVmSearchInZone.create(); + sc.setParameters("zone_id", zoneId); + return customSearchIncludingRemoved(sc, null); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java new file mode 100644 index 00000000000..664650074bc --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import org.apache.cloudstack.backup.BackupDetailVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; + +import com.cloud.utils.db.GenericDao; + +public interface BackupDetailsDao extends GenericDao, ResourceDetailsDao { + +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java new file mode 100644 index 00000000000..08c7192af90 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + + +import org.apache.cloudstack.backup.BackupDetailVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + +@Component +public class BackupDetailsDaoImpl extends ResourceDetailsDaoBase implements BackupDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new BackupDetailVO(resourceId, key, value, display)); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java index 0568a0185bb..9d67d07fe5e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java @@ -58,6 +58,7 @@ public class BackupOfferingDaoImpl extends GenericDaoBase listByZoneAndProvider(Long zoneId, String provider); BackupRepository findByBackupOfferingId(Long backupOfferingId); + + boolean updateCapacity(BackupRepository backupRepository, Long capacityBytes, Long usedBytes); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java index 460b6d8aba4..ea969988e2b 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java @@ -64,4 +64,12 @@ public class BackupRepositoryDaoImpl extends GenericDaoBase extends GenericDao */ R findDetail(long resourceId, String name); + /** + * Find details by key + * @param key + * @return + */ + List findDetails(String key); + /** * Find details by resourceId and key * @param resourceId diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 58b60531e5a..eafaed182ab 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -65,6 +65,12 @@ public abstract class ResourceDetailsDaoBase extends G return findOneBy(sc); } + public List findDetails(String key) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("name", key); + return listBy(sc); + } + public List findDetails(long resourceId, String key) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDao.java index 94f6b5ec372..695742823eb 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDao.java @@ -39,4 +39,6 @@ public interface ObjectStoreDao extends GenericDao { ObjectStoreResponse setObjectStoreResponse(ObjectStoreResponse storeData, ObjectStoreVO store); Integer countAllObjectStores(); + + Boolean updateAllocatedSize(ObjectStoreVO objectStoreVO, long delta); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDaoImpl.java index 51abde013b6..891ac0996ac 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreDaoImpl.java @@ -21,6 +21,10 @@ package 
org.apache.cloudstack.storage.datastore.db; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; + import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.springframework.stereotype.Component; @@ -142,6 +146,19 @@ public class ObjectStoreDaoImpl extends GenericDaoBase impl ObjectStoreResponse osResponse = new ObjectStoreResponse(); osResponse.setId(store.getUuid()); osResponse.setName(store.getName()); + if (store.getTotalSize() != null && store.getTotalSize() != 0L) { + osResponse.setStorageTotal(store.getTotalSize()); + } + if (store.getUsedSize() == null) { + osResponse.setStorageUsed(0L); + } else { + osResponse.setStorageUsed(store.getUsedSize()); + } + if (store.getAllocatedSize() == null) { + osResponse.setStorageAllocated(0L); + } else { + osResponse.setStorageAllocated(store.getAllocatedSize()); + } osResponse.setProviderName(store.getProviderName()); String url = store.getUrl(); osResponse.setUrl(url); @@ -159,4 +176,19 @@ public class ObjectStoreDaoImpl extends GenericDaoBase impl SearchCriteria sc = createSearchCriteria(); return getCount(sc); } + + @Override + public Boolean updateAllocatedSize(ObjectStoreVO objectStoreVO, long delta) { + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(final TransactionStatus status) { + if (objectStoreVO.getAllocatedSize() != null) { + objectStoreVO.setAllocatedSize(objectStoreVO.getAllocatedSize() + delta); + } else { + objectStoreVO.setAllocatedSize(delta); + } + return update(objectStoreVO.getId(), objectStoreVO); + } + }); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java 
b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java index 18cc06a6573..23b650acc79 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ObjectStoreVO.java @@ -60,8 +60,11 @@ public class ObjectStoreVO implements ObjectStore { @Column(name = "total_size") private Long totalSize; - @Column(name = "used_bytes") - private Long usedBytes; + @Column(name = "used_size") + private Long usedSize; + + @Column(name = "allocated_size") + private Long allocatedSize; @Transient Map details; @@ -130,18 +133,26 @@ public class ObjectStoreVO implements ObjectStore { this.totalSize = totalSize; } - public Long getUsedBytes() { - return usedBytes; + public Long getUsedSize() { + return usedSize; } - public void setUsedBytes(Long usedBytes) { - this.usedBytes = usedBytes; + public void setUsedSize(Long usedSize) { + this.usedSize = usedSize; } public void setDetails(Map details) { this.details = details; } + public Long getAllocatedSize() { + return allocatedSize; + } + + public void setAllocatedSize(Long allocatedSize) { + this.allocatedSize = allocatedSize; + } + @Override public String toString() { return String.format("ObjectStore %s", diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index 7600cdb9b81..37aa70abb6e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -168,4 +168,7 @@ public interface PrimaryDataStoreDao extends GenericDao { List listByIds(List ids); List findStoragePoolsByEmptyStorageAccessGroups(Long dcId, Long podId, Long clusterId, ScopeType scope, HypervisorType hypervisorType); + + 
List findPoolsByStorageTypeAndZone(Storage.StoragePoolType storageType, Long zoneId); + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 71d5c93f027..8b230d03154 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -916,6 +916,14 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return listBy(sc); } + @Override + public List findPoolsByStorageTypeAndZone(Storage.StoragePoolType storageType, Long zoneId) { + SearchCriteria sc = AllFieldSearch.create(); + sc.setParameters("poolType", storageType); + sc.addAnd("dataCenterId", Op.EQ, zoneId); + return listBy(sc); + } + private SearchCriteria createStoragePoolSearchCriteria(Long storagePoolId, String storagePoolName, Long zoneId, String path, Long podId, Long clusterId, Long hostId, String address, ScopeType scopeType, StoragePoolStatus status, String keyword, String storageAccessGroup) { diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java index db4c64bd0ab..902cb73dc05 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -61,8 +61,11 @@ StateDao listExtractedSnapshotsBeforeDate(Date beforeDate); + List listSnapshotsBySnapshotId(long snapshotId); + List listReadyBySnapshot(long snapshotId, DataStoreRole role); + List listReadyBySnapshotId(long snapshotId); SnapshotDataStoreVO findBySourceSnapshot(long snapshotId, DataStoreRole role); List 
findBySnapshotIdAndNotInDestroyedHiddenState(long snapshotId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java index b5faa6caedf..241c3df2e4a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java @@ -16,24 +16,6 @@ // under the License. package org.apache.cloudstack.storage.datastore.db; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Map; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; -import org.apache.commons.collections.CollectionUtils; -import org.springframework.stereotype.Component; - import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.SnapshotVO; @@ -47,6 +29,25 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; + 
+import org.apache.commons.collections.CollectionUtils; + +import org.springframework.stereotype.Component; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; + @Component public class SnapshotDataStoreDaoImpl extends GenericDaoBase implements SnapshotDataStoreDao { private static final String STORE_ID = "store_id"; @@ -76,6 +77,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore; private SearchBuilder searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq; + private SearchBuilder searchBySnapshotId; protected static final List HYPERVISORS_SUPPORTING_SNAPSHOTS_CHAINING = List.of(Hypervisor.HypervisorType.XenServer); @@ -187,6 +189,11 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase listSnapshotsBySnapshotId(long snapshotId) { + SearchCriteria sc = searchBySnapshotId.create(); + sc.setParameters(SNAPSHOT_ID, snapshotId); + return listBy(sc); + } + @Override public List listReadyBySnapshot(long snapshotId, DataStoreRole role) { SearchCriteria sc = createSearchCriteriaBySnapshotIdAndStoreRole(snapshotId, role); @@ -410,6 +424,14 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase listReadyBySnapshotId(long snapshotId) { + SearchCriteria sc = searchBySnapshotId.create(); + sc.setParameters(SNAPSHOT_ID, snapshotId); + sc.setParameters(STATE, State.Ready); + return listBy(sc); + } + @Override public SnapshotDataStoreVO findBySourceSnapshot(long snapshotId, DataStoreRole role) { SearchCriteria sc = createSearchCriteriaBySnapshotIdAndStoreRole(snapshotId, role); diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml 
b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index 4ae4506cab7..f26e6ad4984 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -272,6 +272,7 @@ + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql b/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql index c36b71c2f25..eec4ac3f028 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql @@ -55,7 +55,7 @@ UPDATE `cloud`.`service_offering` SET ram_size = 512 WHERE unique_name IN ("Clou AND system_use = 1 AND ram_size < 512; -- NSX Plugin -- -CREATE TABLE `cloud`.`nsx_providers` ( +CREATE TABLE IF NOT EXISTS `cloud`.`nsx_providers` ( `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', `uuid` varchar(40), `zone_id` bigint unsigned NOT NULL COMMENT 'Zone ID', diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 54124fbc709..e7ca5519ac0 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -19,9 +19,9 @@ -- Schema upgrade from 4.20.1.0 to 4.21.0.0 --; --- Add columns max_backup and backup_interval_type to backup table -ALTER TABLE `cloud`.`backup_schedule` ADD COLUMN `max_backups` int(8) default NULL COMMENT 'maximum number of backups to maintain'; -ALTER TABLE `cloud`.`backups` ADD COLUMN `backup_interval_type` int(5) COMMENT 'type of backup, e.g. 
manual, recurring - hourly, daily, weekly or monthly'; +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_schedule', 'max_backups', 'INT(8) UNSIGNED NOT NULL DEFAULT 0 COMMENT ''Maximum number of backups to be retained'''); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'backup_schedule_id', 'BIGINT(20) UNSIGNED'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_schedule', 'quiescevm', 'tinyint(1) default NULL COMMENT "Quiesce VM before taking backup"'); -- Update default value for the config 'vm.network.nic.max.secondary.ipaddresses' (and value to default value if value is null) UPDATE `cloud`.`configuration` SET default_value = '10' WHERE name = 'vm.network.nic.max.secondary.ipaddresses'; @@ -665,6 +665,96 @@ ALTER TABLE `cloud`.`networks` MODIFY COLUMN `gateway` varchar(255) DEFAULT NULL ALTER TABLE `cloud`.`networks` MODIFY COLUMN `ip6_cidr` varchar(1024) DEFAULT NULL COMMENT 'IPv6 cidr(s) for this network'; ALTER TABLE `cloud`.`networks` MODIFY COLUMN `ip6_gateway` varchar(1024) DEFAULT NULL COMMENT 'IPv6 gateway(s) for this network'; +-- Add columns name, description and backup_interval_type to backup table +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'name', 'VARCHAR(255) NULL COMMENT "name of the backup"'); +UPDATE `cloud`.`backups` backup INNER JOIN `cloud`.`vm_instance` vm ON backup.vm_id = vm.id SET backup.name = vm.name; +ALTER TABLE `cloud`.`backups` MODIFY COLUMN `name` VARCHAR(255) NOT NULL; +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'description', 'VARCHAR(1024) COMMENT "description for the backup"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'backup_interval_type', 'int(5) COMMENT "type of backup, e.g. 
manual, recurring - hourly, daily, weekly or monthly"'); + +-- Create backup details table +CREATE TABLE IF NOT EXISTS `cloud`.`backup_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `backup_id` bigint unsigned NOT NULL COMMENT 'backup id', + `name` varchar(255) NOT NULL, + `value` TEXT NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT 1 COMMENT 'Should detail be displayed to the end user', + PRIMARY KEY (`id`), + CONSTRAINT `fk_backup_details__backup_id` FOREIGN KEY `fk_backup_details__backup_id`(`backup_id`) REFERENCES `backups`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Add diskOfferingId, deviceId, minIops and maxIops to backed_volumes in backups table +UPDATE `cloud`.`backups` b +INNER JOIN `cloud`.`vm_instance` vm ON b.vm_id = vm.id +SET b.backed_volumes = ( + SELECT CONCAT("[", + GROUP_CONCAT( + CONCAT( + "{\"uuid\":\"", v.uuid, "\",", + "\"type\":\"", v.volume_type, "\",", + "\"size\":", v.`size`, ",", + "\"path\":\"", IFNULL(v.path, 'null'), "\",", + "\"deviceId\":", IFNULL(v.device_id, 'null'), ",", + "\"diskOfferingId\":\"", doff.uuid, "\",", + "\"minIops\":", IFNULL(v.min_iops, 'null'), ",", + "\"maxIops\":", IFNULL(v.max_iops, 'null'), + "}" + ) + SEPARATOR "," + ), + "]") + FROM `cloud`.`volumes` v + LEFT JOIN `cloud`.`disk_offering` doff ON v.disk_offering_id = doff.id + WHERE v.instance_id = vm.id +); + +-- Add diskOfferingId, deviceId, minIops and maxIops to backup_volumes in vm_instance table +UPDATE `cloud`.`vm_instance` vm +SET vm.backup_volumes = ( + SELECT CONCAT("[", + GROUP_CONCAT( + CONCAT( + "{\"uuid\":\"", v.uuid, "\",", + "\"type\":\"", v.volume_type, "\",", + "\"size\":", v.`size`, ",", + "\"path\":\"", IFNULL(v.path, 'null'), "\",", + "\"deviceId\":", IFNULL(v.device_id, 'null'), ",", + "\"diskOfferingId\":\"", doff.uuid, "\",", + "\"minIops\":", IFNULL(v.min_iops, 'null'), ",", + "\"maxIops\":", IFNULL(v.max_iops, 'null'), + "}" + ) + SEPARATOR "," + ), + "]") + FROM `cloud`.`volumes` v + LEFT 
JOIN `cloud`.`disk_offering` doff ON v.disk_offering_id = doff.id + WHERE v.instance_id = vm.id +) +WHERE vm.backup_offering_id IS NOT NULL; + +-- Add column allocated_size to object_store table. Rename column 'used_bytes' to 'used_size' +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.object_store', 'allocated_size', 'bigint unsigned COMMENT "allocated size in bytes"'); +ALTER TABLE `cloud`.`object_store` CHANGE COLUMN `used_bytes` `used_size` BIGINT UNSIGNED COMMENT 'used size in bytes'; +ALTER TABLE `cloud`.`object_store` MODIFY COLUMN `total_size` bigint unsigned COMMENT 'total size in bytes'; +UPDATE `cloud`.`object_store` +JOIN ( + SELECT object_store_id, SUM(quota) AS total_quota + FROM `cloud`.`bucket` + WHERE removed IS NULL + GROUP BY object_store_id +) buckets_quota_sum_view ON `object_store`.id = buckets_quota_sum_view.object_store_id +SET `object_store`.allocated_size = buckets_quota_sum_view.total_quota; + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.console_session', 'domain_id', 'bigint(20) unsigned NOT NULL'); + +UPDATE `cloud`.`console_session` `cs` +SET `cs`.`domain_id` = ( + SELECT `acc`.`domain_id` + FROM `cloud`.`account` `acc` + WHERE `acc`.`id` = `cs`.`account_id` +); + -- Disk controller mappings CREATE TABLE IF NOT EXISTS `cloud`.`disk_controller_mapping` ( `id` bigint(20) unsigned NOT NULL auto_increment, diff --git a/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java b/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java index 7f151730c9c..3c8e4c046ae 100644 --- a/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; @@ -316,4 +317,24 @@ public class VMTemplateDaoImplTest { 
verify(searchCriteria).setParameters("extensionId", extensionId); verify(templateDao).customSearchIncludingRemoved(eq(searchCriteria), isNull()); } + + @Test + public void testFindSystemVMReadyTemplate() { + Long zoneId = 1L; + VMTemplateVO systemVmTemplate1 = mock(VMTemplateVO.class); + Mockito.when(systemVmTemplate1.getArch()).thenReturn(CPU.CPUArch.x86); + VMTemplateVO systemVmTemplate2 = mock(VMTemplateVO.class); + Mockito.when(systemVmTemplate2.getArch()).thenReturn(CPU.CPUArch.x86); + VMTemplateVO systemVmTemplate3 = mock(VMTemplateVO.class); + Mockito.when(systemVmTemplate3.getArch()).thenReturn(CPU.CPUArch.arm64); + Mockito.when(systemVmTemplate3.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + List templates = Arrays.asList(systemVmTemplate1, systemVmTemplate2, systemVmTemplate3); + Mockito.when(hostDao.listDistinctHypervisorTypes(zoneId)).thenReturn(Arrays.asList(Hypervisor.HypervisorType.KVM)); + SearchBuilder sb = mock(SearchBuilder.class); + templateDao.readySystemTemplateSearch = sb; + when(sb.create()).thenReturn(mock(SearchCriteria.class)); + doReturn(templates).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); + VMTemplateVO readyTemplate = templateDao.findSystemVMReadyTemplate(zoneId, Hypervisor.HypervisorType.KVM, CPU.CPUArch.arm64.getType()); + Assert.assertEquals(CPU.CPUArch.arm64, readyTemplate.getArch()); + } } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java index 27656fd33b0..6573a5565f3 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java @@ -192,7 +192,7 @@ public class SystemVmTemplateRegistrationTest { public void testValidateTemplateFile_fileNotFound() { SystemVmTemplateRegistration.MetadataTemplateDetails details = new 
SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, - "name", "file", "url", "checksum", CPU.CPUArch.amd64); + "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( details.getHypervisorType(), details.getArch()), details); doReturn(null).when(systemVmTemplateRegistration).getTemplateFile(details); @@ -209,7 +209,7 @@ public class SystemVmTemplateRegistrationTest { public void testValidateTemplateFile_checksumMismatch() { SystemVmTemplateRegistration.MetadataTemplateDetails details = new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, - "name", "file", "url", "checksum", CPU.CPUArch.amd64); + "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); File dummyFile = new File("dummy.txt"); SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( details.getHypervisorType(), details.getArch()), details); @@ -228,7 +228,7 @@ public class SystemVmTemplateRegistrationTest { public void testValidateTemplateFile_success() { SystemVmTemplateRegistration.MetadataTemplateDetails details = new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, - "name", "file", "url", "checksum", CPU.CPUArch.amd64); + "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); File dummyFile = new File("dummy.txt"); SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( details.getHypervisorType(), details.getArch()), details); @@ -285,7 +285,7 @@ public class SystemVmTemplateRegistrationTest { SystemVmTemplateRegistration.MetadataTemplateDetails details = new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, "name", "nonexistent.qcow2", "http://example.com/file.qcow2", - "", CPU.CPUArch.arm64); + "", CPU.CPUArch.arm64, "guestos"); try (MockedStatic 
filesMock = Mockito.mockStatic(Files.class); MockedStatic httpMock = Mockito.mockStatic(HttpUtils.class)) { filesMock.when(() -> Files.isWritable(any(Path.class))).thenReturn(true); @@ -301,7 +301,7 @@ public class SystemVmTemplateRegistrationTest { SystemVmTemplateRegistration.MetadataTemplateDetails details = new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, "name", "file.qcow2", "http://example.com/file.qcow2", - "", CPU.CPUArch.arm64); + "", CPU.CPUArch.arm64, "guestos"); try (MockedStatic filesMock = Mockito.mockStatic(Files.class); MockedStatic httpMock = Mockito.mockStatic(HttpUtils.class)) { filesMock.when(() -> Files.isWritable(any(Path.class))).thenReturn(false); diff --git a/engine/schema/src/test/java/org/apache/cloudstack/backup/dao/BackupDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/backup/dao/BackupDaoImplTest.java new file mode 100644 index 00000000000..b12cfb4c14c --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/backup/dao/BackupDaoImplTest.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.backup.BackupVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class BackupDaoImplTest { + @Spy + @InjectMocks + private BackupDaoImpl backupDao; + + @Mock + BackupDetailsDao backupDetailsDao; + + @Test + public void testLoadDetails() { + Long backupId = 1L; + BackupVO backup = new BackupVO(); + ReflectionTestUtils.setField(backup, "id", backupId); + Map details = new HashMap<>(); + details.put("key1", "value1"); + details.put("key2", "value2"); + + Mockito.when(backupDetailsDao.listDetailsKeyPairs(backupId)).thenReturn(details); + + backupDao.loadDetails(backup); + + Assert.assertEquals(details, backup.getDetails()); + Mockito.verify(backupDetailsDao).listDetailsKeyPairs(backupId); + } + + @Test + public void testSaveDetails() { + Long backupId = 1L; + BackupVO backup = new BackupVO(); + ReflectionTestUtils.setField(backup, "id", backupId); + Map details = new HashMap<>(); + details.put("key1", "value1"); + details.put("key2", "value2"); + backup.setDetails(details); + + backupDao.saveDetails(backup); + + Mockito.verify(backupDetailsDao).saveDetails(Mockito.anyList()); + } +} diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index bed51a48a8f..d10b8668b12 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -42,6 +42,15 @@ function getGenericName() { fi } +function getGuestOS() { + hypervisor=$(echo "$1" | tr "[:upper:]" "[:lower:]") + if [[ "$hypervisor" == "vmware" || "$hypervisor" == "xenserver" ]]; then + echo "Other Linux (64-bit)" + else + echo "Debian GNU/Linux 12 (64-bit)" + 
fi +} + function getChecksum() { local fileData="$1" local hvName=$2 @@ -60,13 +69,14 @@ function createMetadataFile() { section="${template%%:*}" sectionHv="${section%%-*}" hvName=$(getGenericName $sectionHv) + guestos=$(getGuestOS $sectionHv) downloadurl="${template#*:}" arch=$(echo ${downloadurl#*"/systemvmtemplate-$VERSION-"} | cut -d'-' -f 1) templatename="systemvm-${sectionHv%.*}-${VERSION}-${arch}" checksum=$(getChecksum "$fileData" "$VERSION-${arch}-$hvName") filename=$(echo ${downloadurl##*'/'}) - echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\n" >> $METADATAFILE + echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\nguestos = $guestos\n" >> $METADATAFILE done } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java index 04cca2e8f92..d9d028d4d08 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java @@ -23,6 +23,7 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; @@ -46,6 +47,9 @@ public class CephSnapshotStrategy extends StorageSystemSnapshotStrategy { 
@Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { + if (SnapshotOperation.COPY.equals(op)) { + return StrategyPriority.CANT_HANDLE; + } long volumeId = snapshot.getVolumeId(); VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); boolean baseVolumeExists = volumeVO.getRemoved() == null; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index aedc2a12d0f..c1981941ac0 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -627,9 +627,14 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { + if (SnapshotOperation.COPY.equals(op)) { + return StrategyPriority.CANT_HANDLE; + } + if (SnapshotOperation.TAKE.equals(op)) { return validateVmSnapshot(snapshot); } + if (SnapshotOperation.REVERT.equals(op)) { long volumeId = snapshot.getVolumeId(); VolumeVO volumeVO = volumeDao.findById(volumeId); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java index 0d48cb944ae..c1e38fc9251 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java @@ -22,6 +22,7 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import 
org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; @@ -44,6 +45,9 @@ public class ScaleIOSnapshotStrategy extends StorageSystemSnapshotStrategy { @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { + if (SnapshotOperation.COPY.equals(op)) { + return StrategyPriority.CANT_HANDLE; + } long volumeId = snapshot.getVolumeId(); VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); boolean baseVolumeExists = volumeVO.getRemoved() == null; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 567df1262f6..10740289c8f 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -46,6 +46,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.StorageAction; import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; @@ -899,4 
+900,35 @@ public class SnapshotServiceImpl implements SnapshotService { ep.sendMessageAsync(cmd, caller); return future; } + + public AsyncCallFuture copySnapshot(SnapshotInfo sourceSnapshot, SnapshotInfo destSnapshot, SnapshotStrategy strategy) { + try { + if (destSnapshot.getStatus() == ObjectInDataStoreStateMachine.State.Allocated) { + destSnapshot.processEvent(Event.CreateOnlyRequested); + } else if (sourceSnapshot.getStatus() == ObjectInDataStoreStateMachine.State.Ready) { + destSnapshot.processEvent(Event.CopyRequested); + } else { + logger.info(String.format("Cannot copy snapshot to another storage in different zone. It's not in the right state %s", sourceSnapshot.getStatus())); + sourceSnapshot.processEvent(Event.OperationFailed); + throw new CloudRuntimeException(String.format("Cannot copy snapshot to another storage in different zone. It's not in the right state %s", sourceSnapshot.getStatus())); + } + } catch (Exception e) { + logger.debug("Failed to change snapshot state: " + e.toString()); + sourceSnapshot.processEvent(Event.OperationFailed); + throw new CloudRuntimeException(e); + } + + AsyncCallFuture future = new AsyncCallFuture(); + try { + CopySnapshotContext context = new CopySnapshotContext<>(null, sourceSnapshot, destSnapshot, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().copySnapshotZoneAsyncCallback(null, null)).setContext(context); + strategy.copySnapshot(sourceSnapshot, destSnapshot, caller); + } catch (Exception e) { + logger.debug("Failed to take snapshot: " + destSnapshot.getId(), e); + destSnapshot.processEvent(Event.OperationFailed); + throw new CloudRuntimeException("Failed to copy snapshot" + destSnapshot.getId()); + } + return future; + } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java 
b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index 9838e41f8f6..8b90e58124a 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -38,6 +38,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -912,7 +913,9 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { Snapshot.LocationType locationType = snapshot.getLocationType(); - + if (SnapshotOperation.COPY.equals(op)) { + return StrategyPriority.CANT_HANDLE; + } // If the snapshot exists on Secondary Storage, we can't delete it. 
if (SnapshotOperation.DELETE.equals(op)) { if (Snapshot.LocationType.SECONDARY.equals(locationType)) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/datastore/ObjectStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/datastore/ObjectStoreHelper.java index c58d801e40e..a2275576bbe 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/datastore/ObjectStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/datastore/ObjectStoreHelper.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.storage.object.datastore; +import com.cloud.configuration.Resource; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.ObjectStoreDetailVO; @@ -44,6 +45,7 @@ public class ObjectStoreHelper { store.setUuid(UUID.randomUUID().toString()); store.setUrl((String)params.get("url")); store.setName((String)params.get("name")); + store.setTotalSize((Long)params.get("size") * Resource.ResourceType.bytesToGiB); store = ObjectStoreDao.persist(store); diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java index 780a09b883e..7c7e2605e74 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLB.java @@ -48,13 +48,14 @@ public interface IndirectAgentLB { List getManagementServerList(Long hostId, Long dcId, List orderedHostIdList, String lbAlgorithm); /** - * Compares received management server list against expected list for a host in a zone. + * Compares received management server list against expected list for a host in a zone and LB algorithm. 
* @param hostId host id * @param dcId zone id * @param receivedMSHosts received management server list - * @return true if mgmtHosts is up to date, false if not + * @param lbAlgorithm received LB algorithm + * @return true if mgmtHosts and LB algorithm are up to date, false if not */ - boolean compareManagementServerList(Long hostId, Long dcId, List receivedMSHosts, String lbAlgorithm); + boolean compareManagementServerListAndLBAlgorithm(Long hostId, Long dcId, List receivedMSHosts, String lbAlgorithm); /** * Returns the configure LB algorithm diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index 995f758033a..b1f49ae5a08 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -102,7 +102,7 @@ The Apache CloudStack files shared between agent and management server Summary: CloudStack Agent for KVM hypervisors Requires: (openssh-clients or openssh) Requires: java-17-openjdk -Requires: tzdata-java +Requires: (tzdata-java or timezone-java) Requires: %{name}-common = %{_ver} Requires: libvirt Requires: libvirt-daemon-driver-storage-rbd @@ -143,7 +143,7 @@ The CloudStack baremetal agent %package usage Summary: CloudStack Usage calculation server Requires: java-17-openjdk -Requires: tzdata-java +Requires: (tzdata-java or timezone-java) Group: System Environment/Libraries %description usage The CloudStack usage calculation service diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index 2d43f9e8d3c..05b4b52ccb8 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -16,16 +16,23 @@ // under the License. 
package org.apache.cloudstack.backup; +import java.util.ArrayList; import java.util.Arrays; import java.util.Date; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import javax.inject.Inject; -import com.cloud.configuration.Resource; +import com.cloud.offering.DiskOffering; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; + import org.apache.cloudstack.backup.dao.BackupDao; import com.cloud.utils.Pair; @@ -35,12 +42,16 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; public class DummyBackupProvider extends AdapterBase implements BackupProvider { - - @Inject private BackupDao backupDao; @Inject private VolumeDao volumeDao; + @Inject + private BackupManager backupManager; + @Inject + private StoragePoolHostDao storagePoolHostDao; + @Inject + private DiskOfferingDao diskOfferingDao; @Override public String getName() { @@ -80,24 +91,39 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - logger.debug("Restoring volume {} from backup {} on the Dummy Backup Provider", volumeUuid, backup); - throw new CloudRuntimeException("Dummy plugin does not support this feature"); + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final StoragePoolHostVO dataStore = storagePoolHostDao.findByUuid(dataStoreUuid); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); + + logger.debug("Restoring volume {} from backup {} on the 
Dummy Backup Provider", backupVolumeInfo, backup); + + VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), + backup.getDomainId(), backup.getAccountId(), 0, null, + backup.getSize(), null, null, null); + String volumeUUID = UUID.randomUUID().toString(); + String volumeName = volume != null ? volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RestoredVol-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); + restoredVolume.setUpdated(new Date()); + restoredVolume.setUuid(volumeUUID); + restoredVolume.setRemoved(null); + restoredVolume.setDisplayVolume(true); + restoredVolume.setPoolId(dataStore.getPoolId()); + restoredVolume.setPath(restoredVolume.getUuid()); + restoredVolume.setState(Volume.State.Copying); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); + + try { + volumeDao.persist(restoredVolume); + } catch (Exception e) { + throw new CloudRuntimeException("Unable to create restored volume due to: " + e); + } + return new Pair<>(true, volumeUUID); } - @Override - public Map getBackupMetrics(Long zoneId, List vms) { - final Map metrics = new HashMap<>(); - final Backup.Metric metric = new Backup.Metric(1000L, 100L); - if (vms == null || vms.isEmpty()) { - return metrics; - } - for (VirtualMachine vm : vms) { - if (vm != null) { - metrics.put(vm, metric); - } - } - return metrics; + public void syncBackupMetrics(Long zoneId) { } @Override @@ -106,7 +132,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { } @Override - public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { return null; } @@ -118,11 +144,11 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Override 
public boolean willDeleteBackupsOnOfferingRemoval() { - return true; + return false; } @Override - public Pair takeBackup(VirtualMachine vm) { + public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { logger.debug("Starting backup for VM {} on Dummy provider", vm); BackupVO backup = new BackupVO(); @@ -130,20 +156,50 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { backup.setExternalId("dummy-external-id"); backup.setType("FULL"); backup.setDate(new Date()); - backup.setSize(1024000L); - backup.setProtectedSize(Resource.ResourceType.bytesToGiB); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + backup.setSize(virtualSize); + backup.setProtectedSize(virtualSize); backup.setStatus(Backup.Status.BackedUp); backup.setBackupOfferingId(vm.getBackupOfferingId()); backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); + backup.setName(backupManager.getBackupNameFromVM(vm)); + List volumes = new ArrayList<>(volumeDao.findByInstance(vm.getId())); + backup.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(volumes)); + Map details = backupManager.getBackupDetailsFromVM(vm); + backup.setDetails(details); + backup = backupDao.persist(backup); return new Pair<>(true, backup); } @Override public boolean deleteBackup(Backup backup, boolean forced) { + return backupDao.remove(backup.getId()); + } + + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + return new Pair<>(8L * 1024 * 1024 * 1024, 10L * 1024 * 1024 * 1024); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + } + + @Override + public boolean 
restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { return true; } } diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index f73d82d87c4..e5f98ad291b 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -24,12 +24,15 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; +import com.cloud.resource.ResourceManager; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeApiServiceImpl; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Pair; @@ -50,7 +53,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -63,7 +65,6 @@ import java.util.Date; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.HashMap; import java.util.Objects; import java.util.Optional; import java.util.UUID; @@ -78,6 +79,9 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co @Inject private BackupRepositoryDao backupRepositoryDao; + @Inject + private BackupRepositoryService backupRepositoryService; + 
@Inject private HostDao hostDao; @@ -102,6 +106,15 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co @Inject private VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Inject + BackupManager backupManager; + + @Inject + ResourceManager resourceManager; + + @Inject + private DiskOfferingDao diskOfferingDao; + protected Host getLastVMHypervisorHost(VirtualMachine vm) { Long hostId = vm.getLastHostId(); if (hostId == null) { @@ -122,13 +135,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } } // Try to find any Host in the zone - for (final HostVO hostInZone : hostDao.listByDataCenterIdAndHypervisorType(host.getDataCenterId(), Hypervisor.HypervisorType.KVM)) { - if (hostInZone.getStatus() == Status.Up) { - LOG.debug("Found Host {} in zone {}", hostInZone, host.getDataCenterId()); - return hostInZone; - } - } - return null; + return resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, host.getDataCenterId()); } protected Host getVMHypervisorHost(VirtualMachine vm) { @@ -150,7 +157,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } @Override - public Pair takeBackup(final VirtualMachine vm) { + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM) { final Host host = getVMHypervisorHost(vm); final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); @@ -167,6 +174,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co command.setBackupRepoType(backupRepository.getType()); command.setBackupRepoAddress(backupRepository.getAddress()); command.setMountOptions(backupRepository.getMountOptions()); + command.setQuiesce(quiesceVM); if (VirtualMachine.State.Stopped.equals(vm.getState())) { List vmVolumes = volumeDao.findByInstance(vm.getId()); @@ -179,8 +187,14 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co try { 
answer = (BackupAnswer) agentManager.send(host.getId(), command); } catch (AgentUnavailableException e) { + logger.error("Unable to contact backend control plane to initiate backup for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { + logger.error("Operation to initiate backup timed out for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); } @@ -188,15 +202,23 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co backupVO.setDate(new Date()); backupVO.setSize(answer.getSize()); backupVO.setStatus(Backup.Status.BackedUp); - backupVO.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); + List volumes = new ArrayList<>(volumeDao.findByInstance(vm.getId())); + backupVO.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(volumes)); if (backupDao.update(backupVO.getId(), backupVO)) { return new Pair<>(true, backupVO); } else { throw new CloudRuntimeException("Failed to update backup"); } } else { - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); + logger.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), answer != null ? answer.getDetails() : "No answer received"); + if (answer.getNeedsCleanup()) { + logger.error("Backup cleanup failed for VM {}. 
Leaving the backup in Error state.", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Error); + backupDao.update(backupVO.getId(), backupVO); + } else { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } return new Pair<>(false, null); } } @@ -219,19 +241,35 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + Map details = backupManager.getBackupDetailsFromVM(vm); + backup.setDetails(details); + return backupDao.persist(backup); } + @Override + public boolean restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + return restoreVMBackup(vm, backup); + } + @Override public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - List backedVolumes = backup.getBackedUpVolumes(); - List volumes = backedVolumes.stream() - .map(volume -> volumeDao.findByUuid(volume.getUuid())) - .sorted((v1, v2) -> Long.compare(v1.getDeviceId(), v2.getDeviceId())) + return restoreVMBackup(vm, backup); + } + + private boolean restoreVMBackup(VirtualMachine vm, Backup backup) { + List backedVolumesUUIDs = backup.getBackedUpVolumes().stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(Backup.VolumeInfo::getUuid) + .collect(Collectors.toList()); + + List restoreVolumes = volumeDao.findByInstance(vm.getId()).stream() + .sorted(Comparator.comparingLong(VolumeVO::getDeviceId)) .collect(Collectors.toList()); LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); - BackupRepository backupRepository = getBackupRepository(vm, backup); + BackupRepository backupRepository = getBackupRepository(backup); final Host host = getLastVMHypervisorHost(vm); RestoreBackupCommand restoreCommand = new RestoreBackupCommand(); @@ -240,7 +278,8 @@ public class 
NASBackupProvider extends AdapterBase implements BackupProvider, Co restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); restoreCommand.setMountOptions(backupRepository.getMountOptions()); restoreCommand.setVmName(vm.getName()); - restoreCommand.setVolumePaths(getVolumePaths(volumes)); + restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); + restoreCommand.setRestoreVolumePaths(getVolumePaths(restoreVolumes)); restoreCommand.setVmExists(vm.getRemoved() == null); restoreCommand.setVmState(vm.getState()); @@ -250,7 +289,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } catch (AgentUnavailableException e) { throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { - throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + throw new CloudRuntimeException("Operation to restore backup timed out, please try again"); } return answer.getResult(); } @@ -276,24 +315,22 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { - final VolumeVO volume = volumeDao.findByUuid(volumeUuid); - final VirtualMachine backupSourceVm = vmInstanceDao.findById(backup.getVmId()); + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); final StoragePoolHostVO dataStore = storagePoolHostDao.findByUuid(dataStoreUuid); final HostVO hostVO = hostDao.findByIp(hostIp); - Optional matchingVolume = getBackedUpVolumeInfo(backupSourceVm.getBackupVolumeList(), volumeUuid); - Long backedUpVolumeSize = 
matchingVolume.isPresent() ? matchingVolume.get().getSize() : 0L; - - LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); - BackupRepository backupRepository = getBackupRepository(backupSourceVm, backup); + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", backupVolumeInfo, backup); + BackupRepository backupRepository = getBackupRepository(backup); VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), backup.getDomainId(), backup.getAccountId(), 0, null, backup.getSize(), null, null, null); String volumeUUID = UUID.randomUUID().toString(); - restoredVolume.setName("RestoredVol-"+volume.getName()); - restoredVolume.setProvisioningType(volume.getProvisioningType()); + String volumeName = volume != null ? volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RestoredVol-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); restoredVolume.setUpdated(new Date()); restoredVolume.setUuid(volumeUUID); restoredVolume.setRemoved(null); @@ -302,20 +339,20 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co restoredVolume.setPath(restoredVolume.getUuid()); restoredVolume.setState(Volume.State.Copying); restoredVolume.setFormat(Storage.ImageFormat.QCOW2); - restoredVolume.setSize(backedUpVolumeSize); - restoredVolume.setDiskOfferingId(volume.getDiskOfferingId()); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); RestoreBackupCommand restoreCommand = new RestoreBackupCommand(); restoreCommand.setBackupPath(backup.getExternalId()); restoreCommand.setBackupRepoType(backupRepository.getType()); restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); restoreCommand.setVmName(vmNameAndState.first()); - restoreCommand.setVolumePaths(Collections.singletonList(String.format("%s/%s", dataStore.getLocalPath(), volumeUUID))); - 
restoreCommand.setDiskType(volume.getVolumeType().name().toLowerCase(Locale.ROOT)); + restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", dataStore.getLocalPath(), volumeUUID))); + restoreCommand.setDiskType(backupVolumeInfo.getType().name().toLowerCase(Locale.ROOT)); restoreCommand.setMountOptions(backupRepository.getMountOptions()); restoreCommand.setVmExists(null); restoreCommand.setVmState(vmNameAndState.second()); - restoreCommand.setRestoreVolumeUUID(volumeUuid); + restoreCommand.setRestoreVolumeUUID(backupVolumeInfo.getUuid()); BackupAnswer answer; try { @@ -323,7 +360,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } catch (AgentUnavailableException e) { throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { - throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + throw new CloudRuntimeException("Operation to restore backed up volume timed out, please try again"); } if (answer.getResult()) { @@ -337,15 +374,10 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return new Pair<>(answer.getResult(), answer.getDetails()); } - private BackupRepository getBackupRepository(VirtualMachine vm, Backup backup) { - BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); - final String errorMessage = "No valid backup repository found for the VM, please check the attached backup offering"; + private BackupRepository getBackupRepository(Backup backup) { + BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); if (backupRepository == null) { - logger.warn(errorMessage + "Re-attempting with the backup offering associated with the backup"); - } - backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); - if 
(backupRepository == null) { - throw new CloudRuntimeException(errorMessage); + throw new CloudRuntimeException(String.format("No valid backup repository found for the backup %s, please check the attached backup offering", backup.getUuid())); } return backupRepository; } @@ -363,8 +395,13 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); } - final VirtualMachine vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); - final Host host = getLastVMHypervisorHost(vm); + final Host host; + final VirtualMachine vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + if (vm != null) { + host = getLastVMHypervisorHost(vm); + } else { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, backup.getZoneId()); + } DeleteBackupCommand command = new DeleteBackupCommand(backup.getExternalId(), backupRepository.getType(), backupRepository.getAddress(), backupRepository.getMountOptions()); @@ -375,7 +412,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } catch (AgentUnavailableException e) { throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { - throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + throw new CloudRuntimeException("Operation to delete backup timed out, please try again"); } if (answer != null && answer.getResult()) { @@ -386,30 +423,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return false; } - @Override - public Map getBackupMetrics(Long zoneId, List vms) { - final Map metrics = new HashMap<>(); - if (CollectionUtils.isEmpty(vms)) { - LOG.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); - return metrics; - } - - for (final 
VirtualMachine vm : vms) { - Long vmBackupSize = 0L; - Long vmBackupProtectedSize = 0L; - for (final Backup backup: backupDao.listByVmId(null, vm.getId())) { - if (Objects.nonNull(backup.getSize())) { - vmBackupSize += backup.getSize(); - } - if (Objects.nonNull(backup.getProtectedSize())) { - vmBackupProtectedSize += backup.getProtectedSize(); - } - } - Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug("Metrics for VM {} is [backup size: {}, data size: {}].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize()); - metrics.put(vm, vmBackupMetric); - } - return metrics; + public void syncBackupMetrics(Long zoneId) { } @Override @@ -418,7 +432,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co } @Override - public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { return null; } @@ -445,6 +459,45 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co return false; } + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + Long totalSize = 0L; + Long usedSize = 0L; + for (final BackupRepository repository : repositories) { + if (repository.getCapacityBytes() != null) { + totalSize += repository.getCapacityBytes(); + } + if (repository.getUsedBytes() != null) { + usedSize += repository.getUsedBytes(); + } + } + return new Pair<>(usedSize, totalSize); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + final Host host = 
resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + for (final BackupRepository repository : repositories) { + GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand(repository.getType(), repository.getAddress(), repository.getMountOptions()); + BackupStorageStatsAnswer answer; + try { + answer = (BackupStorageStatsAnswer) agentManager.send(host.getId(), command); + backupRepositoryDao.updateCapacity(repository, answer.getTotalSize(), answer.getUsedSize()); + } catch (AgentUnavailableException e) { + logger.warn("Unable to contact backend control plane to get backup stats for repository: {}", repository.getName()); + } catch (OperationTimedoutException e) { + logger.warn("Operation to get backup stats timed out for the repository: " + repository.getName()); + } + } + } + @Override public List listBackupOfferings(Long zoneId) { final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); diff --git a/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java b/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java new file mode 100644 index 00000000000..d6f29dc1aac --- /dev/null +++ b/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java @@ -0,0 +1,230 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(MockitoJUnitRunner.class) +public class NASBackupProviderTest { + @Spy + @InjectMocks + private NASBackupProvider nasBackupProvider; + + @Mock + private BackupDao backupDao; + + @Mock + private BackupRepositoryDao backupRepositoryDao; + + @Mock + private BackupOfferingDao backupOfferingDao; + + @Mock + private 
VMInstanceDao vmInstanceDao; + + @Mock + private AgentManager agentManager; + + @Mock + private VolumeDao volumeDao; + + @Mock + private HostDao hostDao; + + @Mock + private BackupManager backupManager; + + @Mock + private ResourceManager resourceManager; + + @Test + public void testDeleteBackup() throws OperationTimedoutException, AgentUnavailableException { + Long hostId = 1L; + BackupVO backup = new BackupVO(); + backup.setBackupOfferingId(1L); + backup.setVmId(1L); + backup.setExternalId("externalId"); + ReflectionTestUtils.setField(backup, "id", 1L); + + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L); + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + HostVO host = mock(HostVO.class); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(1L)).thenReturn(backupRepository); + Mockito.when(vmInstanceDao.findByIdIncludingRemoved(1L)).thenReturn(vm); + Mockito.when(agentManager.send(anyLong(), Mockito.any(DeleteBackupCommand.class))).thenReturn(new BackupAnswer(new DeleteBackupCommand(null, null, null, null), true, "details")); + Mockito.when(backupDao.remove(1L)).thenReturn(true); + + boolean result = nasBackupProvider.deleteBackup(backup, true); + Assert.assertTrue(result); + } + + @Test + public void testSyncBackupStorageStats() throws AgentUnavailableException, OperationTimedoutException { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L); + + HostVO host = mock(HostVO.class); + Mockito.when(resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(host); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + 
GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand("nfs", "address", "sync"); + BackupStorageStatsAnswer answer = new BackupStorageStatsAnswer(command, true, null); + answer.setTotalSize(100L); + answer.setUsedSize(50L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(GetBackupStorageStatsCommand.class))).thenReturn(answer); + + nasBackupProvider.syncBackupStorageStats(1L); + Mockito.verify(backupRepositoryDao, Mockito.times(1)).updateCapacity(backupRepository, 100L, 50L); + } + + @Test + public void testListBackupOfferings() { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L); + ReflectionTestUtils.setField(backupRepository, "uuid", "uuid"); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + + List result = nasBackupProvider.listBackupOfferings(1L); + Assert.assertEquals(1, result.size()); + Assert.assertEquals("test-repo", result.get(0).getName()); + Assert.assertEquals("uuid", result.get(0).getUuid()); + } + + @Test + public void testGetBackupStorageStats() { + BackupRepositoryVO backupRepository1 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1000L); + backupRepository1.setUsedBytes(500L); + + BackupRepositoryVO backupRepository2 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 2000L); + backupRepository2.setUsedBytes(600L); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")) + .thenReturn(List.of(backupRepository1, backupRepository2)); + + Pair result = nasBackupProvider.getBackupStorageStats(1L); + Assert.assertEquals(Long.valueOf(1100L), result.first()); + Assert.assertEquals(Long.valueOf(3000L), result.second()); + } + + @Test + public void takeBackupSuccessfully() throws AgentUnavailableException, OperationTimedoutException { + Long vmId = 1L; + Long hostId = 2L; + Long backupOfferingId = 3L; + Long 
accountId = 4L; + Long domainId = 5L; + Long zoneId = 6L; + Long backupId = 7L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getId()).thenReturn(vmId); + Mockito.when(vm.getHostId()).thenReturn(hostId); + Mockito.when(vm.getInstanceName()).thenReturn("test-vm"); + Mockito.when(vm.getBackupOfferingId()).thenReturn(backupOfferingId); + Mockito.when(vm.getAccountId()).thenReturn(accountId); + Mockito.when(vm.getDomainId()).thenReturn(domainId); + Mockito.when(vm.getDataCenterId()).thenReturn(zoneId); + Mockito.when(vm.getState()).thenReturn(VMInstanceVO.State.Running); + + BackupRepository backupRepository = mock(BackupRepository.class); + Mockito.when(backupRepository.getType()).thenReturn("nfs"); + Mockito.when(backupRepository.getAddress()).thenReturn("address"); + Mockito.when(backupRepository.getMountOptions()).thenReturn("sync"); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(backupOfferingId)).thenReturn(backupRepository); + + HostVO host = mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + + VolumeVO volume1 = mock(VolumeVO.class); + Mockito.when(volume1.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume1.getSize()).thenReturn(100L); + VolumeVO volume2 = mock(VolumeVO.class); + Mockito.when(volume2.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume2.getSize()).thenReturn(200L); + Mockito.when(volumeDao.findByInstance(vmId)).thenReturn(List.of(volume1, volume2)); + + BackupAnswer answer = mock(BackupAnswer.class); + Mockito.when(answer.getResult()).thenReturn(true); + Mockito.when(answer.getSize()).thenReturn(100L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(TakeBackupCommand.class))).thenReturn(answer); + + 
Mockito.when(backupDao.persist(Mockito.any(BackupVO.class))).thenAnswer(invocation -> invocation.getArgument(0)); + Mockito.when(backupDao.update(Mockito.anyLong(), Mockito.any(BackupVO.class))).thenReturn(true); + + Pair result = nasBackupProvider.takeBackup(vm, false); + + Assert.assertTrue(result.first()); + Assert.assertNotNull(result.second()); + BackupVO backup = (BackupVO) result.second(); + Assert.assertEquals(Optional.ofNullable(100L), Optional.ofNullable(backup.getSize())); + Assert.assertEquals(Backup.Status.BackedUp, backup.getStatus()); + Assert.assertEquals("FULL", backup.getType()); + Assert.assertEquals(Optional.of(300L), Optional.of(backup.getProtectedSize())); + Assert.assertEquals(Optional.of(backupOfferingId), Optional.of(backup.getBackupOfferingId())); + Assert.assertEquals(Optional.of(accountId), Optional.of(backup.getAccountId())); + Assert.assertEquals(Optional.of(domainId), Optional.of(backup.getDomainId())); + Assert.assertEquals(Optional.of(zoneId), Optional.of(backup.getZoneId())); + + Mockito.verify(backupDao).persist(Mockito.any(BackupVO.class)); + Mockito.verify(backupDao).update(Mockito.anyLong(), Mockito.any(BackupVO.class)); + Mockito.verify(agentManager).send(anyLong(), Mockito.any(TakeBackupCommand.class)); + } +} diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 504a551bb30..f39aedb55f2 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -21,21 +21,27 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; +import com.cloud.offering.ServiceOffering; +import 
com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.utils.script.Script; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SshHelper; -import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl; import org.apache.cloudstack.backup.networker.NetworkerClient; @@ -116,6 +122,18 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid @Inject private VMInstanceDao vmInstanceDao; + @Inject + private VMTemplateDao vmTemplateDao; + + @Inject + ServiceOfferingDao serviceOfferingDao; + + @Inject + private BackupManager backupManager; + + @Inject + private DiskOfferingDao diskOfferingDao; + private static String getUrlDomain(String url) throws URISyntaxException { URI uri; try { @@ -371,10 +389,10 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { String networkerServer; - VolumeVO volume = volumeDao.findByUuid(volumeUuid); - VMInstanceVO backupSourceVm = vmInstanceDao.findById(backup.getVmId()); + VolumeVO volume = 
volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); StoragePoolHostVO dataStore = storagePoolHostDao.findByUuid(dataStoreUuid); HostVO hostVO = hostDao.findByIp(hostIp); @@ -384,9 +402,8 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid final String SSID = networkerBackup.getShortId(); final String clusterName = networkerBackup.getClientHostname(); final String destinationNetworkerClient = hostVO.getName().split("\\.")[0]; - Long restoredVolumeDiskSize = 0L; - LOG.debug("Restoring volume {} with uuid {} from backup {} on the Networker Backup Provider", volume, volumeUuid, backup); + LOG.debug("Restoring volume {} with uuid {} from backup {} on the Networker Backup Provider", volume, backupVolumeInfo, backup); if ( SSID.isEmpty() ) { LOG.debug("There was an error retrieving the SSID for backup with id " + externalBackupId + " from EMC NEtworker"); @@ -401,18 +418,13 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); } - // Find volume size from backup vols - for ( Backup.VolumeInfo VMVolToRestore : backupSourceVm.getBackupVolumeList()) { - if (VMVolToRestore.getUuid().equals(volumeUuid)) - restoredVolumeDiskSize = (VMVolToRestore.getSize()); - } - VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), backup.getDomainId(), backup.getAccountId(), 0, null, backup.getSize(), null, null, null); - restoredVolume.setName("RV-"+volume.getName()); - restoredVolume.setProvisioningType(volume.getProvisioningType()); + String volumeName = volume != null ? 
volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RV-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); restoredVolume.setUpdated(new Date()); restoredVolume.setUuid(UUID.randomUUID().toString()); restoredVolume.setRemoved(null); @@ -420,8 +432,8 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid restoredVolume.setPoolId(volume.getPoolId()); restoredVolume.setPath(restoredVolume.getUuid()); restoredVolume.setState(Volume.State.Copying); - restoredVolume.setSize(restoredVolumeDiskSize); - restoredVolume.setDiskOfferingId(volume.getDiskOfferingId()); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); try { volumeDao.persist(restoredVolume); @@ -461,7 +473,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid } @Override - public Pair takeBackup(VirtualMachine vm) { + public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { String networkerServer; String clusterName; @@ -511,7 +523,10 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid LOG.info("EMC Networker finished backup job for vm {} with saveset Time: {}", vm, saveTime); BackupVO backup = getClient(vm.getDataCenterId()).registerBackupForVm(vm, backupJobStart, saveTime); if (backup != null) { - backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); + List volumes = new ArrayList<>(volumeDao.findByInstance(vm.getId())); + backup.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(volumes)); + Map details = backupManager.getBackupDetailsFromVM(vm); + backup.setDetails(details); backupDao.persist(backup); return new Pair<>(true, backup); } else { @@ -536,35 +551,11 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid return false; } - @Override - public Map getBackupMetrics(Long zoneId, List vms) { 
- final Map metrics = new HashMap<>(); - long vmBackupSize=0L; - long vmBackupProtectedSize=0L; - - if (CollectionUtils.isEmpty(vms)) { - LOG.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); - return metrics; - } - - for (final VirtualMachine vm : vms) { - for ( Backup.VolumeInfo thisVMVol : vm.getBackupVolumeList()) { - vmBackupProtectedSize += (thisVMVol.getSize() / 1024L / 1024L); - } - final ArrayList vmBackups = getClient(zoneId).getBackupsForVm(vm); - for ( String vmBackup : vmBackups ) { - NetworkerBackup vmNwBackup = getClient(zoneId).getNetworkerBackupInfo(vmBackup); - vmBackupSize += vmNwBackup.getSize().getValue() / 1024L; - } - Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); - LOG.debug(String.format("Metrics for VM [%s] is [backup size: %s, data size: %s].", vm, vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); - metrics.put(vm, vmBackupMetric); - } - return metrics; + public void syncBackupMetrics(Long zoneId) { } @Override - public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { // Technically an administrator can manually create a backup for a VM by utilizing the KVM scripts // with the proper parameters. So we will register any backups taken on the Networker side from // outside Cloudstack. 
If ever Networker will support KVM out of the box this functionality also will @@ -597,6 +588,16 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + + HashMap details = new HashMap<>(); + details.put(ApiConstants.HYPERVISOR, vm.getHypervisorType().toString()); + ServiceOffering serviceOffering = serviceOfferingDao.findById(vm.getServiceOfferingId()); + details.put(ApiConstants.SERVICE_OFFERING_ID, serviceOffering.getUuid()); + VirtualMachineTemplate template = vmTemplateDao.findById(vm.getTemplateId()); + details.put(ApiConstants.TEMPLATE_ID, template.getUuid()); + backup.setDetails(details); + backupDao.persist(backup); return backup; } @@ -611,6 +612,25 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid return backupIds.stream().map(id -> new Backup.RestorePoint(id, null, null)).collect(Collectors.toList()); } + @Override + public boolean supportsInstanceFromBackup() { + return false; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + return new Pair<>(0L, 0L); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + } + @Override public boolean willDeleteBackupsOnOfferingRemoval() { return false; } + + @Override + public boolean restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + return true; + } } diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java index 36bfd456475..271fec78188 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java 
@@ -24,6 +24,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupVO; import org.apache.cloudstack.backup.networker.api.NetworkerBackup; @@ -45,6 +46,7 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import javax.inject.Inject; import javax.net.ssl.SSLContext; import javax.net.ssl.X509TrustManager; import java.io.IOException; @@ -65,6 +67,9 @@ import java.util.List; import static org.apache.cloudstack.backup.NetworkerBackupProvider.BACKUP_IDENTIFIER; public class NetworkerClient { + @Inject + BackupManager backupManager; + private static final Logger LOG = LogManager.getLogger(NetworkerClient.class); private final URI apiURI; private final String apiName; @@ -267,6 +272,8 @@ public class NetworkerClient { backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + return backup; } catch (final IOException e) { LOG.error("Failed to register backup from EMC Networker due to:", e); diff --git a/plugins/backup/networker/src/test/java/org/apache/backup/networker/NetworkerClientTest.java b/plugins/backup/networker/src/test/java/org/apache/backup/networker/NetworkerClientTest.java index 96f8a7a7977..f8cff7ba196 100644 --- a/plugins/backup/networker/src/test/java/org/apache/backup/networker/NetworkerClientTest.java +++ b/plugins/backup/networker/src/test/java/org/apache/backup/networker/NetworkerClientTest.java @@ -30,8 +30,11 @@ import java.text.SimpleDateFormat; import java.time.Instant; import java.util.Date; import java.util.List; + import 
com.cloud.vm.VMInstanceVO; import com.github.tomakehurst.wiremock.client.VerificationException; + +import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupVO; import org.apache.cloudstack.backup.networker.NetworkerClient; @@ -40,6 +43,9 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; +import org.mockito.Mockito; +import org.springframework.test.util.ReflectionTestUtils; + import com.github.tomakehurst.wiremock.client.BasicCredentials; import com.github.tomakehurst.wiremock.junit.WireMockRule; @@ -58,6 +64,7 @@ public class NetworkerClientTest { .willReturn(aResponse() .withStatus(200))); client = new NetworkerClient(url, adminUsername, adminPassword, false, 60); + ReflectionTestUtils.setField(client, "backupManager", Mockito.mock(BackupManager.class)); } @Test diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 93278e80851..c81c5d34ea2 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -24,12 +24,9 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; import javax.inject.Inject; -import org.apache.cloudstack.backup.Backup.Metric; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.veeam.VeeamClient; import org.apache.cloudstack.backup.veeam.api.Job; @@ -45,6 +42,7 @@ import com.cloud.dc.VmwareDatacenter; import com.cloud.hypervisor.vmware.VmwareDatacenterZoneMap; import com.cloud.dc.dao.VmwareDatacenterDao; import com.cloud.hypervisor.vmware.dao.VmwareDatacenterZoneMapDao; +import 
com.cloud.storage.Volume; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; @@ -102,8 +100,12 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, @Inject private VirtualMachineManager virtualMachineManager; @Inject + private BackupManager backupManager; + @Inject private VolumeDao volumeDao; + private Map backupFilesMetricsMap = new HashMap<>(); + protected VeeamClient getClient(final Long zoneId) { try { return new VeeamClient(VeeamUrl.valueIn(zoneId), VeeamVersion.valueIn(zoneId), VeeamUsername.valueIn(zoneId), VeeamPassword.valueIn(zoneId), @@ -213,11 +215,11 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, @Override public boolean willDeleteBackupsOnOfferingRemoval() { - return true; + return false; } @Override - public Pair takeBackup(final VirtualMachine vm) { + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM) { final VeeamClient client = getClient(vm.getDataCenterId()); Boolean result = client.startBackupJob(vm.getBackupExternalId()); return new Pair<>(result, null); @@ -244,7 +246,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, client.syncBackupRepository(); - List allBackups = backupDao.listByVmId(backup.getZoneId(), backup.getVmId()); + List allBackups = backupDao.listByVmIdAndOffering(backup.getZoneId(), backup.getVmId(), backup.getBackupOfferingId()); for (Backup b : allBackups) { if (b.getId() != backup.getId()) { backupDao.remove(b.getId()); @@ -289,62 +291,71 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { final Long zoneId = backup.getZoneId(); final 
String restorePointId = backup.getExternalId(); - return getClient(zoneId).restoreVMToDifferentLocation(restorePointId, hostIp, dataStoreUuid); + return getClient(zoneId).restoreVMToDifferentLocation(restorePointId, null, hostIp, dataStoreUuid); } @Override - public Map getBackupMetrics(final Long zoneId, final List vms) { - final Map metrics = new HashMap<>(); - if (CollectionUtils.isEmpty(vms)) { - logger.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); - return metrics; - } - - List vmUuids = vms.stream().filter(Objects::nonNull).map(VirtualMachine::getUuid).collect(Collectors.toList()); - logger.debug(String.format("Get Backup Metrics for VMs: [%s].", String.join(", ", vmUuids))); - - final Map backendMetrics = getClient(zoneId).getBackupMetrics(); - for (final VirtualMachine vm : vms) { - if (vm == null || !backendMetrics.containsKey(vm.getUuid())) { - continue; - } - - Metric metric = backendMetrics.get(vm.getUuid()); - logger.debug("Metrics for VM [{}] is [backup size: {}, data size: {}].", vm, - metric.getBackupSize(), metric.getDataSize()); - metrics.put(vm, metric); - } - return metrics; + public void syncBackupMetrics(Long zoneId) { + backupFilesMetricsMap = getClient(zoneId).getBackupMetrics(); } @Override - public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm, Backup.Metric metric) { + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { BackupVO backup = new BackupVO(); backup.setVmId(vm.getId()); backup.setExternalId(restorePoint.getId()); backup.setType(restorePoint.getType()); backup.setDate(restorePoint.getCreated()); backup.setStatus(Backup.Status.BackedUp); - if (metric != null) { - backup.setSize(metric.getBackupSize()); - backup.setProtectedSize(metric.getDataSize()); + if (restorePoint.getBackupSize() != null) { + backup.setSize(restorePoint.getBackupSize()); + } + if (restorePoint.getDataSize() != null) { + 
backup.setProtectedSize(restorePoint.getDataSize()); } backup.setBackupOfferingId(vm.getBackupOfferingId()); backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); + backup.setName(backupManager.getBackupNameFromVM(vm)); + List volumes = new ArrayList<>(volumeDao.findByInstance(vm.getId())); + backup.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(volumes)); + Map details = backupManager.getBackupDetailsFromVM(vm); + backup.setDetails(details); backupDao.persist(backup); return backup; } @Override public List listRestorePoints(VirtualMachine vm) { + final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); String backupName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); - return getClient(vm.getDataCenterId()).listRestorePoints(backupName, vm.getInstanceName()); + return getClient(vm.getDataCenterId()).listRestorePoints(backupName, vmwareDC.getVcenterHost(), vm.getInstanceName(), backupFilesMetricsMap); + } + + @Override + public boolean restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + final Long zoneId = backup.getZoneId(); + final String restorePointId = backup.getExternalId(); + final String restoreLocation = vm.getInstanceName(); + return getClient(zoneId).restoreVMToDifferentLocation(restorePointId, restoreLocation, hostIp, dataStoreUuid).first(); + } + + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + return new Pair<>(0L, 0L); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { } @Override diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index 
9accc0714de..e2df854f16d 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -17,8 +17,6 @@ package org.apache.cloudstack.backup.veeam; -import static org.apache.cloudstack.backup.VeeamBackupProvider.BACKUP_IDENTIFIER; - import java.io.IOException; import java.io.InputStream; import java.net.SocketTimeoutException; @@ -109,7 +107,6 @@ public class VeeamClient { private static final String BACKUP_FILE_REFERENCE = "BackupFileReference"; private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); - private String veeamServerIp; private final Integer veeamServerVersion; private String veeamServerUsername; @@ -659,9 +656,7 @@ public class VeeamClient { public boolean deleteJobAndBackup(final String jobName) { Pair result = executePowerShellCommands(Arrays.asList( String.format("$job = Get-VBRJob -Name '%s'", jobName), - "if ($job) { Remove-VBRJob -Job $job -Confirm:$false }", - String.format("$backup = Get-VBRBackup -Name '%s'", jobName), - "if ($backup) { Remove-VBRBackup -Backup $backup -FromDisk -Confirm:$false }" + "if ($job) { Remove-VBRJob -Job $job -Confirm:$false }" )); return result != null && result.first() && !result.second().contains(FAILED_TO_DELETE); } @@ -721,40 +716,19 @@ public class VeeamClient { throw new CloudRuntimeException("Could not get backup metrics via Veeam B&R API"); } for (final BackupFile backupFile : backupFiles.getBackupFiles()) { - String vmUuid = null; - String backupName = null; - List links = backupFile.getLink(); - for (Link link : links) { - if (BACKUP_REFERENCE.equals(link.getType())) { - backupName = link.getName(); - break; - } - } - if (backupName != null && backupName.contains(BACKUP_IDENTIFIER)) { - final String[] names = backupName.split(BACKUP_IDENTIFIER); - if (names.length > 1) { - vmUuid = names[1]; - } - } - if (vmUuid == null) { + String 
backupFileId = StringUtils.substringAfterLast(backupFile.getUid(), ":"); + if (backupFileId.isEmpty()) { continue; } - if (vmUuid.contains(" - ")) { - vmUuid = vmUuid.split(" - ")[0]; - } - Long usedSize = 0L; - Long dataSize = 0L; - if (metrics.containsKey(vmUuid)) { - usedSize = metrics.get(vmUuid).getBackupSize(); - dataSize = metrics.get(vmUuid).getDataSize(); - } + Long backupSize = null; + Long dataSize = null; if (backupFile.getBackupSize() != null) { - usedSize += Long.valueOf(backupFile.getBackupSize()); + backupSize = Long.valueOf(backupFile.getBackupSize()); } if (backupFile.getDataSize() != null) { - dataSize += Long.valueOf(backupFile.getDataSize()); + dataSize = Long.valueOf(backupFile.getDataSize()); } - metrics.put(vmUuid, new Backup.Metric(usedSize, dataSize)); + metrics.put(backupFileId, new Backup.Metric(backupSize, dataSize)); } } catch (final IOException e) { logger.error("Failed to process response to get backup metrics via Veeam B&R API due to:", e); @@ -768,23 +742,14 @@ public class VeeamClient { final List cmds = Arrays.asList( "$backups = Get-VBRBackup", "foreach ($backup in $backups) {" + - " $backup.JobName;" + - " $storageGroups = $backup.GetStorageGroups();" + - " foreach ($group in $storageGroups) {" + - " $usedSize = 0;" + - " $dataSize = 0;" + - " $sizePerStorage = $group.GetStorages().Stats.BackupSize;" + - " $dataPerStorage = $group.GetStorages().Stats.DataSize;" + - " foreach ($size in $sizePerStorage) {" + - " $usedSize += $size;" + - " }" + - " foreach ($size in $dataPerStorage) {" + - " $dataSize += $size;" + - " }" + - " $usedSize;" + - " $dataSize;" + + " $restorePoints = Get-VBRRestorePoint -Backup $backup;" + + " foreach ($restorePoint in $restorePoints) {" + + " $backupFile = $restorePoint.GetStorage();" + + " $restorePoint.Id.Guid;" + + " $backupFile.Stats.BackupSize;" + + " $backupFile.Stats.DataSize;" + + " echo \"" + separator + "\";" + " }" + - " echo \"" + separator + "\"" + "}" ); Pair response = 
executePowerShellCommands(cmds); @@ -796,24 +761,22 @@ public class VeeamClient { protected Map processPowerShellResultForBackupMetrics(final String result) { logger.debug("Processing powershell result: " + result); - final String separator = "====="; - final Map sizes = new HashMap<>(); + Map metrics = new HashMap<>(); for (final String block : result.split(separator + "\r\n")) { final String[] parts = block.split("\r\n"); if (parts.length != 3) { continue; } - final String backupName = parts[0]; - if (backupName != null && backupName.contains(BACKUP_IDENTIFIER)) { - final String[] names = backupName.split(BACKUP_IDENTIFIER); - sizes.put(names[names.length - 1], new Backup.Metric(Long.valueOf(parts[1]), Long.valueOf(parts[2]))); - } + final String restorePointId = parts[0]; + final Long backupSize = Long.valueOf(parts[1]); + final Long dataSize = Long.valueOf(parts[2]); + metrics.put(restorePointId, new Backup.Metric(backupSize, dataSize)); } - return sizes; + return metrics; } - private Backup.RestorePoint getRestorePointFromBlock(String[] parts) { + private Backup.RestorePoint getRestorePointFromBlock(String[] parts, Map metricsMap) { logger.debug(String.format("Processing block of restore points: [%s].", StringUtils.join(parts, ", "))); String id = null; Date created = null; @@ -834,10 +797,17 @@ public class VeeamClient { type = split[1].trim(); } } - return new Backup.RestorePoint(id, created, type); + Backup.Metric metric = metricsMap.get(id); + Long backupSize = null; + Long dataSize = null; + if (metric != null) { + backupSize = metric.getBackupSize(); + dataSize = metric.getDataSize(); + } + return new Backup.RestorePoint(id, created, type, backupSize, dataSize); } - public List listRestorePointsLegacy(String backupName, String vmInternalName) { + public List listRestorePointsLegacy(String backupName, String vmInternalName, Map metricsMap) { final List cmds = Arrays.asList( String.format("$backup = Get-VBRBackup -Name '%s'", backupName), String.format("if 
($backup) { $restore = (Get-VBRRestorePoint -Backup:$backup -Name \"%s\" ^| Where-Object {$_.IsConsistent -eq $true})", vmInternalName), @@ -855,26 +825,26 @@ public class VeeamClient { } logger.debug(String.format("Found restore points from [backupName: %s, vmInternalName: %s] which is: [%s].", backupName, vmInternalName, block)); final String[] parts = block.split("\r\n"); - restorePoints.add(getRestorePointFromBlock(parts)); + restorePoints.add(getRestorePointFromBlock(parts, metricsMap)); } return restorePoints; } - public List listRestorePoints(String backupName, String vmInternalName) { + public List listRestorePoints(String backupName, String vmwareDcName, String vmInternalName, Map metricsMap) { if (isLegacyServer()) { - return listRestorePointsLegacy(backupName, vmInternalName); + return listRestorePointsLegacy(backupName, vmInternalName, metricsMap); } else { - return listVmRestorePointsViaVeeamAPI(vmInternalName); + return listVmRestorePointsViaVeeamAPI(vmwareDcName, vmInternalName, metricsMap); } } - public List listVmRestorePointsViaVeeamAPI(String vmInternalName) { + public List listVmRestorePointsViaVeeamAPI(String vmwareDcName, String vmInternalName, Map metricsMap) { logger.debug(String.format("Trying to list VM restore points via Veeam B&R API for VM %s: ", vmInternalName)); try { final HttpResponse response = get(String.format("/vmRestorePoints?format=Entity")); checkResponseOK(response); - return processHttpResponseForVmRestorePoints(response.getEntity().getContent(), vmInternalName); + return processHttpResponseForVmRestorePoints(response.getEntity().getContent(), vmwareDcName, vmInternalName, metricsMap); } catch (final IOException e) { logger.error("Failed to list VM restore points via Veeam B&R API due to:", e); checkResponseTimeOut(e); @@ -882,21 +852,24 @@ public class VeeamClient { return new ArrayList<>(); } - public List processHttpResponseForVmRestorePoints(InputStream content, String vmInternalName) { + public List 
processHttpResponseForVmRestorePoints(InputStream content, String vmwareDcName, String vmInternalName, Map metricsMap) { List vmRestorePointList = new ArrayList<>(); try { final ObjectMapper objectMapper = new XmlMapper(); final VmRestorePoints vmRestorePoints = objectMapper.readValue(content, VmRestorePoints.class); + final String hierarchyId = findDCHierarchy(vmwareDcName); + final String hierarchyUuid = StringUtils.substringAfterLast(hierarchyId, ":"); if (vmRestorePoints == null) { throw new CloudRuntimeException("Could not get VM restore points via Veeam B&R API"); } for (final VmRestorePoint vmRestorePoint : vmRestorePoints.getVmRestorePoints()) { logger.debug(String.format("Processing VM restore point Name=%s, VmDisplayName=%s for vm name=%s", vmRestorePoint.getName(), vmRestorePoint.getVmDisplayName(), vmInternalName)); - if (!vmInternalName.equals(vmRestorePoint.getVmDisplayName())) { + if (!vmInternalName.equals(vmRestorePoint.getVmDisplayName()) || !vmRestorePoint.getHierarchyObjRef().contains(hierarchyUuid)) { continue; } boolean isReady = true; + String backupFileId = ""; List links = vmRestorePoint.getLink(); for (Link link : links) { if (Arrays.asList(BACKUP_FILE_REFERENCE, RESTORE_POINT_REFERENCE).contains(link.getType()) && !link.getRel().equals("Up")) { @@ -904,15 +877,27 @@ public class VeeamClient { isReady = false; break; } + if (link.getType() != null && link.getType().equals(BACKUP_FILE_REFERENCE)) { + backupFileId = StringUtils.substringAfterLast(link.getHref(), "/"); + } } if (!isReady) { continue; } - String vmRestorePointId = vmRestorePoint.getUid().substring(vmRestorePoint.getUid().lastIndexOf(':') + 1); + String vmRestorePointId = StringUtils.substringAfterLast(vmRestorePoint.getUid(), ":"); Date created = formatDate(vmRestorePoint.getCreationTimeUtc()); String type = vmRestorePoint.getPointType(); logger.debug(String.format("Adding restore point %s, %s, %s", vmRestorePointId, created, type)); - vmRestorePointList.add(new 
Backup.RestorePoint(vmRestorePointId, created, type)); + Long backupSize = null; + Long dataSize = null; + if (!backupFileId.isEmpty()) { + Backup.Metric metric = metricsMap.get(backupFileId); + if (metric != null) { + backupSize = metric.getBackupSize(); + dataSize = metric.getDataSize(); + } + } + vmRestorePointList.add(new Backup.RestorePoint(vmRestorePointId, created, type, backupSize, dataSize)); } } catch (final IOException | ParseException e) { logger.error("Failed to process response to get VM restore points via Veeam B&R API due to:", e); @@ -925,15 +910,17 @@ public class VeeamClient { return dateFormat.parse(StringUtils.substring(date, 0, 19)); } - public Pair restoreVMToDifferentLocation(String restorePointId, String hostIp, String dataStoreUuid) { - final String restoreLocation = RESTORE_VM_SUFFIX + UUID.randomUUID().toString(); + public Pair restoreVMToDifferentLocation(String restorePointId, String restoreLocation, String hostIp, String dataStoreUuid) { + if (restoreLocation == null) { + restoreLocation = RESTORE_VM_SUFFIX + UUID.randomUUID().toString(); + } final String datastoreId = dataStoreUuid.replace("-",""); final List cmds = Arrays.asList( "$points = Get-VBRRestorePoint", - String.format("foreach($point in $points) { if ($point.Id -eq '%s') { break; } }", restorePointId), + String.format("foreach($point in $points) { if ($point.Id -eq '%s') { $restorePoint = $point; break; } }", restorePointId), String.format("$server = Get-VBRServer -Name \"%s\"", hostIp), String.format("$ds = Find-VBRViDatastore -Server:$server -Name \"%s\"", datastoreId), - String.format("$job = Start-VBRRestoreVM -RestorePoint:$point -Server:$server -Datastore:$ds -VMName \"%s\" -RunAsync", restoreLocation), + String.format("$job = Start-VBRRestoreVM -RestorePoint:$restorePoint -Server:$server -Datastore:$ds -VMName \"%s\" -RunAsync", restoreLocation), "while (-not (Get-VBRRestoreSession -Id $job.Id).IsCompleted) { Start-Sleep -Seconds 10 }" ); Pair result = 
executePowerShellCommands(cmds); diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java index cbfe2fda592..a82ff551b8a 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/VeeamBackupProviderTest.java @@ -93,13 +93,14 @@ public class VeeamBackupProviderTest { backup.setExternalId("abc"); backup.setType("Full"); backup.setZoneId(3l); + backup.setBackupOfferingId(4l); Mockito.when(vmInstanceDao.findByIdIncludingRemoved(Mockito.anyLong())).thenReturn(vmInstanceVO); Mockito.doReturn(client).when(backupProvider).getClient(2l); Mockito.doReturn(true).when(client).deleteBackup("abc"); List backups = new ArrayList<>(); backups.add(backup); - Mockito.when(backupDao.listByVmId(3l, 1l)).thenReturn(backups); + Mockito.when(backupDao.listByVmIdAndOffering(3l, 1l, 4l)).thenReturn(backups); Mockito.verify(backupDao, Mockito.never()).remove(Mockito.anyLong()); boolean result = backupProvider.deleteBackup(backup, true); assertEquals(true, result); @@ -115,6 +116,7 @@ public class VeeamBackupProviderTest { Mockito.when(backup.getVmId()).thenReturn(1l); Mockito.when(backup.getExternalId()).thenReturn("abc"); Mockito.when(backup.getZoneId()).thenReturn(3l); + Mockito.when(backup.getBackupOfferingId()).thenReturn(4l); BackupVO backup2 = Mockito.mock(BackupVO.class); Mockito.when(backup2.getId()).thenReturn(2l); @@ -122,10 +124,7 @@ public class VeeamBackupProviderTest { Mockito.when(vmInstanceDao.findByIdIncludingRemoved(Mockito.anyLong())).thenReturn(vmInstanceVO); Mockito.doReturn(client).when(backupProvider).getClient(2l); Mockito.doReturn(true).when(client).deleteBackup("abc"); - List backups = new ArrayList<>(); - backups.add(backup); - backups.add(backup2); - Mockito.when(backupDao.listByVmId(3l, 
1l)).thenReturn(backups); + Mockito.when(backupDao.listByVmIdAndOffering(3l, 1l, 4l)).thenReturn(List.of(backup, backup2)); boolean result = backupProvider.deleteBackup(backup, true); Mockito.verify(backupDao, Mockito.times(1)).remove(2l); assertEquals(true, result); diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index 63d6896bb85..3485f402417 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -209,32 +209,46 @@ public class VeeamClientTest { private void verifyBackupMetrics(Map metrics) { - Assert.assertEquals(2, metrics.size()); + Assert.assertEquals(7, metrics.size()); - Assert.assertTrue(metrics.containsKey("d1bd8abd-fc73-4b77-9047-7be98a2ecb72")); - Assert.assertEquals(537776128L, (long) metrics.get("d1bd8abd-fc73-4b77-9047-7be98a2ecb72").getBackupSize()); - Assert.assertEquals(2147506644L, (long) metrics.get("d1bd8abd-fc73-4b77-9047-7be98a2ecb72").getDataSize()); + Assert.assertTrue(metrics.containsKey("d93d7c7d-068a-4e8f-ba54-e08cea3cb9d2")); + Assert.assertEquals(537776128L, (long) metrics.get("d93d7c7d-068a-4e8f-ba54-e08cea3cb9d2").getBackupSize()); + Assert.assertEquals(2147506644L, (long) metrics.get("d93d7c7d-068a-4e8f-ba54-e08cea3cb9d2").getDataSize()); - Assert.assertTrue(metrics.containsKey("0d752ca6-d628-4d85-a739-75275e4661e6")); - Assert.assertEquals(1268682752L, (long) metrics.get("0d752ca6-d628-4d85-a739-75275e4661e6").getBackupSize()); - Assert.assertEquals(15624049921L, (long) metrics.get("0d752ca6-d628-4d85-a739-75275e4661e6").getDataSize()); + Assert.assertTrue(metrics.containsKey("d2110f5f-aa22-4e67-8084-5d8597f26d63")); + Assert.assertEquals(579756032L, (long) metrics.get("d2110f5f-aa22-4e67-8084-5d8597f26d63").getBackupSize()); + 
Assert.assertEquals(7516219400L, (long) metrics.get("d2110f5f-aa22-4e67-8084-5d8597f26d63").getDataSize()); } @Test public void testProcessPowerShellResultForBackupMetrics() { - String result = "i-2-3-VM-CSBKP-d1bd8abd-fc73-4b77-9047-7be98a2ecb72\r\n" + + String result = "d93d7c7d-068a-4e8f-ba54-e08cea3cb9d2\r\n" + "537776128\r\n" + "2147506644\r\n" + "=====\r\n" + - "i-13-22-VM-CSBKP-b3b3cb75-cfbf-4496-9c63-a08a93347276\r\n" + + "4b1181fd-7b1e-4af1-a76b-8284a8953b99\r\n" + + "12398592\r\n" + + "71329948\r\n" + "=====\r\n" + - "backup-job-based-on-sla\r\n" + + "8e9a854e-9bb8-4a34-815c-a6ab17a1e72f\r\n" + + "11870208\r\n" + + "72378524\r\n" + "=====\r\n" + - "i-12-20-VM-CSBKP-9f292f11-00ec-4915-84f0-e3895828640e\r\n" + + "7c54d13d-7b9c-465a-8ec8-7a276bde57dd\r\n" + + "12083200\r\n" + + "69232800\r\n" + "=====\r\n" + - "i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\r\n" + - "1268682752\r\n" + - "15624049921\r\n" + + "094564ff-02a1-46c7-b9e5-e249b8b9acf6\r\n" + + "14217216\r\n" + + "76572832\r\n" + + "=====\r\n" + + "1f6f5c49-92ef-4757-b327-e63ae9f1fdea\r\n" + + "12460032\r\n" + + "72378524\r\n" + + "=====\r\n" + + "d2110f5f-aa22-4e67-8084-5d8597f26d63\r\n" + + "579756032\r\n" + + "7516219400\r\n" + "=====\r\n"; Map metrics = client.processPowerShellResultForBackupMetrics(result); @@ -294,22 +308,6 @@ public class VeeamClientTest { " 2023-11-01T00:00:35.163Z\n" + " vib\n" + " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-11-04T000109_2AC1.vbk\n" + - " 581083136\n" + - " 7516219404\n" + - " 5.82\n" + - " 2.22\n" + - " 2023-11-04T00:00:24.973Z\n" + - " vbk\n" + - " \n" + " \n" + " \n" + " \n" + @@ -326,54 +324,6 @@ public class VeeamClientTest { " 2023-10-28T23:00:33.233Z\n" + " vib\n" + " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " 
V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-10-30T000022_0CE3.vib\n" + - " 14409728\n" + - " 76572828\n" + - " 1\n" + - " 6.25\n" + - " 2023-10-30T00:00:22.7Z\n" + - " vib\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-11-06T000018_055B.vib\n" + - " 17883136\n" + - " 80767136\n" + - " 1\n" + - " 5\n" + - " 2023-11-06T00:00:18.253Z\n" + - " vib\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " \n" + - " V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-11-02T000029_65BE.vib\n" + - " 12521472\n" + - " 72378525\n" + - " 1\n" + - " 6.67\n" + - " 2023-11-02T00:00:29.05Z\n" + - " vib\n" + - " \n" + " \n" + " \n" + " \n" + @@ -463,14 +413,72 @@ public class VeeamClientTest { Map metrics = client.getBackupMetricsViaVeeamAPI(); Assert.assertEquals(1, metrics.size()); - Assert.assertTrue(metrics.containsKey("506760dc-ed77-40d6-a91d-e0914e7a1ad8")); - Assert.assertEquals(535875584L, (long) metrics.get("506760dc-ed77-40d6-a91d-e0914e7a1ad8").getBackupSize()); - Assert.assertEquals(2147507235L, (long) metrics.get("506760dc-ed77-40d6-a91d-e0914e7a1ad8").getDataSize()); + Assert.assertTrue(metrics.containsKey("6bf10cad-9181-45d9-9cc5-dd669366a381")); + Assert.assertEquals(535875584L, (long) metrics.get("6bf10cad-9181-45d9-9cc5-dd669366a381").getBackupSize()); + Assert.assertEquals(2147507235L, (long) metrics.get("6bf10cad-9181-45d9-9cc5-dd669366a381").getDataSize()); } @Test public void testListVmRestorePointsViaVeeamAPI() { - String xmlResponse = "\n" + + String backupFilesXmlResponse = "\n" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " 
V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-10-28T000059_745D.vbk\n" + + " 579756032\n" + + " 7516219400\n" + + " 5.83\n" + + " 2.22\n" + + " 2023-10-27T23:00:13.74Z\n" + + " vbk\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " V:\\Backup\\i-2-4-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-4-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-1036D2023-11-03T162535_89D6.vbk\n" + + " 12083200\n" + + " 69232800\n" + + " 1\n" + + " 6.67\n" + + " 2023-11-05T00:00:22.827Z\n" + + " vib\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " V:\\Backup\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275e4661e6\\i-2-5-VM-CSBKP-0d752ca6-d628-4d85-a739-75275eD2023-11-01T000035_BEBF.vib\n" + + " 12398592\n" + + " 71329948\n" + + " 1\n" + + " 6.67\n" + + " 2023-11-01T00:00:35.163Z\n" + + " vib\n" + + " \n" + + "\n"; + + wireMockRule.stubFor(get(urlMatching(".*/backupFiles\\?format=Entity")) + .willReturn(aResponse() + .withHeader("content-type", "application/xml") + .withStatus(200) + .withBody(backupFilesXmlResponse))); + + String vmRestorePointsXmlResponse = "\n" + "i-2-4-VM\n" + " Full\n" + " Full\n" + - " urn:VMware:Vm:adb5423b-b578-4c26-8ab8-cde9c1faec55.vm-1036\n" + + " urn:VMware:Vm:24490b30-81db-4038-821f-59694cd89519.vm-1036\n" + " \n" + "\n"; String vmName = "i-2-4-VM"; @@ -498,13 +506,38 @@ public class VeeamClientTest { .willReturn(aResponse() .withHeader("content-type", "application/xml") .withStatus(200) - .withBody(xmlResponse))); - List vmRestorePointList = client.listVmRestorePointsViaVeeamAPI(vmName); + .withBody(vmRestorePointsXmlResponse))); + + String hierarchyXmlResponse = "\n" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + + " " + + ""; + String vmwareDcName = "10.0.32.153"; + String hierarchyId = "urn:VMware:Vm:24490b30-81db-4038-821f-59694cd89519"; + + 
wireMockRule.stubFor(get(urlMatching(".*/hierarchyRoots")) + .willReturn(aResponse() + .withHeader("content-type", "application/xml") + .withStatus(200) + .withBody(hierarchyXmlResponse))); + + Map metricsMap = client.getBackupMetrics(); + List vmRestorePointList = client.listVmRestorePointsViaVeeamAPI(vmwareDcName, vmName, metricsMap); Assert.assertEquals(1, vmRestorePointList.size()); Assert.assertEquals("f6d504cf-eafe-4cd2-8dfc-e9cfe2f1e977", vmRestorePointList.get(0).getId()); Assert.assertEquals("2023-11-03 16:26:12", newDateFormat.format(vmRestorePointList.get(0).getCreated())); Assert.assertEquals("Full", vmRestorePointList.get(0).getType()); + Assert.assertEquals(12083200L, (long) vmRestorePointList.get(0).getBackupSize()); + Assert.assertEquals(69232800L, (long) vmRestorePointList.get(0).getDataSize()); } @Test diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java index 52c3de88d93..10012fba658 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java @@ -254,6 +254,15 @@ public class BridgeVifDriver extends VifDriverBase { intf.defBridgeNet(_bridges.get("private"), null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter)); } else if (nic.getType() == Networks.TrafficType.Storage) { String storageBrName = nic.getName() == null ? 
_bridges.get("private") : nic.getName(); + if (nic.getBroadcastType() == Networks.BroadcastDomainType.Storage) { + vNetId = Networks.BroadcastDomainType.getValue(nic.getBroadcastUri()); + protocol = Networks.BroadcastDomainType.Vlan.scheme(); + } + if (isValidProtocolAndVnetId(vNetId, protocol)) { + logger.debug(String.format("creating a vNet dev and bridge for %s traffic per traffic label %s", + Networks.TrafficType.Storage.name(), trafficLabel)); + storageBrName = createVnetBr(vNetId, storageBrName, protocol); + } intf.defBridgeNet(storageBrName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter)); } if (nic.getPxeDisable()) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDef.java index 80a34b33b59..06457b1d071 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDef.java @@ -49,7 +49,7 @@ public class LibvirtGpuDef { String mdevUuid = vgpuType.getBusAddress(); // For MDEV devices, busAddress contains the UUID String displayAttribute = vgpuType.isDisplay() ? "on" : "off"; - gpuBuilder.append("\n"); + gpuBuilder.append("\n"); gpuBuilder.append(" \n"); gpuBuilder.append("
\n"); gpuBuilder.append(" \n"); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index f0911ba4ea4..ec4a06ae022 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -249,9 +249,7 @@ public class LibvirtVMDef { guestDef.append("\n"); } } - if (_arch == null || ! (_arch.equals("aarch64") || _arch.equals("s390x"))) { // simplification of (as ref.) (!(_arch != null && _arch.equals("s390x")) || (_arch == null || !_arch.equals("aarch64"))) - guestDef.append("\n"); - } + guestDef.append("\n"); guestDef.append("\n"); if (iothreads) { guestDef.append(String.format("%s", NUMBER_OF_IOTHREADS)); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetBackupStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetBackupStatsCommandWrapper.java new file mode 100644 index 00000000000..9eb8cb71e3a --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetBackupStatsCommandWrapper.java @@ -0,0 +1,71 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.backup.BackupStorageStatsAnswer; +import org.apache.cloudstack.backup.GetBackupStorageStatsCommand; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; + +@ResourceWrapper(handles = GetBackupStorageStatsCommand.class) +public class LibvirtGetBackupStatsCommandWrapper extends CommandWrapper { + @Override + public Answer execute(GetBackupStorageStatsCommand command, LibvirtComputingResource libvirtComputingResource) { + final String backupRepoType = command.getBackupRepoType(); + final String backupRepoAddress = command.getBackupRepoAddress(); + final String mountOptions = command.getMountOptions(); + + List commands = new ArrayList<>(); + commands.add(new String[]{ + libvirtComputingResource.getNasBackupPath(), + "-o", "stats", + "-t", backupRepoType, + "-s", backupRepoAddress, + "-m", mountOptions + }); + + Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); + + logger.debug(String.format("Get backup storage stats result: %s , exit code: %s", result.second(), result.first())); + + if (result.first() != 0) { + logger.debug(String.format("Failed to get backup storage stats: %s", result.second())); + return new BackupStorageStatsAnswer(command, false, 
result.second()); + } + + BackupStorageStatsAnswer answer = new BackupStorageStatsAnswer(command, false, result.second()); + + String [] stats = result.second().split("\\s+"); + Long total = Long.parseLong(stats[1]) * 1024; + Long used = Long.parseLong(stats[2]) * 1024; + answer.setTotalSize(total); + answer.setUsedSize(used); + + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java index 8abc359250c..0e5091ebcf4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java @@ -58,21 +58,22 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper volumePaths = command.getVolumePaths(); + List backedVolumeUUIDs = command.getBackupVolumesUUIDs(); + List restoreVolumePaths = command.getRestoreVolumePaths(); String restoreVolumeUuid = command.getRestoreVolumeUUID(); String newVolumeId = null; try { if (Objects.isNull(vmExists)) { - String volumePath = volumePaths.get(0); + String volumePath = restoreVolumePaths.get(0); int lastIndex = volumePath.lastIndexOf("/"); newVolumeId = volumePath.substring(lastIndex + 1); restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid, new Pair<>(vmName, command.getVmState()), mountOptions); } else if (Boolean.TRUE.equals(vmExists)) { - restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions); + restoreVolumesOfExistingVM(restoreVolumePaths, backedVolumeUUIDs, backupPath, backupRepoType, backupRepoAddress, mountOptions); } else { - restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, 
backupRepoAddress, mountOptions); + restoreVolumesOfDestroyedVMs(restoreVolumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions); } } catch (CloudRuntimeException e) { String errorMessage = "Failed to restore backup for VM: " + vmName + "."; @@ -86,24 +87,24 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper volumePaths, String backupPath, - String backupRepoType, String backupRepoAddress, String mountOptions) { + private void restoreVolumesOfExistingVM(List restoreVolumePaths, List backedVolumesUUIDs, String backupPath, + String backupRepoType, String backupRepoAddress, String mountOptions) { String diskType = "root"; String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions); try { - for (int idx = 0; idx < volumePaths.size(); idx++) { - String volumePath = volumePaths.get(idx); - Pair bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null); + for (int idx = 0; idx < restoreVolumePaths.size(); idx++) { + String restoreVolumePath = restoreVolumePaths.get(idx); + String backupVolumeUuid = backedVolumesUUIDs.get(idx); + Pair bkpPathAndVolUuid = getBackupPath(mountDirectory, null, backupPath, diskType, backupVolumeUuid); diskType = "datadisk"; - if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) { - throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second())); + if (!replaceVolumeWithBackup(restoreVolumePath, bkpPathAndVolUuid.first())) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); } } } finally { unmountBackupDirectory(mountDirectory); deleteTemporaryDirectory(mountDirectory); } - } private void restoreVolumesOfDestroyedVMs(List volumePaths, String vmName, String backupPath, @@ -116,7 +117,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper bkpPathAndVolUuid = 
getBackupPath(mountDirectory, volumePath, backupPath, diskType, null); diskType = "datadisk"; if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) { - throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second())); + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); } } } finally { @@ -132,7 +133,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper getBackupPath(String mountDirectory, String volumePath, String backupPath, String diskType, String volumeUuid) { String bkpPath = String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath); - int lastIndex = volumePath.lastIndexOf(File.separator); - String volUuid = Objects.isNull(volumeUuid) ? volumePath.substring(lastIndex + 1) : volumeUuid; + String volUuid = Objects.isNull(volumeUuid) ? volumePath.substring(volumePath.lastIndexOf(File.separator) + 1) : volumeUuid; String backupFileName = String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volUuid); bkpPath = String.format(FILE_PATH_PLACEHOLDER, bkpPath, backupFileName); return new Pair<>(bkpPath, volUuid); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java index 3c0cc53bb73..c7a67080fbf 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java @@ -36,6 +36,7 @@ import java.util.Objects; @ResourceWrapper(handles = TakeBackupCommand.class) public class LibvirtTakeBackupCommandWrapper extends CommandWrapper { + private static final Integer EXIT_CLEANUP_FAILED = 20; @Override public Answer 
execute(TakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { final String vmName = command.getVmName(); @@ -54,6 +55,7 @@ public class LibvirtTakeBackupCommandWrapper extends CommandWrapper capabilities = new ArrayList<>(); - private static String cpuArchCommand = "/usr/bin/arch"; + private static String cpuArchRetrieveExecutable = "arch"; private static List cpuInfoFreqFileNames = List.of("/sys/devices/system/cpu/cpu0/cpufreq/base_frequency","/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq"); public KVMHostInfo(long reservedMemory, long overCommitMemory, long manualSpeed, int reservedCpus) { @@ -255,6 +255,6 @@ public class KVMHostInfo { private String getCPUArchFromCommand() { LOGGER.info("Fetching host CPU arch"); - return Script.runSimpleBashScript(cpuArchCommand); + return Script.runSimpleBashScript(Script.getExecutableAbsolutePath(cpuArchRetrieveExecutable)); } } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDefTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDefTest.java index 5dbea4fabf9..0060e1d7ed4 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDefTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtGpuDefTest.java @@ -64,10 +64,9 @@ public class LibvirtGpuDefTest extends TestCase { String gpuXml = gpuDef.toString(); - assertTrue(gpuXml.contains("")); + assertTrue(gpuXml.contains("")); assertTrue(gpuXml.contains("
")); assertTrue(gpuXml.contains("")); - assertFalse(gpuXml.contains("vfio")); // MDEV should not contain vfio driver element } @Test diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/SimulatorGuru.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/SimulatorGuru.java index 246d86d2712..aae28c428b8 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/SimulatorGuru.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/SimulatorGuru.java @@ -17,6 +17,7 @@ package com.cloud.simulator; import java.util.Date; +import java.util.List; import java.util.Map; import javax.inject.Inject; @@ -35,8 +36,8 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.NicDao; public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru { @Inject @@ -90,6 +91,29 @@ public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru return vm; } + @Override + public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, VirtualMachine vm, long poolId, Backup backup) { + + VMInstanceVO targetVM = instanceDao.findVMByInstanceNameIncludingRemoved(vm.getName()); + List vmVolumes = volumeDao.findByInstance(targetVM.getId()); + VolumeVO restoredVolume = volumeDao.findByUuid(location); + if (restoredVolume != null) { + try { + volumeDao.attachVolume(restoredVolume.getId(), vm.getId(), getNextAvailableDeviceId(vmVolumes)); + restoredVolume.setState(Volume.State.Ready); + volumeDao.update(restoredVolume.getId(), restoredVolume); + return true; + } catch (Exception e) { + restoredVolume.setDisplay(false); + restoredVolume.setDisplayVolume(false); + restoredVolume.setState(Volume.State.Destroy); + volumeDao.update(restoredVolume.getId(), 
restoredVolume); + throw new RuntimeException("Unable to attach volume " + restoredVolume.getName() + " to VM" + vm.getName() + " due to : " + e.getMessage()); + } + } + return false; + } + @Override public boolean trackVmHostChange() { return false; @@ -100,4 +124,11 @@ public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru return null; } + private long getNextAvailableDeviceId(List vmVolumes) { + if (vmVolumes == null || vmVolumes.isEmpty()) { + return 0; + } + long maxDeviceId = vmVolumes.stream() .mapToLong(VolumeVO::getDeviceId) .max() .orElse(-1); + return maxDeviceId + 1; + } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 8f4d7e4da92..88df637b0dd 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -870,7 +870,9 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Co try { List list = new ArrayList<>(); for (VolumeVO vol : vmVolumes) { - list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize())); + DiskOfferingVO diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOffering.getUuid(), vol.getMinIops(), vol.getMaxIops())); } return GSON.toJson(list.toArray(), Backup.VolumeInfo[].class); } catch (Exception e) { diff --git a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/guru/VMwareGuruTest.java b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/guru/VMwareGuruTest.java index 4da513db3e4..6e96330ac58 100644 --- a/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/guru/VMwareGuruTest.java +++ 
b/plugins/hypervisors/vmware/src/test/java/com/cloud/hypervisor/guru/VMwareGuruTest.java @@ -69,12 +69,14 @@ import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.hypervisor.vmware.util.VmwareHelper; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.Pair; import com.cloud.utils.UuidUtils; @@ -110,6 +112,9 @@ public class VMwareGuruTest { @Mock ClusterDetailsDao _clusterDetailsDao; + @Mock + DiskOfferingDao diskOfferingDao; + AutoCloseable closeable; @Mock @@ -187,19 +192,26 @@ public class VMwareGuruTest { @Test public void createVolumeInfoFromVolumesTestCorrectlyConvertOfVolumes() { + Long diskOfferingId = 5L; + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + Mockito.when(diskOffering.getUuid()).thenReturn("disk-offering-uuid"); + Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering); + List volumesToTest = new ArrayList<>(); - VolumeVO root = new VolumeVO("test", 1l, 1l, 1l, 1l, 1l, "test", "/root/dir", ProvisioningType.THIN, 555l, Volume.Type.ROOT); + VolumeVO root = new VolumeVO("test", 1l, 1l, 1l, 1l, 6l, "test", "/root/dir", ProvisioningType.THIN, 555l, Volume.Type.ROOT); + root.setDiskOfferingId(diskOfferingId); String rootUuid = root.getUuid(); VolumeVO data = new VolumeVO("test", 1l, 1l, 1l, 1l, 1l, "test", "/root/dir/data", ProvisioningType.THIN, 1111000l, Volume.Type.DATADISK); + data.setDiskOfferingId(diskOfferingId); String dataUuid = data.getUuid(); volumesToTest.add(root); volumesToTest.add(data); String result = 
vMwareGuru.createVolumeInfoFromVolumes(volumesToTest); - String expected = String.format("[{\"uuid\":\"%s\",\"type\":\"ROOT\",\"size\":555,\"path\":\"/root/dir\"},{\"uuid\":\"%s\",\"type\":\"DATADISK\",\"size\":1111000,\"path\":\"/root/dir/data\"}]", rootUuid, dataUuid); + String expected = String.format("[{\"uuid\":\"%s\",\"type\":\"ROOT\",\"size\":555,\"path\":\"/root/dir\",\"diskOfferingId\":\"disk-offering-uuid\"},{\"uuid\":\"%s\",\"type\":\"DATADISK\",\"size\":1111000,\"path\":\"/root/dir/data\",\"diskOfferingId\":\"disk-offering-uuid\"}]", rootUuid, dataUuid); assertEquals(expected, result); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index d95bc0b9784..dc092608ff3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -482,8 +482,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne return null; } - public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType, Map templateNodeTypeMap, KubernetesClusterNodeType nodeType) { - VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); + public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType, Map templateNodeTypeMap, KubernetesClusterNodeType nodeType, + KubernetesSupportedVersion clusterKubernetesVersion) { + String systemVMPreferredArchitecture = ResourceManager.SystemVmPreferredArchitecture.valueIn(dataCenter.getId()); + VMTemplateVO cksIso = clusterKubernetesVersion != null ? 
+ templateDao.findById(clusterKubernetesVersion.getIsoId()) : + null; + String preferredArchitecture = getCksClusterPreferredArch(systemVMPreferredArchitecture, cksIso); + VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType, preferredArchitecture); if (DataCenter.Type.Edge.equals(dataCenter.getType()) && template != null && !template.isDirectDownload()) { logger.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter)); template = templateDao.findRoutingTemplate(hypervisorType, networkHelper.getHypervisorRouterTemplateConfigMap().get(hypervisorType).valueIn(dataCenter.getId())); @@ -510,6 +516,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne throw new CloudRuntimeException("Not able to find the System or Routing template in ready state for the zone " + datacenterId); } + protected String getCksClusterPreferredArch(String systemVMPreferredArchitecture, VMTemplateVO cksIso) { + if (cksIso == null) { + return systemVMPreferredArchitecture; + } + String cksIsoArchName = cksIso.getArch().name(); + return cksIsoArchName.equals(systemVMPreferredArchitecture) ? 
systemVMPreferredArchitecture : cksIsoArchName; + } + protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose purpose, Network network, int clusterTotalNodeCount) { List rules = firewallRulesDao.listByIpPurposeProtocolAndNotRevoked(ipId, purpose, NetUtils.TCP_PROTO); for (FirewallRuleVO rule : rules) { @@ -1541,11 +1555,12 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne } Map templateNodeTypeMap = cmd.getTemplateNodeTypeMap(); - final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, DEFAULT); - final VMTemplateVO controlNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, CONTROL); - final VMTemplateVO workerNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, WORKER); - final VMTemplateVO etcdNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, ETCD); + final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, DEFAULT, clusterKubernetesVersion); + final VMTemplateVO controlNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, CONTROL, clusterKubernetesVersion); + final VMTemplateVO workerNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, WORKER, clusterKubernetesVersion); + final VMTemplateVO etcdNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, ETCD, clusterKubernetesVersion); final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId(), asNumber); + final SecurityGroup finalSecurityGroup = securityGroup; final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback() { @Override diff --git 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 1f2f42c3fe6..9be5f4c0776 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -36,6 +36,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.kubernetes.version.KubernetesSupportedVersionVO; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -257,7 +258,8 @@ public class KubernetesClusterActionWorker { DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId); VMTemplateVO template = templateDao.findById(templateId); Hypervisor.HypervisorType type = template.getHypervisorType(); - this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type, null, KubernetesClusterNodeType.DEFAULT); + KubernetesSupportedVersionVO kubernetesSupportedVersion = kubernetesSupportedVersionDao.findById(this.kubernetesCluster.getKubernetesVersionId()); + this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type, null, KubernetesClusterNodeType.DEFAULT, kubernetesSupportedVersion); this.controlNodeTemplate = templateDao.findById(this.kubernetesCluster.getControlNodeTemplateId()); this.workerNodeTemplate = templateDao.findById(this.kubernetesCluster.getWorkerNodeTemplateId()); this.etcdTemplate = templateDao.findById(this.kubernetesCluster.getEtcdNodeTemplateId()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 5546b38a060..d392612547b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -212,7 +212,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu for (Map.Entry> hostEntry : hosts_with_resevered_capacity.entrySet()) { Pair hp = hostEntry.getValue(); HostVO h = hp.first(); - if (!h.getHypervisorType().equals(clusterTemplate.getHypervisorType())) { + if (!h.getHypervisorType().equals(clusterTemplate.getHypervisorType()) || !h.getArch().equals(clusterTemplate.getArch())) { continue; } hostDao.loadHostTags(h); @@ -421,13 +421,13 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); nodeVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, securityGroupIds, owner, - hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, null, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE, null, null); } else { nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, owner, - hostName, hostName, null, null, null, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, null, addrs, null, null, Objects.nonNull(affinityGroupId) ? Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null, null, null); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 21db2ebb8f7..9ffee220a10 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -276,13 +276,13 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); controlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, securityGroupIds, owner, - hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, userDataId, userDataDetails, keypairs, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, userDataId, userDataDetails, keypairs, requestedIps, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE, null, null); } else { controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner, - hostName, hostName, null, null, null, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, userDataId, userDataDetails, keypairs, requestedIps, addrs, null, null, Objects.nonNull(affinityGroupId) ? Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null, null, null); @@ -444,13 +444,13 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); additionalControlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, securityGroupIds, owner, - hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, null, addrs, null, null, Objects.nonNull(affinityGroupId) ? Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE, null, null); } else { additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner, - hostName, hostName, null, null, null, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, null, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null, null, null); @@ -488,13 +488,13 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); etcdNode = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, etcdTemplate, networkIds, securityGroupIds, owner, - hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, Map.of(kubernetesCluster.getNetworkId(), requestedIps.get(etcdNodeIndex)), addrs, null, null, Objects.nonNull(affinityGroupId) ? Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, null, null, null); } else { etcdNode = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, etcdTemplate, networkIds, owner, - hostName, hostName, null, null, null, + hostName, hostName, null, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, Map.of(kubernetesCluster.getNetworkId(), requestedIps.get(etcdNodeIndex)), addrs, null, null, Objects.nonNull(affinityGroupId) ? 
Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null, null, null); diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java index 57715cc9309..0793482b8f1 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java @@ -21,6 +21,7 @@ package com.cloud.kubernetes.cluster; import com.cloud.api.query.dao.TemplateJoinDao; import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.cpu.CPU; import com.cloud.dc.DataCenter; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; @@ -422,4 +423,22 @@ public class KubernetesClusterManagerImplTest { Assert.assertEquals(workerOffering, mapping.get(WORKER.name())); Assert.assertEquals(controlOffering, mapping.get(CONTROL.name())); } + + @Test + public void testGetCksClusterPreferredArchDifferentArchsPreferCKSIsoArch() { + String systemVMArch = "x86_64"; + VMTemplateVO cksIso = Mockito.mock(VMTemplateVO.class); + Mockito.when(cksIso.getArch()).thenReturn(CPU.CPUArch.arm64); + String cksClusterPreferredArch = kubernetesClusterManager.getCksClusterPreferredArch(systemVMArch, cksIso); + Assert.assertEquals(CPU.CPUArch.arm64.name(), cksClusterPreferredArch); + } + + @Test + public void testGetCksClusterPreferredArchSameArch() { + String systemVMArch = "x86_64"; + VMTemplateVO cksIso = Mockito.mock(VMTemplateVO.class); + Mockito.when(cksIso.getArch()).thenReturn(CPU.CPUArch.amd64); + String cksClusterPreferredArch = kubernetesClusterManager.getCksClusterPreferredArch(systemVMArch, cksIso); + 
Assert.assertEquals(CPU.CPUArch.amd64.name(), cksClusterPreferredArch); + } } diff --git a/plugins/maintenance/src/main/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImpl.java b/plugins/maintenance/src/main/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImpl.java index 16cf14e1fb1..516ed40d48b 100644 --- a/plugins/maintenance/src/main/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImpl.java +++ b/plugins/maintenance/src/main/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImpl.java @@ -622,6 +622,7 @@ public class ManagementServerMaintenanceManagerImpl extends ManagerBase implemen ManagementServerHostVO msHost = msHostDao.findByMsid(ManagementServerNode.getManagementServerId()); if (msHost == null) { logger.warn("Unable to find the management server, invalid node id"); + managementServerMaintenanceManager.cancelWaitForPendingJobs(); return; } msHostDao.updateState(msHost.getId(), State.Maintenance); @@ -658,6 +659,7 @@ public class ManagementServerMaintenanceManagerImpl extends ManagerBase implemen ManagementServerHostVO msHost = msHostDao.findByMsid(ManagementServerNode.getManagementServerId()); if (msHost == null) { logger.warn("Unable to find the management server, invalid node id"); + managementServerMaintenanceManager.cancelWaitForPendingJobs(); return; } if (totalAgents == 0) { @@ -693,6 +695,7 @@ public class ManagementServerMaintenanceManagerImpl extends ManagerBase implemen ManagementServerHostVO msHost = msHostDao.findByMsid(ManagementServerNode.getManagementServerId()); if (msHost == null) { logger.warn("Unable to find the management server, invalid node id"); + managementServerMaintenanceManager.cancelWaitForPendingJobs(); return; } msHostDao.updateState(msHost.getId(), State.ReadyToShutDown); diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java 
b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 93d6afc1df8..b2fc033268c 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -644,7 +644,7 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements metricsResponse.setStorageUsedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageThreshold); metricsResponse.setStorageUsedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold); metricsResponse.setStorageAllocatedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageThreshold); - metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold); + metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageDisableThreshold); metricsResponses.add(metricsResponse); } return metricsResponses; diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java index d47bf6eceeb..338caca1fbe 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java @@ -48,6 +48,10 @@ public class EventUtils { private static EventDistributor 
eventDistributor; + private static final String MODULE_TOP_LEVEL_PACKAGE = + EventUtils.class.getPackage().getName().substring(0, + EventUtils.class.getPackage().getName().lastIndexOf('.')); + public EventUtils() { } @@ -143,6 +147,13 @@ public class EventUtils { @Override public void interceptComplete(Method method, Object target, Object event) { ActionEvent actionEvent = method.getAnnotation(ActionEvent.class); + boolean sameModule = false; + if (target != null && target.getClass().getPackage() != null) { + sameModule = target.getClass().getPackage().getName().startsWith(MODULE_TOP_LEVEL_PACKAGE); + } + if (!sameModule) { + return; + } if (actionEvent != null) { CallContext ctx = CallContext.current(); if (!actionEvent.create()) { diff --git a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/command/AddNetrisProviderCmd.java b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/command/AddNetrisProviderCmd.java index 5cdecc77d3b..9bb04a9bf0d 100644 --- a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/command/AddNetrisProviderCmd.java +++ b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/command/AddNetrisProviderCmd.java @@ -53,7 +53,7 @@ public class AddNetrisProviderCmd extends BaseCmd { @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "Netris provider name") private String name; - @Parameter(name = ApiConstants.URL, type = CommandType.STRING, required = true, description = "Netris provider URL") + @Parameter(name = ApiConstants.NETRIS_URL, type = CommandType.STRING, required = true, description = "Netris provider URL") private String url; @Parameter(name = ApiConstants.USERNAME, type = CommandType.STRING, required = true, description = "Username to login into Netris") diff --git a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/response/NetrisProviderResponse.java 
b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/response/NetrisProviderResponse.java index d130c3a7372..4dc9b6b3030 100644 --- a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/response/NetrisProviderResponse.java +++ b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/api/response/NetrisProviderResponse.java @@ -41,13 +41,9 @@ public class NetrisProviderResponse extends BaseResponse { @Param(description = "Zone name to which the Netris Provider is associated with") private String zoneName; - @SerializedName(ApiConstants.HOST_NAME) - @Param(description = "Netris Provider hostname or IP address") - private String hostname; - - @SerializedName(ApiConstants.PORT) - @Param(description = "Netris Provider port") - private String port; + @SerializedName(ApiConstants.NETRIS_URL) + @Param(description = "Netris Provider URL") + private String netrisUrl; @SerializedName(ApiConstants.SITE_NAME) @Param(description = "Netris Provider site") @@ -93,20 +89,12 @@ public class NetrisProviderResponse extends BaseResponse { this.zoneName = zoneName; } - public String getHostname() { - return hostname; + public String getNetrisUrl() { + return netrisUrl; } - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public String getPort() { - return port; - } - - public void setPort(String port) { - this.port = port; + public void setNetrisUrl(String netrisUrl) { + this.netrisUrl = netrisUrl; } public String getSiteName() { diff --git a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/service/NetrisProviderServiceImpl.java b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/service/NetrisProviderServiceImpl.java index 14a845e1c3a..57714e4787c 100644 --- a/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/service/NetrisProviderServiceImpl.java +++ 
b/plugins/network-elements/netris/src/main/java/org/apache/cloudstack/service/NetrisProviderServiceImpl.java @@ -184,7 +184,7 @@ public class NetrisProviderServiceImpl implements NetrisProviderService { NetrisProviderResponse response = new NetrisProviderResponse(); response.setName(provider.getName()); response.setUuid(provider.getUuid()); - response.setHostname(provider.getUrl()); + response.setNetrisUrl(provider.getUrl()); response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); response.setSiteName(provider.getSiteName()); diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java index c3a82c42189..61ff57fd058 100644 --- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java +++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java @@ -24,10 +24,6 @@ import java.util.UUID; import javax.inject.Inject; -import com.cloud.configuration.Config; -import com.cloud.utils.SwiftUtil; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -37,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.DownloadCommand; import 
org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl; @@ -47,8 +44,9 @@ import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.SwiftTO; +import com.cloud.configuration.Config; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.template.VirtualMachineTemplate; +import com.cloud.utils.SwiftUtil; import com.cloud.utils.exception.CloudRuntimeException; public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { @@ -99,8 +97,13 @@ public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { @Override public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes(); - VirtualMachineTemplate tmpl = _templateDao.findById(data.getId()); DataStore cacheStore = cacheManager.getCacheStorage(dataStore.getScope()); + if (cacheStore == null) { + String errMsg = String.format("No cache store found for scope: %s", + dataStore.getScope().getScopeType().name()); + logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } DownloadCommand dcmd = new DownloadCommand((TemplateObjectTO)(data.getTO()), maxTemplateSizeInBytes); dcmd.setCacheStore(cacheStore.getTO()); dcmd.setProxy(getHttpProxy()); diff --git a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java index 5b5eaa08dc5..23f155c16c5 100644 --- a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java +++ b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/driver/CephObjectStoreDriverImpl.java @@ -193,19 +193,19 @@ public 
class CephObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { policyConfig = "{\"Version\":\"2012-10-17\",\"Statement\":[]}"; } - AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getAccessKey()); + AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getSecretKey()); client.setBucketPolicy(new SetBucketPolicyRequest(bucket.getName(), policyConfig)); } @Override public BucketPolicy getBucketPolicy(BucketTO bucket, long storeId) { - AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getAccessKey()); + AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getSecretKey()); return client.getBucketPolicy(new GetBucketPolicyRequest(bucket.getName())); } @Override public void deleteBucketPolicy(BucketTO bucket, long storeId) { - AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getAccessKey()); + AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getSecretKey()); client.deleteBucketPolicy(new DeleteBucketPolicyRequest(bucket.getName())); } @@ -255,7 +255,7 @@ public class CephObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { @Override public boolean setBucketVersioning(BucketTO bucket, long storeId) { - AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getAccessKey()); + AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getSecretKey()); try { BucketVersioningConfiguration configuration = new BucketVersioningConfiguration().withStatus("Enabled"); @@ -272,7 +272,7 @@ public class CephObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { @Override public boolean deleteBucketVersioning(BucketTO bucket, long storeId) { - AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getAccessKey()); + AmazonS3 client = getS3Client(getStoreURL(storeId), bucket.getAccessKey(), bucket.getSecretKey()); 
try { BucketVersioningConfiguration configuration = new BucketVersioningConfiguration().withStatus("Suspended"); diff --git a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CephObjectStoreLifeCycleImpl.java b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CephObjectStoreLifeCycleImpl.java index 8740d188ce0..f0b0ecf5c61 100644 --- a/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CephObjectStoreLifeCycleImpl.java +++ b/plugins/storage/object/ceph/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CephObjectStoreLifeCycleImpl.java @@ -54,6 +54,7 @@ public class CephObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { public DataStore initialize(Map dsInfos) { String url = (String)dsInfos.get("url"); String name = (String)dsInfos.get("name"); + Long size = (Long)dsInfos.get("size"); String providerName = (String)dsInfos.get("providerName"); Map details = (Map)dsInfos.get("details"); if (details == null) { @@ -67,6 +68,7 @@ public class CephObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { Map objectStoreParameters = new HashMap(); objectStoreParameters.put("name", name); objectStoreParameters.put("url", url); + objectStoreParameters.put("size", size); objectStoreParameters.put("providerName", providerName); objectStoreParameters.put("accesskey", accessKey); diff --git a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java index 9d620b32b54..1fb0d515949 100644 --- a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java +++ 
b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java @@ -53,6 +53,7 @@ public class MinIOObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { String url = (String)dsInfos.get("url"); String name = (String)dsInfos.get("name"); + Long size = (Long)dsInfos.get("size"); String providerName = (String)dsInfos.get("providerName"); Map details = (Map)dsInfos.get("details"); if(details == null){ @@ -65,6 +66,7 @@ public class MinIOObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { Map objectStoreParameters = new HashMap(); objectStoreParameters.put("name", name); objectStoreParameters.put("url", url); + objectStoreParameters.put("size", size); objectStoreParameters.put("providerName", providerName); objectStoreParameters.put("accesskey", accessKey); diff --git a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java index 6ceed041e8d..3c5bc76ba95 100644 --- a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java +++ b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java @@ -65,12 +65,14 @@ public class SimulatorObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { String url = (String)dsInfos.get("url"); String name = (String)dsInfos.get("name"); String providerName = (String)dsInfos.get("providerName"); + Long size = (Long)dsInfos.get("size"); Map details = (Map)dsInfos.get("details"); Map objectStoreParameters = new HashMap(); objectStoreParameters.put("name", name); objectStoreParameters.put("url", url); objectStoreParameters.put("providerName", providerName); + objectStoreParameters.put("size", 
size); ObjectStoreVO ids = objectStoreHelper.createObjectStore(objectStoreParameters, details); return objectStoreMgr.getObjectStore(ids.getId()); diff --git a/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java b/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java index a625761ec28..ac8d6a58f0c 100644 --- a/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java +++ b/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java @@ -175,10 +175,11 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle { customParameterMap.put("maxIopsDo", maxIops.toString()); } List keypairs = new ArrayList(); + String preferredArchitecture = ResourceManager.SystemVmPreferredArchitecture.valueIn(zoneId); for (final Iterator iter = hypervisors.iterator(); iter.hasNext();) { final Hypervisor.HypervisorType hypervisor = iter.next(); - VMTemplateVO template = templateDao.findSystemVMReadyTemplate(zoneId, hypervisor); + VMTemplateVO template = templateDao.findSystemVMReadyTemplate(zoneId, hypervisor, preferredArchitecture); if (template == null && !iter.hasNext()) { throw new CloudRuntimeException(String.format("Unable to find the systemvm template for %s or it was not downloaded in %s.", hypervisor.toString(), zone.toString())); } @@ -195,7 +196,7 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle { CallContext vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); try { vm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, hostName, hostName, - diskOfferingId, size, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, + diskOfferingId, size, 
null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.SHAREDFSVM, null, null, null); diff --git a/plugins/storage/sharedfs/storagevm/src/main/resources/conf/fsvm-init.yml b/plugins/storage/sharedfs/storagevm/src/main/resources/conf/fsvm-init.yml index 4d3572162c8..ceafa6c3cb1 100644 --- a/plugins/storage/sharedfs/storagevm/src/main/resources/conf/fsvm-init.yml +++ b/plugins/storage/sharedfs/storagevm/src/main/resources/conf/fsvm-init.yml @@ -30,14 +30,9 @@ write_files: } get_block_device() { - if [ "$HYPERVISOR" == "kvm" ]; then - BLOCK_DEVICE="vdb" - elif [ "$HYPERVISOR" == "xenserver" ]; then - BLOCK_DEVICE="xvdb" - elif [ "$HYPERVISOR" == "vmware" ]; then - BLOCK_DEVICE="sdb" - else - log "Unknown hypervisor" + BLOCK_DEVICE=$(lsblk -dn -o NAME,TYPE | awk '$2=="disk"{print $1}' | tail -n 1) + if [ -z "$BLOCK_DEVICE" ]; then + log "Unknown data disk" exit 1 fi echo "$BLOCK_DEVICE" diff --git a/plugins/storage/sharedfs/storagevm/src/test/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycleTest.java b/plugins/storage/sharedfs/storagevm/src/test/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycleTest.java index 21753257f75..c64e8c05c99 100644 --- a/plugins/storage/sharedfs/storagevm/src/test/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycleTest.java +++ b/plugins/storage/sharedfs/storagevm/src/test/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycleTest.java @@ -235,7 +235,7 @@ public class StorageVmSharedFSLifeCycleTest { when(serviceOfferingDao.findById(s_serviceOfferingId)).thenReturn(serviceOffering); VMTemplateVO template = mock(VMTemplateVO.class); - when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM)).thenReturn(template); + 
when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM, ResourceManager.SystemVmPreferredArchitecture.defaultValue())).thenReturn(template); when(template.getId()).thenReturn(s_templateId); return sharedFS; @@ -254,7 +254,7 @@ public class StorageVmSharedFSLifeCycleTest { when(vm.getId()).thenReturn(s_vmId); when(userVmService.createAdvancedVirtualMachine( any(DataCenter.class), any(ServiceOffering.class), any(VirtualMachineTemplate.class), anyList(), any(Account.class), anyString(), - anyString(), anyLong(), anyLong(), isNull(), any(Hypervisor.HypervisorType.class), any(BaseCmd.HTTPMethod.class), anyString(), + anyString(), anyLong(), anyLong(), any(), isNull(), any(Hypervisor.HypervisorType.class), any(BaseCmd.HTTPMethod.class), anyString(), isNull(), isNull(), anyList(), isNull(), any(Network.IpAddresses.class), isNull(), isNull(), isNull(), anyMap(), isNull(), isNull(), isNull(), isNull(), anyBoolean(), anyString(), isNull(), isNull(), isNull())).thenReturn(vm); @@ -302,7 +302,6 @@ public class StorageVmSharedFSLifeCycleTest { when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); when(resourceMgr.getSupportedHypervisorTypes(s_zoneId, false, null)).thenReturn(List.of(Hypervisor.HypervisorType.KVM)); - when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM)).thenReturn(null); lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java index 9c0db25d52e..5851ee44d2e 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java @@ -87,8 +87,9 @@ 
public interface ProviderAdapter { /** * Copy a source object to a destination volume. The source object can be a Volume, Snapshot, or Template + * @param newSize the desired size in bytes for the destination volume (supports resize-during-copy) */ - public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume); + public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolume, ProviderAdapterDataObject targetVolume, Long newSize); /** * Make a device-specific snapshot of the provided volume diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index e573f453a6c..40d99526394 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -337,7 +337,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool); ProviderAdapterDataObject destIn = newManagedDataObject(destdata, storagePool); - outVolume = api.copy(context, sourceIn, destIn); + // Call provider adapter copy method with destination size parameter for resize-during-copy support + outVolume = api.copy(context, sourceIn, destIn, destdata.getSize()); // populate this data - it may be needed later destIn.setExternalName(outVolume.getExternalName()); diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java 
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 715379daf86..41125f3e113 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -367,7 +367,8 @@ public class FlashArrayAdapter implements ProviderAdapter { @Override public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, - ProviderAdapterDataObject destDataObject) { + ProviderAdapterDataObject destDataObject, Long newSize) { + // Add new parameter as newSize to match method declaration but not used anywhere // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, // String destName) { if (sourceDataObject == null || sourceDataObject.getExternalName() == null diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 036144103b1..ee6b710efa6 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -288,15 +288,22 @@ public class PrimeraAdapter implements ProviderAdapter { @Override public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceVolumeInfo, - ProviderAdapterDataObject targetVolumeInfo) { + ProviderAdapterDataObject targetVolumeInfo, Long newSize) { + // Log the start of the copy operation with source volume details + logger.debug("PrimeraAdapter: Starting volume copy operation - source 
volume: '{}', target volume: '{}', requested new size: {} bytes ({} MiB)", + sourceVolumeInfo.getExternalName(), targetVolumeInfo.getName(), newSize, newSize == null ? null : newSize / PrimeraAdapter.BYTES_IN_MiB); + + // Flag to determine copy method: online copy (direct clone) vs offline copy (with resize) + boolean onlineCopy = true; PrimeraVolumeCopyRequest request = new PrimeraVolumeCopyRequest(); PrimeraVolumeCopyRequestParameters parms = new PrimeraVolumeCopyRequestParameters(); assert sourceVolumeInfo.getExternalName() != null: "External provider name not provided on copy request to Primera volume provider"; - // if we have no external name, treat it as a new volume + // Generate external name for target volume if not already set if (targetVolumeInfo.getExternalName() == null) { targetVolumeInfo.setExternalName(ProviderVolumeNamer.generateObjectName(context, targetVolumeInfo)); + logger.debug("PrimeraAdapter: Generated external name '{}' for target volume", targetVolumeInfo.getExternalName()); } ProviderVolume sourceVolume = this.getVolume(context, sourceVolumeInfo); @@ -304,23 +311,71 @@ public class PrimeraAdapter implements ProviderAdapter { throw new RuntimeException("Source volume " + sourceVolumeInfo.getExternalUuid() + " with provider name " + sourceVolumeInfo.getExternalName() + " not found on storage provider"); } + // Determine copy method based on size difference + // Online copy: Direct clone without size change (faster, immediate) + // Offline copy: Copy with potential resize (slower, requires task completion wait) + Long sourceSize = sourceVolume.getAllocatedSizeInBytes(); + if (newSize == null || sourceSize == null || !newSize.equals(sourceSize)) { + logger.debug("PrimeraAdapter: Volume size change detected (source: {} bytes, target: {} bytes) - using offline copy method", + sourceSize, newSize); + onlineCopy = false; + } else { + logger.debug("PrimeraAdapter: No size change required (both {} bytes) - using online copy method for faster cloning", newSize); + } + + // 
Check if target volume already exists on the storage provider ProviderVolume targetVolume = this.getVolume(context, targetVolumeInfo); if (targetVolume == null) { - this.create(context, targetVolumeInfo, null, sourceVolume.getAllocatedSizeInBytes()); + if (!onlineCopy) { + // For offline copy, pre-create the target volume with the desired size + logger.debug("PrimeraAdapter: Offline copy mode - pre-creating target volume '{}' with size {} bytes", + targetVolumeInfo.getName(), sourceVolume.getAllocatedSizeInBytes()); + this.create(context, targetVolumeInfo, null, sourceVolume.getAllocatedSizeInBytes()); + } else { + // For online copy, the target volume will be created automatically during the clone operation + logger.debug("PrimeraAdapter: Online copy mode - target volume '{}' will be created automatically during clone operation", + targetVolumeInfo.getName()); + } + } else { + logger.warn("PrimeraAdapter: Target volume '{}' already exists on storage provider - proceeding with copy operation", + targetVolumeInfo.getExternalName()); } parms.setDestVolume(targetVolumeInfo.getExternalName()); - parms.setOnline(false); - parms.setPriority(1); + if (onlineCopy) { + // Online copy configuration: immediate clone with deduplication and compression + parms.setOnline(true); + parms.setDestCPG(cpg); + parms.setTpvv(false); + parms.setReduce(true); + logger.debug("PrimeraAdapter: Configuring online copy - destination CPG: '{}', deduplication enabled, thin provisioning disabled", cpg); + } else { + // Offline copy configuration: background task with high priority + parms.setOnline(false); + parms.setPriority(1); // Set high priority for faster completion + logger.debug("PrimeraAdapter: Configuring offline copy with high priority for target volume '{}'", targetVolumeInfo.getName()); + } + + // Set request parameters and initiate the copy operation request.setParameters(parms); PrimeraTaskReference taskref = POST("/volumes/" + sourceVolumeInfo.getExternalName(), request, new 
TypeReference() {}); if (taskref == null) { + logger.error("PrimeraAdapter: Failed to initiate copy operation - no task reference returned from storage provider"); throw new RuntimeException("Unable to retrieve task used to copy to newly created volume"); } - waitForTaskToComplete(taskref.getTaskid(), "copy volume " + sourceVolumeInfo.getExternalName() + " to " + - targetVolumeInfo.getExternalName(), taskWaitTimeoutMs); + // Handle task completion based on copy method + if (!onlineCopy) { + // Offline copy requires waiting for task completion + logger.debug("PrimeraAdapter: Offline copy initiated - waiting for task completion (TaskID: {})", taskref.getTaskid()); + waitForTaskToComplete(taskref.getTaskid(), "copy volume " + sourceVolumeInfo.getExternalName() + " to " + + targetVolumeInfo.getExternalName(), taskWaitTimeoutMs); + logger.debug("PrimeraAdapter: Offline copy operation completed successfully"); + } else { + // Online copy completes immediately + logger.debug("PrimeraAdapter: Online copy operation completed successfully (TaskID: {})", taskref.getTaskid()); + } return this.getVolume(context, targetVolumeInfo); } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java index 437e786f0f6..80b87a49acb 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/agent/api/storage/StorPoolModifyStoragePoolAnswer.java @@ -36,14 +36,16 @@ public class StorPoolModifyStoragePoolAnswer extends Answer{ private List datastoreClusterChildren = new ArrayList<>(); private String clusterId; private String clientNodeId; + private String clusterLocation; - public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map 
tInfo, String clusterId, String clientNodeId) { + public StorPoolModifyStoragePoolAnswer(StorPoolModifyStoragePoolCommand cmd, long capacityBytes, long availableBytes, Map tInfo, String clusterId, String clientNodeId, String clusterLocation) { super(cmd); result = true; poolInfo = new StoragePoolInfo(null, cmd.getPool().getHost(), cmd.getPool().getPath(), cmd.getLocalPath(), cmd.getPool().getType(), capacityBytes, availableBytes); templateInfo = tInfo; this.clusterId = clusterId; this.clientNodeId = clientNodeId; + this.clusterLocation = clusterLocation; } public StorPoolModifyStoragePoolAnswer(String errMsg) { @@ -101,4 +103,12 @@ public class StorPoolModifyStoragePoolAnswer extends Answer{ public void setClientNodeId(String clientNodeId) { this.clientNodeId = clientNodeId; } + + public String getClusterLocation() { + return clusterLocation; + } + + public void setClusterLocation(String clusterLocation) { + this.clusterLocation = clusterLocation; + } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java index 3e7118ab81d..da67812cbbe 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java @@ -103,9 +103,10 @@ public final class StorPoolDownloadTemplateCommandWrapper extends CommandWrapper final QemuImgFile srcFile = new QemuImgFile(srcDisk.getPath(), srcDisk.getFormat()); final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds()); - StorPoolStorageAdaptor.resize( Long.toString(srcDisk.getVirtualSize()), dst.getPath()); if (dst instanceof TemplateObjectTO) { + 
StorPoolStorageAdaptor.resize(Long.toString(srcDisk.getVirtualSize()), dst.getPath()); + ((TemplateObjectTO) dst).setSize(srcDisk.getVirtualSize()); } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java index a44ff5473ae..8d6dcff8aed 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import org.apache.commons.lang3.StringUtils; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer; @@ -38,7 +39,9 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.storage.template.TemplateProp; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; +import com.google.gson.JsonArray; import com.google.gson.JsonElement; +import com.google.gson.JsonObject; import com.google.gson.JsonParser; @ResourceWrapper(handles = StorPoolModifyStoragePoolCommand.class) @@ -51,6 +54,7 @@ public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper tInfo = new HashMap<>(); - return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId, storagepool.getStorageNodeId()); + return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId, storagepool.getStorageNodeId(), clusterLocation); } catch (Exception e) { logger.debug(String.format("Could not modify storage due to %s", e.getMessage())); return new Answer(command, e); @@ -118,4 +122,28 @@ public 
final class StorPoolModifyStorageCommandWrapper extends CommandWrapper snapshotRecoveryFromRemoteCheck = new ConfigKey("Advanced", Integer.class, + "storpool.snapshot.recovery.from.remote.check", "300", + "Minimal interval (in seconds) to check and recover StorPool snapshot from remote", false); @Override public String getConfigComponentName() { @@ -77,7 +94,7 @@ public class StorPoolAbandonObjectsCollector extends ManagerBase implements Conf @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { volumeCheckupTagsInterval, snapshotCheckupTagsInterval }; + return new ConfigKey[] { volumeCheckupTagsInterval, snapshotCheckupTagsInterval, snapshotRecoveryFromRemoteCheck }; } @Override @@ -93,6 +110,8 @@ public class StorPoolAbandonObjectsCollector extends ManagerBase implements Conf } _volumeTagsUpdateExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("StorPoolAbandonObjectsCollector")); + snapshotRecoveryCheckExecutor = Executors.newScheduledThreadPool(1, + new NamedThreadFactory("StorPoolSnapshotRecoveryCheck")); if (volumeCheckupTagsInterval.value() > 0) { _volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolVolumesTagsUpdate(), @@ -102,6 +121,10 @@ public class StorPoolAbandonObjectsCollector extends ManagerBase implements Conf _volumeTagsUpdateExecutor.scheduleAtFixedRate(new StorPoolSnapshotsTagsUpdate(), snapshotCheckupTagsInterval.value(), snapshotCheckupTagsInterval.value(), TimeUnit.SECONDS); } + if (snapshotRecoveryFromRemoteCheck.value() > 0) { + snapshotRecoveryCheckExecutor.scheduleAtFixedRate(new StorPoolSnapshotRecoveryCheck(), + snapshotRecoveryFromRemoteCheck.value(), snapshotRecoveryFromRemoteCheck.value(), TimeUnit.SECONDS); + } } class StorPoolVolumesTagsUpdate extends ManagedContextRunnable { @@ -322,4 +345,84 @@ public class StorPoolAbandonObjectsCollector extends ManagerBase implements Conf } return map; } + + class StorPoolSnapshotRecoveryCheck extends ManagedContextRunnable { + + @Override + protected 
void runInContext() { + List spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME); + if (CollectionUtils.isEmpty(spPools)) { + return; + } + List snapshotDetails = snapshotDetailsDao.findDetails(StorPoolUtil.SP_RECOVERED_SNAPSHOT); + if (CollectionUtils.isEmpty(snapshotDetails)) { + return; + } + Map onePoolforZone = new HashMap<>(); + for (StoragePoolVO storagePoolVO : spPools) { + onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO); + } + List recoveredSnapshots = new ArrayList<>(); + for (StoragePoolVO storagePool : onePoolforZone.values()) { + collectRecoveredSnapshotAfterExport(snapshotDetails, recoveredSnapshots, storagePool); + } + for (Long recoveredSnapshot : recoveredSnapshots) { + snapshotDetailsDao.remove(recoveredSnapshot); + } + } + + private void collectRecoveredSnapshotAfterExport(List snapshotDetails, List recoveredSnapshots, StoragePoolVO storagePool) { + try { + logger.debug(String.format("Checking StorPool recovered snapshots for zone [%s]", + storagePool.getDataCenterId())); + SpConnectionDesc conn = StorPoolUtil.getSpConnection(storagePool.getUuid(), + storagePool.getId(), storagePoolDetailsDao, storagePoolDao); + JsonArray arr = StorPoolUtil.snapshotsList(conn); + List snapshots = snapshotsForRecovery(arr); + if (snapshots.isEmpty()) { + return; + } + for (SnapshotDetailsVO snapshot : snapshotDetails) { + String[] snapshotOnRemote = snapshot.getValue().split(";"); + if (snapshotOnRemote.length != 2) { + continue; + } + String name = snapshot.getValue().split(";")[0]; + String location = snapshot.getValue().split(";")[1]; + if (name == null || location == null) { + StorPoolUtil.spLog("Could not find name or location for the snapshot %s", snapshot.getValue()); + continue; + } + if (snapshots.contains(name)) { + findRecoveredSnapshots(recoveredSnapshots, conn, snapshot, name, location); + } + } + } catch (Exception e) { + logger.debug(String.format("Could not collect StorPool recovered snapshots %s", 
e.getMessage())); + } + } + + private void findRecoveredSnapshots(List recoveredSnapshots, SpConnectionDesc conn, SnapshotDetailsVO snapshot, String name, String location) { + Long clusterId = StorPoolHelper.findClusterIdByGlobalId(StorPoolUtil.getSnapshotClusterId(name, conn), clusterDao); + conn = StorPoolHelper.getSpConnectionDesc(conn, clusterId); + SpApiResponse resp = StorPoolUtil.snapshotUnexport(name, location, conn); + if (resp.getError() == null) { + StorPoolUtil.spLog("Unexport of snapshot %s was successful", name); + recoveredSnapshots.add(snapshot.getId()); + } else { + StorPoolUtil.spLog("Could not recover StorPool snapshot %s", resp.getError()); + } + } + } + + private static List snapshotsForRecovery(JsonArray arr) { + List snapshots = new ArrayList<>(); + for (int i = 0; i < arr.size(); i++) { + boolean recoveringFromRemote = arr.get(i).getAsJsonObject().get("recoveringFromRemote").getAsBoolean(); + if (!recoveringFromRemote) { + snapshots.add(arr.get(i).getAsJsonObject().get("name").getAsString()); + } + } + return snapshots; + } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java index d93990ee071..c305c393c9b 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java @@ -18,18 +18,32 @@ */ package org.apache.cloudstack.storage.datastore.driver; +import com.cloud.storage.dao.SnapshotDetailsVO; + import java.util.HashMap; import java.util.List; import java.util.Map; import javax.inject.Inject; -import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import 
com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.VMInstanceDao; + import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -68,6 +82,7 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; + import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; @@ -112,17 +127,7 @@ import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VMTemplateDetailsDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.VMInstanceVO; -import 
com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.dao.VMInstanceDao; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -187,7 +192,10 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Override public Map getCapabilities() { - return null; + Map mapCapabilities = new HashMap<>(); + mapCapabilities.put(DataStoreCapabilities.CAN_COPY_SNAPSHOT_BETWEEN_ZONES_AND_SAME_POOL_TYPE.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_TEMPLATE_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + return mapCapabilities; } @Override @@ -520,6 +528,8 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } catch (Exception e) { err = String.format("Could not delete volume due to %s", e.getMessage()); } + } else if (data.getType() == DataObjectType.SNAPSHOT) { + err = deleteSnapshot((SnapshotInfo) data, err); } else { err = String.format("Invalid DataObjectType \"%s\" passed to deleteAsync", data.getType()); } @@ -534,6 +544,18 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { callback.complete(res); } + private String deleteSnapshot(SnapshotInfo data, String err) { + SnapshotInfo snapshot = data; + SpConnectionDesc conn = StorPoolUtil.getSpConnection(snapshot.getDataStore().getUuid(), snapshot.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao); + String name = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshot.getPath(), true); + SpApiResponse resp = StorPoolUtil.snapshotDelete(name, conn); + if (resp.getError() != null) { + err = String.format("Failed to clean-up Storpool snapshot %s. 
Error: %s", name, resp.getError()); + StorPoolUtil.spLog(err); + } + return err; + } + private void tryToSnapshotVolumeBeforeDelete(VolumeInfo vinfo, DataStore dataStore, String name, SpConnectionDesc conn) { Integer deleteAfter = StorPoolConfigurationManager.DeleteAfterInterval.valueIn(dataStore.getId()); if (deleteAfter != null && deleteAfter > 0 && vinfo.getPassphraseId() == null) { @@ -606,7 +628,22 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Override public boolean canCopy(DataObject srcData, DataObject dstData) { - return true; + DataObjectType srcType = srcData.getType(); + DataObjectType dstType = dstData.getType(); + if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.VOLUME) { + return true; + } else if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.SNAPSHOT) { + return true; + } else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.TEMPLATE) { + return true; + } else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.TEMPLATE) { + return true; + } else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.VOLUME) { + return true; + } else if (srcType == DataObjectType.VOLUME && dstType == DataObjectType.VOLUME) { + return true; + } + return false; } @Override @@ -624,13 +661,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.VOLUME) { SnapshotInfo sinfo = (SnapshotInfo)srcData; - final String snapshotName = StorPoolHelper.getSnapshotName(srcData.getId(), srcData.getUuid(), snapshotDataStoreDao, snapshotDetailsDao); - VolumeInfo vinfo = (VolumeInfo)dstData; final String volumeName = vinfo.getUuid(); final Long size = vinfo.getSize(); SpConnectionDesc conn = StorPoolUtil.getSpConnection(vinfo.getDataStore().getUuid(), vinfo.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao); + String snapshotName = 
StorPoolStorageAdaptor.getVolumeNameFromPath(((SnapshotInfo) srcData).getPath(), true); StorPoolVolumeDef spVolume = createVolumeWithTags(sinfo, snapshotName, vinfo, volumeName, size, conn); SpApiResponse resp = StorPoolUtil.volumeCreate(spVolume, conn); @@ -640,9 +676,10 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { VolumeObjectTO to = (VolumeObjectTO)dstData.getTO(); to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false))); to.setSize(size); + updateVolumePoolType(vinfo); answer = new CopyCmdAnswer(to); - StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), to.getUuid(), snapshotName, sinfo.getUuid()); + StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), volumeName, snapshotName, sinfo.getUuid()); } else if (resp.getError().getName().equals("objectDoesNotExist")) { //check if snapshot is on secondary storage StorPoolUtil.spLog("Snapshot %s does not exists on StorPool, will try to create a volume from a snapshot on secondary storage", snapshotName); @@ -658,8 +695,24 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } else { answer = new Answer(cmd, false, String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, emptyVolumeCreateResp.getError())); } + VolumeObjectTO to = (VolumeObjectTO) dstData.getTO(); + to.setPath(StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false))); + to.setSize(size); + + answer = new CopyCmdAnswer(to); + StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), to.getUuid(), snapshotName, sinfo.getUuid()); } else { - answer = new Answer(cmd, false, String.format("The snapshot %s does not exists neither on primary, neither on secondary storage. 
Cannot create volume from snapshot", snapshotName)); + err = String.format("Could not create volume from a snapshot due to %s", resp.getError()); + } + } else if (sinfo.getDataStore().getRole().equals(DataStoreRole.Image)) { + //check if snapshot is on secondary storage + StorPoolUtil.spLog("Snapshot %s does not exists on StorPool, will try to create a volume from a snapshot on secondary storage", sinfo.getName()); + SnapshotDataStoreVO snap = getSnapshotImageStoreRef(sinfo.getId(), vinfo.getDataCenterId()); + SpApiResponse emptyVolumeCreateResp = StorPoolUtil.volumeCreate(volumeName, null, size, null, null, "volume", null, conn); + if (emptyVolumeCreateResp.getError() == null) { + answer = createVolumeFromSnapshot(srcData, dstData, size, emptyVolumeCreateResp); + } else { + answer = new Answer(cmd, false, String.format("Could not create Storpool volume %s from snapshot %s. Error: %s", volumeName, snapshotName, emptyVolumeCreateResp.getError())); } } else { answer = new Answer(cmd, false, String.format("Could not create Storpool volume %s from snapshot %s. 
Error: %s", volumeName, snapshotName, resp.getError())); @@ -668,7 +721,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { SnapshotInfo sinfo = (SnapshotInfo)srcData; SnapshotDetailsVO snapshotDetail = snapshotDetailsDao.findDetail(sinfo.getId(), StorPoolUtil.SP_DELAY_DELETE); // bypass secondary storage - if (StorPoolConfigurationManager.BypassSecondaryStorage.value() || snapshotDetail != null) { + if (Boolean.FALSE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value())) { SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO(); answer = new CopyCmdAnswer(snapshot); } else { @@ -678,9 +731,9 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { final String snapName = StorPoolStorageAdaptor.getVolumeNameFromPath(((SnapshotInfo) srcData).getPath(), true); SpConnectionDesc conn = StorPoolUtil.getSpConnection(srcData.getDataStore().getUuid(), srcData.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao); try { - Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapName, clusterDao); - EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData); - if (ep == null) { + Long clusterId = StorPoolHelper.findClusterIdByGlobalId(StorPoolUtil.getSnapshotClusterId(snapName, conn), clusterDao); + HostVO host = clusterId != null ? StorPoolHelper.findHostByCluster(clusterId, hostDao) : null; + EndPoint ep = host != null ? 
RemoteHostEndPoint.getHypervisorHostEndPoint(host) : selector.select(srcData, dstData); if (ep == null) { err = "No remote endpoint to send command, check if host or ssvm is down?"; } else { answer = ep.sendMessage(cmd); @@ -712,8 +765,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { StorPoolHelper.getTimeout(StorPoolHelper.PrimaryStorageDownloadWait, configDao), VirtualMachineManager.ExecuteInSequence.value()); try { - Long clusterId = StorPoolHelper.findClusterIdByGlobalId(volumeName, clusterDao); - EndPoint ep2 = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData); + EndPoint ep2 = selector.select(srcData, dstData); if (ep2 == null) { err = "No remote endpoint to send command, check if host or ssvm is down?"; } else { @@ -937,8 +989,9 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.copyAsnc command=%s ", cmd); try { - Long clusterId = StorPoolHelper.findClusterIdByGlobalId(snapshotName, clusterDao); - EndPoint ep = clusterId != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, hostDao)) : selector.select(srcData, dstData); + Long clusterId = StorPoolHelper.findClusterIdByGlobalId(StorPoolUtil.getSnapshotClusterId(snapshotName, conn), clusterDao); + HostVO host = clusterId != null ? StorPoolHelper.findHostByCluster(clusterId, hostDao) : null; + EndPoint ep = host != null ? 
RemoteHostEndPoint.getHypervisorHostEndPoint(host) : selector.select(srcData, dstData); StorPoolUtil.spLog("selector.select(srcData, dstData) ", ep); if (ep == null) { ep = selector.select(dstData); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java index 7e0986bc63b..e27e15e04a8 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java @@ -170,6 +170,7 @@ public class StorPoolHostListener implements HypervisorHostListener { } StorPoolHelper.setSpClusterIdIfNeeded(hostId, mspAnswer.getClusterId(), clusterDao, hostDao, clusterDetailsDao); + StorPoolHelper.setLocationIfNeeded(pool, storagePoolDetailsDao, mspAnswer.getClusterLocation()); StorPoolUtil.spLog("Connection established between storage pool [%s] and host [%s]", poolVO, host); return true; diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java index f13d296af3b..685b99e12d5 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java @@ -19,26 +19,6 @@ package org.apache.cloudstack.storage.datastore.util; -import java.sql.PreparedStatement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; 
-import org.apache.cloudstack.framework.config.impl.ConfigurationVO; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; -import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.collections4.CollectionUtils; - import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -49,6 +29,7 @@ import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor; import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDetailsDao; @@ -65,6 +46,28 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; 
+import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; +import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import org.apache.commons.collections4.CollectionUtils; + +import java.sql.PreparedStatement; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; + public class StorPoolHelper { private static final String UPDATE_SNAPSHOT_DETAILS_VALUE = "UPDATE `cloud`.`snapshot_details` SET value=? WHERE id=?"; @@ -218,6 +221,22 @@ public class StorPoolHelper { } } + public static void setLocationIfNeeded(StoragePool storagePool, StoragePoolDetailsDao storagePoolDetails, + String location) { + if (location == null) { + return; + } + StoragePoolDetailVO storagePoolDetailVO = storagePoolDetails.findDetail(storagePool.getId(), + StorPoolConfigurationManager.StorPoolClusterLocation.key()); + if (storagePoolDetailVO == null) { + storagePoolDetails.persist(new StoragePoolDetailVO(storagePool.getId(), + StorPoolConfigurationManager.StorPoolClusterLocation.key(), location, true)); + } else if (storagePoolDetailVO.getValue() == null || !storagePoolDetailVO.getValue().equals(location)) { + storagePoolDetailVO.setValue(location); + storagePoolDetails.update(storagePoolDetailVO.getId(), storagePoolDetailVO); + } + } + public static Long findClusterIdByGlobalId(String globalId, ClusterDao clusterDao) { List clusterIds = clusterDao.listAllIds(); if (clusterIds.size() == 1) { @@ -238,7 +257,7 @@ public class StorPoolHelper { public static HostVO findHostByCluster(Long clusterId, HostDao hostDao) { List host = hostDao.findByClusterId(clusterId); - return host != null ? host.get(0) : null; + return CollectionUtils.isNotEmpty(host) ? 
host.get(0) : null; } public static int getTimeout(String cfg, ConfigurationDao configDao) { @@ -289,4 +308,15 @@ public class StorPoolHelper { } return true; } + + public static StorPoolUtil.SpConnectionDesc getSpConnectionDesc(StorPoolUtil.SpConnectionDesc connectionLocal, Long clusterId) { + + String subClusterEndPoint = StorPoolConfigurationManager.StorPoolSubclusterEndpoint.valueIn(clusterId); + if (StringUtils.isNotEmpty(subClusterEndPoint)) { + String host = subClusterEndPoint.split(";")[0].split("=")[1]; + String token = subClusterEndPoint.split(";")[1].split("=")[1]; + connectionLocal = new StorPoolUtil.SpConnectionDesc(host, token, connectionLocal.getTemplateName()); + } + return connectionLocal; + } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java index fa9248033bf..dc4dacba450 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java @@ -137,6 +137,9 @@ public class StorPoolUtil { public static final String DELAY_DELETE = "delayDelete"; public static final String SP_TIER = "SP_QOSCLASS"; + public static final String SP_RECOVERED_SNAPSHOT = "SP_RECOVERED_SNAPSHOT"; + + public static final String SP_REMOTE_LOCATION = "SP_REMOTE_LOCATION"; public static final String OBJECT_DOES_NOT_EXIST = "objectDoesNotExist"; @@ -429,6 +432,14 @@ public class StorPoolUtil { return resp.getError() == null ? 
true : objectExists(resp.getError()); } + public static boolean snapshotRecovered(final String name, SpConnectionDesc conn) { + SpApiResponse resp = GET("Snapshot/" + name, conn); + JsonObject obj = resp.fullJson.getAsJsonObject(); + JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject(); + boolean recoveringFromRemote = data.getAsJsonPrimitive("recoveringFromRemote").getAsBoolean(); + return recoveringFromRemote; + } + public static JsonArray snapshotsList(SpConnectionDesc conn) { SpApiResponse resp = GET("MultiCluster/SnapshotsList", conn); JsonObject obj = resp.fullJson.getAsJsonObject(); @@ -675,6 +686,42 @@ public class StorPoolUtil { return resp.getError() == null ? POST("MultiCluster/SnapshotDelete/" + name, null, conn) : resp; } + public static SpApiResponse snapshotExport(String name, String location, SpConnectionDesc conn) { + Map json = new HashMap<>(); + json.put("snapshot", name); + json.put("location", location); + return POST("SnapshotExport", json, conn); + } + + public static SpApiResponse snapshotUnexport(String name, String location, SpConnectionDesc conn) { + Map json = new HashMap<>(); + json.put("snapshot", name); + json.put("force", true); + json.put("all", true); + return POST("SnapshotUnexport", json, conn); + } + + public static String getSnapshotClusterId(String snapshotName, SpConnectionDesc conn) { + SpApiResponse resp = POST("MultiCluster/SnapshotUpdate/" + snapshotName, new HashMap<>(), conn); + JsonObject json = resp.fullJson.getAsJsonObject(); + return json.get("clusterId").getAsString(); + } + + public static SpApiResponse snapshotFromRemote(String name, String remoteLocation, String template, Map tags, + SpConnectionDesc conn) { + Map json = new HashMap<>(); + json.put("remoteId", name); + json.put("remoteLocation", remoteLocation); + json.put("template", template); + json.put("name", ""); + json.put("tags", tags); + return POST("SnapshotFromRemote", json, conn); + } + + public static SpApiResponse 
snapshotReconcile(String name, SpConnectionDesc conn) { + return POST("SnapshotReconcile/" + name, null, conn); + } + public static SpApiResponse detachAllForced(final String name, final boolean snapshot, SpConnectionDesc conn) { final String type = snapshot ? "snapshot" : "volume"; List> json = new ArrayList<>(); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java index aa972d44343..f260c566986 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java @@ -19,12 +19,42 @@ package org.apache.cloudstack.storage.motion; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.inject.Inject; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.MigrateAnswer; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo; +import com.cloud.agent.api.ModifyTargetsAnswer; +import com.cloud.agent.api.ModifyTargetsCommand; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import 
com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VMTemplateDetailVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.SnapshotDetailsVO; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; @@ -55,48 +85,21 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolHelper; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc; -import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; + import org.apache.commons.collections.MapUtils; -import org.apache.logging.log4j.Logger; + import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + import org.springframework.stereotype.Component; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.MigrateAnswer; -import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo; -import com.cloud.agent.api.ModifyTargetsAnswer; -import com.cloud.agent.api.ModifyTargetsCommand; -import 
com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand; -import com.cloud.agent.api.to.DataObjectType; -import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.host.Host; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.StorageManager; -import com.cloud.storage.VMTemplateDetailVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.GuestOSCategoryDao; -import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.SnapshotDetailsDao; -import com.cloud.storage.dao.SnapshotDetailsVO; -import com.cloud.storage.dao.VMTemplateDetailsDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.dao.VMInstanceDao; +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; @Component public class StorPoolDataMotionStrategy implements DataMotionStrategy { @@ -149,10 +152,13 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { public StrategyPriority canHandle(DataObject srcData, DataObject destData) { DataObjectType srcType = srcData.getType(); DataObjectType dstType = destData.getType(); + if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.TEMPLATE) { SnapshotInfo sinfo = (SnapshotInfo) srcData; - VolumeInfo volume = sinfo.getBaseVolume(); - StoragePoolVO storagePool = _storagePool.findById(volume.getPoolId()); + if (!sinfo.getDataStore().getRole().equals(DataStoreRole.Primary)) { + return 
StrategyPriority.CANT_HANDLE; + } + StoragePoolVO storagePool = _storagePool.findById(sinfo.getDataStore().getId()); if (!storagePool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) { return StrategyPriority.CANT_HANDLE; } @@ -163,7 +169,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { String snapshotName = StorPoolHelper.getSnapshotName(sinfo.getId(), sinfo.getUuid(), _snapshotStoreDao, _snapshotDetailsDao); StorPoolUtil.spLog("StorPoolDataMotionStrategy.canHandle snapshot name=%s", snapshotName); - if (snapshotName != null && StorPoolConfigurationManager.BypassSecondaryStorage.value()) { + if (snapshotName != null) { return StrategyPriority.HIGHEST; } } @@ -175,13 +181,12 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { AsyncCompletionCallback callback) { SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO(); TemplateObjectTO template = (TemplateObjectTO) destData.getTO(); - DataStore store = _dataStore.getDataStore(snapshot.getVolume().getDataStore().getUuid(), - snapshot.getVolume().getDataStore().getRole()); + DataStore store = _dataStore.getDataStore(snapshot.getDataStore().getUuid(), + snapshot.getDataStore().getRole()); SnapshotInfo sInfo = _snapshotDataFactory.getSnapshot(snapshot.getId(), store); - VolumeInfo vInfo = sInfo.getBaseVolume(); - SpConnectionDesc conn = StorPoolUtil.getSpConnection(vInfo.getDataStore().getUuid(), - vInfo.getDataStore().getId(), _storagePoolDetails, _storagePool); + SpConnectionDesc conn = StorPoolUtil.getSpConnection(sInfo.getDataStore().getUuid(), + sInfo.getDataStore().getId(), _storagePoolDetails, _storagePool); String name = template.getUuid(); String volumeName = ""; @@ -209,11 +214,9 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { // final String snapName = // StorpoolStorageAdaptor.getVolumeNameFromPath(((SnapshotInfo) // srcData).getPath(), true); - Long clusterId = 
StorPoolHelper.findClusterIdByGlobalId(parentName, _clusterDao); - EndPoint ep2 = clusterId != null - ? RemoteHostEndPoint - .getHypervisorHostEndPoint(StorPoolHelper.findHostByCluster(clusterId, _hostDao)) - : _selector.select(sInfo, destData); + Long clusterId = StorPoolHelper.findClusterIdByGlobalId(StorPoolUtil.getSnapshotClusterId(parentName, conn), _clusterDao); + HostVO host = clusterId != null ? StorPoolHelper.findHostByCluster(clusterId, _hostDao) : null; + EndPoint ep2 = host != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(host) : _selector.select(srcData, destData); if (ep2 == null) { err = "No remote endpoint to send command, check if host or ssvm is down?"; } else { @@ -238,7 +241,7 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { StorPoolUtil.volumeDelete(volumeName, conn); } _vmTemplateDetailsDao.persist(new VMTemplateDetailVO(template.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, - String.valueOf(vInfo.getDataStore().getId()), false)); + String.valueOf(sInfo.getDataStore().getId()), false)); StorPoolUtil.spLog("StorPoolDataMotionStrategy.copyAsync Creating snapshot=%s for StorPool template=%s", volumeName, conn.getTemplateName()); final CopyCommandResult cmd = new CopyCommandResult(null, answer); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java index e4e930c8dee..00cef88c4cf 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java @@ -53,6 +53,10 @@ public class StorPoolConfigurationManager implements Configurable { "storpool.list.snapshots.delete.after.interval", "360", "The interval (in seconds) to fetch the StorPool 
snapshots with deleteAfter flag", false); + public static final ConfigKey StorPoolClusterLocation = new ConfigKey(String.class, "sp.cluster.location", "Advanced", null, + "StorPool cluster location", true, ConfigKey.Scope.StoragePool, null); + public static final ConfigKey StorPoolSubclusterEndpoint = new ConfigKey<>(String.class, "sp.cluster.endpoint", "Advanced", null, + "StorPool sub-cluster endpoint", true, ConfigKey.Scope.Cluster, null); @Override public String getConfigComponentName() { @@ -61,6 +65,6 @@ public class StorPoolConfigurationManager implements Configurable { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval, DeleteAfterInterval, ListSnapshotsWithDeleteAfterInterval }; + return new ConfigKey[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval, DeleteAfterInterval, ListSnapshotsWithDeleteAfterInterval, StorPoolClusterLocation, StorPoolSubclusterEndpoint }; } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java index 5ec86df91e1..60c91bc4aed 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java @@ -16,12 +16,15 @@ // under the License. 
package org.apache.cloudstack.storage.snapshot; +import com.cloud.api.query.dao.SnapshotJoinDao; +import com.cloud.api.query.vo.SnapshotJoinVO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; -import com.cloud.storage.VolumeVO; +import com.cloud.storage.Storage; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; @@ -30,8 +33,13 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; +import java.util.HashMap; +import java.util.Map; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; @@ -39,18 +47,25 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.StorPoolHelper; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; + import org.apache.commons.collections.CollectionUtils; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; + +import org.jetbrains.annotations.NotNull; import org.springframework.stereotype.Component; import javax.inject.Inject; @@ -82,6 +97,10 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { DataStoreManager dataStoreMgr; @Inject SnapshotZoneDao snapshotZoneDao; + @Inject + SnapshotJoinDao snapshotJoinDao; + @Inject + private ClusterDao clusterDao; @Override public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { @@ -104,48 +123,86 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { public boolean deleteSnapshot(Long snapshotId, Long zoneId) { final SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId); - VolumeVO volume = _volumeDao.findByIdIncludingRemoved(snapshotVO.getVolumeId()); String name = StorPoolHelper.getSnapshotName(snapshotId, snapshotVO.getUuid(), _snapshotStoreDao, _snapshotDetailsDao); boolean res = false; // clean-up snapshot from Storpool storage pools - StoragePoolVO storage = _primaryDataStoreDao.findById(volume.getPoolId()); - if (storage.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) { - try { - SpConnectionDesc conn = 
StorPoolUtil.getSpConnection(storage.getUuid(), storage.getId(), storagePoolDetailsDao, _primaryDataStoreDao); - SpApiResponse resp = StorPoolUtil.snapshotDelete(name, conn); - if (resp.getError() != null) { - final String err = String.format("Failed to clean-up Storpool snapshot %s. Error: %s", name, resp.getError()); - StorPoolUtil.spLog(err); - markSnapshotAsDestroyedIfAlreadyRemoved(snapshotId, resp.getError().getName().equals(StorPoolUtil.OBJECT_DOES_NOT_EXIST)); - throw new CloudRuntimeException(err); - } else { - res = deleteSnapshotFromDbIfNeeded(snapshotVO, zoneId); - markSnapshotAsDestroyedIfAlreadyRemoved(snapshotId,true); - StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot %s, name=%s", res, snapshotVO, name); + List snapshotDataStoreVOS; + List snapshotJoinVOList = snapshotJoinDao.listBySnapshotIdAndZoneId(zoneId, snapshotId); + try { + for (SnapshotJoinVO snapshot: snapshotJoinVOList) { + if (State.Destroyed.equals(snapshot.getStatus())) { + continue; } - } catch (Exception e) { - String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage()); - throw new CloudRuntimeException(errMsg); + if (snapshot.getStoreRole().isImageStore()) { + continue; + } + StoragePoolVO storage = _primaryDataStoreDao.findById(snapshot.getStoreId()); + if (zoneId != null) { + if (!zoneId.equals(snapshot.getDataCenterId())) { + continue; + } + res = deleteSnapshot(snapshotId, zoneId, snapshotVO, name, storage); + break; + } + res = deleteSnapshot(snapshotId, zoneId, snapshotVO, name, storage); } + } catch (Exception e) { + String errMsg = String.format("Cannot delete snapshot due to %s", e.getMessage()); + throw new CloudRuntimeException(errMsg); } - - List snapshots = _snapshotStoreDao.listBySnapshotIdAndState(snapshotId, State.Ready); - if (res || CollectionUtils.isEmpty(snapshots)) { + snapshotDataStoreVOS = _snapshotStoreDao.listSnapshotsBySnapshotId(snapshotId); + boolean areAllSnapshotsDestroyed = 
snapshotDataStoreVOS.stream().allMatch(v -> v.getState().equals(State.Destroyed) || v.getState().equals(State.Destroying)); + if (areAllSnapshotsDestroyed) { updateSnapshotToDestroyed(snapshotVO); return true; } return res; } - private void markSnapshotAsDestroyedIfAlreadyRemoved(Long snapshotId, boolean isSnapshotDeleted) { - if (!isSnapshotDeleted) { - return; + private boolean deleteSnapshot(Long snapshotId, Long zoneId, SnapshotVO snapshotVO, String name, StoragePoolVO storage) { + + boolean res = false; + SpConnectionDesc conn = StorPoolUtil.getSpConnection(storage.getUuid(), storage.getId(), storagePoolDetailsDao, _primaryDataStoreDao); + SpApiResponse resp = StorPoolUtil.snapshotDelete(name, conn); + List snapshotInfos = snapshotDataFactory.getSnapshots(snapshotId, zoneId); + processResult(snapshotInfos, ObjectInDataStoreStateMachine.Event.DestroyRequested); + if (resp.getError() != null) { + if (resp.getError().getDescr().contains("still exported")) { + processResult(snapshotInfos, Event.OperationFailed); + throw new CloudRuntimeException(String.format("The snapshot [%s] was exported to another cluster. [%s]", name, resp.getError())); + } + final String err = String.format("Failed to clean-up Storpool snapshot %s. 
Error: %s", name, resp.getError()); + StorPoolUtil.spLog(err); + if (resp.getError().getName().equals("objectDoesNotExist")) { + return true; + } + } else { + res = deleteSnapshotFromDbIfNeeded(snapshotVO, zoneId); + StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name); } - List snapshotsOnStore = _snapshotStoreDao.listBySnapshotIdAndState(snapshotId, State.Ready); - for (SnapshotDataStoreVO snapshot : snapshotsOnStore) { - if (snapshot.getInstallPath() != null && snapshot.getInstallPath().contains(StorPoolUtil.SP_DEV_PATH)) { - snapshot.setState(State.Destroyed); - _snapshotStoreDao.update(snapshot.getId(), snapshot); + if (res) { + processResult(snapshotInfos, Event.OperationSuccessed); + cleanUpDestroyedRecords(snapshotId); + } else { + processResult(snapshotInfos, Event.OperationFailed); + } + return res; + } + + private void cleanUpDestroyedRecords(Long snapshotId) { + List snapshots = _snapshotStoreDao.listBySnapshotId(snapshotId); + for (SnapshotDataStoreVO snapshot : snapshots) { + if (snapshot.getInstallPath().contains("/dev/storpool-byid") && State.Destroyed.equals(snapshot.getState())) { + _snapshotStoreDao.remove(snapshot.getId()); + } + } + } + + private void processResult(List snapshotInfos, ObjectInDataStoreStateMachine.Event event) { + for (SnapshotInfo snapshot : snapshotInfos) { + SnapshotObject snapshotObject = (SnapshotObject) snapshot; + if (DataStoreRole.Primary.equals(snapshotObject.getDataStore().getRole())) { + snapshotObject.processEvent(event); } } } @@ -154,29 +211,32 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { logger.debug("StorpoolSnapshotStrategy.canHandle: snapshot {}, op={}", snapshot, op); - if (op != SnapshotOperation.DELETE) { + if (op != SnapshotOperation.DELETE && op != SnapshotOperation.COPY) { return 
StrategyPriority.CANT_HANDLE; } - SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findOneBySnapshotAndDatastoreRole(snapshot.getId(), DataStoreRole.Primary); - if (snapshotOnPrimary == null) { + List pools = _primaryDataStoreDao.findPoolsByStorageType(Storage.StoragePoolType.StorPool); + if (CollectionUtils.isEmpty(pools)) { return StrategyPriority.CANT_HANDLE; } - if (zoneId != null) { // If zoneId is present, then it should be same as the zoneId of primary store - StoragePoolVO storagePoolVO = _primaryDataStoreDao.findById(snapshotOnPrimary.getDataStoreId()); - if (!zoneId.equals(storagePoolVO.getDataCenterId())) { - return StrategyPriority.CANT_HANDLE; + List snapshots = snapshotJoinDao.listBySnapshotIdAndZoneId(zoneId, snapshot.getId()); + boolean snapshotNotOnStorPool = snapshots.stream().filter(s -> DataStoreRole.Primary.equals(s.getStoreRole())).count() == 0; + + if (snapshotNotOnStorPool) { + for (SnapshotJoinVO snapshotOnStore : snapshots) { + SnapshotDataStoreVO snap = _snapshotStoreDao.findOneBySnapshotAndDatastoreRole(snapshot.getId(), DataStoreRole.Image); + if (snap != null && snap.getInstallPath() != null && snap.getInstallPath().startsWith(StorPoolUtil.SP_DEV_PATH)) { + return StrategyPriority.HIGHEST; + } + } + return StrategyPriority.CANT_HANDLE; + } + for (StoragePoolVO pool : pools) { + SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Primary, pool.getId(), snapshot.getId()); + if (snapshotOnPrimary != null && (snapshotOnPrimary.getState().equals(State.Ready) || snapshotOnPrimary.getState().equals(State.Created))) { + return StrategyPriority.HIGHEST; } } - String name = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), _snapshotStoreDao, _snapshotDetailsDao); - if (name != null) { - StorPoolUtil.spLog("StorpoolSnapshotStrategy.canHandle: globalId=%s", name); - return StrategyPriority.HIGHEST; - } - SnapshotDetailsVO snapshotDetails = 
_snapshotDetailsDao.findDetail(snapshot.getId(), snapshot.getUuid()); - if (snapshotDetails != null) { - _snapshotDetailsDao.remove(snapshotDetails.getId()); - } return StrategyPriority.CANT_HANDLE; } @@ -250,48 +310,23 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { protected boolean deleteSnapshotOnImageAndPrimary(long snapshotId, DataStore store) { SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, store); SnapshotObject obj = (SnapshotObject)snapshotOnImage; - boolean areLastSnapshotRef = areLastSnapshotRef(snapshotId); - try { - if (areLastSnapshotRef) { - obj.processEvent(Snapshot.Event.DestroyRequested); - } - } catch (NoTransitionException e) { - logger.debug("Failed to set the state to destroying: ", e); - return false; - } + boolean result = false; try { - boolean result = deleteSnapshotChain(snapshotOnImage); + result = deleteSnapshotChain(snapshotOnImage); _snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotId, store.getId(), store.getRole(), false); - if (areLastSnapshotRef) { - obj.processEvent(Snapshot.Event.OperationSucceeded); - } - if (result) { - SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findOneBySnapshotAndDatastoreRole(snapshotOnImage.getSnapshotId(), DataStoreRole.Primary); - if (snapshotOnPrimary != null) { - snapshotOnPrimary.setState(State.Destroyed); - _snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary); - } - } } catch (Exception e) { logger.debug("Failed to delete snapshot: ", e); - try { - if (areLastSnapshotRef) { - obj.processEvent(Snapshot.Event.OperationFailed); - } - } catch (NoTransitionException e1) { - logger.debug("Failed to change snapshot state: " + e.toString()); - } return false; } - return true; + return result; } private boolean deleteSnapshotFromDbIfNeeded(SnapshotVO snapshotVO, Long zoneId) { final long snapshotId = snapshotVO.getId(); SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, 
snapshotVO.getUuid()); if (snapshotDetails != null) { - _snapshotDetailsDao.removeDetails(snapshotId); + _snapshotDetailsDao.remove(snapshotId); } if (zoneId != null && List.of(Snapshot.State.Allocated, Snapshot.State.CreatedOnPrimary).contains(snapshotVO.getState())) { @@ -327,19 +362,15 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { return true; } - if (snapshotVO.getState() == Snapshot.State.CreatedOnPrimary) { - snapshotVO.setState(Snapshot.State.Destroyed); - _snapshotDao.update(snapshotId, snapshotVO); - return true; - } - if (!Snapshot.State.BackedUp.equals(snapshotVO.getState()) && !Snapshot.State.Error.equals(snapshotVO.getState()) && !Snapshot.State.Destroying.equals(snapshotVO.getState())) { throw new InvalidParameterValueException(String.format("Can't delete snapshot %s due to it is in %s Status", snapshotVO, snapshotVO.getState())); } - List storeRefs = _snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image); + List storeRefs = _snapshotStoreDao.listBySnapshotAndDataStoreRole(snapshotId, DataStoreRole.Image); if (zoneId != null) { storeRefs.removeIf(ref -> !zoneId.equals(dataStoreMgr.getStoreZoneId(ref.getDataStoreId(), ref.getRole()))); + } else { + storeRefs.removeIf(ref -> !ref.getState().equals(State.Ready)); } for (SnapshotDataStoreVO ref : storeRefs) { if (!deleteSnapshotOnImageAndPrimary(snapshotId, dataStoreMgr.getDataStore(ref.getDataStoreId(), ref.getRole()))) { @@ -354,7 +385,6 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { if (CollectionUtils.isNotEmpty(retrieveSnapshotEntries(snapshotId, null))) { return true; } - updateSnapshotToDestroyed(snapshotVO); return true; } @@ -380,4 +410,104 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { @Override public void postSnapshotCreation(SnapshotInfo snapshot) { } + + @Override + public void copySnapshot(DataObject snapshot, DataObject snapshotDest, AsyncCompletionCallback callback) { + // export snapshot on remote + 
StoragePoolVO storagePoolVO = _primaryDataStoreDao.findById(snapshotDest.getDataStore().getId()); + String location = StorPoolConfigurationManager.StorPoolClusterLocation.valueIn(snapshotDest.getDataStore().getId()); + StorPoolUtil.spLog("StorpoolSnapshotStrategy.copySnapshot: snapshot %s to pool=%s", snapshot.getUuid(), storagePoolVO.getName()); + SnapshotInfo srcSnapshot = (SnapshotInfo) snapshot; + SnapshotInfo destSnapshot = (SnapshotInfo) snapshotDest; + String err = null; + String snapshotName = StorPoolStorageAdaptor.getVolumeNameFromPath(srcSnapshot.getPath(), false); + if (location != null) { + SpApiResponse resp = exportSnapshot(snapshot, location, snapshotName); + if (resp.getError() != null) { + err = String.format("Failed to export snapshot [%s] from [%s] due to [%s]", snapshotName, location, resp.getError()); + StorPoolUtil.spLog(err); + completeCallback(callback, destSnapshot.getPath(), err); + return; + } + keepExportedSnapshot(snapshot, location, snapshotName); + + SpConnectionDesc connectionRemote = StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), + storagePoolVO.getId(), storagePoolDetailsDao, _primaryDataStoreDao); + SpApiResponse respFromRemote = copySnapshotFromRemote(snapshot, storagePoolVO, snapshotName, connectionRemote); + + if (respFromRemote.getError() != null) { + err = String.format("Failed to copy snapshot [%s] to [%s] due to [%s]", snapshotName, location, respFromRemote.getError()); + StorPoolUtil.spLog(err); + completeCallback(callback, destSnapshot.getPath(), err); + return; + } + StorPoolUtil.spLog("The snapshot [%s] was copied from remote", snapshotName); + + respFromRemote = StorPoolUtil.snapshotReconcile("~" + snapshotName, connectionRemote); + if (respFromRemote.getError() != null) { + err = String.format("Failed to reconcile snapshot [%s] from [%s] due to [%s]", snapshotName, location, respFromRemote.getError()); + StorPoolUtil.spLog(err); + completeCallback(callback, destSnapshot.getPath(), err); + return; + } + 
updateSnapshotPath(snapshotDest, srcSnapshot, destSnapshot); + } else { + completeCallback(callback, destSnapshot.getPath(), "The snapshot is not in the right location"); + } + SnapshotObjectTO snap = (SnapshotObjectTO) snapshotDest.getTO(); + snap.setPath(srcSnapshot.getPath()); + completeCallback(callback, destSnapshot.getPath(), err); + } + + private void completeCallback(AsyncCompletionCallback callback, String snapshotPath, String err) { + CreateCmdResult res = new CreateCmdResult(snapshotPath, null); + res.setResult(err); + callback.complete(res); + } + + private void updateSnapshotPath(DataObject snapshotDest, SnapshotInfo srcSnapshot, SnapshotInfo destSnapshot) { + + SnapshotDataStoreVO snapshotStore = _snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Primary, snapshotDest.getDataStore().getId(), destSnapshot.getSnapshotId()); + snapshotStore.setInstallPath(srcSnapshot.getPath()); + _snapshotStoreDao.update(snapshotStore.getId(), snapshotStore); + } + + @NotNull + private SpApiResponse copySnapshotFromRemote(DataObject snapshot, StoragePoolVO storagePoolVO, String snapshotName, SpConnectionDesc connectionRemote) { + + String localLocation = StorPoolConfigurationManager.StorPoolClusterLocation + .valueIn(snapshot.getDataStore().getId()); + StoragePoolDetailVO template = storagePoolDetailsDao.findDetail(storagePoolVO.getId(), + StorPoolUtil.SP_TEMPLATE); + Map tags = addStorPoolTags(snapshot); + SpApiResponse respFromRemote = StorPoolUtil.snapshotFromRemote(snapshotName, localLocation, + template.getValue(), tags, connectionRemote); + return respFromRemote; + } + + @NotNull + private static Map addStorPoolTags(DataObject snapshot) { + Map tags = new HashMap<>(); + tags.put("cs", "snapshot"); + tags.put("uuid", snapshot.getUuid()); + return tags; + } + + private void keepExportedSnapshot(DataObject snapshot, String location, String snapshotName) { + + String detail = "~" + snapshotName + ";" + location; + SnapshotDetailsVO snapshotForRecovery = new 
SnapshotDetailsVO(snapshot.getId(), StorPoolUtil.SP_RECOVERED_SNAPSHOT, detail, true); + _snapshotDetailsDao.persist(snapshotForRecovery); + } + + @NotNull + private SpApiResponse exportSnapshot(DataObject snapshot, String location, String snapshotName) { + + SpConnectionDesc connectionLocal = StorPoolUtil.getSpConnection(snapshot.getDataStore().getUuid(), + snapshot.getDataStore().getId(), storagePoolDetailsDao, _primaryDataStoreDao); + Long clusterId = StorPoolHelper.findClusterIdByGlobalId(StorPoolUtil.getSnapshotClusterId("~" + snapshotName, connectionLocal), clusterDao); + connectionLocal = StorPoolHelper.getSpConnectionDesc(connectionLocal, clusterId); + SpApiResponse resp = StorPoolUtil.snapshotExport("~" + snapshotName, location, connectionLocal); + return resp; + } } diff --git a/scripts/vm/hypervisor/kvm/gpudiscovery.sh b/scripts/vm/hypervisor/kvm/gpudiscovery.sh index 67627deef57..662abaed2cb 100755 --- a/scripts/vm/hypervisor/kvm/gpudiscovery.sh +++ b/scripts/vm/hypervisor/kvm/gpudiscovery.sh @@ -473,7 +473,7 @@ for VM in "${VMS[@]}"; do # -- MDEV hostdevs: use xmlstarlet to extract UUIDs -- while IFS= read -r UUID; do [[ -n "$UUID" ]] && mdev_to_vm["$UUID"]="$VM" - done < <(echo "$xml" | xmlstarlet sel -T -t -m "//hostdev[@type='mdev']" -v "@uuid" -n 2>/dev/null || true) + done < <(echo "$xml" | xmlstarlet sel -T -t -m "//hostdev[@type='mdev']/source/address" -v "@uuid" -n 2>/dev/null || true) done # Helper: convert a VM name to JSON value (quoted string or null) @@ -516,6 +516,55 @@ parse_and_add_gpu_properties() { fi } +# Finds and formats mdev instances for a given PCI device (PF or VF). +# Appends JSON strings for each found mdev instance to the global 'vlist' array. +# Arguments: +# $1: mdev_base_path (e.g., /sys/bus/pci/devices/.../mdev_supported_types) +# $2: bdf (e.g., 01:00.0) +process_mdev_instances() { + local mdev_base_path="$1" + local bdf="$2" + + if [[ ! 
-d "$mdev_base_path" ]]; then + return + fi + + for PROF_DIR in "$mdev_base_path"/*; do + [[ -d "$PROF_DIR" ]] || continue + + local PROFILE_NAME + if [[ -f "$PROF_DIR/name" ]]; then + PROFILE_NAME=$(<"$PROF_DIR/name") + else + PROFILE_NAME=$(basename "$PROF_DIR") + fi + + parse_and_add_gpu_properties "$PROF_DIR/description" + + local DEVICE_DIR="$PROF_DIR/devices" + if [[ -d "$DEVICE_DIR" ]]; then + for UDIR in "$DEVICE_DIR"/*; do + [[ -d "$UDIR" ]] || continue + local MDEV_UUID + MDEV_UUID=$(basename "$UDIR") + + local DOMAIN="0x0000" + local BUS="0x${bdf:0:2}" + local SLOT="0x${bdf:3:2}" + local FUNC="0x${bdf:6:1}" + + local raw + raw="${mdev_to_vm[$MDEV_UUID]:-}" + local USED_JSON + USED_JSON=$(to_json_vm "$raw") + + vlist+=( + "{\"mdev_uuid\":\"$MDEV_UUID\",\"profile_name\":$(json_escape "$PROFILE_NAME"),\"max_instances\":$MAX_INSTANCES,\"video_ram\":$VIDEO_RAM,\"max_heads\":$MAX_HEADS,\"max_resolution_x\":$MAX_RESOLUTION_X,\"max_resolution_y\":$MAX_RESOLUTION_Y,\"libvirt_address\":{\"domain\":\"$DOMAIN\",\"bus\":\"$BUS\",\"slot\":\"$SLOT\",\"function\":\"$FUNC\"},\"used_by_vm\":$USED_JSON}") + done + fi + done +} + # === GPU Discovery === mapfile -t LINES < <(lspci -nnm) @@ -588,51 +637,9 @@ for LINE in "${LINES[@]}"; do # === vGPU (MDEV) instances === VGPU_ARRAY="[]" declare -a vlist=() + # Process mdev on the Physical Function MDEV_BASE="/sys/bus/pci/devices/0000:$PCI_ADDR/mdev_supported_types" - if [[ -d "$MDEV_BASE" ]]; then - for PROF_DIR in "$MDEV_BASE"/*; do - [[ -d "$PROF_DIR" ]] || continue - - # Read the human-readable profile name from the 'name' file - if [[ -f "$PROF_DIR/name" ]]; then - PROFILE_NAME=$(<"$PROF_DIR/name") - else - PROFILE_NAME=$(basename "$PROF_DIR") - fi - - # Fetch max_instance from the description file, if present - parse_and_add_gpu_properties "$PROF_DIR/description" - - # Under each profile, existing UUIDs appear in: - # /sys/bus/pci/devices/0000:$PCI_ADDR/mdev_supported_types//devices/* - DEVICE_DIR="$PROF_DIR/devices" - if 
[[ -d "$DEVICE_DIR" ]]; then - for UDIR in "$DEVICE_DIR"/*; do - [[ -d $UDIR ]] || continue - MDEV_UUID=$(basename "$UDIR") - - # libvirt_address uses PF BDF - DOMAIN="0x0000" - BUS="0x${PCI_ADDR:0:2}" - SLOT="0x${PCI_ADDR:3:2}" - FUNC="0x${PCI_ADDR:6:1}" - - # Determine which VM uses this UUID - raw="${mdev_to_vm[$MDEV_UUID]:-}" - USED_JSON=$(to_json_vm "$raw") - - vlist+=( - "{\"mdev_uuid\":\"$MDEV_UUID\",\"profile_name\":$(json_escape "$PROFILE_NAME"),\"max_instances\":$MAX_INSTANCES,\"video_ram\":$VIDEO_RAM,\"max_heads\":$MAX_HEADS,\"max_resolution_x\":$MAX_RESOLUTION_X,\"max_resolution_y\":$MAX_RESOLUTION_Y,\"libvirt_address\":{\"domain\":\"$DOMAIN\",\"bus\":\"$BUS\",\"slot\":\"$SLOT\",\"function\":\"$FUNC\"},\"used_by_vm\":$USED_JSON}") - done - fi - done - if [ ${#vlist[@]} -gt 0 ]; then - VGPU_ARRAY="[$( - IFS=, - echo "${vlist[*]}" - )]" - fi - fi + process_mdev_instances "$MDEV_BASE" "$PCI_ADDR" # === VF instances (SR-IOV / MIG) === VF_ARRAY="[]" @@ -644,6 +651,12 @@ for LINE in "${LINES[@]}"; do VF_ADDR=${VF_PATH##*/} # e.g. 
"0000:65:00.2" VF_BDF="${VF_ADDR:5}" # "65:00.2" + # For NVIDIA SR-IOV, check for vGPU (mdev) on the VF itself + if [[ "$VENDOR_ID" == "10de" ]]; then + VF_MDEV_BASE="$VF_PATH/mdev_supported_types" + process_mdev_instances "$VF_MDEV_BASE" "$VF_BDF" + fi + DOMAIN="0x0000" BUS="0x${VF_BDF:0:2}" SLOT="0x${VF_BDF:3:2}" @@ -674,6 +687,14 @@ for LINE in "${LINES[@]}"; do fi fi + # Consolidate all vGPU instances (from PF and VFs) + if [ ${#vlist[@]} -gt 0 ]; then + VGPU_ARRAY="[$( + IFS=, + echo "${vlist[*]}" + )]" + fi + # === full_passthrough block === # If vgpu_instances and vf_instances are empty, we can assume full passthrough FP_ENABLED=0 diff --git a/scripts/vm/hypervisor/kvm/nasbackup.sh b/scripts/vm/hypervisor/kvm/nasbackup.sh index 9dedaef154a..588c3791769 100755 --- a/scripts/vm/hypervisor/kvm/nasbackup.sh +++ b/scripts/vm/hypervisor/kvm/nasbackup.sh @@ -16,7 +16,7 @@ ## specific language governing permissions and limitations ## under the License. -set -e +set -eo pipefail # CloudStack B&R NAS Backup and Recovery Tool for KVM @@ -31,8 +31,11 @@ NAS_ADDRESS="" MOUNT_OPTS="" BACKUP_DIR="" DISK_PATHS="" +QUIESCE="" logFile="/var/log/cloudstack/agent/agent.log" +EXIT_CLEANUP_FAILED=20 + log() { [[ "$verb" -eq 1 ]] && builtin echo "$@" if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then @@ -88,7 +91,7 @@ sanity_checks() { backup_running_vm() { mount_operation - mkdir -p $dest + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } name="root" echo "" > $dest/backup.xml @@ -99,8 +102,31 @@ backup_running_vm() { done echo "" >> $dest/backup.xml + local thaw=0 + if [[ ${QUIESCE} == "true" ]]; then + if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then + thaw=1 + fi + fi + # Start push backup - virsh -c qemu:///system backup-begin --domain $VM --backupxml $dest/backup.xml > /dev/null 2>/dev/null + local backup_begin=0 + if virsh -c qemu:///system backup-begin --domain 
$VM --backupxml $dest/backup.xml 2>&1 > /dev/null; then + backup_begin=1; + fi + + if [[ $thaw -eq 1 ]]; then + if ! response=$(virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' 2>&1 > /dev/null); then + echo "Failed to thaw the filesystem for vm $VM: $response" + cleanup + exit 1 + fi + fi + + if [[ $backup_begin -ne 1 ]]; then + cleanup + exit 1 + fi # Backup domain information virsh -c qemu:///system dumpxml $VM > $dest/domain-config.xml 2>/dev/null @@ -108,9 +134,18 @@ backup_running_vm() { virsh -c qemu:///system domiflist $VM > $dest/domiflist.xml 2>/dev/null virsh -c qemu:///system domblklist $VM > $dest/domblklist.xml 2>/dev/null - until virsh -c qemu:///system domjobinfo $VM --completed --keep-completed 2>/dev/null | grep "Completed" > /dev/null; do + while true; do + status=$(virsh -c qemu:///system domjobinfo $VM --completed --keep-completed | awk '/Job type:/ {print $3}') + case "$status" in + Completed) + break ;; + Failed) + echo "Virsh backup job failed" + cleanup; exit 1 ;; + esac sleep 5 done + rm -f $dest/backup.xml sync @@ -124,14 +159,18 @@ backup_running_vm() { backup_stopped_vm() { mount_operation - mkdir -p $dest + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } IFS="," name="root" for disk in $DISK_PATHS; do volUuid="${disk##*/}" - qemu-img convert -O qcow2 $disk $dest/$name.$volUuid.qcow2 | tee -a "$logFile" + output="$dest/$name.$volUuid.qcow2" + if ! 
qemu-img convert -O qcow2 "$disk" "$output" > "$logFile" 2> >(cat >&2); then + echo "qemu-img convert failed for $disk $output" + cleanup + fi name="datadisk" done sync @@ -148,13 +187,22 @@ delete_backup() { rmdir $mount_point } +get_backup_stats() { + mount_operation + + echo $mount_point + df -P $mount_point 2>/dev/null | awk 'NR==2 {print $2, $3}' + umount $mount_point + rmdir $mount_point +} + mount_operation() { mount_point=$(mktemp -d -t csbackup.XXXXX) dest="$mount_point/${BACKUP_DIR}" if [ ${NAS_TYPE} == "cifs" ]; then MOUNT_OPTS="${MOUNT_OPTS},nobrl" fi - mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) | tee -a "$logFile" + mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) 2>&1 | tee -a "$logFile" if [ $? -eq 0 ]; then log -ne "Successfully mounted ${NAS_TYPE} store" else @@ -163,9 +211,22 @@ mount_operation() { fi } +cleanup() { + local status=0 + + rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } + umount "$mount_point" || { echo "Failed to unmount $mount_point"; status=1; } + rmdir "$mount_point" || { echo "Failed to remove mount point $mount_point"; status=1; } + + if [[ $status -ne 0 ]]; then + echo "Backup cleanup failed" + exit $EXIT_CLEANUP_FAILED + fi +} + function usage { echo "" - echo "Usage: $0 -o -v|--vm -t -s -m -p -d " + echo "Usage: $0 -o -v|--vm -t -s -m -p -d -q|--quiesce " echo "" exit 1 } @@ -202,6 +263,11 @@ while [[ $# -gt 0 ]]; do shift shift ;; + -q|--quiesce) + QUIESCE="$2" + shift + shift + ;; -d|--diskpaths) DISK_PATHS="$2" shift @@ -222,12 +288,14 @@ done sanity_checks if [ "$OP" = "backup" ]; then - STATE=$(virsh -c qemu:///system list | grep $VM | awk '{print $3}') - if [ "$STATE" = "running" ]; then + STATE=$(virsh -c qemu:///system list | awk -v vm="$VM" '$2 == vm {print $3}') + if [ -n "$STATE" ] && [ "$STATE" = "running" ]; then backup_running_vm else backup_stopped_vm fi elif [ "$OP" = "delete" 
]; then delete_backup +elif [ "$OP" = "stats" ]; then + get_backup_stats fi diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 3240bfcc8ab..ed777486a12 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -37,11 +37,17 @@ import javax.inject.Inject; import javax.mail.MessagingException; import javax.naming.ConfigurationException; +import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; +import com.cloud.org.Cluster; + +import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.mailing.MailAddress; @@ -64,11 +70,9 @@ import com.cloud.capacity.dao.CapacityDaoImpl.SummedCapacity; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.dc.Pod; import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; @@ -82,7 +86,6 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.network.Ipv6Service; import com.cloud.network.dao.IPAddressDao; -import com.cloud.org.Cluster; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import 
com.cloud.storage.StorageManager; @@ -143,8 +146,12 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Inject ConfigurationManager _configMgr; @Inject + protected BackupManager backupManager; + @Inject protected ConfigDepot _configDepot; @Inject + private ObjectStoreDao _objectStoreDao; + @Inject Ipv6Service ipv6Service; @Inject HostDao hostDao; @@ -157,6 +164,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi private double _vlanCapacityThreshold = 0.75; private double _directNetworkPublicIpCapacityThreshold = 0.75; private double _localStorageCapacityThreshold = 0.75; + private double _backupStorageCapacityThreshold = 0.75; + private double _objectStorageCapacityThreshold = 0.75; Map _capacityTypeThresholdMap = new HashMap<>(); private final ExecutorService _executor; @@ -199,6 +208,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi String vlanCapacityThreshold = _configDao.getValue(Config.VlanCapacityThreshold.key()); String directNetworkPublicIpCapacityThreshold = _configDao.getValue(Config.DirectNetworkPublicIpCapacityThreshold.key()); String localStorageCapacityThreshold = _configDao.getValue(Config.LocalStorageCapacityThreshold.key()); + String backupStorageCapacityThreshold = _configDao.getValue(BackupManager.BackupStorageCapacityThreshold.key()); + String objectStorageCapacityThreshold = _configDao.getValue(_storageMgr.ObjectStorageCapacityThreshold.key()); if (publicIPCapacityThreshold != null) { _publicIPCapacityThreshold = Double.parseDouble(publicIPCapacityThreshold); @@ -218,6 +229,12 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi if (localStorageCapacityThreshold != null) { _localStorageCapacityThreshold = Double.parseDouble(localStorageCapacityThreshold); } + if (backupStorageCapacityThreshold != null) { + _backupStorageCapacityThreshold = Double.parseDouble(backupStorageCapacityThreshold); + } + if 
(objectStorageCapacityThreshold != null) { + _objectStorageCapacityThreshold = Double.parseDouble(objectStorageCapacityThreshold); + } _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_PUBLIC_IP, _publicIPCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_PRIVATE_IP, _privateIPCapacityThreshold); @@ -226,6 +243,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_DIRECT_ATTACHED_PUBLIC_IP, _directNetworkPublicIpCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_LOCAL_STORAGE, _localStorageCapacityThreshold); _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET, Ipv6SubnetCapacityThreshold.value()); + _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_BACKUP_STORAGE, _backupStorageCapacityThreshold); + _capacityTypeThresholdMap.put(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, _objectStorageCapacityThreshold); String capacityCheckPeriodStr = configs.get("capacity.check.period"); if (capacityCheckPeriodStr != null) { @@ -549,7 +568,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi for (Short capacityType : dataCenterCapacityTypes) { List capacity = _capacityDao.findCapacityBy(capacityType.intValue(), dc.getId(), null, null); - if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) { + if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE || + capacityType == Capacity.CAPACITY_TYPE_OBJECT_STORAGE || + capacityType == Capacity.CAPACITY_TYPE_BACKUP_STORAGE) { capacity.add(getUsedStats(capacityType, dc.getId(), null, null)); } if (capacity == null || capacity.isEmpty()) { @@ -618,18 +639,22 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } private SummedCapacity getUsedStats(short capacityType, long zoneId, Long podId, Long clusterId) { - CapacityVO capacity; + CapacityVO capacity = null; + if (capacityType == 
Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) { capacity = _storageMgr.getSecondaryStorageUsedStats(null, zoneId); - } else { + } else if (capacityType == Capacity.CAPACITY_TYPE_STORAGE) { capacity = _storageMgr.getStoragePoolUsedStats(null, clusterId, podId, zoneId); + } else if (capacityType == Capacity.CAPACITY_TYPE_OBJECT_STORAGE) { + capacity = _storageMgr.getObjectStorageUsedStats(zoneId); + } else if (capacityType == Capacity.CAPACITY_TYPE_BACKUP_STORAGE) { + capacity = (CapacityVO) backupManager.getBackupStorageUsedStats(zoneId); } if (capacity != null) { return new SummedCapacity(capacity.getUsedCapacity(), 0, capacity.getTotalCapacity(), capacityType, clusterId, podId); } else { return null; } - } private void generateEmailAlert(DataCenterVO dc, HostPodVO pod, ClusterVO cluster, double totalCapacity, double usedCapacity, short capacityType) { @@ -706,6 +731,16 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi msgContent = String.format("Number of unallocated virtual network guest IPv6 subnets is low, total: [%s], allocated: [%s] (%s%%).", totalInString, usedInString, percentual); alertType = AlertManager.AlertType.ALERT_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET; break; + case Capacity.CAPACITY_TYPE_BACKUP_STORAGE: + msgSubject = "System Alert: Low Available Backup Storage in availability zone " + dc.getName(); + msgContent = "Available backup storage space is low, total: " + totalInString + " MB, used: " + usedInString + " MB (" + percentual + "%)"; + alertType = AlertManager.AlertType.ALERT_TYPE_BACKUP_STORAGE; + break; + case Capacity.CAPACITY_TYPE_OBJECT_STORAGE: + msgSubject = "System Alert: Low Available Object Storage in availability zone " + dc.getName(); + msgContent = "Available object storage space is low, total: " + totalInString + " MB, used: " + usedInString + " MB (" + percentual + "%)"; + alertType = AlertManager.AlertType.ALERT_TYPE_OBJECT_STORAGE; + break; } try { @@ -724,6 +759,8 @@ public class AlertManagerImpl 
extends ManagerBase implements AlertManager, Confi dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VLAN); dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_BACKUP_STORAGE); + dataCenterCapacityTypes.add(Capacity.CAPACITY_TYPE_OBJECT_STORAGE); return dataCenterCapacityTypes; } diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index 1471ee4220a..80043d0e279 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.BackupOfferingResponse; -import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainResponse; @@ -75,7 +74,6 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupSchedule; import org.apache.cloudstack.backup.dao.BackupDao; @@ -362,7 +360,11 @@ import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + public class ApiDBUtils { + private static final Logger log = 
LogManager.getLogger(ApiDBUtils.class); private static ManagementServer s_ms; static AsyncJobManager s_asyncMgr; static SecurityGroupManager s_securityGroupMgr; @@ -1065,6 +1067,10 @@ public class ApiDBUtils { return s_storageMgr.getSecondaryStorageUsedStats(hostId, zoneId); } + public static CapacityVO getObjectStorageUsedStats(Long zoneId) { + return s_storageMgr.getObjectStorageUsedStats(zoneId); + } + // /////////////////////////////////////////////////////////// // Dao methods // // /////////////////////////////////////////////////////////// @@ -1102,7 +1108,7 @@ public class ApiDBUtils { return null; } - public static DiskOfferingVO findDiskOfferingById(Long diskOfferingId) { + public static DiskOfferingVO findNonComputeDiskOfferingById(Long diskOfferingId) { if (diskOfferingId == null) { return null; } @@ -1113,6 +1119,14 @@ public class ApiDBUtils { return null; } + public static DiskOfferingVO findDiskOfferingById(Long diskOfferingId) { + if (diskOfferingId == null) { + return null; + } + DiskOfferingVO off = s_diskOfferingDao.findByIdIncludingRemoved(diskOfferingId); + return off; + } + public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId, boolean includingRemoved) { ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, includingRemoved); return off; @@ -1707,6 +1721,21 @@ public class ApiDBUtils { return s_zoneDao.listByIds(zoneIds); } + public static List findSnapshotPolicyPools(SnapshotPolicy policy, Volume volume) { + List poolDetails = s_snapshotPolicyDetailsDao.findDetails(policy.getId(), ApiConstants.STORAGE_ID); + List poolIds = new ArrayList<>(); + for (SnapshotPolicyDetailVO detail : poolDetails) { + try { + poolIds.add(Long.valueOf(detail.getValue())); + } catch (NumberFormatException ignored) { + log.debug(String.format("Could not parse the storage ID value of %s", detail.getValue()), ignored); + } + } + if (volume != null && 
!poolIds.contains(volume.getPoolId())) { + poolIds.add(0, volume.getPoolId()); + } + return s_storagePoolDao.listByIds(poolIds); + } public static VpcOffering findVpcOfferingById(long offeringId) { return s_vpcOfferingDao.findById(offeringId); } @@ -2264,10 +2293,6 @@ public class ApiDBUtils { return s_resourceIconDao.findByResourceUuid(resourceUUID, resourceType); } - public static BackupResponse newBackupResponse(Backup backup) { - return s_backupDao.newBackupResponse(backup); - } - public static BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule) { return s_backupScheduleDao.newBackupScheduleResponse(schedule); } diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 0293e0d08fb..64d6e8b6929 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -77,7 +77,6 @@ import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.api.response.BackupRepositoryResponse; -import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.BgpPeerResponse; import org.apache.cloudstack.api.response.BucketResponse; @@ -88,6 +87,7 @@ import org.apache.cloudstack.api.response.ConditionResponse; import org.apache.cloudstack.api.response.ConfigurationGroupResponse; import org.apache.cloudstack.api.response.ConfigurationResponse; import org.apache.cloudstack.api.response.ConfigurationSubGroupResponse; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; import org.apache.cloudstack.api.response.ControlledEntityResponse; import org.apache.cloudstack.api.response.ControlledViewEntityResponse; import 
org.apache.cloudstack.api.response.CounterResponse; @@ -198,7 +198,6 @@ import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupRepository; import org.apache.cloudstack.backup.BackupSchedule; @@ -207,6 +206,7 @@ import org.apache.cloudstack.backup.dao.BackupRepositoryDao; import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.config.ConfigurationGroup; import org.apache.cloudstack.config.ConfigurationSubGroup; +import org.apache.cloudstack.consoleproxy.ConsoleSession; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.direct.download.DirectDownloadCertificate; import org.apache.cloudstack.direct.download.DirectDownloadCertificateHostMap; @@ -886,6 +886,15 @@ public class ApiResponseHelper implements ResponseGenerator { zoneResponses.add(zoneResponse); } policyResponse.setZones(new HashSet<>(zoneResponses)); + List poolResponses = new ArrayList<>(); + List pools = ApiDBUtils.findSnapshotPolicyPools(policy, vol); + for (StoragePoolVO pool : pools) { + StoragePoolResponse storagePoolResponse = new StoragePoolResponse(); + storagePoolResponse.setId(pool.getUuid()); + storagePoolResponse.setName(pool.getName()); + poolResponses.add(storagePoolResponse); + } + policyResponse.setStoragePools(new HashSet<>(poolResponses)); return policyResponse; } @@ -1483,6 +1492,7 @@ public class ApiResponseHelper implements ResponseGenerator { capacities.add(ApiDBUtils.getStoragePoolUsedStats(poolId, clusterId, podId, zoneId)); if (clusterId == null && podId == null) { capacities.add(ApiDBUtils.getSecondaryStorageUsedStats(poolId, zoneId)); + capacities.add(ApiDBUtils.getObjectStorageUsedStats(zoneId)); } List 
capacityResponses = new ArrayList(); @@ -2140,6 +2150,11 @@ public class ApiResponseHelper implements ResponseGenerator { List capacityResponses = new ArrayList(); for (Capacity summedCapacity : result) { + if (summedCapacity.getTotalCapacity() == 0 && + (summedCapacity.getCapacityType() == Capacity.CAPACITY_TYPE_BACKUP_STORAGE || + summedCapacity.getCapacityType() == Capacity.CAPACITY_TYPE_OBJECT_STORAGE)) { + continue; + } CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityTotal(summedCapacity.getTotalCapacity()); if (summedCapacity.getAllocatedCapacity() != null) { @@ -5058,11 +5073,6 @@ public class ApiResponseHelper implements ResponseGenerator { return response; } - @Override - public BackupResponse createBackupResponse(Backup backup) { - return ApiDBUtils.newBackupResponse(backup); - } - @Override public BackupScheduleResponse createBackupScheduleResponse(BackupSchedule schedule) { return ApiDBUtils.newBackupScheduleResponse(schedule); @@ -5615,4 +5625,71 @@ protected Map getResourceIconsUsingOsCategory(List listOfForwardHeaders = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK + public static final ConfigKey listOfForwardHeaders = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK , String.class , "proxy.header.names" , "X-Forwarded-For,HTTP_CLIENT_IP,HTTP_X_FORWARDED_FOR" , "a list of names to check for allowed ipaddresses from a proxy set header. See \"proxy.cidr\" for the proxies allowed to set these headers." 
, true , ConfigKey.Scope.Global); - static final ConfigKey proxyForwardList = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK + public static final ConfigKey proxyForwardList = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK , String.class , "proxy.cidr" , "" diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java index bfe256305d5..bfd8b827ec5 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java @@ -69,6 +69,7 @@ public class ParamGenericValidationWorker implements DispatchWorker { defaultParamNames.add(ApiConstants.ID); defaultParamNames.add(ApiConstants.SIGNATURE_VERSION); defaultParamNames.add(ApiConstants.EXPIRES); + defaultParamNames.add(ApiConstants.SCHEDULE_ID); defaultParamNames.add("_"); } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index e03b901d2f7..f0a848196a4 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -37,6 +37,14 @@ import java.util.stream.Stream; import javax.inject.Inject; +import com.cloud.dc.Pod; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.org.Cluster; +import com.cloud.server.ManagementService; +import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; +import com.cloud.cluster.ManagementServerHostPeerJoinVO; + import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker; @@ -229,7 +237,6 @@ import com.cloud.api.query.vo.TemplateJoinVO; import com.cloud.api.query.vo.UserAccountJoinVO; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; 
-import com.cloud.cluster.ManagementServerHostPeerJoinVO; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.cluster.dao.ManagementServerHostPeerJoinDao; @@ -237,14 +244,12 @@ import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DedicatedResourceVO; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DedicatedResourceDao; -import com.cloud.dc.dao.HostPodDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; +import com.cloud.event.Event; import com.cloud.event.EventVO; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.EventJoinDao; @@ -278,7 +283,6 @@ import com.cloud.network.security.dao.SecurityGroupVMMapDao; import com.cloud.network.vo.PublicIpQuarantineVO; import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; -import com.cloud.org.Cluster; import com.cloud.org.Grouping; import com.cloud.projects.Project; import com.cloud.projects.Project.ListProjectResourcesCriteria; @@ -290,7 +294,6 @@ import com.cloud.projects.dao.ProjectDao; import com.cloud.projects.dao.ProjectInvitationDao; import com.cloud.resource.ResourceManager; import com.cloud.resource.icon.dao.ResourceIconDao; -import com.cloud.server.ManagementService; import com.cloud.server.ResourceManagerUtil; import com.cloud.server.ResourceMetaDataService; import com.cloud.server.ResourceTag; @@ -320,7 +323,6 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.BucketDao; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.storage.dao.VMTemplateDao; @@ -873,6 +875,7 @@ public class 
QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Long startId = cmd.getStartId(); final String resourceUuid = cmd.getResourceId(); final String resourceTypeStr = cmd.getResourceType(); + final String stateStr = cmd.getState(); ApiCommandResourceType resourceType = null; Long resourceId = null; if (resourceTypeStr != null) { @@ -902,6 +905,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q accountMgr.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.ListEntry, entity.getAccountId() == caller.getId(), entity); } } + Event.State state = null; + if (StringUtils.isNotBlank(stateStr)) { + state = EnumUtils.getEnum(Event.State.class, stateStr); + if (state == null) { + throw new InvalidParameterValueException(String.format("Invalid %s specified: %s", ApiConstants.STATE, stateStr)); + } + } Ternary domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null); accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); @@ -925,7 +935,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q eventSearchBuilder.and("createDateB", eventSearchBuilder.entity().getCreateDate(), SearchCriteria.Op.BETWEEN); eventSearchBuilder.and("createDateG", eventSearchBuilder.entity().getCreateDate(), SearchCriteria.Op.GTEQ); eventSearchBuilder.and("createDateL", eventSearchBuilder.entity().getCreateDate(), SearchCriteria.Op.LTEQ); - eventSearchBuilder.and("state", eventSearchBuilder.entity().getState(), SearchCriteria.Op.NEQ); + eventSearchBuilder.and("state", eventSearchBuilder.entity().getState(), SearchCriteria.Op.EQ); eventSearchBuilder.or("startId", eventSearchBuilder.entity().getStartId(), SearchCriteria.Op.EQ); eventSearchBuilder.and("createDate", eventSearchBuilder.entity().getCreateDate(), SearchCriteria.Op.BETWEEN); 
eventSearchBuilder.and("displayEvent", eventSearchBuilder.entity().isDisplay(), SearchCriteria.Op.EQ); @@ -994,6 +1004,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.setParameters("archived", cmd.getArchived()); } + if (state != null) { + sc.setParameters("state", state); + } + Pair, Integer> eventPair; // event_view will not have duplicate rows for each event, so // searchAndCount should be good enough. @@ -5888,9 +5902,17 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q public SnapshotResponse listSnapshot(CopySnapshotCmd cmd) { Account caller = CallContext.current().getCallingAccount(); List zoneIds = cmd.getDestinationZoneIds(); + Long zoneId = null; + String location = null; + if (CollectionUtils.isNotEmpty(zoneIds)) { + zoneId = zoneIds.get(0); + location = Snapshot.LocationType.SECONDARY.name(); + } else { + location = cmd.getSnapshot().getLocationType() != null ? cmd.getSnapshot().getLocationType().name() : null; + } Pair, Integer> result = searchForSnapshotsWithParams(cmd.getId(), null, null, null, null, null, - null, null, zoneIds.get(0), Snapshot.LocationType.SECONDARY.name(), + null, null, zoneId, location, false, null, null, null, null, null, null, null, true, false, caller); ResponseView respView = ResponseView.Restricted; diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDao.java index 25dfbfe6714..7ca1d7f72f7 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDao.java @@ -17,13 +17,13 @@ package com.cloud.api.query.dao; -import java.util.List; +import com.cloud.api.query.vo.SnapshotJoinVO; +import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.response.SnapshotResponse; -import com.cloud.api.query.vo.SnapshotJoinVO; -import 
com.cloud.utils.db.GenericDao; +import java.util.List; public interface SnapshotJoinDao extends GenericDao { @@ -34,4 +34,6 @@ public interface SnapshotJoinDao extends GenericDao { List searchBySnapshotStorePair(String... pairs); List findByDistinctIds(Long zoneId, Long... ids); + + List listBySnapshotIdAndZoneId(Long zoneId, Long snapshotId); } diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java index 2944e69c22a..9ea14edf2b7 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java @@ -26,6 +26,22 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.api.ApiDBUtils; +import com.cloud.api.ApiResponseHelper; +import com.cloud.api.query.vo.SnapshotJoinVO; +import com.cloud.storage.GuestOS; +import com.cloud.storage.Snapshot; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.user.Account; +import com.cloud.user.AccountService; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.VMInstanceVO; + import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ResponseObject; @@ -38,21 +54,6 @@ import org.apache.cloudstack.query.QueryService; import org.apache.commons.collections.CollectionUtils; -import com.cloud.api.ApiDBUtils; -import com.cloud.api.ApiResponseHelper; -import com.cloud.api.query.vo.SnapshotJoinVO; -import com.cloud.storage.GuestOS; -import com.cloud.storage.Snapshot; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.Volume.Type; -import com.cloud.storage.VolumeVO; -import 
com.cloud.user.Account; -import com.cloud.user.AccountService; -import com.cloud.utils.db.Filter; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.vm.VMInstanceVO; - public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation implements SnapshotJoinDao { @Inject @@ -68,6 +69,8 @@ public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation snapshotIdsSearch; + private final SearchBuilder snapshotByZoneSearch; + SnapshotJoinDaoImpl() { snapshotStorePairSearch = createSearchBuilder(); snapshotStorePairSearch.and("snapshotStoreState", snapshotStorePairSearch.entity().getStoreState(), SearchCriteria.Op.IN); @@ -79,6 +82,11 @@ public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation listBySnapshotIdAndZoneId(Long zoneId, Long snapshotId) { + if (snapshotId == null) { + return new ArrayList<>(); + } + SearchCriteria sc = snapshotByZoneSearch.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + sc.setParameters("id", snapshotId); + return listBy(sc); + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 8092c63c6fd..9762fc5ed3e 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -253,7 +253,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation getStorageCapacities(Long clusterId, Long podId, Long zoneId, List poolIds, Short capacityType) { - List capacityTypes = Arrays.asList(Capacity.CAPACITY_TYPE_STORAGE, Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); + List capacityTypes = Arrays.asList(Capacity.CAPACITY_TYPE_STORAGE, Capacity.CAPACITY_TYPE_SECONDARY_STORAGE, + Capacity.CAPACITY_TYPE_BACKUP_STORAGE, Capacity.CAPACITY_TYPE_OBJECT_STORAGE); if (capacityType != null && !capacityTypes.contains(capacityType)) { return null; 
} @@ -3568,7 +3573,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe capacityTypes = capacityTypes.stream().filter(x -> x.equals(capacityType)).collect(Collectors.toList()); } if (CollectionUtils.isNotEmpty(poolIds)) { - capacityTypes = capacityTypes.stream().filter(x -> x != Capacity.CAPACITY_TYPE_SECONDARY_STORAGE).collect(Collectors.toList()); + capacityTypes = capacityTypes.stream().filter(x -> x == Capacity.CAPACITY_TYPE_STORAGE).collect(Collectors.toList()); } if (CollectionUtils.isEmpty(capacityTypes)) { return null; @@ -3594,6 +3599,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (capacityTypes.contains(Capacity.CAPACITY_TYPE_STORAGE)) { capacities.add(_storageMgr.getStoragePoolUsedStats(dc.getId(), podId, clusterId, poolIds)); } + if (capacityTypes.contains(Capacity.CAPACITY_TYPE_OBJECT_STORAGE)) { + capacities.add(_storageMgr.getObjectStorageUsedStats(dc.getId())); + } + if (capacityTypes.contains(Capacity.CAPACITY_TYPE_BACKUP_STORAGE)) { + capacities.add((CapacityVO) backupManager.getBackupStorageUsedStats(dc.getId())); + } for (CapacityVO capacity : capacities) { if (capacity.getTotalCapacity() != 0) { capacity.setUsedPercentage((float)capacity.getUsedCapacity() / capacity.getTotalCapacity()); @@ -3608,6 +3619,22 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return list; } + private void addZoneWideCapacitiesByType(final Integer capacityType, Long zId, List taggedCapacities) { + if (capacityType == null) { + taggedCapacities.add(_storageMgr.getSecondaryStorageUsedStats(null, zId)); + taggedCapacities.add(_storageMgr.getObjectStorageUsedStats(zId)); + taggedCapacities.add((CapacityVO) backupManager.getBackupStorageUsedStats(zId)); + return; + } + + if (capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) { + taggedCapacities.add(_storageMgr.getSecondaryStorageUsedStats(null, zId)); + } else if (capacityType == 
Capacity.CAPACITY_TYPE_OBJECT_STORAGE) { + taggedCapacities.add(_storageMgr.getObjectStorageUsedStats(zId)); + } else if (capacityType == Capacity.CAPACITY_TYPE_BACKUP_STORAGE) { + taggedCapacities.add((CapacityVO) backupManager.getBackupStorageUsedStats(zId)); + } + } protected List listCapacitiesWithDetails(final Long zoneId, final Long podId, Long clusterId, final Integer capacityType, final String tag, List dcList) { @@ -3636,11 +3663,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe for (final Long zId : dcList) { // op_host_Capacity contains only allocated stats and the real time // stats are stored "in memory". - // List secondary storage capacity only when the api is invoked for the zone layer. - if ((capacityType == null || capacityType == Capacity.CAPACITY_TYPE_SECONDARY_STORAGE) && - podId == null && clusterId == null && - StringUtils.isEmpty(t)) { - taggedCapacities.add(_storageMgr.getSecondaryStorageUsedStats(null, zId)); + // List secondary, object and backup storage capacities only when the api is invoked for the zone layer. 
+ if (podId == null && clusterId == null && StringUtils.isEmpty(t)) { + addZoneWideCapacitiesByType(capacityType, zId, taggedCapacities); } if ((capacityType == null || capacityType == Capacity.CAPACITY_TYPE_STORAGE) && storagePoolIdsForCapacity.first()) { taggedCapacities.add(_storageMgr.getStoragePoolUsedStats(zId, podId, clusterId, storagePoolIdsForCapacity.second())); @@ -4244,8 +4269,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(ConfigureOutOfBandManagementCmd.class); cmdList.add(IssueOutOfBandManagementPowerActionCmd.class); cmdList.add(ChangeOutOfBandManagementPasswordCmd.class); + cmdList.add(GetUserKeysCmd.class); + + // Console Session APIs cmdList.add(CreateConsoleEndpointCmd.class); + cmdList.add(ListConsoleSessionsCmd.class); //user data APIs cmdList.add(RegisterUserDataCmd.class); diff --git a/server/src/main/java/com/cloud/storage/CreateSnapshotPayload.java b/server/src/main/java/com/cloud/storage/CreateSnapshotPayload.java index 7ade5e51ef5..39dafbbeb41 100644 --- a/server/src/main/java/com/cloud/storage/CreateSnapshotPayload.java +++ b/server/src/main/java/com/cloud/storage/CreateSnapshotPayload.java @@ -29,6 +29,7 @@ public class CreateSnapshotPayload { private boolean asyncBackup; private List zoneIds; private boolean kvmIncrementalSnapshot = false; + private List storagePoolIds; public Long getSnapshotPolicyId() { return snapshotPolicyId; @@ -85,6 +86,15 @@ public class CreateSnapshotPayload { } public void setKvmIncrementalSnapshot(boolean kvmIncrementalSnapshot) { + this.kvmIncrementalSnapshot = kvmIncrementalSnapshot; } + + public List getStoragePoolIds() { + return storagePoolIds; + } + + public void setStoragePoolIds(List storagePoolIds) { + this.storagePoolIds = storagePoolIds; + } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 76be0ed6b56..f6bef8b2e8c 100644 --- 
a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -4588,7 +4588,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C VmwareAllowParallelExecution, DataStoreDownloadFollowRedirects, AllowVolumeReSizeBeyondAllocation, - StoragePoolHostConnectWorkers + StoragePoolHostConnectWorkers, + ObjectStorageCapacityThreshold }; } @@ -4637,7 +4638,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override @ActionEvent(eventType = EventTypes.EVENT_OBJECT_STORE_CREATE, eventDescription = "creating object storage") - public ObjectStore discoverObjectStore(String name, String url, String providerName, Map details) + public ObjectStore discoverObjectStore(String name, String url, Long size, String providerName, Map details) throws IllegalArgumentException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); @@ -4667,6 +4668,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C Map params = new HashMap<>(); params.put("url", url); params.put("name", name); + if (size == null) { + params.put("size", 0L); + } else { + params.put("size", size); + } params.put("providerName", storeProvider.getName()); params.put("role", DataStoreRole.Object); params.put("details", details); @@ -4750,8 +4756,28 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if(cmd.getName() != null ) { objectStoreVO.setName(cmd.getName()); } + if (cmd.getSize() != null) { + objectStoreVO.setTotalSize(cmd.getSize() * ResourceType.bytesToGiB); + } _objectStoreDao.update(id, objectStoreVO); logger.debug("Successfully updated object store: {}", objectStoreVO); return objectStoreVO; } + + @Override + public CapacityVO getObjectStorageUsedStats(Long zoneId) { + List objectStores = _objectStoreDao.listObjectStores(); + Long allocated = 
0L; + Long total = 0L; + for (ObjectStoreVO objectStore: objectStores) { + if (objectStore.getAllocatedSize() != null) { + allocated += objectStore.getAllocatedSize(); + } + if (objectStore.getTotalSize() != null) { + total += objectStore.getTotalSize(); + } + } + CapacityVO capacity = new CapacityVO(null, zoneId, null, null, allocated, total, Capacity.CAPACITY_TYPE_OBJECT_STORAGE); + return capacity; + } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 8f374029b1d..56b0ccd6723 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -65,6 +65,7 @@ import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationSer import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -148,6 +149,7 @@ import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; @@ -187,6 +189,7 @@ import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; import 
com.cloud.user.ResourceLimitService; import com.cloud.user.User; import com.cloud.user.VmDiskStatisticsVO; @@ -370,6 +373,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic public static final String KVM_FILE_BASED_STORAGE_SNAPSHOT = "kvmFileBasedStorageSnapshot"; + public AccountService _accountService; + protected Gson _gson; private static final List SupportedHypervisorsForVolResize = Arrays.asList(HypervisorType.KVM, HypervisorType.XenServer, @@ -954,7 +959,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic String userSpecifiedName = getVolumeNameFromCommand(cmd); - return commitVolume(cmd, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, + return commitVolume(cmd.getSnapshotId(), caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()), details); } @@ -968,7 +973,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } - private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final Account owner, final Boolean displayVolume, final Long zoneId, final Long diskOfferingId, + private VolumeVO commitVolume(final Long snapshotId, final Account caller, final Account owner, final Boolean displayVolume, final Long zoneId, final Long diskOfferingId, final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid, final Map details) { return Transaction.execute(new TransactionCallback() { @Override @@ -996,7 +1001,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volume = _volsDao.persist(volume); - if (cmd.getSnapshotId() == null && displayVolume) { + if (snapshotId == null && 
displayVolume) { // for volume created from snapshot, create usage event after volume creation UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size, Volume.class.getName(), volume.getUuid(), displayVolume); @@ -2867,7 +2872,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { List list = new ArrayList<>(); for (VolumeVO vol : vmVolumes) { - list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize())); + DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId()); + String diskOfferingUuid = diskOffering != null ? diskOffering.getUuid() : null; + list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOfferingUuid, vol.getMinIops(), vol.getMaxIops())); } return GsonHelper.getGson().toJson(list.toArray(), Backup.VolumeInfo[].class); } catch (Exception e) { @@ -3199,6 +3207,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { answer = _agentMgr.send(hostId, cmd); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException(String.format("%s. 
Please contact your system administrator.", errorMsg)); } catch (Exception e) { throw new CloudRuntimeException(errorMsg + " due to: " + e.getMessage()); } @@ -3805,9 +3815,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Override @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "taking snapshot", async = true) public Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, boolean quiescevm, - Snapshot.LocationType locationType, boolean asyncBackup, Map tags, List zoneIds) - throws ResourceAllocationException { - final Snapshot snapshot = takeSnapshotInternal(volumeId, policyId, snapshotId, account, quiescevm, locationType, asyncBackup, zoneIds); + Snapshot.LocationType locationType, boolean asyncBackup, Map tags, List zoneIds, List poolIds, Boolean useStorageReplication) + + throws ResourceAllocationException { + final Snapshot snapshot = takeSnapshotInternal(volumeId, policyId, snapshotId, account, quiescevm, locationType, asyncBackup, zoneIds, poolIds, useStorageReplication); if (snapshot != null && MapUtils.isNotEmpty(tags)) { taggedResourceService.createTags(Collections.singletonList(snapshot.getUuid()), ResourceTag.ResourceObjectType.Snapshot, tags, null); } @@ -3815,10 +3826,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } private Snapshot takeSnapshotInternal(Long volumeId, Long policyId, Long snapshotId, Account account, - boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, List zoneIds) + boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, List zoneIds, List poolIds, Boolean useStorageReplication) throws ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); VolumeInfo volume = volFactory.getVolume(volumeId); + poolIds = snapshotHelper.addStoragePoolsForCopyToPrimary(volume, zoneIds, poolIds, useStorageReplication); + 
canCopyOnPrimary(poolIds, volume,CollectionUtils.isEmpty(poolIds)); if (volume == null) { throw new InvalidParameterValueException("Creating snapshot failed due to volume:" + volumeId + " doesn't exist"); } @@ -3831,6 +3844,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } List details = snapshotPolicyDetailsDao.findDetails(policyId, ApiConstants.ZONE_ID); zoneIds = details.stream().map(d -> Long.valueOf(d.getValue())).collect(Collectors.toList()); + poolIds = getPoolIdsByPolicy(policyId, poolIds); } if (CollectionUtils.isNotEmpty(zoneIds)) { for (Long destZoneId : zoneIds) { @@ -3869,14 +3883,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic placeHolder = createPlaceHolderWork(vm.getId()); try { return orchestrateTakeVolumeSnapshot(volumeId, policyId, snapshotId, account, quiescevm, - locationType, asyncBackup, zoneIds); + locationType, asyncBackup, zoneIds, poolIds); } finally { _workJobDao.expunge(placeHolder.getId()); } } else { Outcome outcome = takeVolumeSnapshotThroughJobQueue(vm.getId(), volumeId, policyId, - snapshotId, account.getId(), quiescevm, locationType, asyncBackup, zoneIds); + snapshotId, account.getId(), quiescevm, locationType, asyncBackup, zoneIds, poolIds); try { outcome.get(); @@ -3909,13 +3923,26 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (CollectionUtils.isNotEmpty(zoneIds)) { payload.setZoneIds(zoneIds); } + if (CollectionUtils.isNotEmpty(poolIds)) { + payload.setStoragePoolIds(poolIds); + } volume.addPayload(payload); return volService.takeSnapshot(volume); } } + @NotNull + private List getPoolIdsByPolicy(Long policyId, List poolIds) { + if (CollectionUtils.isNotEmpty(poolIds)) { + throw new InvalidParameterValueException(String.format("%s can not be specified for snapshots linked with snapshot policy", ApiConstants.STORAGE_ID_LIST)); + } + List poolDetails = snapshotPolicyDetailsDao.findDetails(policyId, 
ApiConstants.STORAGE_ID); + poolIds = poolDetails.stream().map(d -> Long.valueOf(d.getValue())).collect(Collectors.toList()); + return poolIds; + } + private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account account, - boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, List zoneIds) + boolean quiescevm, Snapshot.LocationType locationType, boolean asyncBackup, List zoneIds, List poolIds) throws ResourceAllocationException { VolumeInfo volume = volFactory.getVolume(volumeId); @@ -3928,7 +3955,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException(String.format("Volume: %s is not in %s state but %s. Cannot take snapshot.", volume.getVolume(), Volume.State.Ready, volume.getState())); } - boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage")); + boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value(); if (volume.getEncryptFormat() != null && volume.getAttachedVM() != null && volume.getAttachedVM().getState() != State.Stopped && !isSnapshotOnStorPoolOnly) { logger.debug(String.format("Refusing to take snapshot of encrypted volume (%s) on running VM (%s)", volume, volume.getAttachedVM())); throw new UnsupportedOperationException("Volume snapshots for encrypted volumes are not supported if VM is running"); @@ -3945,6 +3972,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (CollectionUtils.isNotEmpty(zoneIds)) { payload.setZoneIds(zoneIds); } + if (CollectionUtils.isNotEmpty(poolIds)) { + payload.setStoragePoolIds(poolIds); + } + volume.addPayload(payload); return volService.takeSnapshot(volume); @@ -3960,7 +3991,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements 
VolumeApiServic @Override @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "allocating snapshot", create = true) - public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType, List zoneIds) throws ResourceAllocationException { + public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType, List zoneIds, List poolIds, Boolean useStorageReplication) throws ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); VolumeInfo volume = volFactory.getVolume(volumeId); @@ -3994,6 +4025,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException(String.format("Volume: %s is for System VM , Creating snapshot against System VM volumes is not supported", volume.getVolume())); } } + snapshotHelper.addStoragePoolsForCopyToPrimary(volume, zoneIds, poolIds, useStorageReplication); + canCopyOnPrimary(poolIds, volume,CollectionUtils.isEmpty(poolIds)); StoragePoolVO storagePoolVO = _storagePoolDao.findById(volume.getPoolId()); @@ -4009,6 +4042,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (storagePool == null) { throw new InvalidParameterValueException(String.format("Volume: %s please attach this volume to a VM before create snapshot for it", volume.getVolume())); } + boolean canCopyOnPrimary = useStorageReplication; if (CollectionUtils.isNotEmpty(zoneIds)) { if (policyId != null && policyId > 0) { @@ -4017,7 +4051,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (Snapshot.LocationType.PRIMARY.equals(locationType)) { throw new InvalidParameterValueException(String.format("%s cannot be specified with snapshot %s as %s", ApiConstants.ZONE_ID_LIST, ApiConstants.LOCATION_TYPE, Snapshot.LocationType.PRIMARY)); } - if 
(Boolean.FALSE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value())) { + if (Boolean.FALSE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value()) && !canCopyOnPrimary) { throw new InvalidParameterValueException("Backing up of snapshot has been disabled. Snapshot can not be taken for multiple zones"); } if (DataCenter.Type.Edge.equals(zone.getType())) { @@ -4041,6 +4075,25 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return snapshotMgr.allocSnapshot(volumeId, policyId, snapshotName, locationType, false, zoneIds); } + private boolean canCopyOnPrimary(List poolIds, VolumeInfo volume, boolean isPoolIdsEmpty) { + if (!isPoolIdsEmpty) { + for (Long poolId : poolIds){ + DataStore dataStore = dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + StoragePoolVO sPool = _storagePoolDao.findById(poolId); + if (dataStore != null + && !dataStore.getDriver().getCapabilities().containsKey(DataStoreCapabilities.CAN_COPY_SNAPSHOT_BETWEEN_ZONES_AND_SAME_POOL_TYPE.toString()) + && sPool.getPoolType() != volume.getStoragePoolType() + && volume.getPoolId() == poolId) { + throw new InvalidParameterValueException("The specified pool doesn't support copying snapshots between zones" + poolId); + } + } + } else { + return false; + } + snapshotHelper.checkIfThereAreMoreThanOnePoolInTheZone(poolIds); + return true; + } + @Override public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName, Long vmSnapshotId) throws ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -4719,6 +4772,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic try { answer = (AttachAnswer)_agentMgr.send(hostId, cmd); + } catch (AgentUnavailableException e) { + if (host != null) { + volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); + } + throw new CloudRuntimeException(String.format("%s. 
Please contact your system administrator.", errorMsg)); } catch (Exception e) { if (host != null) { volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); @@ -5170,7 +5228,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } public Outcome takeVolumeSnapshotThroughJobQueue(final Long vmId, final Long volumeId, final Long policyId, final Long snapshotId, final Long accountId, final boolean quiesceVm, - final Snapshot.LocationType locationType, final boolean asyncBackup, final List zoneIds) { + final Snapshot.LocationType locationType, final boolean asyncBackup, final List zoneIds, List poolIds) { final CallContext context = CallContext.current(); final User callingUser = context.getCallingUser(); @@ -5192,7 +5250,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // save work context info (there are some duplications) VmWorkTakeVolumeSnapshot workInfo = new VmWorkTakeVolumeSnapshot(callingUser.getId(), accountId != null ? 
accountId : callingAccount.getId(), vm.getId(), - VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, policyId, snapshotId, quiesceVm, locationType, asyncBackup, zoneIds); + VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, policyId, snapshotId, quiesceVm, locationType, asyncBackup, zoneIds, poolIds); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); @@ -5243,7 +5301,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private Pair orchestrateTakeVolumeSnapshot(VmWorkTakeVolumeSnapshot work) throws Exception { Account account = _accountDao.findById(work.getAccountId()); orchestrateTakeVolumeSnapshot(work.getVolumeId(), work.getPolicyId(), work.getSnapshotId(), account, - work.isQuiesceVm(), work.getLocationType(), work.isAsyncBackup(), work.getZoneIds()); + work.isQuiesceVm(), work.getLocationType(), work.isAsyncBackup(), work.getZoneIds(), work.getPoolIds()); return new Pair(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(work.getSnapshotId())); } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java index aea5961d047..6e2059e5776 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java @@ -61,6 +61,12 @@ public interface SnapshotManager extends Configurable { ConfigKey snapshotDeltaMax = new ConfigKey<>(Integer.class, "snapshot.delta.max", "Snapshots", "16", "Max delta snapshots between two full snapshots. 
" + "Only valid for KVM and XenServer.", true, ConfigKey.Scope.Global, null); + ConfigKey snapshotShowChainSize = new ConfigKey<>(Boolean.class, "snapshot.show.chain.size", "Snapshots", "false", + "Whether to show chain size (sum of physical size of snapshot and all its parents) for incremental snapshots in the snapshot response", + true, ConfigKey.Scope.Global, null); + + public static final ConfigKey UseStorageReplication = new ConfigKey(Boolean.class, "use.storage.replication", "Snapshots", "false", "For snapshot copy to another primary storage in a different zone. Supports only StorPool storage for now", true, ConfigKey.Scope.StoragePool, null); + void deletePoliciesForVolume(Long volumeId); /** diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 1e54fc6e224..818e63bbcc1 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.storage.snapshot; + +import com.cloud.storage.StoragePoolStatus; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -53,10 +55,12 @@ import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; @@ -294,7 +298,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {BackupRetryAttempts, BackupRetryInterval, SnapshotHourlyMax, SnapshotDailyMax, SnapshotMonthlyMax, SnapshotWeeklyMax, usageSnapshotSelection, - SnapshotInfo.BackupSnapshotAfterTakingSnapshot, VmStorageSnapshotKvm, kvmIncrementalSnapshot, snapshotDeltaMax}; + SnapshotInfo.BackupSnapshotAfterTakingSnapshot, VmStorageSnapshotKvm, kvmIncrementalSnapshot, snapshotDeltaMax, snapshotShowChainSize, UseStorageReplication}; } @Override @@ -919,6 +923,19 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } } } + final SnapshotVO postDeleteSnapshotEntry = _snapshotDao.findById(snapshotId); + if (postDeleteSnapshotEntry == null || 
Snapshot.State.Destroyed.equals(postDeleteSnapshotEntry.getState())) { + annotationDao.removeByEntityType(AnnotationService.EntityType.SNAPSHOT.name(), snapshotCheck.getUuid()); + + if (snapshotCheck.getState() != Snapshot.State.Error && snapshotCheck.getState() != Snapshot.State.Destroyed) { + _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.snapshot); + } + } + for (SnapshotDataStoreVO snapshotStoreRef : snapshotStoreRefs) { + if (ObjectInDataStoreStateMachine.State.Ready.equals(snapshotStoreRef.getState()) && !DataStoreRole.Primary.equals(snapshotStoreRef.getRole())) { + _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.secondary_storage, new Long(snapshotStoreRef.getPhysicalSize())); + } + } return result; } @@ -1114,11 +1131,13 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return success; } - protected void validatePolicyZones(List zoneIds, VolumeVO volume, Account caller) { - if (CollectionUtils.isEmpty(zoneIds)) { + protected void validatePolicyZones(List zoneIds, List poolIds, VolumeVO volume, Account caller) { + boolean hasPools = CollectionUtils.isNotEmpty(poolIds); + boolean hasZones = CollectionUtils.isNotEmpty(zoneIds); + if (!hasZones && !hasPools) { return; } - if (Boolean.FALSE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value())) { + if (Boolean.FALSE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value()) && hasZones && !hasPools) { throw new InvalidParameterValueException("Backing up of snapshot has been disabled. Snapshot can not be taken for multiple zones"); } final DataCenterVO zone = dataCenterDao.findById(volume.getDataCenterId()); @@ -1126,8 +1145,17 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement throw new InvalidParameterValueException("Backing up of snapshot is not supported by the zone of the volume. 
Snapshots can not be taken for multiple zones"); } boolean isRootAdminCaller = _accountMgr.isRootAdmin(caller.getId()); - for (Long zoneId : zoneIds) { - getCheckedDestinationZoneForSnapshotCopy(zoneId, isRootAdminCaller); + + if (hasZones) { + for (Long zoneId : zoneIds) { + getCheckedDestinationZoneForSnapshotCopy(zoneId, isRootAdminCaller); + } + } + if (hasPools) { + snapshotHelper.checkIfThereAreMoreThanOnePoolInTheZone(poolIds); + for (Long poolId : poolIds) { + getCheckedDestinationStorageForSnapshotCopy(poolId, isRootAdminCaller); + } } } @@ -1230,15 +1258,18 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } final List zoneIds = cmd.getZoneIds(); - validatePolicyZones(zoneIds, volume, caller); + VolumeInfo volumeInfo = volFactory.getVolume(volumeId); + final List poolIds = snapshotHelper.addStoragePoolsForCopyToPrimary(volumeInfo, zoneIds, cmd.getStoragePoolIds(), cmd.useStorageReplication()); + + validatePolicyZones(zoneIds, poolIds, volume, caller); Map tags = cmd.getTags(); boolean active = true; - return persistSnapshotPolicy(volume, schedule, timezoneId, intvType, maxSnaps, display, active, tags, zoneIds); + return persistSnapshotPolicy(volume, schedule, timezoneId, intvType, maxSnaps, display, active, tags, zoneIds, poolIds); } - protected SnapshotPolicyVO persistSnapshotPolicy(VolumeVO volume, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, boolean active, Map tags, List zoneIds) { + protected SnapshotPolicyVO persistSnapshotPolicy(VolumeVO volume, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, boolean active, Map tags, List zoneIds, List poolIds) { long volumeId = volume.getId(); GlobalLock createSnapshotPolicyLock = GlobalLock.getInternLock("createSnapshotPolicy_" + volumeId); @@ -1250,13 +1281,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement logger.debug("Acquired lock for creating 
snapshot policy [{}] for volume {}.", intervalType, volume); + try { SnapshotPolicyVO policy = _snapshotPolicyDao.findOneByVolumeInterval(volumeId, intervalType); if (policy == null) { - policy = createSnapshotPolicy(volumeId, schedule, timezone, intervalType, maxSnaps, display, zoneIds); + policy = createSnapshotPolicy(volumeId, schedule, timezone, intervalType, maxSnaps, display, zoneIds, poolIds); } else { - updateSnapshotPolicy(policy, schedule, timezone, intervalType, maxSnaps, active, display, zoneIds); + updateSnapshotPolicy(policy, schedule, timezone, intervalType, maxSnaps, active, display, zoneIds, poolIds); } createTagsForSnapshotPolicy(tags, policy); @@ -1268,7 +1300,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } } - protected SnapshotPolicyVO createSnapshotPolicy(long volumeId, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, List zoneIds) { + protected SnapshotPolicyVO createSnapshotPolicy(long volumeId, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean display, List zoneIds, List poolIds) { SnapshotPolicyVO policy = new SnapshotPolicyVO(volumeId, schedule, timezone, intervalType, maxSnaps, display); policy = _snapshotPolicyDao.persist(policy); if (CollectionUtils.isNotEmpty(zoneIds)) { @@ -1278,12 +1310,19 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } snapshotPolicyDetailsDao.saveDetails(details); } + if (CollectionUtils.isNotEmpty(poolIds)) { + List details = new ArrayList<>(); + for (Long poolId : poolIds) { + details.add(new SnapshotPolicyDetailVO(policy.getId(), ApiConstants.STORAGE_ID, String.valueOf(poolId))); + } + snapshotPolicyDetailsDao.saveDetails(details); + } _snapSchedMgr.scheduleNextSnapshotJob(policy); logger.debug(String.format("Created snapshot policy %s.", new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE).setExcludeFieldNames("id", "uuid", "active"))); return 
policy; } - protected void updateSnapshotPolicy(SnapshotPolicyVO policy, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean active, boolean display, List zoneIds) { + protected void updateSnapshotPolicy(SnapshotPolicyVO policy, String schedule, String timezone, IntervalType intervalType, int maxSnaps, boolean active, boolean display, List zoneIds, List poolIds) { String previousPolicy = new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE).setExcludeFieldNames("id", "uuid").toString(); boolean previousDisplay = policy.isDisplay(); policy.setSchedule(schedule); @@ -1301,7 +1340,14 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } snapshotPolicyDetailsDao.saveDetails(details); } - + if (CollectionUtils.isNotEmpty(poolIds)) { + List details = snapshotPolicyDetailsDao.listDetails(policy.getId()); + details = details.stream().filter(d -> !ApiConstants.STORAGE_ID.equals(d.getName())).collect(Collectors.toList()); + for (Long poolId : poolIds) { + details.add(new SnapshotPolicyDetailVO(policy.getId(), ApiConstants.STORAGE_ID, String.valueOf(poolId))); + } + snapshotPolicyDetailsDao.saveDetails(details); + } _snapSchedMgr.scheduleOrCancelNextSnapshotJobOnDisplayChange(policy, previousDisplay); taggedResourceService.deleteTags(Collections.singletonList(policy.getUuid()), ResourceObjectType.SnapshotPolicy, null); logger.debug(String.format("Updated snapshot policy %s to %s.", previousPolicy, new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE) @@ -1325,8 +1371,10 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement for (SnapshotPolicyVO policy : policies) { List details = snapshotPolicyDetailsDao.findDetails(policy.getId(), ApiConstants.ZONE_ID); List zoneIds = details.stream().map(d -> Long.valueOf(d.getValue())).collect(Collectors.toList()); + List poolDetails = snapshotPolicyDetailsDao.findDetails(policy.getId(), ApiConstants.STORAGE_ID); + List poolIds 
= poolDetails.stream().map(d -> Long.valueOf(d.getValue())).collect(Collectors.toList()); persistSnapshotPolicy(destVolume, policy.getSchedule(), policy.getTimezone(), intervalTypes[policy.getInterval()], policy.getMaxSnaps(), - policy.isDisplay(), policy.isActive(), taggedResourceService.getTagsFromResource(ResourceObjectType.SnapshotPolicy, policy.getId()), zoneIds); + policy.isDisplay(), policy.isActive(), taggedResourceService.getTagsFromResource(ResourceObjectType.SnapshotPolicy, policy.getId()), zoneIds, poolIds); } } @@ -1580,12 +1628,19 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement if (backupSnapToSecondary) { if (!isKvmAndFileBasedStorage) { - backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds()); + backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds(), payload.getStoragePoolIds()); } else { postSnapshotDirectlyToSecondary(snapshot, snapshotOnPrimary, snapshotId); } } else { logger.debug("Skipping backup of snapshot [{}] to secondary due to configuration [{}].", snapshotOnPrimary.getUuid(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key()); + + if (CollectionUtils.isNotEmpty(payload.getStoragePoolIds()) && payload.getAsyncBackup()) { + snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.COPY); + if (snapshotStrategy != null) { + backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshotOnPrimary, snapshotBackupRetries - 1, snapshotStrategy, payload.getZoneIds(), payload.getStoragePoolIds()), 0, TimeUnit.SECONDS); + } + } snapshotOnPrimary.markBackedUp(); } @@ -1606,8 +1661,13 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement // Correct the resource count of snapshot in case of delta snapshots. 
_resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); - if (!payload.getAsyncBackup() && backupSnapToSecondary) { - copyNewSnapshotToZones(snapshotId, snapshot.getDataCenterId(), payload.getZoneIds()); + if (!payload.getAsyncBackup()) { + if (backupSnapToSecondary) { + copyNewSnapshotToZones(snapshotId, snapshot.getDataCenterId(), payload.getZoneIds()); + } + if (CollectionUtils.isNotEmpty(payload.getStoragePoolIds())) { + copyNewSnapshotToZonesOnPrimary(payload, snapshot); + } } } catch (Exception e) { logger.debug("post process snapshot failed", e); @@ -1652,10 +1712,45 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return volumeInfo.getHypervisorType() == HypervisorType.KVM && fileBasedStores.contains(storagePool.getPoolType()); } + private void copyNewSnapshotToZonesOnPrimary(CreateSnapshotPayload payload, SnapshotInfo snapshot) { + SnapshotStrategy snapshotStrategy; + snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.COPY); + if (snapshotStrategy != null) { + for (Long storagePoolId : payload.getStoragePoolIds()) { + copySnapshotOnPool(snapshot, snapshotStrategy, storagePoolId); + } + } else { + logger.info("Unable to find snapshot strategy to handle the copy of a snapshot with id " + snapshot.getUuid()); + } + } - protected void backupSnapshotToSecondary(boolean asyncBackup, SnapshotStrategy snapshotStrategy, SnapshotInfo snapshotOnPrimary, List zoneIds) { + private boolean copySnapshotOnPool(SnapshotInfo snapshot, SnapshotStrategy snapshotStrategy, Long storagePoolId) { + DataStore store = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + SnapshotInfo snapshotOnStore = (SnapshotInfo) store.create(snapshot); + + try { + AsyncCallFuture future = snapshotSrv.copySnapshot(snapshot, snapshotOnStore, snapshotStrategy); + SnapshotResult result = future.get(); + if 
(result.isFailed()) { + logger.debug(String.format("Copy snapshot ID: %d failed for primary storage %s: %s", snapshot.getSnapshotId(), storagePoolId, result.getResult())); + return false; + } + snapshotZoneDao.addSnapshotToZone(snapshot.getId(), snapshotOnStore.getDataCenterId()); + _resourceLimitMgr.incrementResourceCount(CallContext.current().getCallingUserId(), ResourceType.primary_storage, snapshot.getSize()); + if (CallContext.current().getCallingUserId() != Account.ACCOUNT_ID_SYSTEM) { + SnapshotVO snapshotVO = _snapshotDao.findByIdIncludingRemoved(snapshot.getSnapshotId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_COPY, CallContext.current().getCallingAccountId(), snapshotOnStore.getDataCenterId(), snapshotVO.getId(), null, null, null, snapshotVO.getSize(), + snapshotVO.getSize(), snapshotVO.getClass().getName(), snapshotVO.getUuid()); + } + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Could not copy the snapshot to another pool", e); + } + return true; + } + + protected void backupSnapshotToSecondary(boolean asyncBackup, SnapshotStrategy snapshotStrategy, SnapshotInfo snapshotOnPrimary, List zoneIds, List poolIds) { if (asyncBackup) { - backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshotOnPrimary, snapshotBackupRetries - 1, snapshotStrategy, zoneIds), 0, TimeUnit.SECONDS); + backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshotOnPrimary, snapshotBackupRetries - 1, snapshotStrategy, zoneIds, poolIds), 0, TimeUnit.SECONDS); } else { SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshotOnPrimary); if (backupedSnapshot != null) { @@ -1670,33 +1765,46 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement SnapshotStrategy snapshotStrategy; List zoneIds; + List poolIds; - public BackupSnapshotTask(SnapshotInfo snap, int maxRetries, SnapshotStrategy strategy, List zoneIds) { + public BackupSnapshotTask(SnapshotInfo snap, int 
maxRetries, SnapshotStrategy strategy, List zoneIds, List poolIds) { snapshot = snap; attempts = maxRetries; snapshotStrategy = strategy; this.zoneIds = zoneIds; + this.poolIds = poolIds; } @Override protected void runInContext() { try { logger.debug("Value of attempts is " + (snapshotBackupRetries - attempts)); + if (Boolean.TRUE.equals(SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value()) && CollectionUtils.isEmpty(poolIds)) { + SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshot); - SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshot); + if (backupedSnapshot != null) { + snapshotStrategy.postSnapshotCreation(snapshot); + copyNewSnapshotToZones(snapshot.getId(), snapshot.getDataCenterId(), zoneIds); + } + } - if (backupedSnapshot != null) { - snapshotStrategy.postSnapshotCreation(snapshot); - copyNewSnapshotToZones(snapshot.getId(), snapshot.getDataCenterId(), zoneIds); + if (CollectionUtils.isNotEmpty(poolIds)) { + for (Long poolId: poolIds) { + copySnapshotOnPool(snapshot, snapshotStrategy, poolId); + } } } catch (final Exception e) { - if (attempts >= 0) { - logger.debug("Backing up of snapshot failed, for snapshot {}, left with {} more attempts", snapshot, attempts); - backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy, zoneIds), snapshotBackupRetryInterval, TimeUnit.SECONDS); - } else { - logger.debug("Done with {} attempts in backing up of snapshot {}", snapshotBackupRetries, snapshot.getSnapshotVO()); - snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot); - } + decriseBackupSnapshotAttempts(); + } + } + + private void decriseBackupSnapshotAttempts() { + if (attempts >= 0) { + logger.debug("Backing up of snapshot failed, for snapshot {}, left with {} more attempts", snapshot, attempts); + backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy, zoneIds, poolIds), snapshotBackupRetryInterval, TimeUnit.SECONDS); + } else { + 
logger.debug("Done with {} attempts in backing up of snapshot {}", snapshotBackupRetries, snapshot.getSnapshotVO()); + snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot); } } } @@ -2080,26 +2188,21 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return failedZones; } - protected Pair getCheckedSnapshotForCopy(final long snapshotId, final List destZoneIds, Long sourceZoneId) { - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new InvalidParameterValueException("Unable to find snapshot with id"); - } + protected Pair getCheckedSnapshotForCopy(final SnapshotVO snapshot, final List destZoneIds, Long sourceZoneId, boolean useStorageReplication) { // Verify snapshot is BackedUp and is on secondary store - if (!Snapshot.State.BackedUp.equals(snapshot.getState())) { + if (!Snapshot.State.BackedUp.equals(snapshot.getState()) && !useStorageReplication) { throw new InvalidParameterValueException("Snapshot is not backed up"); } - if (snapshot.getLocationType() != null && !Snapshot.LocationType.SECONDARY.equals(snapshot.getLocationType())) { + if (snapshot.getLocationType() != null && !Snapshot.LocationType.SECONDARY.equals(snapshot.getLocationType()) && !useStorageReplication) { throw new InvalidParameterValueException("Snapshot is not backed up"); } - if (CollectionUtils.isEmpty(destZoneIds)) { - throw new InvalidParameterValueException("Please specify valid destination zone(s)."); - } Volume volume = _volsDao.findById(snapshot.getVolumeId()); if (sourceZoneId == null) { sourceZoneId = volume.getDataCenterId(); } - if (destZoneIds.contains(sourceZoneId)) { + if (CollectionUtils.isEmpty(destZoneIds)) { + throw new InvalidParameterValueException("Please specify valid destination zone(s)."); + } else if (destZoneIds.contains(sourceZoneId)) { throw new InvalidParameterValueException("Please specify different source and destination zones."); } DataCenterVO sourceZone = 
dataCenterDao.findById(sourceZoneId); @@ -2124,16 +2227,42 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return dstZone; } + protected StoragePoolVO getCheckedDestinationStorageForSnapshotCopy(long poolId, boolean isRootAdmin) { + StoragePoolVO destPool = _storagePoolDao.findById(poolId); + if (destPool == null) { + throw new InvalidParameterValueException("Please specify a valid destination pool."); + } + if (!StoragePoolStatus.Up.equals(destPool.getStatus()) && !isRootAdmin) { + throw new PermissionDeniedException("Cannot perform this operation, the storage pool is not in Up state or the user is not the Root Admin " + destPool.getName()); + } + DataCenterVO destZone = dataCenterDao.findById(destPool.getDataCenterId()); + if (DataCenter.Type.Edge.equals(destZone.getType())) { + logger.error(String.format("Edge zone %s specified for snapshot copy", destZone)); + throw new InvalidParameterValueException(String.format("Snapshot copy is not supported by zone %s", destZone.getName())); + } + return destPool; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_COPY, eventDescription = "copying snapshot", create = false) public Snapshot copySnapshot(CopySnapshotCmd cmd) throws StorageUnavailableException, ResourceAllocationException { final Long snapshotId = cmd.getId(); Long sourceZoneId = cmd.getSourceZoneId(); List destZoneIds = cmd.getDestinationZoneIds(); + List storagePoolIds = cmd.getStoragePoolIds(); + Boolean useStorageReplication = cmd.useStorageReplication(); Account caller = CallContext.current().getCallingAccount(); - Pair snapshotZonePair = getCheckedSnapshotForCopy(snapshotId, destZoneIds, sourceZoneId); + SnapshotVO snapshotVO = _snapshotDao.findById(snapshotId); + if (snapshotVO == null) { + throw new InvalidParameterValueException("Unable to find snapshot with id"); + } + + Pair snapshotZonePair = getCheckedSnapshotForCopy(snapshotVO, destZoneIds, sourceZoneId, useStorageReplication); SnapshotVO 
snapshot = snapshotZonePair.first(); sourceZoneId = snapshotZonePair.second(); + VolumeInfo volume = volFactory.getVolume(snapshot.getVolumeId()); + storagePoolIds = snapshotHelper.addStoragePoolsForCopyToPrimary(volume, destZoneIds, storagePoolIds, useStorageReplication); + boolean canCopyBetweenStoragePools = CollectionUtils.isNotEmpty(storagePoolIds) && canCopyOnPrimary(storagePoolIds, snapshotVO); Map dataCenterVOs = new HashMap<>(); boolean isRootAdminCaller = _accountMgr.isRootAdmin(caller.getId()); for (Long destZoneId: destZoneIds) { @@ -2142,11 +2271,15 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, true, snapshot); DataStore srcSecStore = getSnapshotZoneImageStore(snapshotId, sourceZoneId); - if (srcSecStore == null) { + if (srcSecStore == null && !canCopyBetweenStoragePools) { throw new InvalidParameterValueException(String.format("There is no snapshot ID: %s ready on image store", snapshot.getUuid())); } + if (canCopyBetweenStoragePools) { + snapshotHelper.checkIfThereAreMoreThanOnePoolInTheZone(storagePoolIds); + copySnapshotToPrimaryDifferentZone(storagePoolIds, snapshot); + } List failedZones = copySnapshotToZones(snapshot, srcSecStore, new ArrayList<>(dataCenterVOs.values())); - if (destZoneIds.size() > failedZones.size()){ + if (destZoneIds.size() > failedZones.size() || canCopyBetweenStoragePools){ if (!failedZones.isEmpty()) { logger.error(String.format("There were failures when copying snapshot to zones: %s", StringUtils.joinWith(", ", failedZones.toArray()))); @@ -2157,6 +2290,74 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } } + private boolean canCopyOnPrimary(List poolIds, Snapshot snapshot) { + List poolsToBeRemoved = new ArrayList<>(); + for (Long poolId : poolIds) { + PrimaryDataStore dataStore = (PrimaryDataStore) dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + if 
(isDataStoreNull(dataStore == null, poolsToBeRemoved, poolId)) continue; + + SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshot.getId(), poolId, DataStoreRole.Primary); + if (isSnapshotExistsOnPool(snapshot, dataStore, snapshotInfo)) continue; + + VolumeVO volume = _volsDao.findById(snapshot.getVolumeId()); + if (isDataStoreNull(volume == null, poolsToBeRemoved, poolId)) continue; + doesStorageSupportCopySnapshot(poolsToBeRemoved, poolId, dataStore, volume); + } + poolIds.removeAll(poolsToBeRemoved); + if (CollectionUtils.isEmpty(poolIds)) { + return false; + } + return true; + } + + private void doesStorageSupportCopySnapshot(List poolsToBeRemoved, Long poolId, PrimaryDataStore dataStore, VolumeVO volume) { + if (dataStore.getDriver() != null + && MapUtils.isNotEmpty(dataStore.getDriver().getCapabilities()) + && !dataStore.getDriver().getCapabilities().containsKey(DataStoreCapabilities.CAN_COPY_SNAPSHOT_BETWEEN_ZONES_AND_SAME_POOL_TYPE.toString()) + && dataStore.getPoolType() != volume.getPoolType()) { + poolsToBeRemoved.add(poolId); + logger.debug(String.format("The %s does not support copy to %s between zones", dataStore.getPoolType(), volume.getPoolType())); + } + } + + private boolean isSnapshotExistsOnPool(Snapshot snapshot, PrimaryDataStore dataStore, SnapshotInfo snapshotInfo) { + if (snapshotInfo != null) { + logger.debug(String.format("Snapshot [%s] already exist on pool [%s]", snapshot.getUuid(), dataStore.getName())); + return true; + } + return false; + } + + private static boolean isDataStoreNull(boolean object, List poolsToBeRemoved, Long poolId) { + if (object) { + poolsToBeRemoved.add(poolId); + return true; + } + return false; + } + + private void copySnapshotToPrimaryDifferentZone(List poolIds, SnapshotVO snapshot) { + VolumeInfo volume = volFactory.getVolume(snapshot.getVolumeId()); + if (volume == null) { + throw new CloudRuntimeException("Failed to find volume with id: " + snapshot.getVolumeId()); + } + CreateSnapshotPayload 
payload = setPayload(poolIds, volume, snapshot); + SnapshotInfo snapshotInfo = snapshotFactory.getSnapshotOnPrimaryStore(snapshot.getId()); + copyNewSnapshotToZonesOnPrimary(payload, snapshotInfo); + } + + private CreateSnapshotPayload setPayload(List poolIds, VolumeInfo vol, SnapshotVO snapshotCreate) { + CreateSnapshotPayload payload = new CreateSnapshotPayload(); + payload.setSnapshotId(snapshotCreate.getId()); + payload.setSnapshotPolicyId(SnapshotVO.MANUAL_POLICY_ID); + payload.setLocationType(snapshotCreate.getLocationType()); + payload.setAccount(_accountMgr.getAccount(vol.getAccountId())); + payload.setAsyncBackup(false); + payload.setQuiescevm(false); + payload.setStoragePoolIds(poolIds); + return payload; + } + protected void copyNewSnapshotToZones(long snapshotId, long zoneId, List destZoneIds) { if (CollectionUtils.isEmpty(destZoneIds)) { return; diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 81b75c23eba..9d9ec4a7c5c 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -322,6 +322,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject private HeuristicRuleHelper heuristicRuleHelper; + protected boolean backupSnapshotAfterTakingSnapshot = SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value(); + private TemplateAdapter getAdapter(HypervisorType type) { TemplateAdapter adapter = null; if (type == HypervisorType.BareMetal) { @@ -1693,13 +1695,25 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, AsyncCallFuture future = null; if (snapshotId != null) { - DataStoreRole dataStoreRole = snapshotHelper.getDataStoreRole(snapshot); - kvmSnapshotOnlyInPrimaryStorage = snapshotHelper.isKvmSnapshotOnlyInPrimaryStorage(snapshot, dataStoreRole); - snapInfo = 
_snapshotFactory.getSnapshotWithRoleAndZone(snapshotId, dataStoreRole, zoneId); + DataStoreRole dataStoreRole = snapshotHelper.getDataStoreRole(snapshot, zoneId); + kvmSnapshotOnlyInPrimaryStorage = snapshotHelper.isKvmSnapshotOnlyInPrimaryStorage(snapshot, dataStoreRole, zoneId); + snapInfo = _snapshotFactory.getSnapshotWithRoleAndZone(snapshotId, dataStoreRole, zoneId); boolean kvmIncrementalSnapshot = SnapshotManager.kvmIncrementalSnapshot.valueIn(_hostDao.findClusterIdByVolumeInfo(snapInfo.getBaseVolume())); - if (dataStoreRole == DataStoreRole.Image || kvmSnapshotOnlyInPrimaryStorage) { + boolean skipCopyToSecondary = false; + boolean keepOnPrimary = snapshotHelper.isStorageSupportSnapshotToTemplate(snapInfo); + if (keepOnPrimary) { + ImageStoreVO imageStore = _imgStoreDao.findOneByZoneAndProtocol(zoneId, "nfs"); + if (imageStore == null) { + throw new CloudRuntimeException(String.format("Could not find an NFS secondary storage pool on zone %s to use as a temporary location " + + "for instance conversion", zoneId)); + } + DataStore dataStore = _dataStoreMgr.getDataStore(imageStore.getId(), DataStoreRole.Image); + if (dataStore != null) { + store = dataStore; + } + } else if (dataStoreRole == DataStoreRole.Image) { snapInfo = snapshotHelper.backupSnapshotToSecondaryStorageIfNotExists(snapInfo, dataStoreRole, snapshot, kvmSnapshotOnlyInPrimaryStorage); _accountMgr.checkAccess(caller, null, true, snapInfo); DataStore snapStore = snapInfo.getDataStore(); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 91897c0977d..cd7d198eb61 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -25,6 +25,7 @@ import static org.apache.cloudstack.api.ApiConstants.MIN_IOPS; import java.io.IOException; import java.io.StringReader; import java.io.UnsupportedEncodingException; +import java.lang.reflect.Type; import 
java.net.URLDecoder; import java.text.SimpleDateFormat; import java.time.LocalDateTime; @@ -74,10 +75,13 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import org.apache.cloudstack.api.command.admin.vm.CreateVMFromBackupCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.DeployVMCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; +import org.apache.cloudstack.api.command.user.vm.BaseDeployVMCmd; +import org.apache.cloudstack.api.command.user.vm.CreateVMFromBackupCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVnfApplianceCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -98,8 +102,8 @@ import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; import org.apache.cloudstack.api.command.user.volume.ChangeOfferingForVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; -import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.BackupVO; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity; @@ -399,6 +403,8 @@ import com.cloud.vm.dao.VmStatsDao; import com.cloud.vm.snapshot.VMSnapshotManager; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.google.gson.Gson; +import com.google.gson.reflect.TypeToken; public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, 
Configurable { @@ -1008,14 +1014,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override @ActionEvent(eventType = EventTypes.EVENT_VM_RESETSSHKEY, eventDescription = "resetting Vm SSHKey", async = true) public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException { - Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, cmd.getAccountName(), cmd.getDomainId(), cmd.getProjectId()); - Long vmId = cmd.getId(); UserVmVO userVm = _vmDao.findById(cmd.getId()); if (userVm == null) { - throw new InvalidParameterValueException("unable to find a virtual machine by id" + cmd.getId()); + throw new InvalidParameterValueException("unable to find a virtual machine by id " + cmd.getId()); } if (UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); @@ -1027,10 +1031,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir userVm.getName())); } - VMTemplateVO template = _templateDao.findByIdIncludingRemoved(userVm.getTemplateId()); - // Do parameters input validation - if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { logger.error("vm ({}) is not in the right state: {}", userVm, userVm.getState()); throw new InvalidParameterValueException("Vm with specified id is not in the right state"); @@ -1040,32 +1041,40 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do SSH Key reset"); } - if (cmd.getNames() == null || cmd.getNames().isEmpty()) { + List names = cmd.getNames(); + if (CollectionUtils.isEmpty(names)) { throw new InvalidParameterValueException("'keypair' or 'keypairs' must be specified"); } + userVm = resetVMSSHKeyInternal(userVm, owner, names); + return 
userVm; + } + + private UserVmVO resetVMSSHKeyInternal(UserVmVO userVm, Account owner, List names) throws ResourceUnavailableException, InsufficientCapacityException { + Account caller = CallContext.current().getCallingAccount(); + String keypairnames = ""; String sshPublicKeys = ""; List pairs = new ArrayList<>(); - pairs = _sshKeyPairDao.findByNames(owner.getAccountId(), owner.getDomainId(), cmd.getNames()); - if (pairs == null || pairs.size() != cmd.getNames().size()) { + pairs = _sshKeyPairDao.findByNames(owner.getAccountId(), owner.getDomainId(), names); + if (pairs == null || pairs.size() != names.size()) { throw new InvalidParameterValueException("Not all specified keypairs exist"); } sshPublicKeys = pairs.stream().map(p -> p.getPublicKey()).collect(Collectors.joining("\n")); - keypairnames = String.join(",", cmd.getNames()); + keypairnames = String.join(",", names); _accountMgr.checkAccess(caller, null, true, userVm); - boolean result = resetVMSSHKeyInternal(vmId, sshPublicKeys, keypairnames); + boolean result = resetVMSSHKeyInternal(userVm.getId(), sshPublicKeys, keypairnames); - UserVmVO vm = _vmDao.findById(vmId); + UserVmVO vm = _vmDao.findById(userVm.getId()); _vmDao.loadDetails(vm); if (!result) { throw new CloudRuntimeException("Failed to reset SSH Key for the virtual machine "); } - removeEncryptedPasswordFromUserVmVoDetails(vmId); + removeEncryptedPasswordFromUserVmVoDetails(userVm.getId()); _vmDao.loadDetails(userVm); return userVm; @@ -2515,17 +2524,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } try { - if (vm.getBackupOfferingId() != null) { - List backupsForVm = backupDao.listByVmId(vm.getDataCenterId(), vm.getId()); - if (CollectionUtils.isEmpty(backupsForVm)) { - backupManager.removeVMFromBackupOffering(vm.getId(), true); - } else { - throw new CloudRuntimeException(String.format("This VM [uuid: %s, name: %s] has a " - + "Backup Offering [id: %s, external id: %s] with %s backups. 
Please, remove the backup offering " - + "before proceeding to VM exclusion!", vm.getUuid(), vm.getInstanceName(), vm.getBackupOfferingId(), - vm.getBackupExternalId(), backupsForVm.size())); - } - } + backupManager.checkAndRemoveBackupOfferingBeforeExpunge(vm); autoScaleManager.removeVmFromVmGroup(vm.getId()); @@ -3523,6 +3522,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir final ControlledEntity[] volumesToDelete = volumesToBeDeleted.toArray(new ControlledEntity[0]); _accountMgr.checkAccess(ctx.getCallingAccount(), null, true, volumesToDelete); + if (expunge) { + backupManager.checkAndRemoveBackupOfferingBeforeExpunge(vm); + } + stopVirtualMachine(vmId, VmDestroyForcestop.value()); // Detach all data disks from VM @@ -3752,7 +3755,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", create = true) public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List securityGroupIdList, - Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, + Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParametes, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Map userVmOVFProperties, boolean dynamicScalingEnabled, Long overrideDiskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, @@ -3802,7 +3805,7 @@ public 
class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, + return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null, overrideDiskOfferingId, volume, snapshot); @@ -3811,7 +3814,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", create = true) public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, - List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, + List securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, Map userVmOVFProperties, boolean dynamicScalingEnabled, Long overrideDiskOfferingId, String vmType, Volume volume, Snapshot snapshot) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, 
StorageUnavailableException, ResourceAllocationException { @@ -3915,7 +3918,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, + return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, vmType, overrideDiskOfferingId, volume, snapshot); } @@ -3923,7 +3926,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", create = true) public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List networkIdList, Account owner, - String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, + String hostName, String displayName, Long diskOfferingId, Long diskSize, List dataDiskInfoList, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Map requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List affinityGroupIdList, Map customParametrs, String customId, Map> dhcpOptionsMap, Map dataDiskTemplateToDiskOfferingMap, Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String vmType, Long overrideDiskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException, 
ConcurrentOperationException, ResourceUnavailableException, @@ -3977,7 +3980,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } verifyExtraDhcpOptionsNetwork(dhcpOptionsMap, networkList); - return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData, + return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, null, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, overrideDiskOfferingId, volume, snapshot); } @@ -4107,7 +4110,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @DB private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate tmplt, String hostName, String displayName, Account owner, - Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, + Long diskOfferingId, Long diskSize, List dataDiskInfoList, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap, @@ -4192,26 +4195,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } DiskOfferingVO rootDiskOffering = _diskOfferingDao.findById(rootDiskOfferingId); - long volumesSize = 0; - if (volume != null) { 
- volumesSize = volume.getSize(); - } else if (snapshot != null) { - VolumeVO volumeVO = _volsDao.findById(snapshot.getVolumeId()); - volumesSize = volumeVO != null ? volumeVO.getSize() : 0; - } else { - volumesSize = configureCustomRootDiskSize(customParameters, template, hypervisorType, rootDiskOffering); - } + long volumesSize = configureCustomRootDiskSize(customParameters, template, hypervisorType, rootDiskOffering); if (rootDiskOffering.getEncrypt() && hypervisorType != HypervisorType.KVM) { throw new InvalidParameterValueException("Root volume encryption is not supported for hypervisor type " + hypervisorType); } - long additionalDiskSize = 0L; - if (!isIso && diskOfferingId != null) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); - additionalDiskSize = verifyAndGetDiskSize(diskOffering, diskSize); - } - UserVm vm = getCheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, additionalDiskSize, volume, snapshot); + UserVm vm = getCheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, volume, snapshot); _securityGroupMgr.addInstanceToGroups(vm, securityGroupIdList); 
@@ -4224,14 +4214,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } private UserVm getCheckedUserVmResource(DataCenter zone, String hostName, String displayName, Account owner, - Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, + Long diskOfferingId, Long diskSize, List dataDiskInfoList, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap, Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String vmType, VMTemplateVO template, HypervisorType hypervisorType, long accountId, ServiceOfferingVO offering, boolean isIso, - Long rootDiskOfferingId, long volumesSize, long additionalDiskSize, Volume volume, Snapshot snapshot) throws ResourceAllocationException { + Long rootDiskOfferingId, long volumesSize, Volume volume, Snapshot snapshot) throws ResourceAllocationException { if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) { List resourceLimitHostTags = resourceLimitService.getResourceLimitHostTags(offering, template); try (CheckedReservation vmReservation = new CheckedReservation(owner, ResourceType.user_vm, resourceLimitHostTags, 1l, reservationDao, resourceLimitService); @@ -4240,7 +4230,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir CheckedReservation gpuReservation = offering.getGpuCount() != null && offering.getGpuCount() > 0 ? 
new CheckedReservation(owner, ResourceType.gpu, resourceLimitHostTags, Long.valueOf(offering.getGpuCount()), reservationDao, resourceLimitService) : null; ) { - return getUncheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, additionalDiskSize, volume, snapshot); + return getUncheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, volume, snapshot); } catch (ResourceAllocationException | CloudRuntimeException e) { throw e; } catch (Exception e) { @@ -4249,7 +4239,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } else { - return getUncheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, additionalDiskSize, volume, snapshot); + 
return getUncheckedUserVmResource(zone, hostName, displayName, owner, diskOfferingId, diskSize, dataDiskInfoList, networkList, securityGroupIdList, group, httpmethod, userData, userDataId, userDataDetails, sshKeyPairs, caller, requestedIps, defaultIps, isDisplayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, template, hypervisorType, accountId, offering, isIso, rootDiskOfferingId, volumesSize, volume, snapshot); } } @@ -4258,24 +4248,53 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return resourceLimitService.getResourceLimitStorageTags(diskOfferingVO); } + private List reserveStorageResourcesForVm(Account owner, Long diskOfferingId, Long diskSize, List dataDiskInfoList, Long rootDiskOfferingId, ServiceOfferingVO offering, Long rootDiskSize) throws ResourceAllocationException { + List checkedReservations = new ArrayList<>(); + + List rootResourceLimitStorageTags = getResourceLimitStorageTags(rootDiskOfferingId != null ? rootDiskOfferingId : offering.getDiskOfferingId()); + CheckedReservation rootVolumeReservation = new CheckedReservation(owner, ResourceType.volume, rootResourceLimitStorageTags, 1L, reservationDao, resourceLimitService); + checkedReservations.add(rootVolumeReservation); + CheckedReservation rootPrimaryStorageReservation = new CheckedReservation(owner, ResourceType.primary_storage, rootResourceLimitStorageTags, rootDiskSize, reservationDao, resourceLimitService); + checkedReservations.add(rootPrimaryStorageReservation); + + if (diskOfferingId != null) { + List additionalResourceLimitStorageTags = diskOfferingId != null ? getResourceLimitStorageTags(diskOfferingId) : null; + DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); + Long size = verifyAndGetDiskSize(diskOffering, diskSize); + CheckedReservation additionalVolumeReservation = diskOfferingId != null ? 
new CheckedReservation(owner, ResourceType.volume, additionalResourceLimitStorageTags, 1L, reservationDao, resourceLimitService) : null; + checkedReservations.add(additionalVolumeReservation); + CheckedReservation additionalPrimaryStorageReservation = diskOfferingId != null ? new CheckedReservation(owner, ResourceType.primary_storage, additionalResourceLimitStorageTags, size, reservationDao, resourceLimitService) : null; + checkedReservations.add(additionalPrimaryStorageReservation); + + } + + if (dataDiskInfoList != null) { + for (VmDiskInfo vmDiskInfo : dataDiskInfoList) { + DiskOffering diskOffering = vmDiskInfo.getDiskOffering(); + List additionalResourceLimitStorageTagsForDataDisk = getResourceLimitStorageTags(vmDiskInfo.getDiskOffering().getId()); + Long size = verifyAndGetDiskSize(diskOffering, vmDiskInfo.getSize()); + CheckedReservation additionalVolumeReservation = new CheckedReservation(owner, ResourceType.volume, additionalResourceLimitStorageTagsForDataDisk, 1L, reservationDao, resourceLimitService); + checkedReservations.add(additionalVolumeReservation); + CheckedReservation additionalPrimaryStorageReservation = new CheckedReservation(owner, ResourceType.primary_storage, additionalResourceLimitStorageTagsForDataDisk, size, reservationDao, resourceLimitService); + checkedReservations.add(additionalPrimaryStorageReservation); + } + } + return checkedReservations; + } + private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, String displayName, Account owner, - Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, + Long diskOfferingId, Long diskSize, List dataDiskInfoList, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, Long userDataId, String userDataDetails, List sshKeyPairs, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> 
dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap, Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String vmType, VMTemplateVO template, HypervisorType hypervisorType, long accountId, ServiceOfferingVO offering, boolean isIso, - Long rootDiskOfferingId, long volumesSize, long additionalDiskSize, Volume volume, Snapshot snapshot) throws ResourceAllocationException - { - List rootResourceLimitStorageTags = getResourceLimitStorageTags(rootDiskOfferingId != null ? rootDiskOfferingId : offering.getDiskOfferingId()); - List additionalResourceLimitStorageTags = diskOfferingId != null ? getResourceLimitStorageTags(diskOfferingId) : null; + Long rootDiskOfferingId, long volumesSize, Volume volume, Snapshot snapshot) throws ResourceAllocationException { + List checkedReservations = new ArrayList<>(); - try (CheckedReservation rootVolumeReservation = new CheckedReservation(owner, ResourceType.volume, rootResourceLimitStorageTags, 1L, reservationDao, resourceLimitService); - CheckedReservation additionalVolumeReservation = diskOfferingId != null ? new CheckedReservation(owner, ResourceType.volume, additionalResourceLimitStorageTags, 1L, reservationDao, resourceLimitService) : null; - CheckedReservation rootPrimaryStorageReservation = new CheckedReservation(owner, ResourceType.primary_storage, rootResourceLimitStorageTags, volumesSize, reservationDao, resourceLimitService); - CheckedReservation additionalPrimaryStorageReservation = diskOfferingId != null ? 
new CheckedReservation(owner, ResourceType.primary_storage, additionalResourceLimitStorageTags, additionalDiskSize, reservationDao, resourceLimitService) : null; - ) { + try { + checkedReservations = reserveStorageResourcesForVm(owner, diskOfferingId, diskSize, dataDiskInfoList, rootDiskOfferingId, offering, volumesSize); // verify security group ids if (securityGroupIdList != null) { @@ -4557,15 +4576,24 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, userDataId, userDataDetails, caller, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKeys, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, - datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, rootDiskOfferingId, keypairnames, volume, snapshot); + datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, vmType, rootDiskOfferingId, keypairnames, dataDiskInfoList, volume, snapshot); assignInstanceToGroup(group, id); return vm; - } catch (ResourceAllocationException | CloudRuntimeException e) { + } catch (ResourceAllocationException | CloudRuntimeException e) { throw e; } catch (Exception e) { logger.error("error during resource reservation and allocation", e); throw new CloudRuntimeException(e); + } finally { + for (CheckedReservation checkedReservation : checkedReservations) { + try { + checkedReservation.close(); + } catch (Exception e) { + logger.error("error during resource reservation and allocation", e); + throw new CloudRuntimeException(e); + } + } } } @@ -4583,7 +4611,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - private long verifyAndGetDiskSize(DiskOfferingVO diskOffering, Long diskSize) { + private long verifyAndGetDiskSize(DiskOffering diskOffering, Long diskSize) { long size = 0l; if 
(diskOffering == null) { throw new InvalidParameterValueException("Specified disk offering cannot be found"); @@ -4698,7 +4726,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir final long accountId, final long userId, final ServiceOffering offering, final boolean isIso, final String sshPublicKeys, final LinkedHashMap> networkNicMap, final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled, String vmType, final Long rootDiskOfferingId, String sshkeypairs, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { + final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled, String vmType, final Long rootDiskOfferingId, String sshkeypairs, + List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { UserVmVO vm = new UserVmVO(id, instanceName, displayName, template.getId(), hypervisorType, template.getGuestOSId(), offering.isOfferHA(), offering.getLimitCpuUse(), owner.getDomainId(), owner.getId(), userId, offering.getId(), userData, userDataId, userDataDetails, hostName); vm.setUuid(uuidName); @@ -4816,7 +4845,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir orchestrateVirtualMachineCreate(vm, guestOSCategory, computeTags, rootDiskTags, plan, rootDiskSize, template, hostName, displayName, owner, diskOfferingId, diskSize, offering, isIso,networkNicMap, hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - rootDiskOfferingId, volume, snapshot); + rootDiskOfferingId, dataDiskInfoList, volume, snapshot); } CallContext.current().setEventDetails("Vm Id: " + vm.getUuid()); @@ -4849,16 +4878,16 @@ public class 
UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ServiceOffering offering, boolean isIso, LinkedHashMap> networkNicMap, HypervisorType hypervisorType, Map> extraDhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, - Long rootDiskOfferingId, Volume volume, Snapshot snapshot) throws InsufficientCapacityException{ + Long rootDiskOfferingId, List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException{ try { if (isIso) { _orchSrvc.createVirtualMachineFromScratch(vm.getUuid(), Long.toString(owner.getAccountId()), vm.getIsoId().toString(), hostName, displayName, hypervisorType.name(), guestOSCategory.getName(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, - networkNicMap, plan, extraDhcpOptionMap, rootDiskOfferingId, volume, snapshot); + networkNicMap, plan, extraDhcpOptionMap, rootDiskOfferingId, dataDiskInfoList, volume, snapshot); } else { _orchSrvc.createVirtualMachine(vm.getUuid(), Long.toString(owner.getAccountId()), Long.toString(template.getId()), hostName, displayName, hypervisorType.name(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap, - dataDiskTemplateToDiskOfferingMap, diskOfferingId, rootDiskOfferingId, volume, snapshot); + dataDiskTemplateToDiskOfferingMap, diskOfferingId, rootDiskOfferingId, dataDiskInfoList, volume, snapshot); } if (logger.isDebugEnabled()) { @@ -4979,14 +5008,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir final Long diskOfferingId, final Long diskSize, final String userData, Long userDataId, String userDataDetails, final Account caller, final Boolean isDisplayVm, final String keyboard, final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKeys, final LinkedHashMap> networkNicMap, final long id, final String instanceName, 
final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - Map userVmOVFPropertiesMap, final boolean dynamicScalingEnabled, String vmType, final Long rootDiskOfferingId, String sshkeypairs, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { + Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, + Map userVmOVFPropertiesMap, final boolean dynamicScalingEnabled, String vmType, final Long rootDiskOfferingId, String sshkeypairs, + List dataDiskInfoList, Volume volume, Snapshot snapshot) throws InsufficientCapacityException { return commitUserVm(false, zone, null, null, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, userDataId, userDataDetails, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKeys, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - userVmOVFPropertiesMap, null, dynamicScalingEnabled, vmType, rootDiskOfferingId, sshkeypairs, volume, snapshot); + userVmOVFPropertiesMap, null, dynamicScalingEnabled, vmType, rootDiskOfferingId, sshkeypairs, dataDiskInfoList, volume, snapshot); } public void validateRootDiskResize(final HypervisorType hypervisorType, Long rootDiskSize, VMTemplateVO templateVO, UserVmVO vm, final Map customParameters) throws InvalidParameterValueException @@ -6169,33 +6199,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return null; } - @Override - public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, - StorageUnavailableException, ResourceAllocationException { - //Verify that all objects exist before passing them to the service - Account owner = _accountService.getActiveAccountById(cmd.getEntityOwnerId()); - - 
verifyDetails(cmd.getDetails()); - - Long zoneId = cmd.getZoneId(); - - DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Unable to find zone by id=" + zoneId); - } - - Long serviceOfferingId = cmd.getServiceOfferingId(); - Long overrideDiskOfferingId = cmd.getOverrideDiskOfferingId(); - - ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId); - if (serviceOffering == null) { - throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId); - } - + private void verifyServiceOffering(BaseDeployVMCmd cmd, ServiceOffering serviceOffering) { if (ServiceOffering.State.Inactive.equals(serviceOffering.getState())) { throw new InvalidParameterValueException(String.format("Service offering is inactive: [%s].", serviceOffering.getUuid())); } + Long overrideDiskOfferingId = cmd.getOverrideDiskOfferingId(); if (serviceOffering.getDiskOfferingStrictness() && overrideDiskOfferingId != null) { throw new InvalidParameterValueException(String.format("Cannot override disk offering id %d since provided service offering is strictly mapped to its disk offering", overrideDiskOfferingId)); } @@ -6207,55 +6216,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } } + } - Account caller = CallContext.current().getCallingAccount(); - Long callerId = caller.getId(); - - Long templateId = cmd.getTemplateId(); - VolumeInfo volume = null; - SnapshotVO snapshot = null; - - if (cmd.getVolumeId() != null) { - volume = getVolume(cmd.getVolumeId(), templateId, false); - if (volume == null) { - throw new InvalidParameterValueException("Could not find volume with id=" + cmd.getVolumeId()); - } - _accountMgr.checkAccess(caller, null, true, volume); - templateId = volume.getTemplateId(); - overrideDiskOfferingId = volume.getDiskOfferingId(); - } else if (cmd.getSnapshotId() != null) { - snapshot = 
_snapshotDao.findById(cmd.getSnapshotId()); - if (snapshot == null) { - throw new InvalidParameterValueException("Could not find snapshot with id=" + cmd.getSnapshotId()); - } - _accountMgr.checkAccess(caller, null, true, snapshot); - VolumeInfo volumeOfSnapshot = getVolume(snapshot.getVolumeId(), templateId, true); - templateId = volumeOfSnapshot.getTemplateId(); - overrideDiskOfferingId = volumeOfSnapshot.getDiskOfferingId(); - } - - boolean dynamicScalingEnabled = cmd.isDynamicScalingEnabled(); - - VirtualMachineTemplate template = null; - if (volume != null || snapshot != null) { - template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, templateId); - } else { - template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); - } - // Make sure a valid template ID was specified - if (template == null) { - throw new InvalidParameterValueException("Unable to use template " + templateId); - } + private void verifyTemplate(BaseDeployVMCmd cmd, VirtualMachineTemplate template, Long serviceOfferingId) { if (TemplateType.VNF.equals(template.getTemplateType())) { vnfTemplateManager.validateVnfApplianceNics(template, cmd.getNetworkIds()); } else if (cmd instanceof DeployVnfApplianceCmd) { throw new InvalidParameterValueException("Can't deploy VNF appliance from a non-VNF template"); } - if (cmd.isVolumeOrSnapshotProvided() && - (!(HypervisorType.KVM.equals(template.getHypervisorType()) || HypervisorType.KVM.equals(cmd.getHypervisor())))) { - throw new InvalidParameterValueException("Deploying a virtual machine with existing volume/snapshot is supported only from KVM hypervisors"); - } ServiceOfferingJoinVO svcOffering = serviceOfferingJoinDao.findById(serviceOfferingId); if (template.isDeployAsIs()) { @@ -6272,6 +6241,74 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Boot type and boot mode are not supported on VMware for templates registered as deploy-as-is, as 
we honour what is defined in the template."); + } + } + } + + @Override + public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, + StorageUnavailableException, ResourceAllocationException { + //Verify that all objects exist before passing them to the service + Account owner = _accountService.getActiveAccountById(cmd.getEntityOwnerId()); + + verifyDetails(cmd.getDetails()); + + Long zoneId = cmd.getZoneId(); + + DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Unable to find zone by id=" + zoneId); + } + + Long serviceOfferingId = cmd.getServiceOfferingId(); + if (serviceOfferingId == null) { + throw new InvalidParameterValueException("Unable to execute API command deployvirtualmachine due to missing parameter serviceofferingid"); + } + Long overrideDiskOfferingId = cmd.getOverrideDiskOfferingId(); + + ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId); + if (serviceOffering == null) { + throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId); + } + verifyServiceOffering(cmd, serviceOffering); + + Account caller = CallContext.current().getCallingAccount(); + Long callerId = caller.getId(); + + Long templateId = cmd.getTemplateId(); + VolumeInfo volume = null; + SnapshotVO snapshot = null; + + if (cmd.getVolumeId() != null) { + volume = getVolume(cmd.getVolumeId(), templateId, false); + if (volume == null) { + throw new InvalidParameterValueException("Could not find volume with id=" + cmd.getVolumeId()); + } + _accountMgr.checkAccess(caller, null, true, volume); + templateId = volume.getTemplateId(); + } else if (cmd.getSnapshotId() != null) { + snapshot = _snapshotDao.findById(cmd.getSnapshotId()); + if (snapshot == null) { + throw new InvalidParameterValueException("Could not find snapshot with id=" +
cmd.getSnapshotId()); + } + _accountMgr.checkAccess(caller, null, true, snapshot); + VolumeInfo volumeOfSnapshot = getVolume(snapshot.getVolumeId(), templateId, true); + templateId = volumeOfSnapshot.getTemplateId(); + } + + VirtualMachineTemplate template = null; + if (volume != null || snapshot != null) { + template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, templateId); + } else { + template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); + } + // Make sure a valid template ID was specified (must precede any dereference of template) + if (template == null) { + throw new InvalidParameterValueException("Unable to use template " + templateId); + } + if (cmd.isVolumeOrSnapshotProvided() && + (!(HypervisorType.KVM.equals(template.getHypervisorType()) || HypervisorType.KVM.equals(cmd.getHypervisor())))) { + throw new InvalidParameterValueException("Deploying a virtual machine with existing volume/snapshot is supported only from KVM hypervisors"); + } + verifyTemplate(cmd, template, serviceOfferingId); Long diskOfferingId = cmd.getDiskOfferingId(); DiskOffering diskOffering = null; @@ -6285,6 +6322,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } + List dataDiskInfoList = cmd.getDataDiskInfoList(); + if (dataDiskInfoList != null && diskOfferingId != null) { + throw new InvalidParameterValueException("Cannot specify both disk offering id and data disk offering details"); + } + if (!zone.isLocalStorageEnabled()) { DiskOffering diskOfferingMappedInServiceOffering = _entityMgr.findById(DiskOffering.class, serviceOffering.getDiskOfferingId()); if (diskOfferingMappedInServiceOffering.isUseLocalStorage()) { @@ -6295,25 +6337,48 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - boolean isLeaseFeatureEnabled = VMLeaseManager.InstanceLeaseEnabled.value(); - if (isLeaseFeatureEnabled) { - validateLeaseProperties(cmd.getLeaseDuration(), cmd.getLeaseExpiryAction()); - } - List networkIds =
cmd.getNetworkIds(); LinkedHashMap userVmNetworkMap = getVmOvfNetworkMapping(zone, owner, template, cmd.getVmNetworkMap()); if (MapUtils.isNotEmpty(userVmNetworkMap)) { networkIds = new ArrayList<>(userVmNetworkMap.values()); } - String userData = cmd.getUserData(); - Long userDataId = cmd.getUserdataId(); - String userDataDetails = null; - if (MapUtils.isNotEmpty(cmd.getUserdataDetails())) { - userDataDetails = cmd.getUserdataDetails().toString(); + return createVirtualMachine(cmd, zone, owner, serviceOffering, template, cmd.getHypervisor(), diskOfferingId, cmd.getSize(), overrideDiskOfferingId, dataDiskInfoList, networkIds, cmd.getIpToNetworkMap(), volume, snapshot); + } + + private UserVm createVirtualMachine(BaseDeployVMCmd cmd, DataCenter zone, Account owner, ServiceOffering serviceOffering, VirtualMachineTemplate template, + HypervisorType hypervisor, Long diskOfferingId, Long size, Long overrideDiskOfferingId, List dataDiskInfoList, + List networkIds, Map ipToNetworkMap, Volume volume, Snapshot snapshot) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, ResourceAllocationException { + + ServiceOfferingJoinVO svcOffering = serviceOfferingJoinDao.findById(serviceOffering.getId()); + boolean isLeaseFeatureEnabled = VMLeaseManager.InstanceLeaseEnabled.value(); + if (isLeaseFeatureEnabled) { + validateLeaseProperties(cmd.getLeaseDuration(), cmd.getLeaseExpiryAction()); } - userData = finalizeUserData(userData, userDataId, template); - userData = userDataManager.validateUserData(userData, cmd.getHttpMethod()); + + String userData = null; + Long userDataId = null; + String userDataDetails = null; + List sshKeyPairNames = new ArrayList(); + if (cmd instanceof CreateVMFromBackupCmd) { + if (cmd.getUserData() != null) { + throw new InvalidParameterValueException("User data not supported for instance created from backup"); + } + } else { + userData = cmd.getUserData(); + userDataId = cmd.getUserdataId(); + userDataDetails 
= null; + if (MapUtils.isNotEmpty(cmd.getUserdataDetails())) { + userDataDetails = cmd.getUserdataDetails().toString(); + } + userData = finalizeUserData(userData, userDataId, template); + userData = userDataManager.validateUserData(userData, cmd.getHttpMethod()); + + sshKeyPairNames = cmd.getSSHKeyPairNames(); + } + + Account caller = CallContext.current().getCallingAccount(); + Long callerId = caller.getId(); boolean isRootAdmin = _accountService.isRootAdmin(callerId); @@ -6327,9 +6392,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir String displayName = cmd.getDisplayName(); UserVm vm = null; IpAddresses addrs = new IpAddresses(ipAddress, ip6Address, macAddress); - Long size = cmd.getSize(); + boolean dynamicScalingEnabled = cmd.isDynamicScalingEnabled(); String group = cmd.getGroup(); - List sshKeyPairNames = cmd.getSSHKeyPairNames(); Boolean displayVm = cmd.isDisplayVm(); String keyboard = cmd.getKeyboard(); Map dataDiskTemplateToDiskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap(); @@ -6339,7 +6403,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Can't specify network Ids in Basic zone"); } else { vm = createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, getSecurityGroupIdList(cmd, zone, template, owner), owner, name, displayName, diskOfferingId, - size , group , cmd.getHypervisor(), cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, cmd.getIpToNetworkMap(), addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(), + size , dataDiskInfoList, group , hypervisor, cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, ipToNetworkMap, addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, overrideDiskOfferingId, volume, snapshot); 
} @@ -6347,7 +6411,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (_networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, cmd.getSecurityGroupIdList())) { vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, networkIds, getSecurityGroupIdList(cmd, zone, template, owner), owner, name, - displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, + displayName, diskOfferingId, size, dataDiskInfoList, group, hypervisor, cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, ipToNetworkMap, addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, overrideDiskOfferingId, null, volume, snapshot); @@ -6355,8 +6419,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (cmd.getSecurityGroupIdList() != null && !cmd.getSecurityGroupIdList().isEmpty()) { throw new InvalidParameterValueException("Can't create vm with security groups; security group feature is not enabled per zone"); } - vm = createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, name, displayName, diskOfferingId, size, group, - cmd.getHypervisor(), cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), + vm = createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, name, displayName, diskOfferingId, size, dataDiskInfoList, group, + hypervisor, cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, ipToNetworkMap, addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), 
cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null, overrideDiskOfferingId, volume, snapshot); if (cmd instanceof DeployVnfApplianceCmd) { vnfTemplateManager.createIsolatedNetworkRulesForVnfAppliance(zone, template, owner, vm, (DeployVnfApplianceCmd) cmd); @@ -6365,7 +6429,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } // check if this templateId has a child ISO - List child_templates = _templateDao.listByParentTemplatetId(templateId); + List child_templates = _templateDao.listByParentTemplatetId(template.getId()); for (VMTemplateVO tmpl: child_templates){ if (tmpl.getFormat() == Storage.ImageFormat.ISO){ logger.info("MDOV trying to attach disk {} to the VM {}", tmpl, vm); @@ -6385,10 +6449,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (cmd.getCopyImageTags()) { - VMTemplateVO templateOrIso = _templateDao.findById(templateId); + VMTemplateVO templateOrIso = _templateDao.findById(template.getId()); if (templateOrIso != null) { final ResourceTag.ResourceObjectType templateType = (templateOrIso.getFormat() == ImageFormat.ISO) ? 
ResourceTag.ResourceObjectType.ISO : ResourceTag.ResourceObjectType.Template; - final List resourceTags = resourceTagDao.listBy(templateId, templateType); + final List resourceTags = resourceTagDao.listBy(template.getId(), templateType); for (ResourceTag resourceTag : resourceTags) { final ResourceTagVO copyTag = new ResourceTagVO(resourceTag.getKey(), resourceTag.getValue(), resourceTag.getAccountId(), resourceTag.getDomainId(), vm.getId(), ResourceTag.ResourceObjectType.UserVm, resourceTag.getCustomer(), vm.getUuid()); resourceTagDao.persist(copyTag); @@ -9331,7 +9395,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir null, null, userData, null, null, isDisplayVm, keyboard, accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKeys, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, - null, null, null, powerState, dynamicScalingEnabled, null, serviceOffering.getDiskOfferingId(), null, null, null); + null, null, null, powerState, dynamicScalingEnabled, null, serviceOffering.getDiskOfferingId(), null, null, null, null); } @Override @@ -9376,6 +9440,234 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return true; } + private void updateDetailsWithRootDiskAttributes(Map details, VmDiskInfo rootVmDiskInfo) { + details.put(VmDetailConstants.ROOT_DISK_SIZE, rootVmDiskInfo.getSize().toString()); + if (rootVmDiskInfo.getMinIops() != null) { + details.put(MIN_IOPS, rootVmDiskInfo.getMinIops().toString()); + } + if (rootVmDiskInfo.getMaxIops() != null) { + details.put(MAX_IOPS, rootVmDiskInfo.getMaxIops().toString()); + } + } + + private void checkRootDiskSizeAgainstBackup(Long instanceVolumeSize,DiskOffering rootDiskOffering, Long backupVolumeSize) { + Long instanceRootDiskSize = rootDiskOffering.isCustomized() ? 
instanceVolumeSize : rootDiskOffering.getDiskSize() / GiB_TO_BYTES; + if (instanceRootDiskSize < backupVolumeSize) { + throw new InvalidParameterValueException( + String.format("Instance volume root disk size %d[GiB] cannot be less than the backed-up volume size %d[GiB].", + instanceVolumeSize, backupVolumeSize)); + } + } + + @Override + public UserVm allocateVMFromBackup(CreateVMFromBackupCmd cmd) throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException { + if (!backupManager.canCreateInstanceFromBackup(cmd.getBackupId())) { + throw new CloudRuntimeException("Create instance from backup is not supported for this provider."); + } + DataCenter zone = _dcDao.findById(cmd.getZoneId()); + if (zone == null) { + throw new InvalidParameterValueException("Unable to find zone by id=" + cmd.getZoneId()); + } + + BackupVO backup = backupDao.findById(cmd.getBackupId()); + if (backup == null) { + throw new InvalidParameterValueException("Backup " + cmd.getBackupId() + " does not exist"); + } + if (backup.getZoneId() != cmd.getZoneId()) { + throw new InvalidParameterValueException("Instance should be created in the same zone as the backup"); + } + backupManager.validateBackupForZone(backup.getZoneId()); + backupDao.loadDetails(backup); + + verifyDetails(cmd.getDetails()); + + UserVmVO backupVm = _vmDao.findByIdIncludingRemoved(backup.getVmId()); + HypervisorType hypervisorType = backupVm.getHypervisorType(); + + Long serviceOfferingId = cmd.getServiceOfferingId(); + ServiceOffering serviceOffering; + if (serviceOfferingId != null) { + serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + if (serviceOffering == null) { + throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId); + } + } else { + String serviceOfferingUuid = backup.getDetail(ApiConstants.SERVICE_OFFERING_ID); + if (serviceOfferingUuid == null) { + throw new CloudRuntimeException("Backup doesn't contain service
offering uuid. Please specify a valid service offering id while creating the instance"); + } + serviceOffering = serviceOfferingDao.findByUuid(serviceOfferingUuid); + if (serviceOffering == null) { + throw new CloudRuntimeException("Unable to find service offering with the uuid stored in backup. Please specify a valid service offering id while creating instance"); + } + } + verifyServiceOffering(cmd, serviceOffering); + + VirtualMachineTemplate template; + if (cmd.getTemplateId() != null) { + Long templateId = cmd.getTemplateId(); + template = _templateDao.findById(templateId); + if (template == null) { + throw new InvalidParameterValueException("Unable to use template " + templateId); + } + } else { + String templateUuid = backup.getDetail(ApiConstants.TEMPLATE_ID); + if (templateUuid == null) { + throw new CloudRuntimeException("Backup doesn't contain Template uuid. Please specify a valid Template/ISO while creating the instance"); + } + template = _templateDao.findByUuid(templateUuid); + if (template == null) { + throw new CloudRuntimeException("Unable to find template associated with the backup. Please specify a valid Template/ISO while creating instance"); + } + } + verifyTemplate(cmd, template, serviceOffering.getId()); + + Long size = cmd.getSize(); + + Long diskOfferingId = cmd.getDiskOfferingId(); + Boolean isIso = template.getFormat().equals(ImageFormat.ISO); + if (diskOfferingId != null) { + if (!isIso) { + throw new InvalidParameterValueException(ApiConstants.DISK_OFFERING_ID + " parameter is supported for creating instance from backup only for ISO. 
For creating VMs with templates, please use the parameter " + ApiConstants.DATADISKS_DETAILS); + } + DiskOffering diskOffering = _diskOfferingDao.findById(diskOfferingId); + if (diskOffering == null) { + throw new InvalidParameterValueException("Unable to find disk offering " + diskOfferingId); + } + if (diskOffering.isComputeOnly()) { + throw new InvalidParameterValueException(String.format("The disk offering %s provided is directly mapped to a service offering, please provide an individual disk offering", diskOffering)); + } + } + + Long overrideDiskOfferingId = cmd.getOverrideDiskOfferingId(); + + VmDiskInfo rootVmDiskInfoFromBackup = backupManager.getRootDiskInfoFromBackup(backup); + + if (isIso) { + if (diskOfferingId == null) { + diskOfferingId = rootVmDiskInfoFromBackup.getDiskOffering().getId(); + updateDetailsWithRootDiskAttributes(cmd.getDetails(), rootVmDiskInfoFromBackup); + size = rootVmDiskInfoFromBackup.getSize(); + } else { + DiskOffering rootDiskOffering = _diskOfferingDao.findById(diskOfferingId); + checkRootDiskSizeAgainstBackup(size, rootDiskOffering, rootVmDiskInfoFromBackup.getSize()); + } + } else { + if (overrideDiskOfferingId == null) { + overrideDiskOfferingId = serviceOffering.getDiskOfferingId(); + updateDetailsWithRootDiskAttributes(cmd.getDetails(), rootVmDiskInfoFromBackup); + } else { + DiskOffering overrideDiskOffering = _diskOfferingDao.findById(overrideDiskOfferingId); + if (overrideDiskOffering.isComputeOnly()) { + updateDetailsWithRootDiskAttributes(cmd.getDetails(), rootVmDiskInfoFromBackup); + } else { + String diskSizeFromDetails = cmd.getDetails().get(VmDetailConstants.ROOT_DISK_SIZE); + Long rootDiskSize = diskSizeFromDetails == null ? 
null : Long.parseLong(diskSizeFromDetails); + checkRootDiskSizeAgainstBackup(rootDiskSize, overrideDiskOffering, rootVmDiskInfoFromBackup.getSize()); + } + } + } + + List dataDiskInfoList = cmd.getDataDiskInfoList(); + if (dataDiskInfoList != null) { + backupManager.checkVmDisksSizeAgainstBackup(dataDiskInfoList, backup); + } else { + dataDiskInfoList = backupManager.getDataDiskInfoListFromBackup(backup); + } + + List networkIds = cmd.getNetworkIds(); + Account owner = _accountService.getActiveAccountById(cmd.getEntityOwnerId()); + LinkedHashMap userVmNetworkMap = getVmOvfNetworkMapping(zone, owner, template, cmd.getVmNetworkMap()); + if (MapUtils.isNotEmpty(userVmNetworkMap)) { + networkIds = new ArrayList<>(userVmNetworkMap.values()); + } + + Map ipToNetworkMap = cmd.getIpToNetworkMap(); + if (networkIds == null && ipToNetworkMap == null) { + networkIds = new ArrayList(); + ipToNetworkMap = backupManager.getIpToNetworkMapFromBackup(backup, cmd.getPreserveIp(), networkIds); + } + + UserVm vm = createVirtualMachine(cmd, zone, owner, serviceOffering, template, hypervisorType, diskOfferingId, size, overrideDiskOfferingId, dataDiskInfoList, networkIds, ipToNetworkMap, null, null); + + String vmSettingsFromBackup = backup.getDetail(ApiConstants.VM_SETTINGS); + if (vm != null && vmSettingsFromBackup != null) { + UserVmVO vmVO = _vmDao.findById(vm.getId()); + Map details = vmInstanceDetailsDao.listDetailsKeyPairs(vm.getId()); + vmVO.setDetails(details); + + Type type = new TypeToken>(){}.getType(); + Map vmDetailsFromBackup = new Gson().fromJson(vmSettingsFromBackup, type); + for (Entry entry : vmDetailsFromBackup.entrySet()) { + if (!details.containsKey(entry.getKey())) { + vmVO.setDetail(entry.getKey(), entry.getValue()); + } + } + _vmDao.saveDetails(vmVO); + } + + return vm; + } + + @Override + public UserVm restoreVMFromBackup(CreateVMFromBackupCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + long vmId = 
cmd.getEntityId(); + Map diskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap(); + Map additonalParams = new HashMap<>(); + UserVm vm; + + try { + vm = startVirtualMachine(vmId, null, null, null, diskOfferingMap, additonalParams, null); + + boolean status = stopVirtualMachine(CallContext.current().getCallingUserId(), vm.getId()) ; + if (!status) { + UserVmVO vmVO = _vmDao.findById(vmId); + expunge(vmVO); + logger.debug("Successfully cleaned up Instance {} after create Instance from backup failed", vmId); + throw new CloudRuntimeException("Unable to stop the Instance before restore"); + } + + Long isoId = vm.getIsoId(); + if (isoId != null) { + UserVmVO vmVO = _vmDao.findById(vmId); + vmVO.setIsoId(null); + _vmDao.update(vm.getId(), vmVO); + } + + backupManager.restoreBackupToVM(cmd.getBackupId(), vmId); + + } catch (CloudRuntimeException e) { + UserVmVO vmVO = _vmDao.findById(vmId); + try { + expunge(vmVO); + logger.debug("Successfully cleaned up Instance {} after create Instance from backup failed", vmId); + } catch (Exception cleanupException) { + logger.debug("Failed to cleanup Instance {} after create Instance from backup failed", vmId, cleanupException); + } + throw e; + } + + Account owner = _accountService.getActiveAccountById(cmd.getEntityOwnerId()); + UserVmVO userVm = _vmDao.findById(vmId); + + List sshKeyPairNames = cmd.getSSHKeyPairNames(); + if (sshKeyPairNames != null && !sshKeyPairNames.isEmpty()) { + vm = resetVMSSHKeyInternal(userVm, owner, sshKeyPairNames); + } + + if (cmd.getStartVm()) { + Long podId = null; + Long clusterId = null; + if (cmd instanceof CreateVMFromBackupCmdByAdmin) { + CreateVMFromBackupCmdByAdmin adminCmd = (CreateVMFromBackupCmdByAdmin)cmd; + podId = adminCmd.getPodId(); + clusterId = adminCmd.getClusterId(); + } + vm = startVirtualMachine(vmId, podId, clusterId, cmd.getHostId(), diskOfferingMap, additonalParams, cmd.getDeploymentPlanner()); + } + return vm; + } + /* Generate usage events related to unmanaging a VM */ 
diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index fc893a7ef50..dc7b6282b08 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -149,7 +149,7 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement } @Override - public boolean compareManagementServerList(final Long hostId, final Long dcId, final List receivedMSHosts, final String lbAlgorithm) { + public boolean compareManagementServerListAndLBAlgorithm(final Long hostId, final Long dcId, final List receivedMSHosts, final String lbAlgorithm) { if (receivedMSHosts == null || receivedMSHosts.isEmpty()) { return false; } diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index a7d03f1a9a3..2682d53dd20 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -16,19 +16,24 @@ // under the License. 
package org.apache.cloudstack.backup; +import java.lang.reflect.Type; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.TimeZone; import java.util.Timer; import java.util.TimerTask; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -41,6 +46,7 @@ import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd; import org.apache.cloudstack.api.command.admin.backup.ListBackupProviderOfferingsCmd; import org.apache.cloudstack.api.command.admin.backup.ListBackupProvidersCmd; import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd; +import org.apache.cloudstack.api.command.admin.vm.CreateVMFromBackupCmdByAdmin; import org.apache.cloudstack.api.command.user.backup.AssignVirtualMachineToBackupOfferingCmd; import org.apache.cloudstack.api.command.user.backup.CreateBackupCmd; import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd; @@ -56,7 +62,10 @@ import org.apache.cloudstack.api.command.user.backup.UpdateBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.repository.AddBackupRepositoryCmd; import org.apache.cloudstack.api.command.user.backup.repository.DeleteBackupRepositoryCmd; import org.apache.cloudstack.api.command.user.backup.repository.ListBackupRepositoriesCmd; +import org.apache.cloudstack.api.command.user.vm.CreateVMFromBackupCmd; +import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.backup.dao.BackupScheduleDao; 
import org.apache.cloudstack.context.CallContext; @@ -71,6 +80,7 @@ import org.apache.cloudstack.poll.BackgroundPollTask; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.lang.math.NumberUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; @@ -79,9 +89,16 @@ import com.amazonaws.util.CollectionUtils; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDispatcher; import com.cloud.api.ApiGsonHelper; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; import com.cloud.configuration.Resource; import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; @@ -90,25 +107,38 @@ import com.cloud.event.UsageEventUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.network.Network; +import com.cloud.network.NetworkService; +import com.cloud.network.dao.NetworkDao; +import com.cloud.offering.DiskOffering; +import com.cloud.offering.ServiceOffering; import com.cloud.projects.Project; +import com.cloud.serializer.GsonHelper; +import 
com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ScopeType; -import com.cloud.storage.Snapshot; +import com.cloud.storage.Storage; import com.cloud.storage.Volume; import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; import com.cloud.user.DomainManager; import com.cloud.user.ResourceLimitService; import com.cloud.user.User; +import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; @@ -127,18 +157,26 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.VMInstanceDetailVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VmDiskInfo; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.VMInstanceDetailsDao; import com.google.gson.Gson; +import com.google.gson.reflect.TypeToken; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; public class BackupManagerImpl extends ManagerBase implements BackupManager { @Inject private BackupDao backupDao; @Inject + private BackupDetailsDao backupDetailsDao; + @Inject private BackupScheduleDao backupScheduleDao; @Inject private BackupOfferingDao backupOfferingDao; @@ -151,6 +189,10 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { @Inject private DomainManager 
domainManager; @Inject + private AccountDao accountDao; + @Inject + private DomainDao domainDao; + @Inject private VolumeDao volumeDao; @Inject private DataCenterDao dataCenterDao; @@ -167,6 +209,18 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { @Inject private UserVmDao userVmDao; @Inject + private ServiceOfferingDao serviceOfferingDao; + @Inject + private VMTemplateDao vmTemplateDao; + @Inject + private UserVmJoinDao userVmJoinDao; + @Inject + private VMInstanceDetailsDao vmInstanceDetailsDao; + @Inject + private NetworkDao networkDao; + @Inject + private NetworkService networkService; + @Inject private ApiDispatcher apiDispatcher; @Inject private AsyncJobManager asyncJobManager; @@ -199,7 +253,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (zoneId == null || zoneId < 1) { throw new CloudRuntimeException("Invalid zone ID passed"); } - validateForZone(zoneId); + validateBackupForZone(zoneId); final Account account = CallContext.current().getCallingAccount(); if (!accountService.isRootAdmin(account.getId())) { throw new PermissionDeniedException("Parameter external can only be specified by a Root Admin, permission denied"); @@ -212,7 +266,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_IMPORT_OFFERING, eventDescription = "importing backup offering", async = true) public BackupOffering importBackupOffering(final ImportBackupOfferingCmd cmd) { - validateForZone(cmd.getZoneId()); + validateBackupForZone(cmd.getZoneId()); final BackupOffering existingOffering = backupOfferingDao.findByExternalId(cmd.getExternalId(), cmd.getZoneId()); if (existingOffering != null) { throw new CloudRuntimeException("A backup offering with external ID " + cmd.getExternalId() + " already exists"); @@ -283,19 +337,81 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new 
CloudRuntimeException("Could not find a backup offering with id: " + offeringId); } - if (vmInstanceDao.listByZoneWithBackups(offering.getZoneId(), offering.getId()).size() > 0) { + if (backupDao.listByOfferingId(offering.getId()).size() > 0) { + throw new CloudRuntimeException("Backup Offering cannot be removed as it has backups associated with it."); + } + + if (vmInstanceDao.listByZoneAndBackupOffering(offering.getZoneId(), offering.getId()).size() > 0) { throw new CloudRuntimeException("Backup offering is assigned to VMs, remove the assignment(s) in order to remove the offering."); } - validateForZone(offering.getZoneId()); + validateBackupForZone(offering.getZoneId()); return backupOfferingDao.remove(offering.getId()); } - public static String createVolumeInfoFromVolumes(List vmVolumes) { + private String getNicDetailsAsJson(final Long vmId) { + final List userVmJoinVOs = userVmJoinDao.searchByIds(vmId); + if (userVmJoinVOs != null && !userVmJoinVOs.isEmpty()) { + final List> nics = new ArrayList<>(); + final Set seen = new HashSet<>(); + for (UserVmJoinVO userVmJoinVO : userVmJoinVOs) { + Map nicInfo = new HashMap<>(); + String key = userVmJoinVO.getNetworkUuid(); + if (seen.add(key)) { + nicInfo.put(ApiConstants.NETWORK_ID, userVmJoinVO.getNetworkUuid()); + nicInfo.put(ApiConstants.IP_ADDRESS, userVmJoinVO.getIpAddress()); + nicInfo.put(ApiConstants.IP6_ADDRESS, userVmJoinVO.getIp6Address()); + nicInfo.put(ApiConstants.MAC_ADDRESS, userVmJoinVO.getMacAddress()); + nics.add(nicInfo); + } + } + if (!nics.isEmpty()) { + return new Gson().toJson(nics); + } + } + return null; + } + + @Override + public Map getBackupDetailsFromVM(VirtualMachine vm) { + HashMap details = new HashMap<>(); + + ServiceOffering serviceOffering = serviceOfferingDao.findById(vm.getServiceOfferingId()); + details.put(ApiConstants.SERVICE_OFFERING_ID, serviceOffering.getUuid()); + VirtualMachineTemplate template = vmTemplateDao.findById(vm.getTemplateId()); + 
details.put(ApiConstants.TEMPLATE_ID, template.getUuid()); + + List vmDetails = vmInstanceDetailsDao.listDetails(vm.getId()); + HashMap settings = new HashMap<>(); + for (VMInstanceDetailVO detail : vmDetails) { + settings.put(detail.getName(), detail.getValue()); + } + if (!settings.isEmpty()) { + details.put(ApiConstants.VM_SETTINGS, new Gson().toJson(settings)); + } + + String nicsJson = getNicDetailsAsJson(vm.getId()); + if (nicsJson != null) { + details.put(ApiConstants.NICS, nicsJson); + } + return details; + } + + @Override + public String getBackupNameFromVM(VirtualMachine vm) { + String displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, new Date()); + return (vm.getHostName() + '-' + displayTime); + } + + @Override + public String createVolumeInfoFromVolumes(List vmVolumes) { List list = new ArrayList<>(); - vmVolumes.sort(Comparator.comparing(VolumeVO::getDeviceId)); - for (VolumeVO vol : vmVolumes) { - list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize())); + vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); + for (Volume vol : vmVolumes) { + DiskOfferingVO diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + Backup.VolumeInfo volumeInfo = new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOffering.getUuid(), vol.getMinIops(), vol.getMaxIops()); + list.add(volumeInfo); } return new Gson().toJson(list.toArray(), Backup.VolumeInfo[].class); } @@ -309,7 +425,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new CloudRuntimeException("VM is not in running or stopped state"); } - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); @@ -337,7 +453,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { try { long vmId = vm.getId(); 
vm.setBackupOfferingId(offering.getId()); - vm.setBackupVolumes(createVolumeInfoFromVolumes(volumeDao.findByInstance(vmId))); + vm.setBackupVolumes(createVolumeInfoFromVolumes(new ArrayList<>(volumeDao.findByInstance(vmId)))); if (!backupProvider.assignVMToBackupOffering(vm, offering)) { throw new CloudRuntimeException("Failed to assign the VM to the backup offering, please try removing the assignment and try again."); @@ -375,7 +491,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new CloudRuntimeException(String.format("Can't find any VM with ID: [%s].", vmId)); } - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); final BackupOfferingVO offering = backupOfferingDao.findById(vm.getBackupOfferingId()); @@ -398,6 +514,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { boolean result = false; try { result = backupProvider.removeVMFromBackupOffering(vm); + Long backupOfferingId = vm.getBackupOfferingId(); vm.setBackupOfferingId(null); vm.setBackupVolumes(null); vm.setBackupExternalId(null); @@ -408,9 +525,12 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } } if ((result || forced) && vmInstanceDao.update(vm.getId(), vm)) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), - "Backup-" + vm.getHostName() + "-" + vm.getUuid(), vm.getBackupOfferingId(), null, null, - Backup.class.getSimpleName(), vm.getUuid()); + final List backups = backupDao.listByVmId(null, vm.getId()); + if (backups.size() == 0) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), + "Backup-" + vm.getHostName() + "-" + vm.getUuid(), backupOfferingId, null, null, + Backup.class.getSimpleName(), 
vm.getUuid()); + } final List backupSchedules = backupScheduleDao.listByVM(vm.getId()); for(BackupSchedule backupSchedule: backupSchedules) { backupScheduleDao.remove(backupSchedule.getId()); @@ -430,52 +550,28 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { final DateUtil.IntervalType intervalType = cmd.getIntervalType(); final String scheduleString = cmd.getSchedule(); final TimeZone timeZone = TimeZone.getTimeZone(cmd.getTimezone()); - final Integer maxBackups = cmd.getMaxBackups(); if (intervalType == null) { throw new CloudRuntimeException("Invalid interval type provided"); } final VMInstanceVO vm = findVmById(vmId); - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); if (vm.getBackupOfferingId() == null) { throw new CloudRuntimeException("Cannot configure backup schedule for the VM without having any backup offering"); } - if (maxBackups != null && maxBackups <= 0) { - throw new InvalidParameterValueException(String.format("maxBackups [%s] for instance %s should be greater than 0.", maxBackups, vm.getName())); - } - - Backup.Type backupType = Backup.Type.valueOf(intervalType.name()); - int intervalMaxBackups = backupType.getMax(); - if (maxBackups != null && maxBackups > intervalMaxBackups) { - throw new InvalidParameterValueException(String.format("maxBackups [%s] for instance %s exceeds limit [%s] for interval type [%s].", maxBackups, vm.getName(), - intervalMaxBackups, intervalType)); - } - - Account owner = accountManager.getAccount(vm.getAccountId()); - - long accountLimit = resourceLimitMgr.findCorrectResourceLimitForAccount(owner, Resource.ResourceType.backup, null); - long domainLimit = resourceLimitMgr.findCorrectResourceLimitForDomain(domainManager.getDomain(owner.getDomainId()), Resource.ResourceType.backup, null); - if (maxBackups != null && !accountManager.isRootAdmin(owner.getId()) && 
((accountLimit != -1 && maxBackups > accountLimit) || (domainLimit != -1 && maxBackups > domainLimit))) { - String message = "domain/account"; - if (owner.getType() == Account.Type.PROJECT) { - message = "domain/project"; - } - throw new InvalidParameterValueException("Max number of backups shouldn't exceed the " + message + " level backup limit"); - } final BackupOffering offering = backupOfferingDao.findById(vm.getBackupOfferingId()); if (offering == null || !offering.isUserDrivenBackupAllowed()) { throw new CloudRuntimeException("The selected backup offering does not allow user-defined backup schedule"); } - if (maxBackups == null && !"veeam".equals(offering.getProvider())) { - throw new CloudRuntimeException("Please specify the maximum number of buckets to retain."); - } - if (maxBackups != null && "veeam".equals(offering.getProvider())) { - throw new CloudRuntimeException("The maximum backups to retain cannot be configured through CloudStack for Veeam. Retention is managed directly in Veeam based on the settings specified when creating the backup job."); + final int maxBackups = validateAndGetDefaultBackupRetentionIfRequired(cmd.getMaxBackups(), offering, vm); + + if (!"nas".equals(offering.getProvider()) && cmd.getQuiesceVM() != null) { + throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS backup provider"); } final String timezoneId = timeZone.getID(); @@ -492,7 +588,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { final BackupScheduleVO schedule = backupScheduleDao.findByVMAndIntervalType(vmId, intervalType); if (schedule == null) { - return backupScheduleDao.persist(new BackupScheduleVO(vmId, intervalType, scheduleString, timezoneId, nextDateTime, maxBackups)); + return backupScheduleDao.persist(new BackupScheduleVO(vmId, intervalType, scheduleString, timezoneId, nextDateTime, maxBackups, cmd.getQuiesceVM())); } schedule.setScheduleType((short) intervalType.ordinal()); @@ -500,14 +596,52 
@@ public class BackupManagerImpl extends ManagerBase implements BackupManager { schedule.setTimezone(timezoneId); schedule.setScheduledTimestamp(nextDateTime); schedule.setMaxBackups(maxBackups); + schedule.setQuiesceVM(cmd.getQuiesceVM()); backupScheduleDao.update(schedule.getId(), schedule); return backupScheduleDao.findById(schedule.getId()); } + /** + * Validates the provided backup retention value and returns 0 as the default value if required. + * + * @param maxBackups The number of backups to retain, can be null + * @param offering The backup offering + * @param vm The VM associated with the backup schedule + * @return The validated number of backups to retain. If maxBackups is null, returns 0 as the default value + * @throws InvalidParameterValueException if the backup offering's provider is Veeam, or maxBackups is less than 0 or greater than the account and domain backup limits + */ + protected int validateAndGetDefaultBackupRetentionIfRequired(Integer maxBackups, BackupOffering offering, VirtualMachine vm) { + if (maxBackups == null) { + return 0; + } + if ("veeam".equals(offering.getProvider())) { + throw new InvalidParameterValueException("The maximum amount of backups to retain cannot be directly configured via Apache CloudStack for Veeam. 
" + + "Retention is managed directly in Veeam based on the settings specified when creating the backup job."); + } + if (maxBackups < 0) { + throw new InvalidParameterValueException("maxbackups value for backup schedule must be a non-negative integer."); + } + + Account owner = accountManager.getAccount(vm.getAccountId()); + long accountLimit = resourceLimitMgr.findCorrectResourceLimitForAccount(owner, Resource.ResourceType.backup, null); + boolean exceededAccountLimit = accountLimit != -1 && maxBackups > accountLimit; + + long domainLimit = resourceLimitMgr.findCorrectResourceLimitForDomain(domainManager.getDomain(owner.getDomainId()), Resource.ResourceType.backup, null); + boolean exceededDomainLimit = domainLimit != -1 && maxBackups > domainLimit; + + if (!accountManager.isRootAdmin(owner.getId()) && (exceededAccountLimit || exceededDomainLimit)) { + throw new InvalidParameterValueException( + String.format("'maxbackups' should not exceed the domain/%s backup limit.", owner.getType() == Account.Type.PROJECT ? 
"project" : "account") + ); + } + + return maxBackups; + } + @Override public List listBackupSchedule(final Long vmId) { final VMInstanceVO vm = findVmById(vmId); - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); return backupScheduleDao.listByVM(vmId).stream().map(BackupSchedule.class::cast).collect(Collectors.toList()); @@ -545,7 +679,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { */ protected void checkCallerAccessToBackupScheduleVm(long vmId) { VMInstanceVO vm = findVmById(vmId); - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); } @@ -564,32 +698,13 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return success; } - private void postCreateScheduledBackup(Backup.Type backupType, Long vmId) { - DateUtil.IntervalType intervalType = DateUtil.IntervalType.valueOf(backupType.name()); - final BackupScheduleVO schedule = backupScheduleDao.findByVMAndIntervalType(vmId, intervalType); - if (schedule == null) { - return; - } - Integer maxBackups = schedule.getMaxBackups(); - if (maxBackups == null) { - return; - } - List backups = backupDao.listBackupsByVMandIntervalType(vmId, backupType); - while (backups.size() > maxBackups) { - BackupVO oldestBackup = backups.get(0); - if (deleteBackup(oldestBackup.getId(), false)) { - ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestBackup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_BACKUP_DELETE, - "Successfully deleted oldest backup: " + oldestBackup.getId(), oldestBackup.getId(), ApiCommandResourceType.Backup.toString(), 0); - } - backups.remove(oldestBackup); - } - } - @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_CREATE, eventDescription = "creating VM backup", async = true) 
- public boolean createBackup(final Long vmId, final Long scheduleId) throws ResourceAllocationException { + public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllocationException { + Long vmId = cmd.getVmId(); + final VMInstanceVO vm = findVmById(vmId); - validateForZone(vm.getDataCenterId()); + validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); if (vm.getBackupOfferingId() == null) { @@ -601,20 +716,27 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new CloudRuntimeException("VM backup offering not found"); } + final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + if (backupProvider == null) { + throw new CloudRuntimeException("VM backup provider not found for the offering"); + } + if (!offering.isUserDrivenBackupAllowed()) { throw new CloudRuntimeException("The assigned backup offering does not allow ad-hoc user backup"); } - Backup.Type type = getBackupType(scheduleId); + if (!"nas".equals(offering.getProvider()) && cmd.getQuiesceVM() != null) { + throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS backup provider"); + } + + Long backupScheduleId = getBackupScheduleId(job); + boolean isScheduledBackup = backupScheduleId != null; Account owner = accountManager.getAccount(vm.getAccountId()); try { resourceLimitMgr.checkResourceLimit(owner, Resource.ResourceType.backup); } catch (ResourceAllocationException e) { - if (type != Backup.Type.MANUAL) { - String msg = "Backup resource limit exceeded for account id : " + owner.getId() + ". Failed to create backup"; - logger.warn(msg); - alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Backup resource limit exceeded for account id : " + owner.getId() - + ". 
Failed to create backups; please use updateResourceLimit to increase the limit"); + if (isScheduledBackup) { + sendExceededBackupLimitAlert(owner.getUuid(), Resource.ResourceType.backup); } throw e; } @@ -632,11 +754,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { try { resourceLimitMgr.checkResourceLimit(owner, Resource.ResourceType.backup_storage, backupSize); } catch (ResourceAllocationException e) { - if (type != Backup.Type.MANUAL) { - String msg = "Backup storage space resource limit exceeded for account id : " + owner.getId() + ". Failed to create backup"; - logger.warn(msg); - alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Backup storage space resource limit exceeded for account id : " + owner.getId() - + ". Failed to create backups; please use updateResourceLimit to increase the limit"); + if (isScheduledBackup) { + sendExceededBackupLimitAlert(owner.getUuid(), Resource.ResourceType.backup_storage); } throw e; } @@ -646,34 +765,124 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { vmId, ApiCommandResourceType.VirtualMachine.toString(), true, 0); - - final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); - if (backupProvider != null) { - Pair result = backupProvider.takeBackup(vm); - if (!result.first()) { - throw new CloudRuntimeException("Failed to create VM backup"); - } - Backup backup = result.second(); - if (backup != null) { - BackupVO vmBackup = backupDao.findById(result.second().getId()); - vmBackup.setBackupIntervalType((short) type.ordinal()); - backupDao.update(vmBackup.getId(), vmBackup); - resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup); - resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize()); - } - if (type != Backup.Type.MANUAL) { - postCreateScheduledBackup(type, vm.getId()); - } - return true; + Pair 
result = backupProvider.takeBackup(vm, cmd.getQuiesceVM()); + if (!result.first()) { + throw new CloudRuntimeException("Failed to create VM backup"); + } + Backup backup = result.second(); + if (backup != null) { + BackupVO vmBackup = backupDao.findById(result.second().getId()); + vmBackup.setBackupScheduleId(backupScheduleId); + if (cmd.getName() != null) { + vmBackup.setName(cmd.getName()); + } + vmBackup.setDescription(cmd.getDescription()); + backupDao.update(vmBackup.getId(), vmBackup); + resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup); + resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize()); + } + if (isScheduledBackup) { + deleteOldestBackupFromScheduleIfRequired(vmId, backupScheduleId); + } + return true; + } + + /** + * Sends an alert when the backup limit has been exceeded for a given account. + * + * @param ownerUuid The UUID of the account owner that exceeded the limit + * @param resourceType The type of resource limit that was exceeded (either {@link Resource.ResourceType#backup} or {@link Resource.ResourceType#backup_storage}) + * + */ + protected void sendExceededBackupLimitAlert(String ownerUuid, Resource.ResourceType resourceType) { + String message = String.format("Failed to create backup: backup %s limit exceeded for account with ID: %s.", + resourceType == Resource.ResourceType.backup ? "resource" : "storage space resource" , ownerUuid); + logger.warn(message); + alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, + message, message + " Please, use the 'updateResourceLimit' API to increase the backup limit."); + } + + /** + * Gets the backup schedule ID from the async job's payload. + * + * @param job The asynchronous job associated with the creation of the backup + * @return The backup schedule ID. 
Returns null if the backup has been manually created + */ + protected Long getBackupScheduleId(Object job) { + if (!(job instanceof AsyncJobVO)) { + return null; + } + + AsyncJobVO asyncJob = (AsyncJobVO) job; + logger.debug("Trying to retrieve [{}] parameter from the job [ID: {}] parameters.", ApiConstants.SCHEDULE_ID, asyncJob.getId()); + String jobParamsRaw = asyncJob.getCmdInfo(); + + if (!jobParamsRaw.contains(ApiConstants.SCHEDULE_ID)) { + logger.info("Job [ID: {}] parameters do not include the [{}] parameter. Thus, the current backup is a manual backup.", asyncJob.getId(), ApiConstants.SCHEDULE_ID); + return null; + } + + TypeToken> jobParamsType = new TypeToken<>(){}; + Map jobParams = GsonHelper.getGson().fromJson(jobParamsRaw, jobParamsType.getType()); + long backupScheduleId = NumberUtils.toLong(jobParams.get(ApiConstants.SCHEDULE_ID)); + logger.info("Job [ID: {}] parameters include the [{}] parameter, whose value is equal to [{}]. Thus, the current backup is a scheduled backup.", asyncJob.getId(), ApiConstants.SCHEDULE_ID, backupScheduleId); + return backupScheduleId == 0L ? null : backupScheduleId; + } + + /** + * Deletes the oldest backups from the schedule. If the backup schedule is not active, the schedule's retention is equal to 0, + * or the number of backups to be deleted is lower than one, then no backups are deleted. 
+ * + * @param vmId The ID of the VM associated with the backups + * @param backupScheduleId Backup schedule ID of the backups + */ + protected void deleteOldestBackupFromScheduleIfRequired(Long vmId, long backupScheduleId) { + BackupScheduleVO backupScheduleVO = backupScheduleDao.findById(backupScheduleId); + if (backupScheduleVO == null || backupScheduleVO.getMaxBackups() == 0) { + logger.info("The schedule does not have a retention specified and, hence, not deleting any backups from it.", vmId); + return; + } + + logger.debug("Checking if it is required to delete the oldest backups from the schedule with ID [{}], to meet its retention requirement of [{}] backups.", backupScheduleId, backupScheduleVO.getMaxBackups()); + List backups = backupDao.listBySchedule(backupScheduleId); + int amountOfBackupsToDelete = backups.size() - backupScheduleVO.getMaxBackups(); + if (amountOfBackupsToDelete > 0) { + deleteExcessBackups(backups, amountOfBackupsToDelete, backupScheduleId); + } else { + logger.debug("Not required to delete any backups from the schedule [ID: {}]: [backups size: {}] and [retention: {}].", backupScheduleId, backups.size(), backupScheduleVO.getMaxBackups()); + } + } + + /** + * Deletes a certain number of backups associated with a schedule. 
+ * + * @param backups List of backups associated with a schedule + * @param amountOfBackupsToDelete Number of backups to be deleted from the list of backups + * @param backupScheduleId ID of the backup schedule associated with the backups + */ + protected void deleteExcessBackups(List backups, int amountOfBackupsToDelete, long backupScheduleId) { + logger.debug("Deleting the [{}] oldest backups from the schedule [ID: {}].", amountOfBackupsToDelete, backupScheduleId); + + for (int i = 0; i < amountOfBackupsToDelete; i++) { + BackupVO backup = backups.get(i); + if (deleteBackup(backup.getId(), false)) { + String eventDescription = String.format("Successfully deleted backup for VM [ID: %s], suiting the retention specified in the backup schedule [ID: %s]", backup.getVmId(), backupScheduleId); + logger.info(eventDescription); + ActionEventUtils.onCompletedActionEvent( + User.UID_SYSTEM, backup.getAccountId(), EventVO.LEVEL_INFO, + EventTypes.EVENT_VM_BACKUP_DELETE, eventDescription, backup.getId(), ApiCommandResourceType.Backup.toString(), 0 + ); + } } - throw new CloudRuntimeException("Failed to create VM backup"); } @Override public Pair, Integer> listBackups(final ListBackupsCmd cmd) { final Long id = cmd.getId(); final Long vmId = cmd.getVmId(); + final String name = cmd.getName(); final Long zoneId = cmd.getZoneId(); + final Long backupOfferingId = cmd.getBackupOfferingId(); final Account caller = CallContext.current().getCallingAccount(); final String keyword = cmd.getKeyword(); List permittedAccounts = new ArrayList(); @@ -699,13 +908,16 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("idIN", sb.entity().getId(), SearchCriteria.Op.IN); sb.and("vmId", sb.entity().getVmId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); sb.and("zoneId", sb.entity().getZoneId(), SearchCriteria.Op.EQ); + sb.and("backupOfferingId", 
sb.entity().getBackupOfferingId(), SearchCriteria.Op.EQ); if (keyword != null) { + sb.or().op("keywordName", sb.entity().getName(), SearchCriteria.Op.LIKE); SearchBuilder vmSearch = vmInstanceDao.createSearchBuilder(); - vmSearch.and("name", vmSearch.entity().getHostName(), SearchCriteria.Op.LIKE); - sb.groupBy(sb.entity().getId()); sb.join("vmSearch", vmSearch, sb.entity().getVmId(), vmSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.or("vmSearch", "keywordVmName", vmSearch.entity().getHostName(), SearchCriteria.Op.LIKE); + sb.cp(); } SearchCriteria sc = sb.create(); @@ -719,12 +931,22 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { sc.setParameters("vmId", vmId); } + if (name != null) { + sc.setParameters("name", name); + } + if (zoneId != null) { sc.setParameters("zoneId", zoneId); } + if (backupOfferingId != null) { + sc.setParameters("backupOfferingId", backupOfferingId); + } + if (keyword != null) { - sc.setJoinParameters("vmSearch", "name", "%" + keyword + "%"); + String keywordMatch = "%" + keyword + "%"; + sc.setParameters("keywordName", keywordMatch); + sc.setParameters("keywordVmName", keywordMatch); } Pair, Integer> result = backupDao.searchAndCount(sc, searchFilter); @@ -766,11 +988,14 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (backup == null) { throw new CloudRuntimeException("Backup " + backupId + " does not exist"); } - validateForZone(backup.getZoneId()); + if (backup.getStatus() != Backup.Status.BackedUp) { + throw new CloudRuntimeException("Backup should be in BackedUp state"); + } + validateBackupForZone(backup.getZoneId()); final VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); - if (vm == null) { - throw new CloudRuntimeException("VM ID " + backup.getVmId() + " couldn't be found on existing or removed VMs"); + if (vm == null || VirtualMachine.State.Expunging.equals(vm.getState())) { + throw new CloudRuntimeException("The Instance 
from which the backup was taken could not be found."); } accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); @@ -839,29 +1064,6 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } } - private Backup.Type getBackupType(Long scheduleId) { - if (scheduleId.equals(Snapshot.MANUAL_POLICY_ID)) { - return Backup.Type.MANUAL; - } else { - BackupScheduleVO scheduleVO = backupScheduleDao.findById(scheduleId); - DateUtil.IntervalType intvType = scheduleVO.getScheduleType(); - return getBackupType(intvType); - } - } - - private Backup.Type getBackupType(DateUtil.IntervalType intvType) { - if (intvType.equals(DateUtil.IntervalType.HOURLY)) { - return Backup.Type.HOURLY; - } else if (intvType.equals(DateUtil.IntervalType.DAILY)) { - return Backup.Type.DAILY; - } else if (intvType.equals(DateUtil.IntervalType.WEEKLY)) { - return Backup.Type.WEEKLY; - } else if (intvType.equals(DateUtil.IntervalType.MONTHLY)) { - return Backup.Type.MONTHLY; - } - return null; - } - /** * Tries to update the state of given VM, given specified event * @param vm The VM to update its state @@ -928,6 +1130,206 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return null; } + @Override + public void checkVmDisksSizeAgainstBackup(List vmDiskInfoList, Backup backup) { + List vmDiskInfoListFromBackup = getDataDiskInfoListFromBackup(backup); + int index = 0; + if (vmDiskInfoList.size() != vmDiskInfoListFromBackup.size()) { + throw new InvalidParameterValueException("Unable to create Instance from Backup " + + "as the backup has a different number of disks than the Instance."); + } + for (VmDiskInfo vmDiskInfo : vmDiskInfoList) { + if (index < vmDiskInfoListFromBackup.size()) { + if (vmDiskInfo.getSize() < vmDiskInfoListFromBackup.get(index).getSize()) { + throw new InvalidParameterValueException( + String.format("Instance volume size %d[GiB] cannot be less than the backed-up volume size %d[GiB].", + 
vmDiskInfo.getSize(), vmDiskInfoListFromBackup.get(index).getSize())); + } + } + index++; + } + } + + @Override + public VmDiskInfo getRootDiskInfoFromBackup(Backup backup) { + List volumes = backup.getBackedUpVolumes(); + VmDiskInfo rootDiskOffering = null; + if (volumes == null || volumes.isEmpty()) { + throw new CloudRuntimeException("Failed to get backed-up volumes info from backup"); + } + for (Backup.VolumeInfo volume : volumes) { + if (volume.getType() == Volume.Type.ROOT) { + DiskOfferingVO diskOffering = diskOfferingDao.findByUuid(volume.getDiskOfferingId()); + if (diskOffering == null) { + throw new CloudRuntimeException(String.format("Unable to find the root disk offering with uuid (%s) " + + "stored in backup. Please specify a valid root disk offering id while creating the instance", + volume.getDiskOfferingId())); + } + Long size = volume.getSize() / (1024 * 1024 * 1024); + rootDiskOffering = new VmDiskInfo(diskOffering, size, volume.getMinIops(), volume.getMaxIops()); + } + } + if (rootDiskOffering == null) { + throw new CloudRuntimeException("Failed to get the root disk in backed-up volumes info from backup"); + } + return rootDiskOffering; + } + + @Override + public List getDataDiskInfoListFromBackup(Backup backup) { + List vmDiskInfoList = new ArrayList<>(); + List volumes = backup.getBackedUpVolumes(); + if (volumes == null || volumes.isEmpty()) { + throw new CloudRuntimeException("Failed to get backed-up Volumes info from backup"); + } + for (Backup.VolumeInfo volume : volumes) { + if (volume.getType() == Volume.Type.DATADISK) { + DiskOfferingVO diskOffering = diskOfferingDao.findByUuid(volume.getDiskOfferingId()); + if (diskOffering == null || diskOffering.getState().equals(DiskOffering.State.Inactive)) { + throw new CloudRuntimeException("Unable to find the disk offering with uuid (" + volume.getDiskOfferingId() + ") stored in backup. 
" + + "Please specify a valid disk offering id while creating the instance"); + } + Long size = volume.getSize() / (1024 * 1024 * 1024); + vmDiskInfoList.add(new VmDiskInfo(diskOffering, size, volume.getMinIops(), volume.getMaxIops(), volume.getDeviceId())); + } + } + return vmDiskInfoList; + } + + @Override + public Map getIpToNetworkMapFromBackup(Backup backup, boolean preserveIps, List networkIds) + { + Map ipToNetworkMap = new LinkedHashMap(); + + String nicsJson = backup.getDetail(ApiConstants.NICS); + if (nicsJson == null) { + throw new CloudRuntimeException("Backup doesn't contain network information. " + + "Please specify at least one valid network while creating instance"); + } + + Type type = new TypeToken>>(){}.getType(); + List> nics = new Gson().fromJson(nicsJson, type); + + for (Map nic : nics) { + String networkUuid = nic.get(ApiConstants.NETWORK_ID); + if (networkUuid == null) { + throw new CloudRuntimeException("Backup doesn't contain network information. " + + "Please specify at least one valid network while creating instance"); + } + + Network network = networkDao.findByUuid(networkUuid); + if (network == null) { + throw new CloudRuntimeException("Unable to find network with the uuid " + networkUuid + " stored in backup. 
" + + "Please specify a valid network id while creating the instance"); + } + + Long networkId = network.getId(); + Network.IpAddresses ipAddresses = null; + + if (preserveIps) { + String ip = nic.get(ApiConstants.IP_ADDRESS); + String ipv6 = nic.get(ApiConstants.IP6_ADDRESS); + String mac = nic.get(ApiConstants.MAC_ADDRESS); + ipAddresses = networkService.getIpAddressesFromIps(ip, ipv6, mac); + } + + ipToNetworkMap.put(networkId, ipAddresses); + networkIds.add(networkId); + } + return ipToNetworkMap; + } + + @Override + public Boolean canCreateInstanceFromBackup(final Long backupId) { + final BackupVO backup = backupDao.findById(backupId); + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + if (offering == null) { + throw new CloudRuntimeException("Failed to find backup offering"); + } + final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + return backupProvider.supportsInstanceFromBackup(); + } + + @Override + public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws ResourceUnavailableException { + final BackupVO backup = backupDao.findById(backupId); + if (backup == null) { + throw new CloudRuntimeException("Backup " + backupId + " does not exist"); + } + if (backup.getStatus() != Backup.Status.BackedUp) { + throw new CloudRuntimeException("Backup should be in BackedUp state"); + } + validateBackupForZone(backup.getZoneId()); + + VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(vmId); + if (vm == null) { + throw new CloudRuntimeException("Instance with ID " + backup.getVmId() + " couldn't be found."); + } + accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); + + if (vm.getRemoved() != null) { + throw new CloudRuntimeException("Instance with ID " + backup.getVmId() + " couldn't be found."); + } + if (!vm.getState().equals(VirtualMachine.State.Stopped)) { + throw new CloudRuntimeException("The VM should be in stopped 
state"); + } + + List backupVolumes = backup.getBackedUpVolumes(); + if (backupVolumes == null) { + throw new CloudRuntimeException("Backed up volumes info not found in the backup"); + } + + List vmVolumes = volumeDao.findByInstance(vmId); + if (vmVolumes.size() != backupVolumes.size()) { + throw new CloudRuntimeException("Unable to create Instance from backup as the backup has a different number of disks than the Instance"); + } + + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + if (offering == null) { + throw new CloudRuntimeException("Failed to find backup offering"); + } + final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + if (!backupProvider.supportsInstanceFromBackup()) { + throw new CloudRuntimeException("Create instance from backup is not supported by the " + offering.getProvider() + " provider."); + } + + String backupDetailsInMessage = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "uuid", "externalId", "newVMId", "type", "status", "date"); + Long eventId = null; + try { + updateVmState(vm, VirtualMachine.Event.RestoringRequested, VirtualMachine.State.Restoring); + updateVolumeState(vm, Volume.Event.RestoreRequested, Volume.State.Restoring); + eventId = ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventTypes.EVENT_VM_CREATE_FROM_BACKUP, + String.format("Creating Instance %s from backup %s", vm.getInstanceName(), backup.getUuid()), + vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), + true, 0); + + String host = null; + String dataStore = null; + if (!"nas".equals(offering.getProvider())) { + Pair restoreInfo = getRestoreVolumeHostAndDatastore(vm); + host = restoreInfo.first().getPrivateIpAddress(); + dataStore = restoreInfo.second().getUuid(); + } + if (!backupProvider.restoreBackupToVM(vm, backup, host, dataStore)) { + throw new CloudRuntimeException(String.format("Error restoring backup [%s] to VM %s.", 
backupDetailsInMessage, vm.getUuid())); + } + } catch (Exception e) { + updateVolumeState(vm, Volume.Event.RestoreFailed, Volume.State.Ready); + updateVmState(vm, VirtualMachine.Event.RestoringFailed, VirtualMachine.State.Stopped); + logger.error(String.format("Failed to create Instance [%s] from backup [%s] due to: [%s].", vm.getInstanceName(), backupDetailsInMessage, e.getMessage()), e); + ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_VM_CREATE_FROM_BACKUP, + String.format("Failed to create Instance %s from backup %s", vm.getInstanceName(), backup.getUuid()), + vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), eventId); + throw new CloudRuntimeException(String.format("Error while creating Instance [%s] from backup [%s].", vm.getUuid(), backupDetailsInMessage)); + } + updateVolumeState(vm, Volume.Event.RestoreSucceeded, Volume.State.Ready); + updateVmState(vm, VirtualMachine.Event.RestoringSuccess, VirtualMachine.State.Stopped); + ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_CREATE_FROM_BACKUP, + String.format("Successfully created Instance %s from backup %s", vm.getInstanceName(), backup.getUuid()), + vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),eventId); + return true; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_RESTORE, eventDescription = "restoring VM from backup", async = true) public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, final Long backupId, final Long vmId) throws Exception { @@ -938,7 +1340,10 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (backup == null) { throw new CloudRuntimeException("Provided backup not found"); } - validateForZone(backup.getZoneId()); + if (backup.getStatus() != Backup.Status.BackedUp) { + throw new CloudRuntimeException("Backup should be in BackedUp state"); + } + 
validateBackupForZone(backup.getZoneId()); final VMInstanceVO vm = findVmById(vmId); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); @@ -951,11 +1356,22 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new CloudRuntimeException("Cross zone backup restoration of volume is not allowed"); } - final VMInstanceVO vmFromBackup = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); - if (vmFromBackup == null) { - throw new CloudRuntimeException("VM reference for the provided VM backup not found"); + List volumeInfoList = backup.getBackedUpVolumes(); + if (volumeInfoList == null) { + final VMInstanceVO vmFromBackup = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + if (vmFromBackup == null) { + throw new CloudRuntimeException("VM reference for the provided VM backup not found"); + } else if (vmFromBackup == null || vmFromBackup.getBackupVolumeList() == null) { + throw new CloudRuntimeException("Volumes metadata not found in the backup"); + } + volumeInfoList = vm.getBackupVolumeList(); } - accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vmFromBackup); + Backup.VolumeInfo backupVolumeInfo = getVolumeInfo(volumeInfoList, backedUpVolumeUuid); + if (backupVolumeInfo == null) { + throw new CloudRuntimeException("Failed to find volume with Id " + backedUpVolumeUuid + " in the backed-up volumes metadata"); + } + + accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); if (offering == null) { throw new CloudRuntimeException("Failed to find VM backup offering"); @@ -964,7 +1380,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { BackupProvider backupProvider = getBackupProvider(offering.getProvider()); VolumeVO backedUpVolume = volumeDao.findByUuid(backedUpVolumeUuid); Pair restoreInfo; - if 
(!"nas".equals(offering.getProvider())) { + if (!"nas".equals(offering.getProvider()) || (backedUpVolume == null)) { restoreInfo = getRestoreVolumeHostAndDatastore(vm); } else { restoreInfo = getRestoreVolumeHostAndDatastoreForNas(vm, backedUpVolume); @@ -981,36 +1397,36 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { String[] hostPossibleValues = {host.getPrivateIpAddress(), host.getName()}; String[] datastoresPossibleValues = {datastore.getUuid(), datastore.getName()}; - Pair result = restoreBackedUpVolume(backedUpVolumeUuid, backup, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair result = restoreBackedUpVolume(backupVolumeInfo, backup, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); if (BooleanUtils.isFalse(result.first())) { throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM [%s] to host [%s] using backup provider [%s] due to: [%s].", backedUpVolumeUuid, vm.getUuid(), host.getUuid(), backupProvider.getName(), result.second())); } - if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), vmFromBackup.getBackupVolumeList(), + if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { - throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s]." 
+ backedUpVolumeUuid, vm.getUuid())); + throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); } return true; } - protected Pair restoreBackedUpVolume(final String backedUpVolumeUuid, final BackupVO backup, BackupProvider backupProvider, String[] hostPossibleValues, - String[] datastoresPossibleValues, VMInstanceVO vm) { + protected Pair restoreBackedUpVolume(final Backup.VolumeInfo backupVolumeInfo, final BackupVO backup, + BackupProvider backupProvider, String[] hostPossibleValues, String[] datastoresPossibleValues, VMInstanceVO vm) { Pair result = new Pair<>(false, ""); for (String hostData : hostPossibleValues) { for (String datastoreData : datastoresPossibleValues) { logger.debug(String.format("Trying to restore volume [UUID: %s], using host [%s] and datastore [%s].", - backedUpVolumeUuid, hostData, datastoreData)); + backupVolumeInfo.getUuid(), hostData, datastoreData)); try { - result = backupProvider.restoreBackedUpVolume(backup, backedUpVolumeUuid, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState())); + result = backupProvider.restoreBackedUpVolume(backup, backupVolumeInfo, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState())); if (BooleanUtils.isTrue(result.first())) { return result; } } catch (Exception e) { logger.debug(String.format("Failed to restore volume [UUID: %s], using host [%s] and datastore [%s] due to: [%s].", - backedUpVolumeUuid, hostData, datastoreData, e.getMessage()), e); + backupVolumeInfo.getUuid(), hostData, datastoreData, e.getMessage()), e); } } } @@ -1024,23 +1440,32 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (backup == null) { throw new CloudRuntimeException("Backup " + backupId + " does not exist"); } + final Long vmId = backup.getVmId(); final VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(vmId); if (vm == null) { - throw new CloudRuntimeException("VM " + vmId + " does not exist"); + 
logger.warn("Instance {} not found for backup {} during delete backup", vmId, backup.toString()); } - validateForZone(vm.getDataCenterId()); - accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); + logger.debug("Deleting backup {} belonging to instance {}", backup.toString(), vmId); + + validateBackupForZone(backup.getZoneId()); + accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm == null ? backup : vm); final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); if (offering == null) { throw new CloudRuntimeException(String.format("Backup offering with ID [%s] does not exist.", backup.getBackupOfferingId())); } - final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + final BackupProvider backupProvider = getBackupProvider(backup.getZoneId()); boolean result = backupProvider.deleteBackup(backup, forced); if (result) { - resourceLimitMgr.decrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup); - resourceLimitMgr.decrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize()); - return backupDao.remove(backup.getId()); + resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup); + Long backupSize = backup.getSize() != null ? 
backup.getSize() : 0L; + resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup_storage, backupSize); + if (backupDao.remove(backup.getId())) { + checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(vm, backup); + return true; + } else { + return false; + } } throw new CloudRuntimeException("Failed to delete the backup"); } @@ -1085,32 +1510,36 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { /** * Attach volume to VM */ - private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, List backedUpVolumes, + private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Backup.VolumeInfo backupVolumeInfo, String volumeUuid, VMInstanceVO vm, String datastoreUuid, Backup backup) throws Exception { HypervisorGuru guru = hypervisorGuruManager.getGuru(vm.getHypervisorType()); - Backup.VolumeInfo volumeInfo = getVolumeInfo(backedUpVolumes, volumeUuid); - if (volumeInfo == null) { - throw new CloudRuntimeException("Failed to find volume in the backedup volumes of ID " + volumeUuid); - } - volumeInfo.setType(Volume.Type.DATADISK); + backupVolumeInfo.setType(Volume.Type.DATADISK); - logger.debug("Attaching the restored volume to VM {}", vm); + logger.info("Attaching the restored volume {} to VM {}.", () -> ReflectionToStringBuilder.toString(backupVolumeInfo, ToStringStyle.JSON_STYLE), () -> vm); StoragePoolVO pool = primaryDataStoreDao.findByUuid(datastoreUuid); try { - return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, volumeInfo, vm, pool.getId(), backup); + return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, backupVolumeInfo, vm, pool.getId(), backup); } catch (Exception e) { throw new CloudRuntimeException("Error attach restored volume to VM " + vm.getUuid() + " due to: " + e.getMessage()); } } + private void checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(VirtualMachine vm, Backup backup) { + if 
(vm != null && + (vm.getBackupOfferingId() == null || vm.getBackupOfferingId() != backup.getBackupOfferingId())) { + List backups = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), backup.getBackupOfferingId()); + if (backups.size() == 0) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED, vm.getAccountId(), + vm.getDataCenterId(), vm.getId(), "Backup-" + vm.getHostName() + "-" + vm.getUuid(), + backup.getBackupOfferingId(), null, null, Backup.class.getSimpleName(), vm.getUuid()); + } + } + } + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); backgroundPollManager.submitTask(new BackupSyncTask(this)); - Backup.Type.HOURLY.setMax(BackupHourlyMax.value()); - Backup.Type.DAILY.setMax(BackupDailyMax.value()); - Backup.Type.WEEKLY.setMax(BackupWeeklyMax.value()); - Backup.Type.MONTHLY.setMax(BackupMonthlyMax.value()); return true; } @@ -1118,7 +1547,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return !(BackupFrameworkEnabled.value() && BackupFrameworkEnabled.valueIn(zoneId)); } - private void validateForZone(final Long zoneId) { + @Override + public void validateBackupForZone(final Long zoneId) { if (zoneId == null || isDisabled(zoneId)) { throw new CloudRuntimeException("Backup and Recovery feature is disabled for the zone"); } @@ -1176,6 +1606,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { cmdList.add(AddBackupRepositoryCmd.class); cmdList.add(DeleteBackupRepositoryCmd.class); cmdList.add(ListBackupRepositoriesCmd.class); + cmdList.add(CreateVMFromBackupCmd.class); + cmdList.add(CreateVMFromBackupCmdByAdmin.class); return cmdList; } @@ -1191,16 +1623,13 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { BackupProviderPlugin, BackupSyncPollingInterval, BackupEnableAttachDetachVolumes, - BackupHourlyMax, - BackupDailyMax, - 
BackupWeeklyMax, - BackupMonthlyMax, DefaultMaxAccountBackups, DefaultMaxAccountBackupStorage, DefaultMaxProjectBackups, DefaultMaxProjectBackupStorage, DefaultMaxDomainBackups, - DefaultMaxDomainBackupStorage + DefaultMaxDomainBackupStorage, + BackupStorageCapacityThreshold }; } @@ -1293,6 +1722,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { for (final BackupScheduleVO backupSchedule: backupsToBeExecuted) { final Long backupScheduleId = backupSchedule.getId(); final Long vmId = backupSchedule.getVmId(); + final Boolean quiesceVm = backupSchedule.getQuiesceVM(); final VMInstanceVO vm = vmInstanceDao.findById(vmId); if (vm == null || vm.getBackupOfferingId() == null) { @@ -1333,7 +1763,10 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { true, 0); final Map params = new HashMap(); params.put(ApiConstants.VIRTUAL_MACHINE_ID, "" + vmId); - params.put(ApiConstants.SCHEDULE_ID, "" + backupScheduleId); + params.put(ApiConstants.SCHEDULE_ID, String.valueOf(backupScheduleId)); + if (quiesceVm != null) { + params.put(ApiConstants.QUIESCE_VM, "" + quiesceVm.toString()); + } params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + vm.getAccountId()); params.put("ctxStartEventId", String.valueOf(eventId)); @@ -1427,42 +1860,93 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { continue; } - List vms = vmInstanceDao.listByZoneWithBackups(dataCenter.getId(), null); - if (vms == null || vms.isEmpty()) { - logger.debug("Can't find any VM to sync backups in zone {}", dataCenter); - continue; - } + backupProvider.syncBackupStorageStats(dataCenter.getId()); - final Map metrics = backupProvider.getBackupMetrics(dataCenter.getId(), new ArrayList<>(vms)); - syncBackupMetrics(backupProvider, metrics); + syncOutOfBandBackups(backupProvider, dataCenter); + + updateBackupUsageRecords(backupProvider, dataCenter); } } catch (final Throwable t) { logger.error(String.format("Error trying to run 
backup-sync background task due to: [%s].", t.getMessage()), t); } } - /** - * Tries to sync the VM backups. If one backup synchronization fails, only this VM backups are skipped, and the entire process does not stop. - */ - private void syncBackupMetrics(final BackupProvider backupProvider, final Map metrics) { - for (final VirtualMachine vm : metrics.keySet()) { - tryToSyncVMBackups(backupProvider, metrics, vm); + private void syncOutOfBandBackups(final BackupProvider backupProvider, DataCenter dataCenter) { + List vms = vmInstanceDao.listByZoneAndBackupOffering(dataCenter.getId(), null); + if (vms == null || vms.isEmpty()) { + logger.debug("Can't find any VM to sync backups in zone {}", dataCenter); + return; + } + backupProvider.syncBackupMetrics(dataCenter.getId()); + for (final VMInstanceVO vm : vms) { + try { + logger.debug(String.format("Trying to sync backups of VM [%s] using backup provider [%s].", vm, backupProvider.getName())); + // Sync out-of-band backups + syncBackups(backupProvider, vm); + } catch (final Exception e) { + logger.error("Failed to sync backup usage metrics and out-of-band backups of VM [{}] due to: [{}].", vm, e.getMessage(), e); + } } } - private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(Backup.RestorePoint restorePoint, List backupsInDb, VirtualMachine vm, Backup.Metric metric) { + private void updateBackupUsageRecords(final BackupProvider backupProvider, DataCenter dataCenter) { + List vmIdsWithBackups = backupDao.listVmIdsWithBackupsInZone(dataCenter.getId()); + List vmsWithBackups; + if (vmIdsWithBackups.size() == 0) { + vmsWithBackups = new ArrayList<>(); + } else { + vmsWithBackups = vmInstanceDao.listByIdsIncludingRemoved(vmIdsWithBackups); + } + List vmsWithBackupOffering = vmInstanceDao.listByZoneAndBackupOffering(dataCenter.getId(), null); //should return including removed + Set vms = Stream.concat(vmsWithBackups.stream(), vmsWithBackupOffering.stream()) .collect(Collectors.toSet()); + + for (final 
VirtualMachine vm : vms) { + + Map> backupOfferingToSizeMap = new HashMap<>(); + for (final Backup backup: backupDao.listByVmId(null, vm.getId())) { + Long backupSize = 0L; + Long backupProtectedSize = 0L; + if (Objects.nonNull(backup.getSize())) { + backupSize = backup.getSize(); + } + if (Objects.nonNull(backup.getProtectedSize())) { + backupProtectedSize = backup.getProtectedSize(); + } + Long offeringId = backup.getBackupOfferingId(); + if (backupOfferingToSizeMap.containsKey(offeringId)) { + Pair sizes = backupOfferingToSizeMap.get(offeringId); + sizes.set(sizes.first() + backupSize, sizes.second() + backupProtectedSize); + } else { + backupOfferingToSizeMap.put(offeringId, new Pair<>(backupSize, backupProtectedSize)); + } + } + + for (final Map.Entry> entry : backupOfferingToSizeMap.entrySet()) { + Long offeringId = entry.getKey(); + Pair sizes = entry.getValue(); + Long backupSize = sizes.first(); + Long protectedSize = sizes.second(); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_USAGE_METRIC, vm.getAccountId(), + vm.getDataCenterId(), vm.getId(), "Backup-" + vm.getHostName() + "-" + vm.getUuid(), + offeringId, null, backupSize, protectedSize, + Backup.class.getSimpleName(), vm.getUuid()); + } + } + } + + private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(Backup.RestorePoint restorePoint, List backupsInDb, VirtualMachine vm) { for (final Backup backupInDb : backupsInDb) { logger.debug(String.format("Checking if Backup %s with external ID %s for VM %s is valid", backupsInDb, backupInDb.getName(), vm)); if (restorePoint.getId().equals(backupInDb.getExternalId())) { - logger.debug(String.format("Found Backup %s in both Database and Networker", backupInDb)); - if (metric != null) { + logger.debug(String.format("Found Backup %s in both Database and Provider", backupInDb)); + if (restorePoint.getDataSize() != null && restorePoint.getBackupSize() != null) { logger.debug(String.format("Update backup [%s] from [size: %s, protected size: 
%s] to [size: %s, protected size: %s].", - backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); + backupInDb, backupInDb.getSize(), backupInDb.getProtectedSize(), restorePoint.getBackupSize(), restorePoint.getDataSize())); - resourceLimitMgr.decrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backupInDb.getSize()); - ((BackupVO) backupInDb).setSize(metric.getBackupSize()); - ((BackupVO) backupInDb).setProtectedSize(metric.getDataSize()); - resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backupInDb.getSize()); + resourceLimitMgr.decrementResourceCount(backupInDb.getAccountId(), Resource.ResourceType.backup_storage, backupInDb.getSize()); + ((BackupVO) backupInDb).setSize(restorePoint.getBackupSize()); + ((BackupVO) backupInDb).setProtectedSize(restorePoint.getDataSize()); + resourceLimitMgr.incrementResourceCount(backupInDb.getAccountId(), Resource.ResourceType.backup_storage, backupInDb.getSize()); backupDao.update(backupInDb.getId(), ((BackupVO) backupInDb)); } @@ -1472,7 +1956,22 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return null; } - private void syncBackups(BackupProvider backupProvider, VirtualMachine vm, Backup.Metric metric) { + private void processRemoveList(List removeList, VirtualMachine vm) { + for (final Long backupIdToRemove : removeList) { + logger.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove)); + Backup backup = backupDao.findById(backupIdToRemove); + resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup); + resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize()); + boolean result = backupDao.remove(backupIdToRemove); + if (result) { + checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(vm, backup); + } else { + logger.error("Failed to remove 
backup db entry ith ID: {} during sync backups", backupIdToRemove); + } + } + } + + private void syncBackups(BackupProvider backupProvider, VirtualMachine vm) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { @@ -1485,14 +1984,14 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { final List removeList = backupsInDb.stream().map(InternalIdentity::getId).collect(Collectors.toList()); for (final Backup.RestorePoint restorePoint : restorePoints) { if (!(restorePoint.getId() == null || restorePoint.getType() == null || restorePoint.getCreated() == null)) { - Backup existingBackupEntry = checkAndUpdateIfBackupEntryExistsForRestorePoint(restorePoint, backupsInDb, vm, metric); + Backup existingBackupEntry = checkAndUpdateIfBackupEntryExistsForRestorePoint(restorePoint, backupsInDb, vm); if (existingBackupEntry != null) { removeList.remove(existingBackupEntry.getId()); continue; } } - Backup backup = backupProvider.createNewBackupEntryForRestorePoint(restorePoint, vm, metric); + Backup backup = backupProvider.createNewBackupEntryForRestorePoint(restorePoint, vm); if (backup != null) { logger.warn("Added backup found in provider [" + backup + "]"); resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup); @@ -1507,35 +2006,11 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),0); } } - for (final Long backupIdToRemove : removeList) { - logger.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove)); - Backup backup = backupDao.findById(backupIdToRemove); - resourceLimitMgr.decrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup); - resourceLimitMgr.decrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize()); - backupDao.remove(backupIdToRemove); - } + 
processRemoveList(removeList, vm); } }); } - private void tryToSyncVMBackups(BackupProvider backupProvider, Map metrics, VirtualMachine vm) { - try { - final Backup.Metric metric = metrics.get(vm); - if (metric != null) { - logger.debug(String.format("Trying to sync backups of VM [%s] using backup provider [%s].", vm, backupProvider.getName())); - // Sync out-of-band backups - syncBackups(backupProvider, vm, metric); - // Emit a usage event, update usage metric for the VM by the usage server - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_USAGE_METRIC, vm.getAccountId(), - vm.getDataCenterId(), vm.getId(), "Backup-" + vm.getHostName() + "-" + vm.getUuid(), - vm.getBackupOfferingId(), null, metric.getBackupSize(), metric.getDataSize(), - Backup.class.getSimpleName(), vm.getUuid()); - } - } catch (final Exception e) { - logger.error("Failed to sync backup usage metrics and out-of-band backups of VM [{}] due to: [{}].", vm, e.getMessage(), e); - } - } - @Override public Long getDelay() { return BackupSyncPollingInterval.value() * 1000L; @@ -1586,4 +2061,124 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return response; } + Map getDetailsFromBackupDetails(Long backupId) { + Map details = backupDetailsDao.listDetailsKeyPairs(backupId, true); + if (details == null) { + return null; + } + if (details.containsKey(ApiConstants.TEMPLATE_ID)) { + VirtualMachineTemplate template = vmTemplateDao.findByUuid(details.get(ApiConstants.TEMPLATE_ID)); + if (template != null) { + details.put(ApiConstants.TEMPLATE_ID, template.getUuid()); + details.put(ApiConstants.TEMPLATE_NAME, template.getName()); + details.put(ApiConstants.IS_ISO, String.valueOf(template.getFormat().equals(Storage.ImageFormat.ISO))); + } + } + if (details.containsKey(ApiConstants.SERVICE_OFFERING_ID)) { + ServiceOffering serviceOffering = serviceOfferingDao.findByUuid(details.get(ApiConstants.SERVICE_OFFERING_ID)); + if (serviceOffering != null) { + 
details.put(ApiConstants.SERVICE_OFFERING_ID, serviceOffering.getUuid()); + details.put(ApiConstants.SERVICE_OFFERING_NAME, serviceOffering.getName()); + } + } + if (details.containsKey(ApiConstants.NICS)) { + Type type = new TypeToken>>() {}.getType(); + List> nics = new Gson().fromJson(details.get(ApiConstants.NICS), type); + + for (Map nic : nics) { + String networkUuid = nic.get(ApiConstants.NETWORK_ID); + if (networkUuid != null) { + Network network = networkDao.findByUuid(networkUuid); + if (network != null) { + nic.put(ApiConstants.NETWORK_NAME, network.getName()); + } + } + } + details.put(ApiConstants.NICS, new Gson().toJson(nics)); + } + return details; + } + + @Override + public BackupResponse createBackupResponse(Backup backup, Boolean listVmDetails) { + VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + AccountVO account = accountDao.findByIdIncludingRemoved(backup.getAccountId()); + DomainVO domain = domainDao.findByIdIncludingRemoved(backup.getDomainId()); + DataCenterVO zone = dataCenterDao.findByIdIncludingRemoved(backup.getZoneId()); + Long offeringId = backup.getBackupOfferingId(); + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(offeringId); + + BackupResponse response = new BackupResponse(); + response.setId(backup.getUuid()); + response.setName(backup.getName()); + response.setDescription(backup.getDescription()); + response.setVmName(vm.getHostName()); + response.setVmId(vm.getUuid()); + if (vm.getBackupOfferingId() == null || vm.getBackupOfferingId() != backup.getBackupOfferingId()) { + response.setVmOfferingRemoved(true); + } + response.setExternalId(backup.getExternalId()); + response.setType(backup.getType()); + response.setDate(backup.getDate()); + response.setSize(backup.getSize()); + response.setProtectedSize(backup.getProtectedSize()); + response.setStatus(backup.getStatus()); + response.setIntervalType("MANUAL"); + if (backup.getBackupScheduleId() != null) { + BackupScheduleVO 
scheduleVO = backupScheduleDao.findById(backup.getBackupScheduleId()); + if (scheduleVO != null) { + response.setIntervalType(scheduleVO.getScheduleType().toString()); + } + } + // ACS 4.20: For backups taken prior this release the backup.backed_volumes column would be empty hence use vm_instance.backup_volumes + String backedUpVolumes; + if (Objects.isNull(backup.getBackedUpVolumes())) { + backedUpVolumes = new Gson().toJson(vm.getBackupVolumeList().toArray(), Backup.VolumeInfo[].class); + } else { + backedUpVolumes = new Gson().toJson(backup.getBackedUpVolumes().toArray(), Backup.VolumeInfo[].class); + } + response.setVolumes(backedUpVolumes); + response.setBackupOfferingId(offering.getUuid()); + response.setBackupOffering(offering.getName()); + response.setAccountId(account.getUuid()); + response.setAccount(account.getAccountName()); + response.setDomainId(domain.getUuid()); + response.setDomain(domain.getName()); + response.setZoneId(zone.getUuid()); + response.setZone(zone.getName()); + + if (Boolean.TRUE.equals(listVmDetails)) { + Map vmDetails = new HashMap<>(); + vmDetails.put(ApiConstants.HYPERVISOR, vm.getHypervisorType().toString()); + Map details = getDetailsFromBackupDetails(backup.getId()); + vmDetails.putAll(details); + response.setVmDetails(vmDetails); + } + + response.setObjectName("backup"); + return response; + } + + @Override + public CapacityVO getBackupStorageUsedStats(Long zoneId) { + final BackupProvider backupProvider = getBackupProvider(zoneId); + Pair backupUsage = backupProvider.getBackupStorageStats(zoneId); + return new CapacityVO(null, zoneId, null, null, backupUsage.first(), backupUsage.second(), Capacity.CAPACITY_TYPE_BACKUP_STORAGE); + } + + @Override + public void checkAndRemoveBackupOfferingBeforeExpunge(VirtualMachine vm) { + if (vm.getBackupOfferingId() == null) { + return; + } + List backupsForVm = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), vm.getBackupOfferingId()); + if 
(org.apache.commons.collections.CollectionUtils.isEmpty(backupsForVm)) { + removeVMFromBackupOffering(vm.getId(), true); + } else { + throw new CloudRuntimeException(String.format("This Instance [uuid: %s, name: %s] has a " + + "Backup Offering [id: %s, external id: %s] with %s backups. Please, remove the backup offering " + + "before proceeding to VM exclusion!", vm.getUuid(), vm.getInstanceName(), vm.getBackupOfferingId(), + vm.getBackupExternalId(), backupsForVm.size())); + } + } } diff --git a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java index 38ab45f20b4..306023a2263 100644 --- a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.consoleproxy; +import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.List; @@ -27,7 +28,15 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InvalidParameterValueException; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.command.user.consoleproxy.ConsoleEndpoint; +import org.apache.cloudstack.api.command.user.consoleproxy.ListConsoleSessionsCmd; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; +import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.security.keys.KeysManager; import org.apache.commons.codec.binary.Base64; @@ -86,6 +95,8 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce 
@Inject private AccountManager accountManager; @Inject + private DomainDao domainDao; + @Inject private VirtualMachineManager virtualMachineManager; @Inject private ManagementServer managementServer; @@ -103,6 +114,8 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce DataCenterDao dataCenterDao; @Inject private ConsoleSessionDao consoleSessionDao; + @Inject + private ResponseGenerator responseGenerator; private ScheduledExecutorService executorService = null; @@ -112,7 +125,7 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce protected Logger logger = LogManager.getLogger(ConsoleAccessManagerImpl.class); private static final List unsupportedConsoleVMState = Arrays.asList( - VirtualMachine.State.Stopped, VirtualMachine.State.Error, VirtualMachine.State.Destroyed + VirtualMachine.State.Stopped, VirtualMachine.State.Restoring, VirtualMachine.State.Error, VirtualMachine.State.Destroyed ); @Override @@ -181,6 +194,78 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce } } + @Override + public ListResponse listConsoleSessions(ListConsoleSessionsCmd cmd) { + Pair, Integer> consoleSessions = listConsoleSessionsInternal(cmd); + ListResponse response = new ListResponse<>(); + + ResponseObject.ResponseView responseView = ResponseObject.ResponseView.Restricted; + Long callerId = CallContext.current().getCallingAccountId(); + if (accountManager.isRootAdmin(callerId)) { + responseView = ResponseObject.ResponseView.Full; + } + + List consoleSessionResponses = new ArrayList<>(); + for (ConsoleSessionVO consoleSession : consoleSessions.first()) { + ConsoleSessionResponse consoleSessionResponse = responseGenerator.createConsoleSessionResponse(consoleSession, responseView); + consoleSessionResponses.add(consoleSessionResponse); + } + + response.setResponses(consoleSessionResponses, consoleSessions.second()); + return response; + } + + protected Pair, Integer> 
listConsoleSessionsInternal(ListConsoleSessionsCmd cmd) { + CallContext caller = CallContext.current(); + long domainId = getBaseDomainIdToListConsoleSessions(cmd.getDomainId()); + Long accountId = cmd.getAccountId(); + Long userId = cmd.getUserId(); + boolean isRecursive = cmd.isRecursive(); + + boolean isCallerNormalUser = accountManager.isNormalUser(caller.getCallingAccountId()); + if (isCallerNormalUser) { + accountId = caller.getCallingAccountId(); + userId = caller.getCallingUserId(); + } + + List domainIds = isRecursive ? domainDao.getDomainAndChildrenIds(domainId) : List.of(domainId); + + return consoleSessionDao.listConsoleSessions(cmd.getId(), domainIds, accountId, userId, + cmd.getHostId(), cmd.getStartDate(), cmd.getEndDate(), cmd.getVmId(), + cmd.getConsoleEndpointCreatorAddress(), cmd.getClientAddress(), cmd.isActiveOnly(), + cmd.getAcquired(), cmd.getPageSizeVal(), cmd.getStartIndex()); + } + + /** + * Determines the base domain ID for listing console sessions. + * + * If no domain ID is provided, returns the caller's domain ID. Otherwise, + * checks if the caller has access to that domain and returns the provided domain ID. + * + * @param domainId The domain ID to check, can be null + * @return The base domain ID to use for listing console sessions + * @throws PermissionDeniedException if the caller does not have access to the specified domain + */ + protected long getBaseDomainIdToListConsoleSessions(Long domainId) { + Account caller = CallContext.current().getCallingAccount(); + if (domainId == null) { + return caller.getDomainId(); + } + + Domain domain = domainDao.findById(domainId); + if (domain == null) { + throw new InvalidParameterValueException(String.format("Unable to find domain with ID [%s]. 
Verify the informed domain and try again.", domainId)); + } + + accountManager.checkAccess(caller, domain); + return domainId; + } + + @Override + public ConsoleSession listConsoleSessionById(long id) { + return consoleSessionDao.findByIdIncludingRemoved(id); + } + @Override public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityToken, String clientAddress) { try { @@ -411,10 +496,13 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce } protected void persistConsoleSession(String sessionUuid, long instanceId, long hostId, String consoleEndpointCreatorAddress) { + CallContext caller = CallContext.current(); + ConsoleSessionVO consoleSessionVo = new ConsoleSessionVO(); consoleSessionVo.setUuid(sessionUuid); - consoleSessionVo.setAccountId(CallContext.current().getCallingAccountId()); - consoleSessionVo.setUserId(CallContext.current().getCallingUserId()); + consoleSessionVo.setDomainId(caller.getCallingAccount().getDomainId()); + consoleSessionVo.setAccountId(caller.getCallingAccountId()); + consoleSessionVo.setUserId(caller.getCallingUserId()); consoleSessionVo.setInstanceId(instanceId); consoleSessionVo.setHostId(hostId); consoleSessionVo.setConsoleEndpointCreatorAddress(consoleEndpointCreatorAddress); diff --git a/server/src/main/java/org/apache/cloudstack/gpu/GpuServiceImpl.java b/server/src/main/java/org/apache/cloudstack/gpu/GpuServiceImpl.java index 5890b176c8e..a553abbacd5 100644 --- a/server/src/main/java/org/apache/cloudstack/gpu/GpuServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/gpu/GpuServiceImpl.java @@ -795,7 +795,6 @@ public class GpuServiceImpl extends ManagerBase implements GpuService, Pluggable vgpuProfile.getName(), gpuDevice.getBusAddress(), gpuCard.getVendorId(), gpuCard.getVendorName(), gpuCard.getDeviceId(), gpuCard.getDeviceName()); vgpuInfo.setDisplay(serviceOffering.getGpuDisplay()); - if (gpuDevice.getParentGpuDeviceId() != null) { GpuDeviceVO parentGpuDevice = 
gpuDeviceDao.findById(gpuDevice.getParentGpuDeviceId()); if (parentGpuDevice != null) { @@ -891,14 +890,20 @@ public class GpuServiceImpl extends ManagerBase implements GpuService, Pluggable } else { // Update the device's info GpuDeviceVO parentGpuDevice = null; - if (existingDevice.getParentGpuDeviceId() == null - && deviceInfo.getParentBusAddress() != null) { + if (deviceInfo.getParentBusAddress() != null) { parentGpuDevice = gpuDeviceDao.findByHostIdAndBusAddress(host.getId(), deviceInfo.getParentBusAddress()); if (parentGpuDevice != null) { existingDevice.setParentGpuDeviceId(parentGpuDevice.getId()); + parentGpuDevice.setType(GpuDevice.DeviceType.VGPUOnly); + gpuDeviceDao.persist(parentGpuDevice); } } + if (deviceInfo.isPassthroughEnabled()) { + existingDevice.setType(deviceInfo.getDeviceType()); + } else { + existingDevice.setType(GpuDevice.DeviceType.VGPUOnly); + } if (existingDevice.getPciRoot() == null) { existingDevice.setPciRoot(deviceInfo.getPciRoot()); } @@ -913,7 +918,6 @@ public class GpuServiceImpl extends ManagerBase implements GpuService, Pluggable for (final GpuDeviceVO device : gpuDevicesToDisableMap.values()) { logger.info("Disabling GPU device {} on host {} due to missing address in the new devices on the host.", device, host); device.setState(GpuDevice.State.Error); - device.setManagedState(GpuDevice.ManagedState.Unmanaged); gpuDeviceDao.update(device.getId(), device); checkAndUpdateParentGpuDeviceState(device.getParentGpuDeviceId()); } @@ -1024,11 +1028,14 @@ public class GpuServiceImpl extends ManagerBase implements GpuService, Pluggable deviceInfo.getParentBusAddress()); if (parentGpuDevice != null) { parentGpuDeviceId = parentGpuDevice.getId(); + parentGpuDevice.setType(GpuDevice.DeviceType.VGPUOnly); + gpuDeviceDao.persist(parentGpuDevice); } } GpuDeviceVO gpuDevice = new GpuDeviceVO(card.getId(), vgpuProfile.getId(), deviceInfo.getBusAddress(), host.getId(), parentGpuDeviceId, deviceInfo.getNumaNode(), deviceInfo.getPciRoot()); 
gpuDevice.setHostId(host.getId()); + gpuDevice.setType(deviceInfo.getDeviceType()); gpuDevice.setBusAddress(deviceInfo.getBusAddress()); gpuDevice.setCardId(card.getId()); setStateAndVmName(deviceInfo, gpuDevice, parentGpuDevice); diff --git a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java index 50ec8a827b4..45943e27d2b 100644 --- a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java @@ -1021,9 +1021,10 @@ public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements Rou } @Override - public boolean isVpcVirtualRouterGateway(VpcOffering vpcOffering) { + public boolean isValidGateway(VpcOffering vpcOffering) { return NetworkOffering.NetworkMode.ROUTED.equals(vpcOffering.getNetworkMode()) - && vpcOfferingServiceMapDao.findByServiceProviderAndOfferingId(Service.Gateway.getName(), Provider.VPCVirtualRouter.getName(), vpcOffering.getId()) != null; + && (vpcOfferingServiceMapDao.findByServiceProviderAndOfferingId(Service.Gateway.getName(), Provider.VPCVirtualRouter.getName(), vpcOffering.getId()) != null + || vpcOfferingServiceMapDao.findByServiceProviderAndOfferingId(Service.Gateway.getName(), Provider.Netris.getName(), vpcOffering.getId()) != null); } @Override diff --git a/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java index 298753a6c4f..b752a4c384f 100644 --- a/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java @@ -36,6 +36,8 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.resource.PurgeExpungedResourcesCmd; +import 
org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; @@ -158,6 +160,8 @@ public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceC ManagementServerHostDao managementServerHostDao; @Inject ServiceOfferingDetailsDao serviceOfferingDetailsDao; + @Inject + BackupDao backupDao; private ScheduledExecutorService expungedResourcesCleanupExecutor; private ExecutorService purgeExpungedResourcesJobExecutor; @@ -300,7 +304,7 @@ public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceC .collect(Collectors.toCollection(HashSet::new)); } - protected Pair, List> getFilteredVmIdsForSnapshots(List vmIds) { + protected Pair, List> getFilteredVmIdsForSnapshotsAndBackups(List vmIds) { HashSet currentSkippedVmIds = new HashSet<>(); List activeSnapshots = vmSnapshotDao.searchByVms(vmIds); if (CollectionUtils.isNotEmpty(activeSnapshots)) { @@ -320,20 +324,33 @@ public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceC } currentSkippedVmIds.addAll(vmIdsWithActiveVolumeSnapshots); } + + List backups = backupDao.searchByVmIds(vmIds); + if (CollectionUtils.isNotEmpty(backups)) { + HashSet vmIdsWithBackups = backups.stream().map(BackupVO::getVmId) + .collect(Collectors.toCollection(HashSet::new)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Skipping purging VMs with IDs %s as they have backups", + StringUtils.join(vmIdsWithBackups))); + } + currentSkippedVmIds.addAll(vmIdsWithBackups); + } + if (CollectionUtils.isNotEmpty(currentSkippedVmIds)) { vmIds.removeAll(currentSkippedVmIds); } + return new Pair<>(vmIds, new ArrayList<>(currentSkippedVmIds)); } - protected Pair, List> getVmIdsWithNoActiveSnapshots(final Date startDate, final Date endDate, - final 
Long batchSize, final List skippedVmIds) { + protected Pair, List> getVmIdsWithNoActiveSnapshotsAndBackups(final Date startDate, final Date endDate, + final Long batchSize, final List skippedVmIds) { List vms = vmInstanceDao.searchRemovedByRemoveDate(startDate, endDate, batchSize, skippedVmIds); if (CollectionUtils.isEmpty(vms)) { return new Pair<>(new ArrayList<>(), new ArrayList<>()); } List vmIds = vms.stream().map(VMInstanceVO::getId).collect(Collectors.toList()); - return getFilteredVmIdsForSnapshots(vmIds); + return getFilteredVmIdsForSnapshotsAndBackups(vmIds); } protected long purgeVMEntities(final Long batchSize, final Date startDate, final Date endDate) { @@ -344,7 +361,7 @@ public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceC List skippedVmIds = new ArrayList<>(); do { Pair, List> allVmIds = - getVmIdsWithNoActiveSnapshots(startDate, endDate, batchSize, skippedVmIds); + getVmIdsWithNoActiveSnapshotsAndBackups(startDate, endDate, batchSize, skippedVmIds); List vmIds = allVmIds.first(); List currentSkippedVmIds = allVmIds.second(); count = vmIds.size() + currentSkippedVmIds.size(); @@ -364,7 +381,7 @@ public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceC final Long batchSize = ExpungedResourcesPurgeBatchSize.value().longValue(); List vmIds = new ArrayList<>(); vmIds.add(vmId); - Pair, List> allVmIds = getFilteredVmIdsForSnapshots(vmIds); + Pair, List> allVmIds = getFilteredVmIdsForSnapshotsAndBackups(vmIds); if (CollectionUtils.isEmpty(allVmIds.first())) { return false; } diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index d67180d3eb2..e7ec8c8208f 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -19,14 +19,19 @@ package org.apache.cloudstack.snapshot; -import java.util.Arrays; 
-import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -import javax.inject.Inject; +import com.cloud.api.query.dao.SnapshotJoinDao; +import com.cloud.api.query.vo.SnapshotJoinVO; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; @@ -36,27 +41,31 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; + import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import com.cloud.hypervisor.Hypervisor; -import 
com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Snapshot; -import com.cloud.storage.SnapshotVO; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; public class SnapshotHelper { protected Logger logger = LogManager.getLogger(getClass()); @@ -82,6 +91,9 @@ public class SnapshotHelper { @Inject protected PrimaryDataStoreDao primaryDataStoreDao; + @Inject + protected SnapshotJoinDao snapshotJoinDao; + protected boolean backupSnapshotAfterTakingSnapshot = SnapshotInfo.BackupSnapshotAfterTakingSnapshot.value(); protected final Set storagePoolTypesToValidateWithBackupSnapshotAfterTakingSnapshot = new HashSet<>(Arrays.asList(StoragePoolType.RBD, @@ -92,6 +104,22 @@ public class SnapshotHelper { * @param snapInfo the snapshot info to delete. 
*/ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { + long storeId = snapInfo.getDataStore().getId(); + long zoneId = dataStorageManager.getStoreZoneId(storeId, snapInfo.getDataStore().getRole()); + + if (isStorageSupportSnapshotToTemplate(snapInfo)) { + logger.debug("The primary storage does not delete the snapshots even if there is a backup on secondary"); + return; + } + + List snapshots = snapshotJoinDao.listBySnapshotIdAndZoneId(zoneId, snapInfo.getSnapshotId()); + if (kvmSnapshotOnlyInPrimaryStorage || snapshots.size() <= 1) { + if (snapInfo != null) { + logger.trace(String.format("Snapshot [{}] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); + } + return; + } + if (snapInfo == null) { logger.warn("Unable to expunge snapshot due to its info is null."); return; @@ -118,15 +146,20 @@ public class SnapshotHelper { } } - long storeId = snapInfo.getDataStore().getId(); if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) { - long zoneId = dataStorageManager.getStoreZoneId(storeId, snapInfo.getDataStore().getRole()); SnapshotInfo imageStoreSnapInfo = snapshotFactory.getSnapshotWithRoleAndZone(snapInfo.getId(), DataStoreRole.Image, zoneId); storeId = imageStoreSnapInfo.getDataStore().getId(); } snapshotDataStoreDao.expungeReferenceBySnapshotIdAndDataStoreRole(snapInfo.getId(), storeId, DataStoreRole.Image); } + public boolean isStorageSupportSnapshotToTemplate(SnapshotInfo snapInfo) { + if (DataStoreRole.Primary.equals(snapInfo.getDataStore().getRole())) { + Map capabilities = snapInfo.getDataStore().getDriver().getCapabilities(); + return org.apache.commons.collections4.MapUtils.isNotEmpty(capabilities) && capabilities.containsKey(DataStoreCapabilities.CAN_CREATE_TEMPLATE_FROM_SNAPSHOT.toString()); + } + return false; + } /** * Backup the snapshot to secondary storage if it should be backed up and was not yet or it is a temporary backup to create 
a volume. * @return The parameter snapInfo if the snapshot is not backupable, else backs up the snapshot to secondary storage and returns its info. @@ -181,8 +214,11 @@ public class SnapshotHelper { * @return true if hypervisor is {@link HypervisorType#KVM} and data store role is {@link DataStoreRole#Primary} and global setting "snapshot.backup.to.secondary" is false, * else false. */ - public boolean isKvmSnapshotOnlyInPrimaryStorage(Snapshot snapshot, DataStoreRole dataStoreRole){ - return snapshot.getHypervisorType() == Hypervisor.HypervisorType.KVM && dataStoreRole == DataStoreRole.Primary && !backupSnapshotAfterTakingSnapshot; + public boolean isKvmSnapshotOnlyInPrimaryStorage(Snapshot snapshot, DataStoreRole dataStoreRole, Long zoneId){ + List snapshots = snapshotJoinDao.listBySnapshotIdAndZoneId(zoneId, snapshot.getSnapshotId()); + boolean isKvmSnapshotOnlyInPrimaryStorage = snapshots.stream().filter(s -> s.getStoreRole().equals(DataStoreRole.Image)).count() == 0; + + return snapshot.getHypervisorType() == Hypervisor.HypervisorType.KVM && dataStoreRole == DataStoreRole.Primary && isKvmSnapshotOnlyInPrimaryStorage; } public DataStoreRole getDataStoreRole(Snapshot snapshot) { @@ -215,10 +251,21 @@ public class SnapshotHelper { return DataStoreRole.Image; } - /** - * Verifies if it is a KVM volume that has snapshots only in primary storage. - * @throws CloudRuntimeException If it is a KVM volume and has at least one snapshot only in primary storage. - */ + public DataStoreRole getDataStoreRole(Snapshot snapshot, Long zoneId) { + if (zoneId == null) { + getDataStoreRole(snapshot); + } + List snapshots = snapshotJoinDao.listBySnapshotIdAndZoneId(zoneId, snapshot.getId()); + boolean snapshotOnPrimary = snapshots.stream().anyMatch(s -> s.getStoreRole().equals(DataStoreRole.Primary)); + if (snapshotOnPrimary) { + return DataStoreRole.Primary; + } + return DataStoreRole.Image; + } + /** + * Verifies if it is a KVM volume that has snapshots only in primary storage. 
+ * @throws CloudRuntimeException If it is a KVM volume and has at least one snapshot only in primary storage. + */ public void checkKvmVolumeSnapshotsOnlyInPrimaryStorage(VolumeVO volumeVo, HypervisorType hypervisorType) throws CloudRuntimeException { if (HypervisorType.KVM != hypervisorType) { logger.trace(String.format("The %s hypervisor [%s] is not KVM, therefore we will not check if the snapshots are only in primary storage.", volumeVo, hypervisorType)); @@ -271,10 +318,62 @@ public class SnapshotHelper { } public SnapshotInfo convertSnapshotIfNeeded(SnapshotInfo snapshotInfo) { + if (snapshotInfo.getParent() == null || !HypervisorType.KVM.equals(snapshotInfo.getHypervisorType())) { return snapshotInfo; } return snapshotService.convertSnapshot(snapshotInfo); } + + public void checkIfThereAreMoreThanOnePoolInTheZone(List poolIds) { + List poolsInOneZone = new ArrayList<>(); + for (Long poolId : poolIds) { + StoragePoolVO pool = primaryDataStoreDao.findById(poolId); + if (pool != null) { + poolsInOneZone.add(pool.getDataCenterId()); + } + } + boolean moreThanOnePoolForZone = poolsInOneZone.stream().filter(itr -> Collections.frequency(poolsInOneZone, itr) > 1).count() > 1; + if (moreThanOnePoolForZone) { + throw new CloudRuntimeException("Cannot copy the snapshot on multiple storage pools in one zone"); + } + } + + public List addStoragePoolsForCopyToPrimary(VolumeInfo volume, List destZoneIds, List storagePoolIds, Boolean useStorageReplication) { + if (useStorageReplication) { + if (volume == null) { + throw new InvalidParameterValueException("Could not find volume of a snapshot"); + } else if (!doesStorageSupportCopyBetweenZones(volume.getPoolId())){ + throw new InvalidParameterValueException("The storage pool does not support copy between zones"); + } + if (CollectionUtils.isEmpty(destZoneIds)) { + throw new InvalidParameterValueException("There is no destination zone provided"); + } + if (CollectionUtils.isEmpty(storagePoolIds)) { + storagePoolIds = new 
ArrayList<>(); + for (Long destZone : destZoneIds) { + List pools = primaryDataStoreDao.findPoolsByStorageTypeAndZone(volume.getStoragePoolType(), destZone); + if (CollectionUtils.isNotEmpty(pools)) { + StoragePoolVO storagePoolVO = pools.stream().filter(pool -> SnapshotManager.UseStorageReplication.valueIn(pool.getId()) == true).findFirst().get(); + storagePoolIds.add(storagePoolVO.getId()); + } + } + if (CollectionUtils.isEmpty(storagePoolIds)) { + throw new InvalidParameterValueException("Cannot copy snapshot to primary storage. There aren't storage pools that support this operation"); + } + } + destZoneIds.clear(); + } + return storagePoolIds; + } + + public boolean doesStorageSupportCopyBetweenZones(Long poolId) { + DataStore dataStore = dataStorageManager.getDataStore(poolId, DataStoreRole.Primary); + if (dataStore != null + && dataStore.getDriver().getCapabilities().containsKey(DataStoreCapabilities.CAN_COPY_SNAPSHOT_BETWEEN_ZONES_AND_SAME_POOL_TYPE.toString())) { + return true; + } + return false; + } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java index ea3361507ca..5a18f16fd72 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java @@ -173,6 +173,14 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic if (cmd.getQuota() != null) { objectStore.setQuota(bucketTO, cmd.getQuota()); resourceLimitManager.incrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, (cmd.getQuota() * Resource.ResourceType.bytesToGiB)); + if (objectStoreVO.getTotalSize() != null && objectStoreVO.getTotalSize() != 0 && objectStoreVO.getAllocatedSize() != null) { + Long allocatedSize = objectStoreVO.getAllocatedSize() / Resource.ResourceType.bytesToGiB; + Long 
totalSize = objectStoreVO.getTotalSize() / Resource.ResourceType.bytesToGiB; + if (cmd.getQuota() + allocatedSize > totalSize) { + logger.error("Object store {}'s allocated size has reached the total size limit of {}GiB.", objectStoreVO.getName(), totalSize); + throw new CloudRuntimeException("Not enough space in object store to create the bucket"); + } + } } if (cmd.getPolicy() != null) { @@ -181,6 +189,9 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic bucket.setState(Bucket.State.Created); _bucketDao.update(bucket.getId(), bucket); + if (cmd.getQuota() != null) { + _objectStoreDao.updateAllocatedSize(objectStoreVO, cmd.getQuota() * Resource.ResourceType.bytesToGiB); + } } catch (Exception e) { logger.debug("Failed to create bucket with name: "+bucket.getName(), e); if(bucketCreated) { @@ -205,7 +216,10 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); if (objectStore.deleteBucket(bucketTO)) { resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.bucket); - resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, (bucket.getQuota() * Resource.ResourceType.bytesToGiB)); + if (bucket.getQuota() != null) { + resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, (bucket.getQuota() * Resource.ResourceType.bytesToGiB)); + _objectStoreDao.updateAllocatedSize(objectStoreVO, -(bucket.getQuota() * Resource.ResourceType.bytesToGiB)); + } return _bucketDao.remove(bucketId); } return false; @@ -265,6 +279,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic } else { resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, ((-quotaDelta) * Resource.ResourceType.bytesToGiB)); } 
+ _objectStoreDao.updateAllocatedSize(objectStoreVO, (quotaDelta * Resource.ResourceType.bytesToGiB)); } _bucketDao.update(bucket.getId(), bucket); } catch (Exception e) { @@ -313,9 +328,11 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic continue; } List buckets = _bucketDao.listByObjectStoreId(objectStoreVO.getId()); + Long objectStoreUsedBytes = 0L; for(BucketVO bucket : buckets) { Long size = bucketSizes.get(bucket.getName()); - if( size != null){ + if( size != null) { + objectStoreUsedBytes += size; bucket.setSize(size); _bucketDao.update(bucket.getId(), bucket); @@ -331,6 +348,8 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic } } } + objectStoreVO.setUsedSize(objectStoreUsedBytes); + _objectStoreDao.persist(objectStoreVO); } logger.debug("Completed updating bucket usage for all object stores"); } catch (Exception e) { diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index d34d0b5873f..170fceae986 100644 --- a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -17,10 +17,15 @@ package com.cloud.alert; import java.io.UnsupportedEncodingException; +import java.util.HashMap; import java.util.List; +import java.util.Optional; import javax.mail.MessagingException; +import javax.naming.ConfigurationException; +import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.mailing.SMTPMailSender; @@ -39,6 +44,8 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.alert.dao.AlertDao; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; +import 
com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; @@ -96,6 +103,15 @@ public class AlertManagerImplTest { @Mock SMTPMailSender mailSenderMock; + @Mock + CapacityDao capacityDao; + + @Mock + BackupManager backupManager; + + @Mock + ConfigurationDao configDao; + private final String[] recipients = new String[]{"test@test.com"}; private final String senderAddress = "sender@test.com"; @@ -219,4 +235,37 @@ public class AlertManagerImplTest { Mockito.verify(storageManager, Mockito.times(2)).createCapacityEntry(sharedPool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 10L); Mockito.verify(storageManager, Mockito.times(1)).createCapacityEntry(nonSharedPool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, 20L); } + + @Test + public void testCheckForAlerts() throws ConfigurationException { + Long zoneId = 1L; + Mockito.doNothing().when(alertManagerImplMock).recalculateCapacity(); + DataCenterVO dc = Mockito.mock(DataCenterVO.class); + Mockito.when(dc.getId()).thenReturn(zoneId); + Mockito.when(dc.getName()).thenReturn("zone1"); + Mockito.when(_dcDao.listAll()).thenReturn(List.of(dc)); + Mockito.when(_dcDao.findById(zoneId)).thenReturn(dc); + Mockito.when(configDao.getConfiguration("management-server", null)).thenReturn(new HashMap<>()); + + alertManagerImplMock.configure(null, null); + CapacityVO secondaryStorageCapacity = new CapacityVO(null, zoneId, null, null, 100L, 200L, Capacity.CAPACITY_TYPE_SECONDARY_STORAGE); + CapacityVO storagePoolCapacity = new CapacityVO(null, zoneId, null, null, 200L, 300L, Capacity.CAPACITY_TYPE_STORAGE); + CapacityVO objectStoreCapacity = new CapacityVO(null, zoneId, null, null, 200L, 300L, Capacity.CAPACITY_TYPE_OBJECT_STORAGE); + CapacityVO backupCapacity = new CapacityVO(null, zoneId, null, null, 180L, 200L, Capacity.CAPACITY_TYPE_BACKUP_STORAGE); + Mockito.when(storageManager.getSecondaryStorageUsedStats(null, 
zoneId)).thenReturn(secondaryStorageCapacity); + Mockito.when(storageManager.getObjectStorageUsedStats(zoneId)).thenReturn(objectStoreCapacity); + Mockito.when(backupManager.getBackupStorageUsedStats(zoneId)).thenReturn(backupCapacity); + alertManagerImplMock.checkForAlerts(); + + Mockito.verify(alertManagerImplMock).recalculateCapacity(); + + ArgumentCaptor alertCaptor = ArgumentCaptor.forClass(AlertVO.class); + verify(_alertDao).persist(alertCaptor.capture()); + AlertVO capturedAlert = alertCaptor.getValue(); + assertNotNull("Captured alert should not be null", capturedAlert); + assertEquals(Optional.of(zoneId), Optional.ofNullable(capturedAlert.getDataCenterId())); + assertEquals("System Alert: Low Available Backup Storage in availability zone zone1", capturedAlert.getSubject()); + assertEquals("Available backup storage space is low, total: 200.0 MB, used: 180.0 MB (90%)", capturedAlert.getContent()); + assertEquals(AlertManager.AlertType.ALERT_TYPE_BACKUP_STORAGE.getType(), capturedAlert.getType()); + } } diff --git a/server/src/test/java/com/cloud/api/ApiResponseHelperTest.java b/server/src/test/java/com/cloud/api/ApiResponseHelperTest.java index a5fc791de99..223b0740cf2 100644 --- a/server/src/test/java/com/cloud/api/ApiResponseHelperTest.java +++ b/server/src/test/java/com/cloud/api/ApiResponseHelperTest.java @@ -67,6 +67,7 @@ import org.springframework.test.util.ReflectionTestUtils; import com.cloud.capacity.Capacity; import com.cloud.configuration.Resource; import com.cloud.domain.DomainVO; +import com.cloud.host.HostVO; import com.cloud.network.PublicIpQuarantine; import com.cloud.network.as.AutoScaleVmGroup; import com.cloud.network.as.AutoScaleVmGroupVO; @@ -93,7 +94,11 @@ import com.cloud.user.UserDataVO; import com.cloud.user.UserVO; import com.cloud.user.dao.UserDataDao; import com.cloud.utils.net.Ip; +import com.cloud.vm.ConsoleSessionVO; import com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.VMInstanceVO; +import 
org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; @RunWith(MockitoJUnitRunner.class) public class ApiResponseHelperTest { @@ -124,6 +129,19 @@ public class ApiResponseHelperTest { @Mock ResourceIconManager resourceIconManager; + @Mock + private ConsoleSessionVO consoleSessionMock; + @Mock + private DomainVO domainVOMock; + @Mock + private UserVO userVOMock; + @Mock + private AccountVO accountVOMock; + @Mock + private HostVO hostVOMock; + @Mock + private VMInstanceVO vmInstanceVOMock; + @Spy @InjectMocks ApiResponseHelper apiResponseHelper = new ApiResponseHelper(); @@ -631,4 +649,114 @@ public class ApiResponseHelperTest { Mockito.verify(resourceIconManager, Mockito.never()).getByResourceTypeAndUuids(Mockito.any(), Mockito.anyCollection()); } + + private ConsoleSessionResponse getExpectedConsoleSessionResponseForTests(boolean fullView) { + ConsoleSessionResponse expected = new ConsoleSessionResponse(); + expected.setId("uuid"); + expected.setCreated(new Date()); + expected.setAcquired(new Date()); + expected.setRemoved(new Date()); + expected.setConsoleEndpointCreatorAddress("127.0.0.1"); + expected.setClientAddress("127.0.0.1"); + + if (fullView) { + expected.setDomain("domain"); + expected.setDomainPath("domainPath"); + expected.setDomainId("domainUuid"); + expected.setUser("user"); + expected.setUserId("userUuid"); + expected.setAccount("account"); + expected.setAccountId("accountUuid"); + expected.setHostName("host"); + expected.setHostId("hostUuid"); + expected.setVmId("vmUuid"); + expected.setVmName("vmName"); + } + + return expected; + } + + @Test + public void createConsoleSessionResponseTestShouldReturnRestrictedResponse() { + ConsoleSessionResponse expected = getExpectedConsoleSessionResponseForTests(false); + + try (MockedStatic apiDBUtilsStaticMock = Mockito.mockStatic(ApiDBUtils.class)) { + Mockito.when(consoleSessionMock.getUuid()).thenReturn(expected.getId()); + 
Mockito.when(consoleSessionMock.getDomainId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getCreated()).thenReturn(expected.getCreated()); + Mockito.when(consoleSessionMock.getAcquired()).thenReturn(expected.getAcquired()); + Mockito.when(consoleSessionMock.getRemoved()).thenReturn(expected.getRemoved()); + Mockito.when(consoleSessionMock.getConsoleEndpointCreatorAddress()).thenReturn(expected.getConsoleEndpointCreatorAddress()); + Mockito.when(consoleSessionMock.getClientAddress()).thenReturn(expected.getClientAddress()); + + ConsoleSessionResponse response = apiResponseHelper.createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Restricted); + + Assert.assertEquals(expected.getId(), response.getId()); + Assert.assertEquals(expected.getCreated(), response.getCreated()); + Assert.assertEquals(expected.getAcquired(), response.getAcquired()); + Assert.assertEquals(expected.getRemoved(), response.getRemoved()); + Assert.assertEquals(expected.getConsoleEndpointCreatorAddress(), response.getConsoleEndpointCreatorAddress()); + Assert.assertEquals(expected.getClientAddress(), response.getClientAddress()); + } + } + + @Test + public void createConsoleSessionResponseTestShouldReturnFullResponse() { + ConsoleSessionResponse expected = getExpectedConsoleSessionResponseForTests(true); + + try (MockedStatic apiDBUtilsStaticMock = Mockito.mockStatic(ApiDBUtils.class)) { + Mockito.when(consoleSessionMock.getUuid()).thenReturn(expected.getId()); + Mockito.when(consoleSessionMock.getDomainId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getAccountId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getUserId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getHostId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getInstanceId()).thenReturn(2L); + Mockito.when(consoleSessionMock.getCreated()).thenReturn(expected.getCreated()); + Mockito.when(consoleSessionMock.getAcquired()).thenReturn(expected.getAcquired()); + 
Mockito.when(consoleSessionMock.getRemoved()).thenReturn(expected.getRemoved()); + Mockito.when(consoleSessionMock.getConsoleEndpointCreatorAddress()).thenReturn(expected.getConsoleEndpointCreatorAddress()); + Mockito.when(consoleSessionMock.getClientAddress()).thenReturn(expected.getClientAddress()); + + apiDBUtilsStaticMock.when(() -> ApiDBUtils.findDomainById(2L)).thenReturn(domainVOMock); + Mockito.when(domainVOMock.getName()).thenReturn(expected.getDomain()); + Mockito.when(domainVOMock.getPath()).thenReturn(expected.getDomainPath()); + Mockito.when(domainVOMock.getUuid()).thenReturn(expected.getDomainId()); + + Mockito.when(apiResponseHelper.findUserById(2L)).thenReturn(userVOMock); + Mockito.when(userVOMock.getUsername()).thenReturn(expected.getUser()); + Mockito.when(userVOMock.getUuid()).thenReturn(expected.getUserId()); + + Mockito.when(ApiDBUtils.findAccountById(2L)).thenReturn(accountVOMock); + Mockito.when(accountVOMock.getAccountName()).thenReturn(expected.getAccount()); + Mockito.when(accountVOMock.getUuid()).thenReturn(expected.getAccountId()); + + Mockito.when(apiResponseHelper.findHostById(2L)).thenReturn(hostVOMock); + Mockito.when(hostVOMock.getUuid()).thenReturn(expected.getHostId()); + Mockito.when(hostVOMock.getName()).thenReturn(expected.getHostName()); + + apiDBUtilsStaticMock.when(() -> ApiDBUtils.findVMInstanceById(2L)).thenReturn(vmInstanceVOMock); + Mockito.when(vmInstanceVOMock.getUuid()).thenReturn(expected.getVmId()); + Mockito.when(vmInstanceVOMock.getInstanceName()).thenReturn(expected.getVmName()); + + ConsoleSessionResponse response = apiResponseHelper.createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Full); + + Assert.assertEquals(expected.getId(), response.getId()); + Assert.assertEquals(expected.getCreated(), response.getCreated()); + Assert.assertEquals(expected.getAcquired(), response.getAcquired()); + Assert.assertEquals(expected.getRemoved(), response.getRemoved()); + 
Assert.assertEquals(expected.getConsoleEndpointCreatorAddress(), response.getConsoleEndpointCreatorAddress()); + Assert.assertEquals(expected.getClientAddress(), response.getClientAddress()); + Assert.assertEquals(expected.getDomain(), response.getDomain()); + Assert.assertEquals(expected.getDomainPath(), response.getDomainPath()); + Assert.assertEquals(expected.getDomainId(), response.getDomainId()); + Assert.assertEquals(expected.getUser(), response.getUser()); + Assert.assertEquals(expected.getUserId(), response.getUserId()); + Assert.assertEquals(expected.getAccount(), response.getAccount()); + Assert.assertEquals(expected.getAccountId(), response.getAccountId()); + Assert.assertEquals(expected.getHostId(), response.getHostId()); + Assert.assertEquals(expected.getHostName(), response.getHostName()); + Assert.assertEquals(expected.getVmId(), response.getVmId()); + Assert.assertEquals(expected.getVmName(), response.getVmName()); + } + } } diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index 30350d71684..2189b451761 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -1268,4 +1268,35 @@ public class NetworkServiceImplTest { service.getNicVlanValueForExternalVm(nic); } } + + @Test + public void testGetIpAddressesFromIps() { + // Test with valid IPv4, IPv6 and MAC address + Network.IpAddresses result = service.getIpAddressesFromIps("192.168.1.1", "2001:db8::1", "00:11:22:33:44:55"); + Assert.assertEquals("192.168.1.1", result.getIp4Address()); + Assert.assertEquals("2001:db8::1", result.getIp6Address()); + Assert.assertEquals("00:11:22:33:44:55", result.getMacAddress()); + + // Test with all null values + result = service.getIpAddressesFromIps(null, null, null); + Assert.assertNull(result.getIp4Address()); + Assert.assertNull(result.getIp6Address()); + 
Assert.assertNull(result.getMacAddress()); + + // Test with invalid MAC address (non-unicast) + try { + service.getIpAddressesFromIps(null, null, "ff:ff:ff:ff:ff:ff"); + Assert.fail("Expected InvalidParameterValueException for non-unicast MAC address"); + } catch (InvalidParameterValueException e) { + Assert.assertEquals("Mac address is not unicast: ff:ff:ff:ff:ff:ff", e.getMessage()); + } + + // Test with invalid MAC address (invalid format) + try { + service.getIpAddressesFromIps(null, null, "invalid-mac"); + Assert.fail("Expected InvalidParameterValueException for invalid MAC address format"); + } catch (InvalidParameterValueException e) { + Assert.assertEquals("Mac address is not valid: invalid-mac", e.getMessage()); + } + } } diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index 510cd7ac1a6..7ddd0d61213 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -1272,7 +1272,7 @@ public class AutoScaleManagerImplTest { when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); when(userVmService.createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), + any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any())).thenReturn(userVmMock); UserVm result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); @@ -1283,7 +1283,7 @@ public class AutoScaleManagerImplTest { "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; Mockito.verify(userVmService).createBasicSecurityGroupVirtualMachine(any(), any(), any(), 
any(), any(), matches(vmHostNamePattern), matches(vmHostNamePattern), - any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), + any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); Mockito.verify(asVmGroupMock).setNextVmSeq(nextVmSeq + 1); } @@ -1319,7 +1319,7 @@ public class AutoScaleManagerImplTest { when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any(), any())).thenReturn(userVmMock); when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(true); @@ -1332,7 +1332,7 @@ public class AutoScaleManagerImplTest { "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; Mockito.verify(userVmService).createAdvancedSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), matches(vmHostNamePattern), matches(vmHostNamePattern), - any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any(), any()); Mockito.verify(asVmGroupMock).setNextVmSeq(nextVmSeq + 2); } @@ 
-1368,7 +1368,7 @@ public class AutoScaleManagerImplTest { when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); when(userVmService.createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), + any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any(), any())).thenReturn(userVmMock); when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, List.of(networkId), Collections.emptyList())).thenReturn(false); @@ -1381,7 +1381,7 @@ public class AutoScaleManagerImplTest { "-" + asVmGroupMock.getNextVmSeq() + "-[a-z]{6}"; Mockito.verify(userVmService).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), matches(vmHostNamePattern), matches(vmHostNamePattern), - any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), + any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any(), any()); Mockito.verify(asVmGroupMock).setNextVmSeq(nextVmSeq + 3); } diff --git a/server/src/test/java/com/cloud/network/lb/UpdateLoadBalancerTest.java b/server/src/test/java/com/cloud/network/lb/UpdateLoadBalancerTest.java index 729d2ea8ff5..ab25d936baa 100644 --- a/server/src/test/java/com/cloud/network/lb/UpdateLoadBalancerTest.java +++ b/server/src/test/java/com/cloud/network/lb/UpdateLoadBalancerTest.java @@ -16,6 +16,24 @@ // under the License. 
package com.cloud.network.lb; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.UUID; + +import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRuleCmd; +import org.apache.cloudstack.context.CallContext; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.InOrder; +import org.mockito.Mockito; + import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; @@ -35,27 +53,10 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.element.LoadBalancingServiceProvider; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.user.Account; +import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; -import com.cloud.user.MockAccountManagerImpl; import com.cloud.user.User; import com.cloud.user.UserVO; -import org.apache.cloudstack.api.command.user.loadbalancer.UpdateLoadBalancerRuleCmd; -import org.apache.cloudstack.context.CallContext; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.InOrder; -import org.mockito.Mockito; - -import java.util.ArrayList; -import java.util.UUID; - -import static org.mockito.ArgumentMatchers.isNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; public class UpdateLoadBalancerTest { @@ -73,7 +74,7 @@ public class UpdateLoadBalancerTest { @Before public void setUp() { - _lbMgr._accountMgr = new 
MockAccountManagerImpl(); + _lbMgr._accountMgr = Mockito.mock(AccountManager.class); _lbMgr._autoScaleVmGroupDao = Mockito.mock(AutoScaleVmGroupDao.class); _lbMgr._networkDao = netDao; _lbMgr._networkOfferingServiceDao = ntwkOffServiceMapDao; diff --git a/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java b/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java index 8d513619805..4f92c60e25a 100644 --- a/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java @@ -551,7 +551,7 @@ public class VpcManagerImplTest { Mockito.when(vpcDao.persist(any(), anyMap())).thenReturn(vpc); Mockito.when(vpc.getUuid()).thenReturn("uuid"); doReturn(true).when(routedIpv4Manager).isRoutedVpc(any()); - doReturn(true).when(routedIpv4Manager).isVpcVirtualRouterGateway(vpcOfferingVO); + doReturn(true).when(routedIpv4Manager).isValidGateway(vpcOfferingVO); doReturn(true).when(routedIpv4Manager).isDynamicRoutedVpc(vpcOfferingVO); Ipv4GuestSubnetNetworkMap ipv4GuestSubnetNetworkMap = Mockito.mock(Ipv4GuestSubnetNetworkMap.class); doReturn(ipv4GuestSubnetNetworkMap).when(routedIpv4Manager).getOrCreateIpv4SubnetForVpc(any(), anyInt()); diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java index 01ab82c913d..6eb8bd04f46 100644 --- a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -22,6 +22,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.HostPodDao; @@ -32,16 +33,24 @@ import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; import 
org.apache.cloudstack.api.command.admin.storage.ConfigureStorageAccessCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; +import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; +import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.object.ObjectStore; import org.apache.commons.collections.MapUtils; import org.junit.Assert; import org.junit.Test; @@ -57,7 +66,9 @@ import org.springframework.test.util.ReflectionTestUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Command; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.CapacityVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -150,6 +161,15 @@ public class StorageManagerImplTest { private ResourceManager resourceMgr; + @Mock + protected ObjectStoreDao 
objectStoreDao; + + @Mock + DataStoreProviderManager dataStoreProviderMgr; + + @Mock + DataStoreManager dataStoreMgr; + @Test public void createLocalStoragePoolName() { String hostMockName = "host1"; @@ -255,7 +275,7 @@ public class StorageManagerImplTest { volume.setState(Volume.State.Allocated); PrimaryDataStoreDao storagePoolDao = Mockito.mock(PrimaryDataStoreDao.class); storageManagerImpl._storagePoolDao = storagePoolDao; - assertTrue(storageManagerImpl.storagePoolCompatibleWithVolumePool(storagePool, volume)); + Assert.assertTrue(storageManagerImpl.storagePoolCompatibleWithVolumePool(storagePool, volume)); } @@ -264,8 +284,8 @@ public class StorageManagerImplTest { String sfUrl = "MVIP=1.2.3.4;SVIP=6.7.8.9;clusterAdminUsername=admin;" + "clusterAdminPassword=password;clusterDefaultMinIops=1000;" + "clusterDefaultMaxIops=2000;clusterDefaultBurstIopsPercentOfMaxIops=2"; - Map uriParams = storageManagerImpl.extractUriParamsAsMap(sfUrl); - assertTrue(MapUtils.isEmpty(uriParams)); + Map uriParams = storageManagerImpl.extractUriParamsAsMap(sfUrl); + Assert.assertTrue(MapUtils.isEmpty(uriParams)); } @Test @@ -274,8 +294,8 @@ public class StorageManagerImplTest { String host = "HOST"; String path = "/PATH"; String sfUrl = String.format("%s://%s%s", scheme, host, path); - Map uriParams = storageManagerImpl.extractUriParamsAsMap(sfUrl); - assertTrue(MapUtils.isNotEmpty(uriParams)); + Map uriParams = storageManagerImpl.extractUriParamsAsMap(sfUrl); + Assert.assertTrue(MapUtils.isNotEmpty(uriParams)); Assert.assertEquals(scheme, uriParams.get("scheme")); Assert.assertEquals(host, uriParams.get("host")); Assert.assertEquals(path, uriParams.get("hostPath")); @@ -433,7 +453,7 @@ public class StorageManagerImplTest { } try { Mockito.doReturn(new com.cloud.agent.api.Answer( - Mockito.mock(CheckDataStoreStoragePolicyComplainceCommand.class))) + Mockito.mock(CheckDataStoreStoragePolicyComplainceCommand.class))) 
.when(storageManagerImpl).getCheckDatastorePolicyComplianceAnswer("policy", pool); assertTrue(storageManagerImpl.isStoragePoolCompliantWithStoragePolicy(1L, pool)); } catch (StorageUnavailableException e) { @@ -480,8 +500,7 @@ public class StorageManagerImplTest { Mockito.when(policy.getPolicyId()).thenReturn("some"); Mockito.when(vsphereStoragePolicyDao.findById(Mockito.anyLong())) .thenReturn(policy); - Mockito.doReturn(new ArrayList<>(List.of(1L, 2L))) - .when(storageManagerImpl).getUpHostsInPool(Mockito.anyLong()); + Mockito.doReturn(new ArrayList<>(List.of(1L, 2L))).when(storageManagerImpl).getUpHostsInPool(Mockito.anyLong()); Mockito.when(hvGuruMgr.getGuruProcessedCommandTargetHost(Mockito.anyLong(), Mockito.any(CheckDataStoreStoragePolicyComplainceCommand.class))).thenReturn(1L); try { @@ -552,7 +571,7 @@ public class StorageManagerImplTest { .thenReturn(new ArrayList<>()); //new installation storageManagerImpl.enableDefaultDatastoreDownloadRedirectionForExistingInstallations(); Mockito.verify(configurationDao, Mockito.never()) - .update(StorageManager.DataStoreDownloadFollowRedirects.key(),StorageManager.DataStoreDownloadFollowRedirects.defaultValue()); + .update(StorageManager.DataStoreDownloadFollowRedirects.key(), StorageManager.DataStoreDownloadFollowRedirects.defaultValue()); } @Test @@ -796,7 +815,7 @@ public class StorageManagerImplTest { Long zoneId = 2L; Long capacityBytes = (long) (allocatedSizeWithTemplate / Double.valueOf(CapacityManager.StorageAllocatedCapacityDisableThreshold.defaultValue()) - / Double.valueOf(CapacityManager.StorageOverprovisioningFactor.defaultValue())); + / Double.valueOf(CapacityManager.StorageOverprovisioningFactor.defaultValue())); Long maxAllocatedSizeForResize = (long) (capacityBytes * Double.valueOf(CapacityManager.StorageOverprovisioningFactor.defaultValue()) * Double.valueOf(CapacityManager.StorageAllocatedCapacityDisableThresholdForVolumeSize.defaultValue())); @@ -1487,4 +1506,162 @@ public class 
StorageManagerImplTest { assertTrue(thrownException.getMessage().contains("access groups already exist on the cluster: [group4]")); } + + @Test + public void testGetObjectStorageUsedStats() { + Long zoneId = 1L; + List objectStores = new ArrayList<>(); + + ObjectStoreVO store1 = new ObjectStoreVO(); + store1.setAllocatedSize(1000L); + store1.setTotalSize(2000L); + objectStores.add(store1); + + ObjectStoreVO store2 = new ObjectStoreVO(); + store2.setAllocatedSize(2000L); + store2.setTotalSize(4000L); + objectStores.add(store2); + + ObjectStoreVO store3 = new ObjectStoreVO(); + store3.setAllocatedSize(null); + store3.setTotalSize(null); + objectStores.add(store3); + + Mockito.when(objectStoreDao.listObjectStores()).thenReturn(objectStores); + + CapacityVO result = storageManagerImpl.getObjectStorageUsedStats(zoneId); + + Assert.assertEquals(zoneId, result.getDataCenterId()); + Assert.assertEquals(Optional.of(3000L), Optional.of(result.getUsedCapacity())); // 1000 + 2000 + Assert.assertEquals(6000L, result.getTotalCapacity()); // 2000 + 4000 + Assert.assertEquals(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, result.getCapacityType()); + Assert.assertNull(result.getPodId()); + Assert.assertNull(result.getClusterId()); + } + + @Test + public void testGetObjectStorageUsedStatsWithNullSizes() { + Long zoneId = 1L; + List objectStores = new ArrayList<>(); + + ObjectStoreVO store1 = new ObjectStoreVO(); + store1.setAllocatedSize(null); + store1.setTotalSize(null); + objectStores.add(store1); + + ObjectStoreVO store2 = new ObjectStoreVO(); + store2.setAllocatedSize(null); + store2.setTotalSize(null); + objectStores.add(store2); + + Mockito.when(objectStoreDao.listObjectStores()).thenReturn(objectStores); + + CapacityVO result = storageManagerImpl.getObjectStorageUsedStats(zoneId); + + Assert.assertEquals(zoneId, result.getDataCenterId()); + Assert.assertEquals(Optional.of(0L), Optional.of(result.getUsedCapacity())); + Assert.assertEquals(0L, result.getTotalCapacity()); + 
Assert.assertEquals(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, result.getCapacityType()); + Assert.assertNull(result.getPodId()); + Assert.assertNull(result.getClusterId()); + } + + @Test + public void testDiscoverObjectStore() { + Long objectStoreId = 1L; + + String name = "test-store"; + String url = "http://10.1.1.33:80"; + Long size = 1000L; + String providerName = "test-provider"; + Map details = new HashMap<>(); + details.put("key1", "value1"); + + ObjectStoreVO objectStoreVO = new ObjectStoreVO(); + ReflectionTestUtils.setField(objectStoreVO, "id", objectStoreId); + objectStoreVO.setName(name); + objectStoreVO.setUrl(url); + objectStoreVO.setProviderName(providerName); + objectStoreVO.setTotalSize(size); + + DataStoreProvider storeProvider = Mockito.mock(DataStoreProvider.class); + DataStoreLifeCycle lifeCycle = Mockito.mock(DataStoreLifeCycle.class); + DataStore store = Mockito.mock(DataStore.class); + ObjectStore objectStore = Mockito.mock(ObjectStore.class); + + Mockito.when(dataStoreProviderMgr.getDataStoreProvider(providerName)).thenReturn(storeProvider); + Mockito.when(storeProvider.getDataStoreLifeCycle()).thenReturn(lifeCycle); + Mockito.when(lifeCycle.initialize(Mockito.any())).thenReturn(store); + Mockito.when(store.getId()).thenReturn(1L); + Mockito.when(dataStoreMgr.getDataStore(1L, DataStoreRole.Object)).thenReturn(null); + + ObjectStore result = storageManagerImpl.discoverObjectStore(name, url, size, providerName, details); + + Mockito.verify(dataStoreProviderMgr).getDataStoreProvider(providerName); + Mockito.verify(lifeCycle).initialize(Mockito.any()); + Mockito.verify(dataStoreMgr).getDataStore(1L, DataStoreRole.Object); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDiscoverObjectStoreInvalidProvider() { + // Setup + String name = "test-store"; + String url = "http://10.1.1.33:80"; + Long size = 1000L; + String providerName = "invalid-provider"; + Map details = new HashMap<>(); + + 
Mockito.when(dataStoreProviderMgr.getDataStoreProvider(providerName)).thenReturn(null); + + storageManagerImpl.discoverObjectStore(name, url, size, providerName, details); + } + + @Test(expected = IllegalArgumentException.class) + public void testDiscoverObjectStoreInvalidUrl() { + String name = "test-store"; + String url = "invalid-url"; + Long size = 1000L; + String providerName = "test-provider"; + Map details = new HashMap<>(); + + DataStoreProvider storeProvider = Mockito.mock(DataStoreProvider.class); + Mockito.when(dataStoreProviderMgr.getDataStoreProvider(providerName)).thenReturn(storeProvider); + + storageManagerImpl.discoverObjectStore(name, url, size, providerName, details); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDiscoverObjectStoreDuplicateUrl() { + String name = "test-store"; + String url = "http://10.1.1.33:80"; + Long size = 1000L; + String providerName = "test-provider"; + Map details = new HashMap<>(); + + DataStoreProvider storeProvider = Mockito.mock(DataStoreProvider.class); + ObjectStoreVO existingStore = new ObjectStoreVO(); + + Mockito.when(dataStoreProviderMgr.getDataStoreProvider(providerName)).thenReturn(storeProvider); + Mockito.when(objectStoreDao.findByUrl(url)).thenReturn(existingStore); + + storageManagerImpl.discoverObjectStore(name, url, size, providerName, details); + } + + @Test(expected = CloudRuntimeException.class) + public void testDiscoverObjectStoreInitializationFailure() { + String name = "test-store"; + String url = "http://10.1.1.33:80"; + Long size = 1000L; + String providerName = "test-provider"; + Map details = new HashMap<>(); + + DataStoreProvider storeProvider = Mockito.mock(DataStoreProvider.class); + DataStoreLifeCycle lifeCycle = Mockito.mock(DataStoreLifeCycle.class); + + Mockito.when(dataStoreProviderMgr.getDataStoreProvider(providerName)).thenReturn(storeProvider); + Mockito.when(storeProvider.getDataStoreLifeCycle()).thenReturn(lifeCycle); + 
Mockito.when(lifeCycle.initialize(Mockito.any())).thenThrow(new RuntimeException("Initialization failed")); + + storageManagerImpl.discoverObjectStore(name, url, size, providerName, details); + } } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 7f1030992f9..79be3695fbd 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -579,7 +579,7 @@ public class VolumeApiServiceImplTest { when(volumeDataFactoryMock.getVolume(anyLong())).thenReturn(volumeInfoMock); when(volumeInfoMock.getState()).thenReturn(Volume.State.Allocated); lenient().when(volumeInfoMock.getPoolId()).thenReturn(1L); - volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false, null, null); + volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false, null, null, null, false); } @Test @@ -592,7 +592,7 @@ public class VolumeApiServiceImplTest { final TaggedResourceService taggedResourceService = Mockito.mock(TaggedResourceService.class); Mockito.lenient().when(taggedResourceService.createTags(any(), any(), any(), any())).thenReturn(null); ReflectionTestUtils.setField(volumeApiServiceImpl, "taggedResourceService", taggedResourceService); - volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false, null, null); + volumeApiServiceImpl.takeSnapshot(5L, Snapshot.MANUAL_POLICY_ID, 3L, null, false, null, false, null, null, null, false); } @Test @@ -640,7 +640,7 @@ public class VolumeApiServiceImplTest { @Test public void testAllocSnapshotNonManagedStorageArchive() { try { - volumeApiServiceImpl.allocSnapshot(6L, 1L, "test", Snapshot.LocationType.SECONDARY, null); + volumeApiServiceImpl.allocSnapshot(6L, 1L, "test", Snapshot.LocationType.SECONDARY, null, null, null); } catch 
(InvalidParameterValueException e) { Assert.assertEquals(e.getMessage(), "VolumeId: 6 LocationType is supported only for managed storage"); return; diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java index e6c2a0d0f3c..f178c6b8912 100644 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java @@ -16,30 +16,6 @@ // under the License. package com.cloud.storage.snapshot; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; - -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; @@ -63,6 +39,32 @@ import com.cloud.user.ResourceLimitService; import com.cloud.user.dao.AccountDao; import com.cloud.utils.Pair; +import 
org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; + +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + @RunWith(MockitoJUnitRunner.class) public class SnapshotManagerImplTest { @Mock @@ -176,7 +178,7 @@ public class SnapshotManagerImplTest { } @Test public void testValidatePolicyZonesNoZones() { - snapshotManager.validatePolicyZones(null, Mockito.mock(VolumeVO.class), Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(null, null, Mockito.mock(VolumeVO.class), Mockito.mock(Account.class)); } @Test(expected = InvalidParameterValueException.class) @@ -186,7 +188,7 @@ public class SnapshotManagerImplTest { DataCenterVO zone = Mockito.mock(DataCenterVO.class); Mockito.when(zone.getType()).thenReturn(DataCenter.Type.Edge); Mockito.when(dataCenterDao.findById(1L)).thenReturn(zone); - snapshotManager.validatePolicyZones(List.of(1L), volumeVO, Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(List.of(1L), null, volumeVO, 
Mockito.mock(Account.class)); } @Test(expected = InvalidParameterValueException.class) @@ -197,7 +199,7 @@ public class SnapshotManagerImplTest { Mockito.when(zone.getType()).thenReturn(DataCenter.Type.Core); Mockito.when(dataCenterDao.findById(1L)).thenReturn(zone); Mockito.when(dataCenterDao.findById(2L)).thenReturn(null); - snapshotManager.validatePolicyZones(List.of(2L), volumeVO, Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(List.of(2L), null, volumeVO, Mockito.mock(Account.class)); } @Test(expected = PermissionDeniedException.class) @@ -211,7 +213,7 @@ public class SnapshotManagerImplTest { Mockito.when(zone1.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); Mockito.when(dataCenterDao.findById(2L)).thenReturn(zone1); Mockito.when(accountManager.isRootAdmin(Mockito.any())).thenReturn(false); - snapshotManager.validatePolicyZones(List.of(2L), volumeVO, Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(List.of(2L), null, volumeVO, Mockito.mock(Account.class)); } @Test(expected = InvalidParameterValueException.class) @@ -225,7 +227,7 @@ public class SnapshotManagerImplTest { Mockito.when(zone1.getType()).thenReturn(DataCenter.Type.Edge); Mockito.when(zone1.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); Mockito.when(dataCenterDao.findById(2L)).thenReturn(zone1); - snapshotManager.validatePolicyZones(List.of(2L), volumeVO, Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(List.of(2L), null, volumeVO, Mockito.mock(Account.class)); } @Test @@ -239,7 +241,7 @@ public class SnapshotManagerImplTest { Mockito.when(zone1.getType()).thenReturn(DataCenter.Type.Core); Mockito.when(zone1.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); Mockito.when(dataCenterDao.findById(2L)).thenReturn(zone1); - snapshotManager.validatePolicyZones(List.of(2L), volumeVO, Mockito.mock(Account.class)); + snapshotManager.validatePolicyZones(List.of(2L), null, volumeVO, 
Mockito.mock(Account.class)); } @Test @@ -308,15 +310,14 @@ public class SnapshotManagerImplTest { @Test(expected = InvalidParameterValueException.class) public void testGetCheckedSnapshotForCopyNoSnapshot() { - snapshotManager.getCheckedSnapshotForCopy(1L, List.of(100L), null); + SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L), null, false); } @Test(expected = InvalidParameterValueException.class) public void testGetCheckedSnapshotForCopyNoSnapshotBackup() { - final long snapshotId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); - snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L), null); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L), null, false); } @Test(expected = InvalidParameterValueException.class) @@ -325,73 +326,62 @@ public class SnapshotManagerImplTest { SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); Mockito.when(snapshotVO.getLocationType()).thenReturn(Snapshot.LocationType.PRIMARY); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); - snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L), null); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L), null, false); } @Test(expected = InvalidParameterValueException.class) public void testGetCheckedSnapshotForCopyDestNotSpecified() { - final long snapshotId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); - snapshotManager.getCheckedSnapshotForCopy(snapshotId, new ArrayList<>(), null); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, new ArrayList<>(), 1L, false); } @Test(expected = 
InvalidParameterValueException.class) public void testGetCheckedSnapshotForCopyDestContainsSource() { - final long snapshotId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); Mockito.when(snapshotVO.getVolumeId()).thenReturn(1L); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); Mockito.when(volumeDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VolumeVO.class)); - snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L, 1L), 1L); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L, 1L), 1L, false); } @Test(expected = InvalidParameterValueException.class) public void testGetCheckedSnapshotForCopyNullSourceZone() { - final long snapshotId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); Mockito.when(snapshotVO.getVolumeId()).thenReturn(1L); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); VolumeVO volumeVO = Mockito.mock(VolumeVO.class); Mockito.when(volumeVO.getDataCenterId()).thenReturn(1L); Mockito.when(volumeDao.findById(Mockito.anyLong())).thenReturn(volumeVO); - snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L, 101L), null); + snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L, 101L), null, false); } @Test public void testGetCheckedSnapshotForCopyValid() { - final long snapshotId = 1L; final Long zoneId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); Mockito.when(snapshotVO.getVolumeId()).thenReturn(1L); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); VolumeVO volumeVO = Mockito.mock(VolumeVO.class); Mockito.when(volumeVO.getDataCenterId()).thenReturn(zoneId); Mockito.when(volumeDao.findById(Mockito.anyLong())).thenReturn(volumeVO); 
Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(Mockito.mock(DataCenterVO.class)); - Pair result = snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L, 101L), null); + Pair result = snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L, 101L), null, false); Assert.assertNotNull(result.first()); Assert.assertEquals(zoneId, result.second()); } @Test public void testGetCheckedSnapshotForCopyNullDest() { - final long snapshotId = 1L; final Long zoneId = 1L; SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); Mockito.when(snapshotVO.getState()).thenReturn(Snapshot.State.BackedUp); Mockito.when(snapshotVO.getVolumeId()).thenReturn(1L); - Mockito.when(snapshotDao.findById(snapshotId)).thenReturn(snapshotVO); VolumeVO volumeVO = Mockito.mock(VolumeVO.class); Mockito.when(volumeVO.getDataCenterId()).thenReturn(zoneId); Mockito.when(volumeDao.findById(Mockito.anyLong())).thenReturn(volumeVO); Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(Mockito.mock(DataCenterVO.class)); - Pair result = snapshotManager.getCheckedSnapshotForCopy(snapshotId, List.of(100L, 101L), null); + Pair result = snapshotManager.getCheckedSnapshotForCopy(snapshotVO, List.of(100L, 101L), null, false); Assert.assertNotNull(result.first()); Assert.assertEquals(zoneId, result.second()); } diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java index 4ccc6e99961..4d802319935 100755 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java @@ -16,65 +16,13 @@ // under the License. 
package com.cloud.storage.snapshot; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - import com.cloud.api.ApiDBUtils; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.storage.Storage; -import org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; -import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.snapshot.SnapshotHelper; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.BDDMockito; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -86,6 +34,7 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; @@ -108,6 +57,59 @@ import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.snapshot.SnapshotHelper; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +import org.mockito.BDDMockito; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class SnapshotManagerTest { @@ -428,7 +430,7 @@ public class 
SnapshotManagerTest { Mockito.doReturn(null).when(snapshotSchedulerMock).scheduleNextSnapshotJob(any()); SnapshotPolicyVO result = _snapshotMgr.createSnapshotPolicy(TEST_VOLUME_ID, TEST_SNAPSHOT_POLICY_SCHEDULE, TEST_SNAPSHOT_POLICY_TIMEZONE, TEST_SNAPSHOT_POLICY_INTERVAL, - TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, null); + TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, null, null); assertSnapshotPolicyResultAgainstPreBuiltInstance(result, null); } @@ -443,7 +445,7 @@ public class SnapshotManagerTest { TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY); _snapshotMgr.updateSnapshotPolicy(snapshotPolicyVo, TEST_SNAPSHOT_POLICY_SCHEDULE, TEST_SNAPSHOT_POLICY_TIMEZONE, - TEST_SNAPSHOT_POLICY_INTERVAL, TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, null); + TEST_SNAPSHOT_POLICY_INTERVAL, TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, null, null); assertSnapshotPolicyResultAgainstPreBuiltInstance(snapshotPolicyVo, null); } @@ -478,7 +480,7 @@ public class SnapshotManagerTest { Mockito.doReturn(false).when(globalLockMock).lock(Mockito.anyInt()); _snapshotMgr.persistSnapshotPolicy(volumeMock, TEST_SNAPSHOT_POLICY_SCHEDULE, TEST_SNAPSHOT_POLICY_TIMEZONE, TEST_SNAPSHOT_POLICY_INTERVAL, TEST_SNAPSHOT_POLICY_MAX_SNAPS, - TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, mapStringStringMock, null); + TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, mapStringStringMock, null, null); } } @@ -503,7 +505,7 @@ public class SnapshotManagerTest { for (IntervalType intervalType : listIntervalTypes) { Mockito.doReturn(forUpdate ? 
snapshotPolicyVoInstance : null).when(snapshotPolicyDaoMock).findOneByVolumeInterval(Mockito.anyLong(), Mockito.eq(intervalType)); SnapshotPolicyVO result = _snapshotMgr.persistSnapshotPolicy(volumeMock, TEST_SNAPSHOT_POLICY_SCHEDULE, TEST_SNAPSHOT_POLICY_TIMEZONE, intervalType, - TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, null, null); + TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY, TEST_SNAPSHOT_POLICY_ACTIVE, null, null, null); assertSnapshotPolicyResultAgainstPreBuiltInstance(result, (short)intervalType.ordinal()); } diff --git a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java index bb510e2aaa1..819694a226b 100755 --- a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java +++ b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java @@ -20,6 +20,7 @@ package com.cloud.template; import com.cloud.agent.AgentManager; +import com.cloud.api.query.dao.SnapshotJoinDao; import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.configuration.Resource; import com.cloud.dc.dao.DataCenterDao; @@ -204,6 +205,8 @@ public class TemplateManagerImplTest { AccountManager _accountMgr; @Inject VnfTemplateManager vnfTemplateManager; + @Inject + SnapshotJoinDao snapshotJoinDao; @Inject HeuristicRuleHelper heuristicRuleHelperMock; @@ -975,6 +978,11 @@ public class TemplateManagerImplTest { public HeuristicRuleHelper heuristicRuleHelper() { return Mockito.mock(HeuristicRuleHelper.class); } + @Bean + public SnapshotJoinDao snapshotJoinDao() { + return Mockito.mock(SnapshotJoinDao.class); + } + public static class Library implements TypeFilter { @Override diff --git a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java deleted file mode 100644 index a84f02755c7..00000000000 --- 
a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java +++ /dev/null @@ -1,515 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.user; - -import java.net.InetAddress; -import java.util.List; -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd; -import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; -import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; -import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; -import org.apache.cloudstack.api.command.admin.user.MoveUserCmd; -import org.apache.cloudstack.api.command.admin.user.RegisterCmd; -import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; -import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; -import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.springframework.stereotype.Component; - -import 
com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; -import com.cloud.api.query.vo.ControlledViewEntity; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.vpc.VpcOffering; -import com.cloud.offering.DiskOffering; -import com.cloud.offering.NetworkOffering; -import com.cloud.offering.ServiceOffering; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.utils.Pair; -import com.cloud.utils.Ternary; -import com.cloud.utils.component.Manager; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; - -@Component -public class MockAccountManagerImpl extends ManagerBase implements Manager, AccountManager { - - @Override - public boolean deleteUserAccount(long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public UserAccount disableUser(long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserAccount enableUser(long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserAccount lockUser(long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserAccount updateUser(UpdateUserCmd cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account disableAccount(String accountName, Long domainId, Long accountId) throws ConcurrentOperationException, ResourceUnavailableException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account enableAccount(String accountName, Long domainId, Long accountId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account lockAccount(String accountName, Long domainId, Long accountId) { - 
// TODO Auto-generated method stub - return null; - } - - @Override - public Account updateAccount(UpdateAccountCmd cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account getSystemAccount() { - return new AccountVO(); - } - - @Override - public User getSystemUser() { - return new UserVO(); - } - - @Override - public boolean deleteUser(DeleteUserCmd deleteUserCmd) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean moveUser(MoveUserCmd moveUserCmd) { - return false; - } - - @Override - public boolean moveUser(long id, Long domainId, Account account) { - return false; - } - - @Override - public UserTwoFactorAuthenticator getUserTwoFactorAuthenticator(Long domainId, Long userAccountId) { - return null; - } - - @Override - public void verifyUsingTwoFactorAuthenticationCode(String code, Long domainId, Long userAccountId) { - - } - - @Override - public UserTwoFactorAuthenticationSetupResponse setupUserTwoFactorAuthentication(SetupUserTwoFactorAuthenticationCmd cmd) { - return null; - } - - @Override - public boolean isAdmin(Long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public Account finalizeOwner(Account caller, String accountName, Long domainId, Long projectId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account getActiveAccountByName(String accountName, Long domainId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserAccount getActiveUserAccount(String username, Long domainId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public List getActiveUserAccountByEmail(String email, Long domainId) { - return null; - } - - @Override - public Account getActiveAccountById(long accountId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Account getAccount(long accountId) { - // TODO Auto-generated method stub - return null; - } - 
- @Override - public User getActiveUser(long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public User getUserIncludingRemoved(long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isRootAdmin(Long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public User getActiveUserByRegistrationToken(String registrationToken) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void markUserRegistered(long userId) { - // TODO Auto-generated method stub - - } - - @Override - public boolean disableAccount(long accountId) throws ConcurrentOperationException, ResourceUnavailableException { - // TODO Auto-generated method stub - return false; - } - - @Override - public void checkAccess(Account account, Domain domain) throws PermissionDeniedException { - // TODO Auto-generated method stub - - } - - @Override - public void checkAccess(Account account, ServiceOffering so, DataCenter zone) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - @Override - public void checkAccess(Account account, DiskOffering dof, DataCenter zone) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - @Override - public void checkAccess(Account account, NetworkOffering nof, DataCenter zone) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - @Override - public void checkAccess(Account account, VpcOffering vof, DataCenter zone) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - @Override - public Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - // TODO 
Auto-generated method stub - return false; - } - - @Override - public String getName() { - // TODO Auto-generated method stub - return null; - } - - @Override - public void checkAccess(Account account, AccessType accessType, boolean sameOwner, ControlledEntity... entities) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - - @Override - public UserAccount getUserAccountById(Long userId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void logoutUser(long userId) { - // TODO Auto-generated method stub - } - - @Override - public UserAccount authenticateUser(String username, String password, Long domainId, InetAddress loginIpAddress, Map requestParameters) { - return null; - } - - @Override - public Pair findUserByApiKey(String apiKey) { - return null; - } - - @Override - public String[] createApiKeyAndSecretKey(RegisterCmd cmd) { - return null; - } - - @Override - public String[] createApiKeyAndSecretKey(final long userId) { - return null; - } - - @Override - public boolean enableAccount(long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public void buildACLSearchBuilder(SearchBuilder sb, Long domainId, boolean isRecursive, List permittedAccounts, - ListProjectResourcesCriteria listProjectResourcesCriteria) { - // TODO Auto-generated method stub - - } - - @Override - public void buildACLSearchCriteria(SearchCriteria sc, Long domainId, boolean isRecursive, List permittedAccounts, - ListProjectResourcesCriteria listProjectResourcesCriteria) { - // TODO Auto-generated method stub - - } - - @Override - public void buildACLSearchParameters(Account caller, Long id, String accountName, Long projectId, List permittedAccounts, Ternary domainIdRecursiveListProject, boolean listAll, boolean forProjectInvitation) { - // TODO Auto-generated method stub - } - - @Override - public void buildACLViewSearchBuilder(SearchBuilder sb, Long domainId, - boolean isRecursive, List 
permittedAccounts, ListProjectResourcesCriteria listProjectResourcesCriteria) { - // TODO Auto-generated method stub - } - - @Override - public void buildACLViewSearchCriteria(SearchCriteria sc, Long domainId, - boolean isRecursive, List permittedAccounts, ListProjectResourcesCriteria listProjectResourcesCriteria) { - // TODO Auto-generated method stub - } - - /* (non-Javadoc) - * @see com.cloud.user.AccountService#getUserByApiKey(java.lang.String) - */ - @Override - public UserAccount getUserByApiKey(String apiKey) { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserAccount createUserAccount(CreateAccountCmd cmd) { - return createUserAccount(cmd.getUsername(), cmd.getPassword(), cmd.getFirstName(), - cmd.getLastName(), cmd.getEmail(), cmd.getTimeZone(), cmd.getAccountName(), - cmd.getAccountType(), cmd.getRoleId(), cmd.getDomainId(), - cmd.getNetworkDomain(), cmd.getDetails(), cmd.getAccountUUID(), - cmd.getUserUUID(), User.Source.UNKNOWN); - } - - @Override - public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, - Account.Type accountType, Long roleId, Long domainId, String networkDomain, Map details, String accountUUID, - String userUUID, User.Source source) { - // TODO Auto-generated method stub - return null; - } - - @Override - public User createUser(String userName, String password, String firstName, - String lastName, String email, String timeZone, String accountName, - Long domainId, String userUUID) { - // TODO Auto-generated method stub - return null; - } - - @Override public User createUser(String userName, String password, String firstName, String lastName, String email, String timeZone, String accountName, Long domainId, - String userUUID, User.Source source) { - // TODO Auto-generated method stub - return null; - } - - @Override - public RoleType getRoleType(Account account) { - return null; - } - - @Override - 
public boolean deleteAccount(AccountVO account, long callerUserId, Account caller) { - // TODO Auto-generated method stub - return false; - } - - @Override - public Account createAccount(String accountName, Account.Type accountType, Long roleId, Long domainId, String networkDomain, Map details, String uuid) { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean isDomainAdmin(Long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean isResourceDomainAdmin(Long accountId) { - return false; - } - - @Override - public boolean isNormalUser(long accountId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public List listAclGroupsByAccount(Long accountId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void checkAccess(Account account, AccessType accessType, boolean sameOwner, String apiName, - ControlledEntity... entities) throws PermissionDeniedException { - // TODO Auto-generated method stub - } - - @Override - public void validateAccountHasAccessToResource(Account account, AccessType accessType, Object resource) { - // TODO Auto-generated method stub - } - - @Override - public Long finalyzeAccountId(String accountName, Long domainId, Long projectId, boolean enabledOnly) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Pair> getKeys(GetUserKeysCmd cmd) { - return null; - } - - @Override - public Pair> getKeys(Long userId) { - return null; - } - - @Override - public List listUserTwoFactorAuthenticationProviders() { - return null; - } - - @Override - public UserTwoFactorAuthenticator getUserTwoFactorAuthenticationProvider(Long domainId) { - return null; - } - - @Override - public void checkApiAccess(Account account, String command) throws PermissionDeniedException { - - } - @Override - public void checkAccess(User user, ControlledEntity entity) - throws PermissionDeniedException { - - } - @Override - 
public String getConfigComponentName() { - return null; - } - - @Override - public ConfigKey[] getConfigKeys() { - return null; - } - - @Override - public List getApiNameList() { - return null; - } - - @Override - public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean skipCurrentPassValidation) { - } - - @Override - public UserAccount clearUserTwoFactorAuthenticationInSetupStateOnLogin(UserAccount user) { - return null; - } - - @Override - public void verifyCallerPrivilegeForUserOrAccountOperations(Account userAccount) { - } -} diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index b25da259b6b..e1efd87dcd8 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -29,6 +29,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -43,6 +44,7 @@ import java.text.SimpleDateFormat; import java.time.LocalDateTime; import java.time.ZoneOffset; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -56,14 +58,24 @@ import java.util.UUID; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import 
org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; +import org.apache.cloudstack.api.command.user.vm.CreateVMFromBackupCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVnfApplianceCmd; +import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; +import org.apache.cloudstack.api.command.user.vm.ResetVMSSHKeyCmd; import org.apache.cloudstack.api.command.user.vm.ResetVMUserDataCmd; import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd; import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; @@ -115,6 +127,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.network.Network; import com.cloud.network.NetworkModel; +import com.cloud.network.as.AutoScaleManager; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; @@ -122,6 +135,7 @@ import com.cloud.network.dao.LoadBalancerVMMapDao; import com.cloud.network.dao.LoadBalancerVMMapVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.element.UserDataServiceProvider; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.guru.NetworkGuru; @@ -159,15 +173,18 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountService; import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; +import com.cloud.user.SSHKeyPairVO; import 
com.cloud.user.UserData; import com.cloud.user.UserDataVO; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; import com.cloud.user.dao.UserDataDao; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.UUIDManager; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionProxyObject; import com.cloud.vm.dao.NicDao; @@ -264,6 +281,12 @@ public class UserVmManagerImplTest { @Mock PrimaryDataStoreDao primaryDataStoreDao; + @Mock + BackupDao backupDao; + + @Mock + BackupManager backupManager; + @Mock VirtualMachineManager virtualMachineManager; @@ -378,6 +401,9 @@ public class UserVmManagerImplTest { @Mock ServiceOfferingJoinDao serviceOfferingJoinDao; + @Mock + SSHKeyPairDao sshKeyPairDao; + @Mock private VMInstanceVO vmInstanceMock; @@ -399,6 +425,12 @@ public class UserVmManagerImplTest { @Mock private Scope scopeMock; + @Mock + private AutoScaleManager autoScaleManager; + + @Mock + private UUIDManager uuidMgr; + private static final long vmId = 1l; private static final long zoneId = 2L; private static final long accountId = 3L; @@ -634,13 +666,13 @@ public class UserVmManagerImplTest { Mockito.lenient().doReturn(Mockito.mock(UserVm.class)).when(userVmManagerImpl).updateVirtualMachine(Mockito.anyLong(), Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyBoolean(), Mockito.any(HTTPMethod.class), Mockito.anyString(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyList(), Mockito.any()); + Mockito.anyString(), anyList(), Mockito.any()); Mockito.doNothing().when(userVmManagerImpl).validateIfVmSupportsMigration(Mockito.any(), Mockito.anyLong()); 
Mockito.doNothing().when(userVmManagerImpl).validateOldAndNewAccounts(Mockito.nullable(Account.class), Mockito.nullable(Account.class), Mockito.anyLong(), Mockito.nullable(String.class), Mockito.nullable(Long.class)); Mockito.doNothing().when(userVmManagerImpl).validateIfVmHasNoRules(Mockito.any(), Mockito.anyLong()); Mockito.doNothing().when(userVmManagerImpl).removeInstanceFromInstanceGroup(Mockito.anyLong()); - Mockito.doNothing().when(userVmManagerImpl).verifyResourceLimitsForAccountAndStorage(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anyList(), Mockito.any()); + Mockito.doNothing().when(userVmManagerImpl).verifyResourceLimitsForAccountAndStorage(Mockito.any(), Mockito.any(), Mockito.any(), anyList(), Mockito.any()); Mockito.doNothing().when(userVmManagerImpl).validateIfNewOwnerHasAccessToTemplate(Mockito.any(), Mockito.any(), Mockito.any()); Mockito.doNothing().when(userVmManagerImpl).updateVmOwner(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); @@ -1118,14 +1150,14 @@ public class UserVmManagerImplTest { when(_dcMock.isLocalStorageEnabled()).thenReturn(true); when(_dcMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); UserVm result = userVmManagerImpl.createVirtualMachine(deployVMCmd); assertEquals(userVmVoMock, result); Mockito.verify(vnfTemplateManager).validateVnfApplianceNics(templateMock, null); Mockito.verify(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), any(), 
any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); } @@ -1378,7 +1410,7 @@ public class UserVmManagerImplTest { cre.addProxyObject(vmId, "vmId"); Mockito.doThrow(cre).when(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); CloudRuntimeException creThrown = assertThrows(CloudRuntimeException.class, () -> userVmManagerImpl.createVirtualMachine(deployVMCmd)); @@ -2718,7 +2750,7 @@ public class UserVmManagerImplTest { securityGroupIdList); Mockito.verify(userVmManagerImpl).cleanupOfOldOwnerNicsForNetwork(virtualMachineProfileMock); - Mockito.verify(userVmManagerImpl).addDefaultNetworkToNetworkList(Mockito.anyList(), Mockito.any()); + Mockito.verify(userVmManagerImpl).addDefaultNetworkToNetworkList(anyList(), Mockito.any()); Mockito.verify(userVmManagerImpl).allocateNetworksForVm(Mockito.any(), Mockito.any()); Mockito.verify(userVmManagerImpl).addSecurityGroupsToVm(accountMock, userVmVoMock,virtualMachineTemplateMock, securityGroupIdList, networkMock); } @@ -2736,7 +2768,7 @@ public class UserVmManagerImplTest { securityGroupIdList); Mockito.verify(userVmManagerImpl).cleanupOfOldOwnerNicsForNetwork(virtualMachineProfileMock); - Mockito.verify(userVmManagerImpl).addDefaultNetworkToNetworkList(Mockito.anyList(), Mockito.any()); + Mockito.verify(userVmManagerImpl).addDefaultNetworkToNetworkList(anyList(), Mockito.any()); Mockito.verify(userVmManagerImpl).allocateNetworksForVm(Mockito.any(), 
Mockito.any()); Mockito.verify(userVmManagerImpl).addSecurityGroupsToVm(accountMock, userVmVoMock,virtualMachineTemplateMock, securityGroupIdList, networkMock); } @@ -3243,6 +3275,392 @@ public class UserVmManagerImplTest { Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, srcHost.getId()); Mockito.verify(storageManager, times(1)).getStorageAccessGroups(null, null, null, destHost.getId()); } + @Test + public void testAllocateVMFromBackupUsingCmdValues() throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException { + Long backupId = 4L; + + CreateVMFromBackupCmd cmd = new CreateVMFromBackupCmd(); + cmd._accountService = accountService; + cmd._entityMgr = entityManager; + when(accountService.finalyzeAccountId(nullable(String.class), nullable(Long.class), nullable(Long.class), eq(true))).thenReturn(accountId); + when(accountService.getActiveAccountById(accountId)).thenReturn(account); + + ReflectionTestUtils.setField(cmd, "serviceOfferingId", serviceOfferingId); + ReflectionTestUtils.setField(cmd, "templateId", templateId); + ReflectionTestUtils.setField(cmd, "backupId", backupId); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + + ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class); + when(_serviceOfferingDao.findById(serviceOfferingId)).thenReturn(serviceOffering); + + Map diskDetails = new HashMap<>(); + diskDetails.put(ApiConstants.DISK_OFFERING_ID, "disk-offering-uuid"); + diskDetails.put(ApiConstants.DEVICE_ID, "1"); + diskDetails.put(ApiConstants.SIZE, "5"); + diskDetails.put(ApiConstants.MIN_IOPS, "1000"); + diskDetails.put(ApiConstants.MAX_IOPS, "5000"); + Map> disksDetails = new HashMap<>(); + disksDetails.put(0, diskDetails); + ReflectionTestUtils.setField(cmd, "dataDisksDetails", disksDetails); + DiskOffering diskOffering = mock(DiskOffering.class); + when(diskOffering.isCustomized()).thenReturn(true); + when(diskOffering.isCustomizedIops()).thenReturn(true); + 
when(entityManager.findByUuid(DiskOffering.class, "disk-offering-uuid")).thenReturn(diskOffering); + + BackupVO backup = mock(BackupVO.class); + when(backup.getZoneId()).thenReturn(zoneId); + when(backup.getVmId()).thenReturn(vmId); + when(backupDao.findById(backupId)).thenReturn(backup); + + UserVmVO userVmVO = new UserVmVO(); + userVmVO.setTemplateId(templateId); + when(userVmDao.findByIdIncludingRemoved(vmId)).thenReturn(userVmVO); + VMTemplateVO template = mock(VMTemplateVO.class); + when(template.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); + when(templateDao.findById(templateId)).thenReturn(template); + VmDiskInfo rootVmDiskInfo = new VmDiskInfo(diskOffering, 10L, 1000L, 2000L); + when(backupManager.getRootDiskInfoFromBackup(backup)).thenReturn(rootVmDiskInfo); + Mockito.when(backupManager.canCreateInstanceFromBackup(backupId)).thenReturn(true); + + Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), eq(true), any(), any(), any(), any()); + + UserVm result = userVmManagerImpl.allocateVMFromBackup(cmd); + + assertNotNull(result); + Mockito.verify(backupDao).findById(backupId); + Mockito.verify(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), eq(true), any(), any(), any(), any()); + } + + @Test + public void testAllocateVMFromBackupUsingBackupValues() throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException { + Long backupId = 5L; + + CreateVMFromBackupCmd cmd = mock(CreateVMFromBackupCmd.class); + when(cmd.getZoneId()).thenReturn(zoneId); + 
when(cmd.getBackupId()).thenReturn(backupId); + when(cmd.getEntityOwnerId()).thenReturn(accountId); + when(cmd.getServiceOfferingId()).thenReturn(null); + when(cmd.getHostId()).thenReturn(null); + when(cmd.getDiskOfferingId()).thenReturn(null); + when(cmd.getTemplateId()).thenReturn(null); + when(cmd.getNetworkIds()).thenReturn(null); + when(cmd.getIpToNetworkMap()).thenReturn(null); + when(cmd.getDataDiskInfoList()).thenReturn(null); + when(cmd.getOverrideDiskOfferingId()).thenReturn(null); + + Account owner = mock(Account.class); + when(accountService.getActiveAccountById(accountId)).thenReturn(owner); + + DataCenterVO zone = mock(DataCenterVO.class); + when(_dcDao.findById(zoneId)).thenReturn(zone); + + BackupVO backup = mock(BackupVO.class); + when(backup.getZoneId()).thenReturn(zoneId); + when(backup.getVmId()).thenReturn(vmId); + when(backupDao.findById(backupId)).thenReturn(backup); + + UserVmVO userVmVO = new UserVmVO(); + when(userVmDao.findByIdIncludingRemoved(vmId)).thenReturn(userVmVO); + VMTemplateVO template = mock(VMTemplateVO.class); + when(template.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); + when(backup.getDetail(ApiConstants.TEMPLATE_ID)).thenReturn("template-uuid"); + when(templateDao.findByUuid("template-uuid")).thenReturn(template); + + DiskOfferingVO diskOffering = mock(DiskOfferingVO.class); + when(backup.getDetail(ApiConstants.SERVICE_OFFERING_ID)).thenReturn("service-offering-uuid"); + when(_serviceOfferingDao.findByUuid("service-offering-uuid")).thenReturn(serviceOffering); + VmDiskInfo rootVmDiskInfo = new VmDiskInfo(diskOffering, 10L, 1000L, 2000L); + when(backupManager.getRootDiskInfoFromBackup(backup)).thenReturn(rootVmDiskInfo); + + NetworkVO network1 = mock(NetworkVO.class); + NetworkVO network2 = mock(NetworkVO.class); + when(backupManager.getDataDiskInfoListFromBackup(backup)).thenReturn(List.of(new VmDiskInfo(diskOffering, 10L, 1000L, 2000L))); + 
Mockito.when(backupManager.canCreateInstanceFromBackup(backupId)).thenReturn(true); + + Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(false), any(), any(), any(), + any(), any(), any(), any(), eq(false), any(), any(), any(), any()); + + UserVm result = userVmManagerImpl.allocateVMFromBackup(cmd); + + assertNotNull(result); + Mockito.verify(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(false), any(), any(), any(), + any(), any(), any(), any(), eq(false), any(), any(), any(), any()); + } + + @Test + public void testResetVMSSHKey() throws ResourceUnavailableException, InsufficientCapacityException { + Long domainId = 4L; + Long projectId = 5L; + Long networkId = 6L; + + List names = List.of("keypair1", "keypair2"); + ResetVMSSHKeyCmd cmd = mock(ResetVMSSHKeyCmd.class); + when(cmd.getId()).thenReturn(vmId); + when(cmd.getAccountName()).thenReturn("testAccount"); + when(cmd.getDomainId()).thenReturn(domainId); + when(cmd.getProjectId()).thenReturn(projectId); + when(cmd.getNames()).thenReturn(names); + + Account owner = mock(Account.class); + when(owner.getAccountId()).thenReturn(accountId); + when(owner.getDomainId()).thenReturn(domainId); + when(accountManager.finalizeOwner(callerAccount, "testAccount", domainId, projectId)).thenReturn(owner); + + UserVmVO userVm = new UserVmVO(vmId, null, null, templateId, Hypervisor.HypervisorType.KVM, 0, + true, false, domainId, accountId, 0L, 0L, null, null, null, null); + ReflectionTestUtils.setField(userVm, "state", VirtualMachine.State.Stopped); + userVm.setUserVmType("User"); + when(userVmDao.findById(vmId)).thenReturn(userVm); + VMTemplateVO template = mock(VMTemplateVO.class); + 
when(templateDao.findByIdIncludingRemoved(templateId)).thenReturn(template); + + Nic nic = mock(Nic.class); + when(nic.getNetworkId()).thenReturn(networkId); + when(networkModel.getDefaultNic(vmId)).thenReturn(nic); + NetworkVO network = mock(NetworkVO.class); + when(_networkDao.findById(networkId)).thenReturn(network); + UserDataServiceProvider element = mock(UserDataServiceProvider.class); + when(element.saveSSHKey(any(), any(), any(), any())).thenReturn(true); + when(_networkMgr.getSSHKeyResetProvider(network)).thenReturn(element); + + SSHKeyPairVO keyPair1 = mock(SSHKeyPairVO.class); + SSHKeyPairVO keyPair2 = mock(SSHKeyPairVO.class); + when(keyPair1.getPublicKey()).thenReturn("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr..."); + when(keyPair2.getPublicKey()).thenReturn("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr..."); + when(sshKeyPairDao.findByNames(accountId, domainId, names)).thenReturn(Arrays.asList(keyPair1, keyPair2)); + + UserVm result = userVmManagerImpl.resetVMSSHKey(cmd); + + assertNotNull(result); + Map details = result.getDetails(); + Assert.assertEquals(details.get(VmDetailConstants.SSH_PUBLIC_KEY), "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr...\n" + + "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr..."); + Assert.assertEquals(details.get(VmDetailConstants.SSH_KEY_PAIR_NAMES), "keypair1,keypair2"); + } + + @Test + public void testAllocateVMFromBackupUsingCmdValuesWithISO() throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException { + Long backupId = 4L; + Long rootDiskOfferingId = 5L; + Long isoId = 7L; + + CreateVMFromBackupCmd cmd = new CreateVMFromBackupCmd(); + cmd._accountService = accountService; + cmd._entityMgr = entityManager; + when(accountService.finalyzeAccountId(nullable(String.class), nullable(Long.class), nullable(Long.class), eq(true))).thenReturn(accountId); + when(accountService.getActiveAccountById(accountId)).thenReturn(account); + + ReflectionTestUtils.setField(cmd, "serviceOfferingId", serviceOfferingId); + 
ReflectionTestUtils.setField(cmd, "templateId", isoId); + ReflectionTestUtils.setField(cmd, "backupId", backupId); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "diskOfferingId", rootDiskOfferingId); + ReflectionTestUtils.setField(cmd, "overrideDiskOfferingId", null); + + ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class); + DiskOfferingVO rootDiskOffering = mock(DiskOfferingVO.class); + when(_serviceOfferingDao.findById(serviceOfferingId)).thenReturn(serviceOffering); + when(rootDiskOffering.getDiskSize()).thenReturn(10 * 1024 * 1024 * 1024L); + when(diskOfferingDao.findById(rootDiskOfferingId)).thenReturn(rootDiskOffering); + + Map diskDetails = new HashMap<>(); + diskDetails.put(ApiConstants.DISK_OFFERING_ID, "disk-offering-uuid"); + diskDetails.put(ApiConstants.DEVICE_ID, "1"); + diskDetails.put(ApiConstants.SIZE, "5"); + diskDetails.put(ApiConstants.MIN_IOPS, "1000"); + diskDetails.put(ApiConstants.MAX_IOPS, "5000"); + Map> disksDetails = new HashMap<>(); + disksDetails.put(0, diskDetails); + ReflectionTestUtils.setField(cmd, "dataDisksDetails", disksDetails); + DiskOffering diskOffering = mock(DiskOffering.class); + when(diskOffering.isCustomized()).thenReturn(true); + when(diskOffering.isCustomizedIops()).thenReturn(true); + when(entityManager.findByUuid(DiskOffering.class, "disk-offering-uuid")).thenReturn(diskOffering); + + BackupVO backup = mock(BackupVO.class); + when(backup.getZoneId()).thenReturn(zoneId); + when(backup.getVmId()).thenReturn(vmId); + when(backupDao.findById(backupId)).thenReturn(backup); + + UserVmVO userVmVO = new UserVmVO(); + userVmVO.setTemplateId(isoId); + when(userVmDao.findByIdIncludingRemoved(vmId)).thenReturn(userVmVO); + VMTemplateVO iso = mock(VMTemplateVO.class); + when(iso.getFormat()).thenReturn(Storage.ImageFormat.ISO); + when(templateDao.findById(isoId)).thenReturn(iso); + VmDiskInfo rootVmDiskInfo = new VmDiskInfo(diskOffering, 10L, 1000L, 2000L); + 
when(backupManager.getRootDiskInfoFromBackup(backup)).thenReturn(rootVmDiskInfo); + Mockito.when(backupManager.canCreateInstanceFromBackup(backupId)).thenReturn(true); + + Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), eq(true), any(), any(), any(), any()); + + UserVm result = userVmManagerImpl.allocateVMFromBackup(cmd); + + assertNotNull(result); + Mockito.verify(backupDao).findById(backupId); + Mockito.verify(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), eq(true), any(), any(), any(), any()); + } + + @Test + public void testAllocateVMFromBackupUsingBackupValuesWithISO() throws InsufficientCapacityException, ResourceAllocationException, ResourceUnavailableException { + Long backupId = 5L; + Long isoId = 9L; + + CreateVMFromBackupCmd cmd = mock(CreateVMFromBackupCmd.class); + when(cmd.getZoneId()).thenReturn(zoneId); + when(cmd.getBackupId()).thenReturn(backupId); + when(cmd.getEntityOwnerId()).thenReturn(accountId); + when(cmd.getServiceOfferingId()).thenReturn(null); + when(cmd.getDiskOfferingId()).thenReturn(null); + when(cmd.getTemplateId()).thenReturn(null); + when(cmd.getHostId()).thenReturn(null); + when(cmd.getNetworkIds()).thenReturn(null); + when(cmd.getIpToNetworkMap()).thenReturn(null); + when(cmd.getDataDiskInfoList()).thenReturn(null); + + Account owner = mock(Account.class); + when(accountService.getActiveAccountById(accountId)).thenReturn(owner); + + DataCenterVO zone = mock(DataCenterVO.class); + when(_dcDao.findById(zoneId)).thenReturn(zone); + + BackupVO backup = mock(BackupVO.class); + 
when(backup.getZoneId()).thenReturn(zoneId); + when(backup.getVmId()).thenReturn(vmId); + when(backup.getDetail(ApiConstants.SERVICE_OFFERING_ID)).thenReturn("service-offering-uuid"); + when(backupDao.findById(backupId)).thenReturn(backup); + + UserVmVO userVmVO = new UserVmVO(); + when(userVmDao.findByIdIncludingRemoved(vmId)).thenReturn(userVmVO); + VMTemplateVO iso = mock(VMTemplateVO.class); + when(iso.getFormat()).thenReturn(Storage.ImageFormat.ISO); + when(backup.getDetail(ApiConstants.TEMPLATE_ID)).thenReturn("iso-uuid"); + when(templateDao.findByUuid("iso-uuid")).thenReturn(iso); + + ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class); + DiskOfferingVO diskOffering = mock(DiskOfferingVO.class); + when(backup.getDetail(ApiConstants.SERVICE_OFFERING_ID)).thenReturn("service-offering-uuid"); + when(_serviceOfferingDao.findByUuid("service-offering-uuid")).thenReturn(serviceOffering); + + VmDiskInfo rootVmDiskInfo = new VmDiskInfo(diskOffering, 10L, 1000L, 2000L); + when(backupManager.getRootDiskInfoFromBackup(backup)).thenReturn(rootVmDiskInfo); + + NetworkVO network1 = mock(NetworkVO.class); + NetworkVO network2 = mock(NetworkVO.class); + when(backupManager.getDataDiskInfoListFromBackup(backup)).thenReturn(List.of(new VmDiskInfo(diskOffering, 10L, 1000L, 2000L))); + Mockito.when(backupManager.canCreateInstanceFromBackup(backupId)).thenReturn(true); + + Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(false), any(), any(), any(), + any(), any(), any(), any(), eq(false), any(), any(), any(), any()); + + UserVm result = userVmManagerImpl.allocateVMFromBackup(cmd); + + assertNotNull(result); + Mockito.verify(userVmManagerImpl).createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), 
any(), eq(false), any(), any(), any(), + any(), any(), any(), any(), eq(false), any(), any(), any(), any()); + } + + @Test + public void testRestoreVMFromBackup() throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + Long backupId = 5L; + Long templateId = 6L; + + CreateVMFromBackupCmd cmd = mock(CreateVMFromBackupCmd.class); + when(cmd.getBackupId()).thenReturn(backupId); + when(cmd.getStartVm()).thenReturn(true); + when(cmd.getEntityId()).thenReturn(vmId); + + UserVmVO vm = mock(UserVmVO.class); + when(vm.getId()).thenReturn(vmId); + when(vm.getState()).thenReturn(VirtualMachine.State.Running); + when(vm.getTemplateId()).thenReturn(templateId); + + when(backupManager.restoreBackupToVM(backupId, vmId)).thenReturn(true); + + Map params = new HashMap<>(); + Pair> vmPair = new Pair<>(vm, params); + doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), isNull(), anyMap(), isNull()); + doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), anyLong(), anyMap(), isNull()); + when(userVmDao.findById(vmId)).thenReturn(vm); + when(templateDao.findByIdIncludingRemoved(templateId)).thenReturn(mock(VMTemplateVO.class)); + when(userVmManagerImpl.stopVirtualMachine(anyLong(), anyLong())).thenReturn(true); + + UserVm result = userVmManagerImpl.restoreVMFromBackup(cmd); + + assertNotNull(result); + assertEquals(vm, result); + Mockito.verify(backupManager).restoreBackupToVM(backupId, vmId); + } + + @Test + public void testDestroyVm() throws ResourceUnavailableException { + Long volumeId = 4L; + Long accountId = 5L; + Long userId = 6L; + boolean expunge = true; + + ReflectionTestUtils.setField(userVmManagerImpl, "_uuidMgr", uuidMgr); + CallContext callContext = mock(CallContext.class); + Account callingAccount = mock(Account.class); + when(callingAccount.getId()).thenReturn(accountId); + when(callContext.getCallingAccount()).thenReturn(callingAccount); + 
when(accountManager.isAdmin(callingAccount.getId())).thenReturn(true); + doNothing().when(accountManager).checkApiAccess(callingAccount, BaseCmd.getCommandNameByClass(ExpungeVMCmd.class)); + try (MockedStatic mockedCallContext = Mockito.mockStatic(CallContext.class)) { + mockedCallContext.when(CallContext::current).thenReturn(callContext); + mockedCallContext.when(() -> CallContext.register(callContext, ApiCommandResourceType.Volume)).thenReturn(callContext); + + DestroyVMCmd cmd = mock(DestroyVMCmd.class); + when(cmd.getId()).thenReturn(vmId); + when(cmd.getExpunge()).thenReturn(expunge); + List volumeIds = List.of(volumeId); + when(cmd.getVolumeIds()).thenReturn(volumeIds); + + UserVmVO vm = mock(UserVmVO.class); + when(vm.getId()).thenReturn(vmId); + when(vm.getState()).thenReturn(VirtualMachine.State.Running); + when(vm.getUuid()).thenReturn("vm-uuid"); + when(vm.getUserVmType()).thenReturn("User"); + when(userVmDao.findById(vmId)).thenReturn(vm); + + VolumeVO vol = Mockito.mock(VolumeVO.class); + when(vol.getInstanceId()).thenReturn(vmId); + when(vol.getId()).thenReturn(volumeId); + when(vol.getVolumeType()).thenReturn(Volume.Type.DATADISK); + when(volumeDaoMock.findById(volumeId)).thenReturn(vol); + + List dataVolumes = new ArrayList<>(); + when(volumeDaoMock.findByInstanceAndType(vmId, Volume.Type.DATADISK)).thenReturn(dataVolumes); + + when(volumeApiService.destroyVolume(volumeId, CallContext.current().getCallingAccount(), expunge, false)).thenReturn(vol); + + doReturn(vm).when(userVmManagerImpl).stopVirtualMachine(anyLong(), anyBoolean()); + doReturn(vm).when(userVmManagerImpl).destroyVm(vmId, expunge); + doReturn(true).when(userVmManagerImpl).expunge(vm); + + try (MockedStatic mockedUsageEventUtils = Mockito.mockStatic(UsageEventUtils.class)) { + + UserVm result = userVmManagerImpl.destroyVm(cmd); + + assertNotNull(result); + assertEquals(vm, result); + Mockito.verify(userVmManagerImpl).stopVirtualMachine(vmId, false); + 
Mockito.verify(backupManager).checkAndRemoveBackupOfferingBeforeExpunge(vm); + } + } + } @Test(expected = InvalidParameterValueException.class) public void testValidateLeasePropertiesInvalidDuration() { @@ -3446,7 +3864,7 @@ public class UserVmManagerImplTest { when(_dcMock.isLocalStorageEnabled()).thenReturn(false); when(_dcMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); @@ -3484,7 +3902,7 @@ public class UserVmManagerImplTest { when(_dcMock.isLocalStorageEnabled()).thenReturn(false); when(_dcMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Basic); Mockito.doReturn(userVmVoMock).when(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), - any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), + any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any(), any()); diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java index 7a357690208..93120673720 100644 --- a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java @@ -1128,6 +1128,11 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches return false; } + @Override + public IpAddresses getIpAddressesFromIps(String 
ipAddress, String ip6Address, String macAddress) { + return null; + } + @Override public void expungeLbVmRefs(List vmIds, Long batchSize) { } diff --git a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java index 969a4202c51..61ae6cd018c 100644 --- a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java +++ b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java @@ -16,18 +16,41 @@ // under the License. package org.apache.cloudstack.backup; +import com.cloud.api.query.dao.UserVmJoinDao; +import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.alert.AlertManager; +import com.cloud.capacity.CapacityVO; import com.cloud.configuration.Resource; +import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.Domain; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.network.Network; +import com.cloud.network.NetworkService; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.DiskOffering; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.DiskOfferingVO; import com.cloud.exception.ResourceAllocationException; +import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import 
com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -35,25 +58,37 @@ import com.cloud.user.AccountVO; import com.cloud.user.DomainManager; import com.cloud.user.ResourceLimitService; import com.cloud.user.User; -import com.cloud.user.UserVO; +import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.VMInstanceDetailVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VmDiskInfo; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.VMInstanceDetailsDao; import com.cloud.vm.dao.VMInstanceDao; +import com.google.gson.Gson; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd; import org.apache.cloudstack.api.command.admin.backup.UpdateBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.CreateBackupCmd; import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.DeleteBackupScheduleCmd; +import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.backup.dao.BackupScheduleDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; 
import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -73,17 +108,22 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.TimeZone; +import java.util.UUID; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.atLeastOnce; @RunWith(MockitoJUnitRunner.class) public class BackupManagerTest { @@ -94,6 +134,9 @@ public class BackupManagerTest { @Mock BackupOfferingDao backupOfferingDao; + @Mock + BackupDetailsDao backupDetailsDao; + @Mock BackupProvider backupProvider; @@ -128,24 +171,69 @@ public class BackupManagerTest { DataCenterDao dataCenterDao; @Mock - AlertManager alertManager; + private AlertManager alertManagerMock; + + @Mock + private Domain domainMock; @Mock private VMInstanceVO vmInstanceVOMock; @Mock - private CallContext callContextMock; + private CreateBackupScheduleCmd createBackupScheduleCmdMock; + + @Mock + private BackupOfferingVO backupOfferingVOMock; + + @Mock + private AsyncJobVO asyncJobVOMock; + + @Mock + private BackupScheduleVO backupScheduleVOMock; @Mock private AccountVO accountVOMock; + @Mock + private CallContext callContextMock; + @Mock private DeleteBackupScheduleCmd deleteBackupScheduleCmdMock; @Mock - private BackupScheduleVO backupScheduleVOMock; + DiskOfferingDao diskOfferingDao; - private UserVO user; + @Mock + ServiceOfferingDao serviceOfferingDao; + + @Mock + VMTemplateDao vmTemplateDao; + + @Mock + UserVmJoinDao userVmJoinDao; + + @Mock + PrimaryDataStoreDao 
primaryDataStoreDao; + + @Mock + HostDao hostDao; + + @Mock + private NetworkDao networkDao; + + @Mock + private NetworkService networkService; + + @Mock + private VMInstanceDetailsDao vmInstanceDetailsDao; + + @Mock + AccountDao accountDao; + + @Mock + DomainDao domainDao; + + private Gson gson; private String[] hostPossibleValues = {"127.0.0.1", "hostname"}; private String[] datastoresPossibleValues = {"e9804933-8609-4de3-bccc-6278072a496c", "datastore-name"}; @@ -155,6 +243,8 @@ public class BackupManagerTest { @Before public void setup() throws Exception { + gson = new Gson(); + closeable = MockitoAnnotations.openMocks(this); when(backupOfferingDao.findById(null)).thenReturn(null); when(backupOfferingDao.findById(123l)).thenReturn(null); @@ -175,6 +265,12 @@ public class BackupManagerTest { return true; }); + backupProvider = mock(BackupProvider.class); + when(backupProvider.getName()).thenReturn("testbackupprovider"); + Map backupProvidersMap = new HashMap<>(); + backupProvidersMap.put(backupProvider.getName().toLowerCase(), backupProvider); + ReflectionTestUtils.setField(backupManager, "backupProvidersMap", backupProvidersMap); + Account account = mock(Account.class); User user = mock(User.class); CallContext.register(user, account); @@ -189,6 +285,20 @@ public class BackupManagerTest { CallContext.unregister(); } + private void overrideBackupFrameworkConfigValue() { + ConfigKey configKey = BackupManager.BackupFrameworkEnabled; + this.configDepotImpl = (ConfigDepotImpl) ReflectionTestUtils.getField(configKey, "s_depot"); + ConfigDepotImpl configDepot = Mockito.mock(ConfigDepotImpl.class); + Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupFrameworkEnabled.key()), + Mockito.eq(ConfigKey.Scope.Global), Mockito.isNull())).thenReturn("true"); + Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupFrameworkEnabled.key()), + Mockito.eq(ConfigKey.Scope.Zone), Mockito.anyLong())).thenReturn("true"); + 
Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupProviderPlugin.key()), + Mockito.eq(ConfigKey.Scope.Zone), Mockito.anyLong())).thenReturn("testbackupprovider"); + ReflectionTestUtils.setField(configKey, "s_depot", configDepot); + updatedConfigKeyDepot = true; + } + @Test public void testExceptionWhenUpdateWithNullId() { try { @@ -203,7 +313,7 @@ public class BackupManagerTest { } } - @Test (expected = InvalidParameterValueException.class) + @Test(expected = InvalidParameterValueException.class) public void testExceptionWhenUpdateWithNonExistentId() { Long id = 123l; @@ -213,7 +323,7 @@ public class BackupManagerTest { backupManager.updateBackupOffering(cmd); } - @Test (expected = ServerApiException.class) + @Test(expected = ServerApiException.class) public void testExceptionWhenUpdateWithoutChanges() { UpdateBackupOfferingCmd cmd = Mockito.spy(UpdateBackupOfferingCmd.class); when(cmd.getName()).thenReturn(null); @@ -244,96 +354,117 @@ public class BackupManagerTest { @Test public void restoreBackedUpVolumeTestHostIpAndDatastoreUuid() { BackupVO backupVO = new BackupVO(); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; String vmName = "i-2-3-VM"; VirtualMachine.State vmState = VirtualMachine.State.Running; Mockito.when(vm.getName()).thenReturn(vmName); Mockito.when(vm.getState()).thenReturn(vmState); - Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); + Pair vmNameAndState = new Pair<>(vmName, vmState); - Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("127.0.0.1"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + 
Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + when(volumeInfo.getUuid()).thenReturn(volumeUuid); + + doReturn(new Pair(Boolean.TRUE, "Success")) + .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); + + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success", restoreBackedUpVolume.second()); - Mockito.verify(backupProvider, times(1)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString(), any(Pair.class)); + verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostIpAndDatastoreName() { BackupVO backupVO = new BackupVO(); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; String vmName = "i-2-3-VM"; VirtualMachine.State vmState = VirtualMachine.State.Running; Mockito.when(vm.getName()).thenReturn(vmName); Mockito.when(vm.getState()).thenReturn(vmState); - Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); - Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("127.0.0.1"), Mockito.eq("datastore-name"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success2")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair vmNameAndState = new Pair<>(vmName, vmState); + + Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + 
when(volumeInfo.getUuid()).thenReturn(volumeUuid); + + doReturn(new Pair(Boolean.TRUE, "Success2")) + .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); + + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success2", restoreBackedUpVolume.second()); - Mockito.verify(backupProvider, times(2)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString(), any(Pair.class)); + verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostNameAndDatastoreUuid() { BackupVO backupVO = new BackupVO(); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; String vmName = "i-2-3-VM"; VirtualMachine.State vmState = VirtualMachine.State.Running; Mockito.when(vm.getName()).thenReturn(vmName); Mockito.when(vm.getState()).thenReturn(vmState); - Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); + Pair vmNameAndState = new Pair<>(vmName, vmState); - Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("hostname"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success3")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + when(volumeInfo.getUuid()).thenReturn(volumeUuid); + + doReturn(new 
Pair(Boolean.TRUE, "Success3")) + .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); + + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success3", restoreBackedUpVolume.second()); - Mockito.verify(backupProvider, times(3)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString(), any(Pair.class)); + verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostAndDatastoreName() { BackupVO backupVO = new BackupVO(); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; String vmName = "i-2-3-VM"; VirtualMachine.State vmState = VirtualMachine.State.Running; Mockito.when(vm.getName()).thenReturn(vmName); Mockito.when(vm.getState()).thenReturn(vmState); - Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); + Pair vmNameAndState = new Pair<>(vmName, vmState); - Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("hostname"), Mockito.eq("datastore-name"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success4")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + when(volumeInfo.getUuid()).thenReturn(volumeUuid); + + doReturn(new Pair(Boolean.TRUE, "Success4")) + .when(backupProvider).restoreBackedUpVolume(any(Backup.class), 
any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); + + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success4", restoreBackedUpVolume.second()); - Mockito.verify(backupProvider, times(4)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString(), any(Pair.class)); + verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), + any(String.class), any(String.class), any(Pair.class)); } @Test public void tryRestoreVMTestRestoreSucceeded() throws NoTransitionException { - BackupOffering offering = Mockito.mock(BackupOffering.class); - VolumeVO volumeVO = Mockito.mock(VolumeVO.class); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); - BackupVO backup = Mockito.mock(BackupVO.class); + BackupOffering offering = mock(BackupOffering.class); + VolumeVO volumeVO = mock(VolumeVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); + BackupVO backup = mock(BackupVO.class); try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { Mockito.when(ActionEventUtils.onStartedActionEvent(Mockito.anyLong(), Mockito.anyLong(), @@ -358,10 +489,10 @@ public class BackupManagerTest { @Test public void tryRestoreVMTestRestoreFails() throws NoTransitionException { - BackupOffering offering = Mockito.mock(BackupOffering.class); - VolumeVO volumeVO = Mockito.mock(VolumeVO.class); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); - BackupVO backup = Mockito.mock(BackupVO.class); + BackupOffering offering = mock(BackupOffering.class); + VolumeVO volumeVO = mock(VolumeVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); + BackupVO backup = mock(BackupVO.class); try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { 
Mockito.when(ActionEventUtils.onStartedActionEvent(Mockito.anyLong(), Mockito.anyLong(), @@ -390,20 +521,6 @@ public class BackupManagerTest { } } - private void overrideBackupFrameworkConfigValue() { - ConfigKey configKey = BackupManager.BackupFrameworkEnabled; - this.configDepotImpl = (ConfigDepotImpl) ReflectionTestUtils.getField(configKey, "s_depot"); - ConfigDepotImpl configDepot = Mockito.mock(ConfigDepotImpl.class); - Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupFrameworkEnabled.key()), - Mockito.eq(ConfigKey.Scope.Global), Mockito.isNull())).thenReturn("true"); - Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupFrameworkEnabled.key()), - Mockito.eq(ConfigKey.Scope.Zone), Mockito.anyLong())).thenReturn("true"); - Mockito.when(configDepot.getConfigStringValue(Mockito.eq(BackupManager.BackupProviderPlugin.key()), - Mockito.eq(ConfigKey.Scope.Zone), Mockito.anyLong())).thenReturn("testbackupprovider"); - ReflectionTestUtils.setField(configKey, "s_depot", configDepot); - updatedConfigKeyDepot = true; - } - @Test public void testConfigureBackupSchedule() { Long vmId = 1L; @@ -418,6 +535,7 @@ public class BackupManagerTest { when(cmd.getIntervalType()).thenReturn(DateUtil.IntervalType.DAILY); when(cmd.getMaxBackups()).thenReturn(8); when(cmd.getSchedule()).thenReturn("00:00:00"); + when(cmd.getQuiesceVM()).thenReturn(null); VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); when(vmInstanceDao.findById(vmId)).thenReturn(vm); @@ -452,40 +570,41 @@ public class BackupManagerTest { } @Test - public void testConfigureBackupScheduleLimitReached() { - Long vmId = 1L; - Long zoneId = 2L; - Long accountId = 3L; - Long domainId = 4L; + public void configureBackupScheduleTestEnsureLimitCheckIsPerformed() { + long vmId = 1L; + long zoneId = 2L; + long accountId = 3L; + long domainId = 4L; + long backupOfferingId = 5L; - CreateBackupScheduleCmd cmd = Mockito.mock(CreateBackupScheduleCmd.class); - 
when(cmd.getVmId()).thenReturn(vmId); - when(cmd.getTimezone()).thenReturn("GMT"); - when(cmd.getIntervalType()).thenReturn(DateUtil.IntervalType.DAILY); - when(cmd.getMaxBackups()).thenReturn(8); + when(createBackupScheduleCmdMock.getVmId()).thenReturn(vmId); + when(createBackupScheduleCmdMock.getTimezone()).thenReturn("GMT"); + when(createBackupScheduleCmdMock.getIntervalType()).thenReturn(DateUtil.IntervalType.DAILY); + when(createBackupScheduleCmdMock.getMaxBackups()).thenReturn(8); - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); - when(vmInstanceDao.findById(vmId)).thenReturn(vm); - when(vm.getDataCenterId()).thenReturn(zoneId); - when(vm.getAccountId()).thenReturn(accountId); + when(vmInstanceDao.findById(vmId)).thenReturn(vmInstanceVOMock); + when(vmInstanceVOMock.getDataCenterId()).thenReturn(zoneId); + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); + when(vmInstanceVOMock.getBackupOfferingId()).thenReturn(backupOfferingId); + + when(backupOfferingDao.findById(backupOfferingId)).thenReturn(backupOfferingVOMock); + when(backupOfferingVOMock.isUserDrivenBackupAllowed()).thenReturn(true); overrideBackupFrameworkConfigValue(); - Account account = Mockito.mock(Account.class); - when(accountManager.getAccount(accountId)).thenReturn(account); - when(account.getDomainId()).thenReturn(domainId); - Domain domain = Mockito.mock(Domain.class); - when(domainManager.getDomain(domainId)).thenReturn(domain); - when(resourceLimitMgr.findCorrectResourceLimitForAccount(account, Resource.ResourceType.backup, null)).thenReturn(10L); - when(resourceLimitMgr.findCorrectResourceLimitForDomain(domain, Resource.ResourceType.backup, null)).thenReturn(1L); + when(accountManager.getAccount(accountId)).thenReturn(accountVOMock); + when(accountVOMock.getDomainId()).thenReturn(domainId); + when(domainManager.getDomain(domainId)).thenReturn(domainMock); + when(resourceLimitMgr.findCorrectResourceLimitForAccount(accountVOMock, Resource.ResourceType.backup, 
null)).thenReturn(10L); + when(resourceLimitMgr.findCorrectResourceLimitForDomain(domainMock, Resource.ResourceType.backup, null)).thenReturn(1L); InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, - () -> backupManager.configureBackupSchedule(cmd)); - Assert.assertEquals(exception.getMessage(), "Max number of backups shouldn't exceed the domain/account level backup limit"); + () -> backupManager.configureBackupSchedule(createBackupScheduleCmdMock)); + Assert.assertEquals("'maxbackups' should not exceed the domain/account backup limit.", exception.getMessage()); } @Test - public void testCreateScheduledBackup() throws ResourceAllocationException { + public void createBackupTestCreateScheduledBackup() throws ResourceAllocationException { Long vmId = 1L; Long zoneId = 2L; Long scheduleId = 3L; @@ -496,35 +615,35 @@ public class BackupManagerTest { Long newBackupSize = 1000000000L; Long oldBackupSize = 400000000L; - VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); - when(vmInstanceDao.findById(vmId)).thenReturn(vm); - when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); - when(vm.getId()).thenReturn(vmId); - when(vm.getDataCenterId()).thenReturn(zoneId); - when(vm.getBackupOfferingId()).thenReturn(backupOfferingId); - when(vm.getAccountId()).thenReturn(accountId); + when(vmInstanceDao.findById(vmId)).thenReturn(vmInstanceVOMock); + when(vmInstanceVOMock.getDataCenterId()).thenReturn(zoneId); + when(vmInstanceVOMock.getBackupOfferingId()).thenReturn(backupOfferingId); + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); overrideBackupFrameworkConfigValue(); - BackupOfferingVO offering = Mockito.mock(BackupOfferingVO.class); - when(backupOfferingDao.findById(backupOfferingId)).thenReturn(offering); - when(offering.isUserDrivenBackupAllowed()).thenReturn(true); - when(offering.getProvider()).thenReturn("test"); + 
when(backupOfferingDao.findById(backupOfferingId)).thenReturn(backupOfferingVOMock); + when(backupOfferingVOMock.isUserDrivenBackupAllowed()).thenReturn(true); + when(backupOfferingVOMock.getProvider()).thenReturn("testbackupprovider"); - Account account = Mockito.mock(Account.class); - when(accountManager.getAccount(accountId)).thenReturn(account); + Mockito.doReturn(scheduleId).when(backupManager).getBackupScheduleId(asyncJobVOMock); + + when(accountManager.getAccount(accountId)).thenReturn(accountVOMock); BackupScheduleVO schedule = mock(BackupScheduleVO.class); - when(schedule.getScheduleType()).thenReturn(DateUtil.IntervalType.DAILY); - when(schedule.getMaxBackups()).thenReturn(0); when(backupScheduleDao.findById(scheduleId)).thenReturn(schedule); - when(backupScheduleDao.findByVMAndIntervalType(vmId, DateUtil.IntervalType.DAILY)).thenReturn(schedule); + when(schedule.getMaxBackups()).thenReturn(2); + + VolumeVO volume = mock(VolumeVO.class); + when(volumeDao.findByInstance(vmId)).thenReturn(List.of(volume)); + when(volume.getState()).thenReturn(Volume.State.Ready); + when(volumeApiService.getVolumePhysicalSize(null, null, null)).thenReturn(newBackupSize); BackupProvider backupProvider = mock(BackupProvider.class); Backup backup = mock(Backup.class); when(backup.getId()).thenReturn(backupId); when(backup.getSize()).thenReturn(newBackupSize); - when(backupProvider.getName()).thenReturn("test"); - when(backupProvider.takeBackup(vm)).thenReturn(new Pair<>(true, backup)); + when(backupProvider.getName()).thenReturn("testbackupprovider"); + when(backupProvider.takeBackup(vmInstanceVOMock, null)).thenReturn(new Pair<>(true, backup)); Map backupProvidersMap = new HashMap<>(); backupProvidersMap.put(backupProvider.getName().toLowerCase(), backupProvider); ReflectionTestUtils.setField(backupManager, "backupProvidersMap", backupProvidersMap); @@ -532,18 +651,15 @@ public class BackupManagerTest { BackupVO backupVO = mock(BackupVO.class); 
when(backupVO.getId()).thenReturn(backupId); BackupVO oldestBackupVO = mock(BackupVO.class); - when(oldestBackupVO.getSize()).thenReturn(oldBackupSize); - when(oldestBackupVO.getId()).thenReturn(oldestBackupId); - when(oldestBackupVO.getVmId()).thenReturn(vmId); - when(oldestBackupVO.getBackupOfferingId()).thenReturn(backupOfferingId); when(backupDao.findById(backupId)).thenReturn(backupVO); List backups = new ArrayList<>(List.of(oldestBackupVO)); - when(backupDao.listBackupsByVMandIntervalType(vmId, Backup.Type.DAILY)).thenReturn(backups); - when(backupDao.findByIdIncludingRemoved(oldestBackupId)).thenReturn(oldestBackupVO); - when(backupOfferingDao.findByIdIncludingRemoved(backupOfferingId)).thenReturn(offering); - when(backupProvider.deleteBackup(oldestBackupVO, false)).thenReturn(true); - when(backupDao.remove(oldestBackupVO.getId())).thenReturn(true); + when(backupDao.listBySchedule(scheduleId)).thenReturn(backups); + + CreateBackupCmd cmd = Mockito.mock(CreateBackupCmd.class); + when(cmd.getVmId()).thenReturn(vmId); + when(cmd.getName()).thenReturn("new-backup1"); + when(cmd.getQuiesceVM()).thenReturn(null); try (MockedStatic ignored = Mockito.mockStatic(ActionEventUtils.class)) { Mockito.when(ActionEventUtils.onActionEvent(Mockito.anyLong(), Mockito.anyLong(), @@ -551,20 +667,58 @@ public class BackupManagerTest { Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString())).thenReturn(1L); - Assert.assertEquals(backupManager.createBackup(vmId, scheduleId), true); + assertTrue(backupManager.createBackup(cmd, asyncJobVOMock)); + + Mockito.verify(resourceLimitMgr, times(1)).checkResourceLimit(accountVOMock, Resource.ResourceType.backup); + Mockito.verify(resourceLimitMgr, times(1)).checkResourceLimit(accountVOMock, Resource.ResourceType.backup_storage, newBackupSize); Mockito.verify(resourceLimitMgr, times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup); Mockito.verify(resourceLimitMgr, 
times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup_storage, newBackupSize); Mockito.verify(backupDao, times(1)).update(backupVO.getId(), backupVO); - - Mockito.verify(resourceLimitMgr, times(1)).decrementResourceCount(accountId, Resource.ResourceType.backup); - Mockito.verify(resourceLimitMgr, times(1)).decrementResourceCount(accountId, Resource.ResourceType.backup_storage, oldBackupSize); - Mockito.verify(backupDao, times(1)).remove(oldestBackupId); + Mockito.verify(backupManager, times(1)).deleteOldestBackupFromScheduleIfRequired(vmId, scheduleId); } } + @Test(expected = ResourceAllocationException.class) + public void createBackupTestResourceLimitReached() throws ResourceAllocationException { + Long vmId = 1L; + Long zoneId = 2L; + Long scheduleId = 3L; + Long backupOfferingId = 4L; + Long accountId = 5L; + + when(vmInstanceDao.findById(vmId)).thenReturn(vmInstanceVOMock); + when(vmInstanceVOMock.getDataCenterId()).thenReturn(zoneId); + when(vmInstanceVOMock.getBackupOfferingId()).thenReturn(backupOfferingId); + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); + + overrideBackupFrameworkConfigValue(); + BackupOfferingVO offering = Mockito.mock(BackupOfferingVO.class); + when(backupOfferingDao.findById(backupOfferingId)).thenReturn(offering); + when(offering.isUserDrivenBackupAllowed()).thenReturn(true); + when(offering.getProvider()).thenReturn("testbackupprovider"); + + Account account = Mockito.mock(Account.class); + when(accountManager.getAccount(accountId)).thenReturn(account); + Mockito.doThrow(new ResourceAllocationException("", Resource.ResourceType.backup)).when(resourceLimitMgr).checkResourceLimit(account, Resource.ResourceType.backup); + + CreateBackupCmd cmd = Mockito.mock(CreateBackupCmd.class); + when(cmd.getVmId()).thenReturn(vmId); + when(cmd.getQuiesceVM()).thenReturn(null); + + String jobParams = "{}"; + when(asyncJobVOMock.getCmdInfo()).thenReturn(jobParams); + when(asyncJobVOMock.getId()).thenReturn(1L); + + 
backupManager.createBackup(cmd, asyncJobVOMock); + + String msg = "Backup storage space resource limit exceeded for account id : " + accountId + ". Failed to create backup"; + Mockito.verify(alertManagerMock, times(1)).sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Backup resource limit exceeded for account id : " + accountId + + ". Failed to create backups; please use updateResourceLimit to increase the limit"); + } + @Test (expected = ResourceAllocationException.class) - public void testCreateBackupLimitReached() throws ResourceAllocationException { + public void testCreateBackupStorageLimitReached() throws ResourceAllocationException { Long vmId = 1L; Long zoneId = 2L; Long scheduleId = 3L; @@ -581,20 +735,22 @@ public class BackupManagerTest { BackupOfferingVO offering = Mockito.mock(BackupOfferingVO.class); when(backupOfferingDao.findById(backupOfferingId)).thenReturn(offering); when(offering.isUserDrivenBackupAllowed()).thenReturn(true); + when(offering.getProvider()).thenReturn("testbackupprovider"); - BackupScheduleVO schedule = mock(BackupScheduleVO.class); - when(schedule.getScheduleType()).thenReturn(DateUtil.IntervalType.DAILY); - when(backupScheduleDao.findById(scheduleId)).thenReturn(schedule); + Mockito.doReturn(scheduleId).when(backupManager).getBackupScheduleId(asyncJobVOMock); Account account = Mockito.mock(Account.class); - when(account.getId()).thenReturn(accountId); when(accountManager.getAccount(accountId)).thenReturn(account); Mockito.doThrow(new ResourceAllocationException("", Resource.ResourceType.backup_storage)).when(resourceLimitMgr).checkResourceLimit(account, Resource.ResourceType.backup_storage, 0L); - backupManager.createBackup(vmId, scheduleId); + CreateBackupCmd cmd = Mockito.mock(CreateBackupCmd.class); + when(cmd.getVmId()).thenReturn(vmId); + when(cmd.getQuiesceVM()).thenReturn(null); + + backupManager.createBackup(cmd, asyncJobVOMock); String msg = "Backup storage space resource limit exceeded 
for account id : " + accountId + ". Failed to create backup"; - Mockito.verify(alertManager, times(1)).sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Backup storage space resource limit exceeded for account id : " + accountId + Mockito.verify(alertManagerMock, times(1)).sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Backup storage space resource limit exceeded for account id : " + accountId + ". Failed to create backups; please use updateResourceLimit to increase the limit"); } @@ -608,7 +764,7 @@ public class BackupManagerTest { Long backup1Size = 1 * Resource.ResourceType.bytesToGiB; Long backup2Size = 2 * Resource.ResourceType.bytesToGiB; Long newBackupSize = 3 * Resource.ResourceType.bytesToGiB; - Long metricSize = 4 * Resource.ResourceType.bytesToGiB; + Long restorePointSize = 4 * Resource.ResourceType.bytesToGiB; overrideBackupFrameworkConfigValue(); @@ -624,23 +780,23 @@ public class BackupManagerTest { VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); when(vm.getId()).thenReturn(vmId); when(vm.getAccountId()).thenReturn(accountId); - when(vmInstanceDao.listByZoneWithBackups(dataCenterId, null)).thenReturn(List.of(vm)); - Backup.Metric metric = new Backup.Metric(metricSize, null); - Map metricMap = new HashMap<>(); - metricMap.put(vm, metric); - when(backupProvider.getBackupMetrics(Mockito.anyLong(), Mockito.anyList())).thenReturn(metricMap); + List vmIds = List.of(vmId); + when(backupDao.listVmIdsWithBackupsInZone(dataCenterId)).thenReturn(vmIds); + when(vmInstanceDao.listByZoneAndBackupOffering(dataCenterId, null)).thenReturn(List.of(vm)); - Backup.RestorePoint restorePoint1 = new Backup.RestorePoint(restorePoint1ExternalId, DateUtil.now(), "Root"); - Backup.RestorePoint restorePoint2 = new Backup.RestorePoint("12345", DateUtil.now(), "Root"); + Backup.RestorePoint restorePoint1 = new Backup.RestorePoint(restorePoint1ExternalId, DateUtil.now(), "Full", restorePointSize, 0L); + 
Backup.RestorePoint restorePoint2 = new Backup.RestorePoint("12345", DateUtil.now(), "Full", restorePointSize, 0L); List restorePoints = new ArrayList<>(List.of(restorePoint1, restorePoint2)); when(backupProvider.listRestorePoints(vm)).thenReturn(restorePoints); BackupVO backupInDb1 = new BackupVO(); backupInDb1.setSize(backup1Size); + backupInDb1.setAccountId(accountId); backupInDb1.setExternalId(restorePoint1ExternalId); BackupVO backupInDb2 = new BackupVO(); backupInDb2.setSize(backup2Size); + backupInDb2.setAccountId(accountId); backupInDb2.setExternalId(null); ReflectionTestUtils.setField(backupInDb2, "id", backup2Id); when(backupDao.findById(backup2Id)).thenReturn(backupInDb2); @@ -649,7 +805,7 @@ public class BackupManagerTest { BackupVO newBackupEntry = new BackupVO(); newBackupEntry.setSize(newBackupSize); - when(backupProvider.createNewBackupEntryForRestorePoint(restorePoint2, vm, metric)).thenReturn(newBackupEntry); + when(backupProvider.createNewBackupEntryForRestorePoint(restorePoint2, vm)).thenReturn(newBackupEntry); try (MockedStatic ignored = Mockito.mockStatic(ActionEventUtils.class)) { Mockito.when(ActionEventUtils.onActionEvent(Mockito.anyLong(), Mockito.anyLong(), @@ -663,8 +819,8 @@ public class BackupManagerTest { backupSyncTask.runInContext(); verify(resourceLimitMgr, times(1)).decrementResourceCount(accountId, Resource.ResourceType.backup_storage, backup1Size); - verify(resourceLimitMgr, times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup_storage, metricSize); - Assert.assertEquals(backupInDb1.getSize(), metricSize); + verify(resourceLimitMgr, times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup_storage, restorePointSize); + Assert.assertEquals(backupInDb1.getSize(), restorePointSize); verify(resourceLimitMgr, times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup); verify(resourceLimitMgr, times(1)).incrementResourceCount(accountId, Resource.ResourceType.backup_storage, 
newBackupSize); @@ -766,4 +922,895 @@ public class BackupManagerTest { boolean success = backupManager.deleteBackupSchedule(deleteBackupScheduleCmdMock); assertTrue(success); } + + @Test + public void testGetBackupDetailsFromVM() { + Long vmId = 1L; + VirtualMachine vm = mock(VirtualMachine.class); + when(vm.getServiceOfferingId()).thenReturn(1L); + when(vm.getTemplateId()).thenReturn(2L); + when(vm.getId()).thenReturn(vmId); + + ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class); + when(serviceOffering.getUuid()).thenReturn("service-offering-uuid"); + when(serviceOfferingDao.findById(1L)).thenReturn(serviceOffering); + VMTemplateVO template = mock(VMTemplateVO.class); + when(template.getUuid()).thenReturn("template-uuid"); + when(vmTemplateDao.findById(2L)).thenReturn(template); + + VMInstanceDetailVO vmInstanceDetail = mock(VMInstanceDetailVO.class); + when(vmInstanceDetail.getName()).thenReturn("mocked-detail-name"); + when(vmInstanceDetail.getValue()).thenReturn("mocked-detail-value"); + List vmDetails = Collections.singletonList(vmInstanceDetail); + when(vmInstanceDetailsDao.listDetails(vmId)).thenReturn(vmDetails); + + UserVmJoinVO userVmJoinVO = mock(UserVmJoinVO.class); + when(userVmJoinVO.getNetworkUuid()).thenReturn("mocked-network-uuid"); + List userVmJoinVOs = Collections.singletonList(userVmJoinVO); + when(userVmJoinDao.searchByIds(vmId)).thenReturn(userVmJoinVOs); + + Map details = backupManager.getBackupDetailsFromVM(vm); + + assertEquals("service-offering-uuid", details.get(ApiConstants.SERVICE_OFFERING_ID)); + assertEquals("[{\"networkid\":\"mocked-network-uuid\"}]", details.get(ApiConstants.NICS)); + assertEquals("{\"mocked-detail-name\":\"mocked-detail-value\"}", details.get(ApiConstants.VM_SETTINGS)); + } + + @Test + public void getDataDiskInfoListFromBackup() { + Long size1 = 5L * 1024 * 1024 * 1024; + Long size2 = 10L * 1024 * 1024 * 1024; + Backup backup = mock(Backup.class); + + Backup.VolumeInfo volumeInfo0 = 
mock(Backup.VolumeInfo.class); + when(volumeInfo0.getType()).thenReturn(Volume.Type.ROOT); + Backup.VolumeInfo volumeInfo1 = mock(Backup.VolumeInfo.class); + when(volumeInfo1.getDiskOfferingId()).thenReturn("disk-offering-uuid-1"); + when(volumeInfo1.getSize()).thenReturn(size1); + when(volumeInfo1.getMinIops()).thenReturn(100L); + when(volumeInfo1.getMaxIops()).thenReturn(300L); + when(volumeInfo1.getType()).thenReturn(Volume.Type.DATADISK); + when(volumeInfo1.getDeviceId()).thenReturn(1L); + Backup.VolumeInfo volumeInfo2 = mock(Backup.VolumeInfo.class); + when(volumeInfo2.getDiskOfferingId()).thenReturn("disk-offering-uuid-2"); + when(volumeInfo2.getSize()).thenReturn(size2); + when(volumeInfo2.getMinIops()).thenReturn(200L); + when(volumeInfo2.getMaxIops()).thenReturn(400L); + when(volumeInfo2.getType()).thenReturn(Volume.Type.DATADISK); + when(volumeInfo2.getDeviceId()).thenReturn(2L); + when(backup.getBackedUpVolumes()).thenReturn(List.of(volumeInfo0, volumeInfo1, volumeInfo2)); + + DiskOfferingVO diskOffering1 = mock(DiskOfferingVO.class); + when(diskOffering1.getUuid()).thenReturn("disk-offering-uuid-1"); + when(diskOffering1.getState()).thenReturn(DiskOffering.State.Active); + + DiskOfferingVO diskOffering2 = mock(DiskOfferingVO.class); + when(diskOffering2.getUuid()).thenReturn("disk-offering-uuid-2"); + when(diskOffering2.getState()).thenReturn(DiskOffering.State.Active); + + when(diskOfferingDao.findByUuid("disk-offering-uuid-1")).thenReturn(diskOffering1); + when(diskOfferingDao.findByUuid("disk-offering-uuid-2")).thenReturn(diskOffering2); + + List vmDiskInfoList = backupManager.getDataDiskInfoListFromBackup(backup); + + assertEquals(2, vmDiskInfoList.size()); + assertEquals("disk-offering-uuid-1", vmDiskInfoList.get(0).getDiskOffering().getUuid()); + assertEquals(Long.valueOf(5), vmDiskInfoList.get(0).getSize()); + assertEquals(Long.valueOf(1), vmDiskInfoList.get(0).getDeviceId()); + assertEquals(Long.valueOf(100), vmDiskInfoList.get(0).getMinIops()); 
+ assertEquals(Long.valueOf(300), vmDiskInfoList.get(0).getMaxIops()); + + assertEquals("disk-offering-uuid-2", vmDiskInfoList.get(1).getDiskOffering().getUuid()); + assertEquals(Long.valueOf(10), vmDiskInfoList.get(1).getSize()); + assertEquals(Long.valueOf(2), vmDiskInfoList.get(1).getDeviceId()); + assertEquals(Long.valueOf(200), vmDiskInfoList.get(1).getMinIops()); + assertEquals(Long.valueOf(400), vmDiskInfoList.get(1).getMaxIops()); + } + + @Test + public void getDataDiskInfoListFromBackupNullIops() { + Long size = 5L * 1024 * 1024 * 1024; + Backup backup = mock(Backup.class); + Backup.VolumeInfo volumeInfo1 = mock(Backup.VolumeInfo.class); + when(volumeInfo1.getDiskOfferingId()).thenReturn("disk-offering-uuid-1"); + when(volumeInfo1.getSize()).thenReturn(size); + when(volumeInfo1.getMinIops()).thenReturn(null); + when(volumeInfo1.getMaxIops()).thenReturn(null); + when(volumeInfo1.getType()).thenReturn(Volume.Type.DATADISK); + when(volumeInfo1.getDeviceId()).thenReturn(1L); + when(backup.getBackedUpVolumes()).thenReturn(List.of(volumeInfo1)); + + DiskOfferingVO diskOffering = mock(DiskOfferingVO.class); + when(diskOffering.getUuid()).thenReturn("disk-offering-uuid-1"); + when(diskOffering.getState()).thenReturn(DiskOffering.State.Active); + + when(diskOfferingDao.findByUuid("disk-offering-uuid-1")).thenReturn(diskOffering); + + List vmDiskInfoList = backupManager.getDataDiskInfoListFromBackup(backup); + + assertEquals(1, vmDiskInfoList.size()); + assertEquals("disk-offering-uuid-1", vmDiskInfoList.get(0).getDiskOffering().getUuid()); + assertEquals(Long.valueOf(5), vmDiskInfoList.get(0).getSize()); + assertEquals(Long.valueOf(1), vmDiskInfoList.get(0).getDeviceId()); + assertNull(vmDiskInfoList.get(0).getMinIops()); + assertNull(vmDiskInfoList.get(0).getMaxIops()); + } + + @Test (expected = InvalidParameterValueException.class) + public void testCheckVmDisksSizeAgainstBackup() { + Long sizeInBackup = 5L * 1024 * 1024 * 1024; + Long sizeInCmd = 2L; + Backup 
backup = mock(Backup.class); + Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + when(volumeInfo.getDiskOfferingId()).thenReturn("disk-offering-uuid-1"); + when(volumeInfo.getSize()).thenReturn(sizeInBackup); + when(volumeInfo.getType()).thenReturn(Volume.Type.DATADISK); + when(backup.getBackedUpVolumes()).thenReturn(List.of(volumeInfo)); + + DiskOfferingVO diskOffering = mock(DiskOfferingVO.class); + when(diskOffering.getState()).thenReturn(DiskOffering.State.Active); + when(diskOfferingDao.findByUuid("disk-offering-uuid-1")).thenReturn(diskOffering); + List vmDiskInfoList = List.of(new VmDiskInfo(diskOffering, sizeInCmd, 1L, null, null)); + + backupManager.checkVmDisksSizeAgainstBackup(vmDiskInfoList, backup); + } + + @Test + public void testGetRootDiskInfoFromBackup() { + Long size = 5L * 1024 * 1024 * 1024; + Backup backup = mock(Backup.class); + Backup.VolumeInfo volumeInfo = mock(Backup.VolumeInfo.class); + when(volumeInfo.getDiskOfferingId()).thenReturn("root-disk-offering-uuid"); + when(volumeInfo.getSize()).thenReturn(size); + when(volumeInfo.getType()).thenReturn(Volume.Type.ROOT); + when(backup.getBackedUpVolumes()).thenReturn(List.of(volumeInfo)); + + DiskOfferingVO diskOffering = mock(DiskOfferingVO.class); + when(diskOffering.getUuid()).thenReturn("root-disk-offering-uuid"); + when(diskOfferingDao.findByUuid("root-disk-offering-uuid")).thenReturn(diskOffering); + + VmDiskInfo VmDiskInfo = backupManager.getRootDiskInfoFromBackup(backup); + + assertEquals("root-disk-offering-uuid", VmDiskInfo.getDiskOffering().getUuid()); + assertEquals(Long.valueOf(5), VmDiskInfo.getSize()); + assertEquals(null, VmDiskInfo.getDeviceId()); + } + + @Test + public void testImportBackupOffering() { + ImportBackupOfferingCmd cmd = Mockito.mock(ImportBackupOfferingCmd.class); + when(cmd.getZoneId()).thenReturn(1L); + when(cmd.getExternalId()).thenReturn("external-id"); + when(cmd.getName()).thenReturn("Test Offering"); + 
when(cmd.getDescription()).thenReturn("Test Description"); + when(cmd.getUserDrivenBackups()).thenReturn(true); + + overrideBackupFrameworkConfigValue(); + + when(backupOfferingDao.findByExternalId("external-id", 1L)).thenReturn(null); + when(backupOfferingDao.findByName("Test Offering", 1L)).thenReturn(null); + + BackupOfferingVO offering = new BackupOfferingVO(1L, "external-id", "testbackupprovider", "Test Offering", "Test Description", true); + when(backupOfferingDao.persist(any(BackupOfferingVO.class))).thenReturn(offering); + when(backupProvider.isValidProviderOffering(cmd.getZoneId(), cmd.getExternalId())).thenReturn(true); + + BackupOffering result = backupManager.importBackupOffering(cmd); + + assertEquals("Test Offering", result.getName()); + assertEquals("Test Description", result.getDescription()); + assertEquals(true, result.isUserDrivenBackupAllowed()); + assertEquals("external-id", result.getExternalId()); + assertEquals("testbackupprovider", result.getProvider()); + } + + @Test + public void testCreateVolumeInfoFromVolumes() { + Long diskOfferingId = 5L; + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + Mockito.when(diskOffering.getUuid()).thenReturn("disk-offering-uuid"); + Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering); + + List volumes = new ArrayList<>(); + VolumeVO volume1 = new VolumeVO(Volume.Type.ROOT, "vol1", 1L, 2L, 3L, + diskOfferingId, null, 1024L, null, null, null); + volume1.setUuid("uuid1"); + volume1.setPath("path1"); + volume1.setDeviceId(0L); + volume1.setVolumeType(Volume.Type.ROOT); + volumes.add(volume1); + + VolumeVO volume2 = new VolumeVO(Volume.Type.ROOT, "vol2", 1L, 2L, 3L, + diskOfferingId, null, 2048L, 1000L, 2000L, null); + volume2.setUuid("uuid2"); + volume2.setPath("path2"); + volume2.setDeviceId(1L); + volume2.setVolumeType(Volume.Type.DATADISK); + volumes.add(volume2); + + String expectedJson = 
"[{\"uuid\":\"uuid1\",\"type\":\"ROOT\",\"size\":1024,\"path\":\"path1\",\"deviceId\":0,\"diskOfferingId\":\"disk-offering-uuid\"},{\"uuid\":\"uuid2\",\"type\":\"DATADISK\",\"size\":2048,\"path\":\"path2\",\"deviceId\":1,\"diskOfferingId\":\"disk-offering-uuid\",\"minIops\":1000,\"maxIops\":2000}]"; + String actualJson = backupManager.createVolumeInfoFromVolumes(new ArrayList<>(volumes)); + + assertEquals(expectedJson, actualJson); + } + + @Test + public void testAssignVMToBackupOffering() { + Long vmId = 1L; + Long offeringId = 2L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vm.getId()).thenReturn(vmId); + BackupOfferingVO offering = mock(BackupOfferingVO.class); + + overrideBackupFrameworkConfigValue(); + + when(vmInstanceDao.findById(vmId)).thenReturn(vm); + when(backupOfferingDao.findById(offeringId)).thenReturn(offering); + when(vm.getState()).thenReturn(VirtualMachine.State.Running); + when(vm.getDataCenterId()).thenReturn(1L); + when(vm.getBackupOfferingId()).thenReturn(null); + when(offering.getProvider()).thenReturn("testbackupprovider"); + when(backupProvider.assignVMToBackupOffering(vm, offering)).thenReturn(true); + when(vmInstanceDao.update(1L, vm)).thenReturn(true); + + try (MockedStatic ignored2 = Mockito.mockStatic(UsageEventUtils.class)) { + boolean result = backupManager.assignVMToBackupOffering(vmId, offeringId); + + assertTrue(result); + verify(vmInstanceDao, times(1)).findById(vmId); + verify(backupOfferingDao, times(1)).findById(offeringId); + verify(backupManager, times(1)).getBackupProvider("testbackupprovider"); + } + } + + @Test + public void testRemoveVMFromBackupOffering() { + Long vmId = 1L; + Long accountId = 2L; + Long zoneId = 3L; + Long offeringId = 4L; + Long backupScheduleId = 5L; + String vmHostName = "vm1"; + String vmUuid = "uuid1"; + String resourceName = "Backup-" + vmHostName + "-" + vmUuid; + + boolean forced = true; + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vm.getId()).thenReturn(vmId); + 
when(vm.getDataCenterId()).thenReturn(1L); + when(vm.getBackupOfferingId()).thenReturn(offeringId); + when(vm.getAccountId()).thenReturn(accountId); + when(vm.getDataCenterId()).thenReturn(zoneId); + when(vm.getHostName()).thenReturn(vmHostName); + when(vm.getUuid()).thenReturn(vmUuid); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(vmInstanceDao.update(vmId, vm)).thenReturn(true); + + BackupOfferingVO offering = mock(BackupOfferingVO.class); + when(backupOfferingDao.findById(vm.getBackupOfferingId())).thenReturn(offering); + when(offering.getProvider()).thenReturn("testbackupprovider"); + when(backupProvider.removeVMFromBackupOffering(vm)).thenReturn(true); + when(backupProvider.willDeleteBackupsOnOfferingRemoval()).thenReturn(true); + when(backupDao.listByVmId(null, vmId)).thenReturn(new ArrayList<>()); + + BackupScheduleVO backupSchedule = new BackupScheduleVO(); + ReflectionTestUtils.setField(backupSchedule, "id", backupScheduleId); + when(backupScheduleDao.listByVM(vmId)).thenReturn(List.of(backupSchedule)); + + overrideBackupFrameworkConfigValue(); + + try (MockedStatic usageEventUtilsMocked = Mockito.mockStatic(UsageEventUtils.class)) { + boolean result = backupManager.removeVMFromBackupOffering(vmId, forced); + + assertTrue(result); + verify(vmInstanceDao, times(1)).findByIdIncludingRemoved(vmId); + verify(backupOfferingDao, times(1)).findById(vm.getBackupOfferingId()); + verify(backupManager, times(1)).getBackupProvider("testbackupprovider"); + verify(backupScheduleDao, times(1)).remove(backupScheduleId); + usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED, accountId, zoneId, vmId, resourceName, + offeringId, null, null, Backup.class.getSimpleName(), vmUuid)); + } + } + + @Test + public void testDeleteBackupScheduleByVmId() { + Long vmId = 1L; + Long scheduleId = 2L; + DeleteBackupScheduleCmd cmd = new DeleteBackupScheduleCmd(); + 
ReflectionTestUtils.setField(cmd, "vmId", vmId); + + overrideBackupFrameworkConfigValue(); + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vmInstanceDao.findById(vmId)).thenReturn(vm); + BackupScheduleVO schedule = mock(BackupScheduleVO.class); + when(schedule.getId()).thenReturn(scheduleId); + when(backupScheduleDao.listByVM(vmId)).thenReturn(List.of(schedule)); + when(backupScheduleDao.remove(scheduleId)).thenReturn(true); + + boolean result = backupManager.deleteBackupSchedule(cmd); + assertTrue(result); + } + + @Test + public void testRestoreBackupToVM() throws NoTransitionException { + Long backupId = 1L; + Long vmId = 2L; + Long hostId = 3L; + Long offeringId = 4L; + Long poolId = 5L; + + BackupVO backup = mock(BackupVO.class); + when(backup.getBackupOfferingId()).thenReturn(offeringId); + when(backup.getStatus()).thenReturn(Backup.Status.BackedUp); + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(vm.getId()).thenReturn(vmId); + when(vm.getState()).thenReturn(VirtualMachine.State.Stopped); + when(vm.getHostId()).thenReturn(hostId); + + BackupOfferingVO offering = mock(BackupOfferingVO.class); + BackupProvider backupProvider = mock(BackupProvider.class); + when(backupProvider.supportsInstanceFromBackup()).thenReturn(true); + + overrideBackupFrameworkConfigValue(); + + when(backupDao.findById(backupId)).thenReturn(backup); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(backupOfferingDao.findByIdIncludingRemoved(offeringId)).thenReturn(offering); + when(offering.getProvider()).thenReturn("testbackupprovider"); + when(backupManager.getBackupProvider("testbackupprovider")).thenReturn(backupProvider); + when(virtualMachineManager.stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId)).thenReturn(true); + when(virtualMachineManager.stateTransitTo(vm, VirtualMachine.Event.RestoringSuccess, hostId)).thenReturn(true); + + VolumeVO rootVolume = 
mock(VolumeVO.class); + when(rootVolume.getPoolId()).thenReturn(poolId); + HostVO host = mock(HostVO.class); + when(hostDao.findById(hostId)).thenReturn(host); + StoragePoolVO pool = mock(StoragePoolVO.class); + when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); + when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); + when(rootVolume.getPoolId()).thenReturn(poolId); + when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); + when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); + when(backupProvider.restoreBackupToVM(vm, backup, null, null)).thenReturn(true); + + try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { + boolean result = backupManager.restoreBackupToVM(backupId, vmId); + + assertTrue(result); + verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null); + verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId); + verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringSuccess, hostId); + } catch (ResourceUnavailableException e) { + fail("Test failed due to exception" + e); + } + } + + @Test + public void testRestoreBackupToVMException() throws NoTransitionException { + Long backupId = 1L; + Long vmId = 2L; + Long hostId = 3L; + Long offeringId = 4L; + Long poolId = 5L; + + BackupVO backup = mock(BackupVO.class); + when(backup.getBackupOfferingId()).thenReturn(offeringId); + when(backup.getStatus()).thenReturn(Backup.Status.BackedUp); + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(vm.getId()).thenReturn(vmId); + when(vm.getState()).thenReturn(VirtualMachine.State.Stopped); + when(vm.getHostId()).thenReturn(hostId); + + BackupOfferingVO offering = mock(BackupOfferingVO.class); + BackupProvider backupProvider = 
mock(BackupProvider.class); + when(backupProvider.supportsInstanceFromBackup()).thenReturn(true); + + overrideBackupFrameworkConfigValue(); + + when(backupDao.findById(backupId)).thenReturn(backup); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(backupOfferingDao.findByIdIncludingRemoved(offeringId)).thenReturn(offering); + when(offering.getProvider()).thenReturn("testbackupprovider"); + when(backupManager.getBackupProvider("testbackupprovider")).thenReturn(backupProvider); + when(virtualMachineManager.stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId)).thenReturn(true); + when(virtualMachineManager.stateTransitTo(vm, VirtualMachine.Event.RestoringFailed, hostId)).thenReturn(true); + + VolumeVO rootVolume = mock(VolumeVO.class); + when(rootVolume.getPoolId()).thenReturn(poolId); + HostVO host = mock(HostVO.class); + when(hostDao.findById(hostId)).thenReturn(host); + StoragePoolVO pool = mock(StoragePoolVO.class); + when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); + when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); + when(rootVolume.getPoolId()).thenReturn(poolId); + when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); + when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); + when(backupProvider.restoreBackupToVM(vm, backup, null, null)).thenReturn(false); + + try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { + CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, + () -> backupManager.restoreBackupToVM(backupId, vmId)); + + verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null); + verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId); + verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringFailed, hostId); + } + } + + @Test + 
public void testGetBackupStorageUsedStats() { + Long zoneId = 1L; + overrideBackupFrameworkConfigValue(); + when(backupManager.getBackupProvider(zoneId)).thenReturn(backupProvider); + when(backupProvider.getBackupStorageStats(zoneId)).thenReturn(new Pair<>(100L, 200L)); + + CapacityVO capacity = backupManager.getBackupStorageUsedStats(zoneId); + + Assert.assertNotNull(capacity); + Assert.assertEquals(Optional.ofNullable(Long.valueOf(100)), Optional.ofNullable(capacity.getUsedCapacity())); + Assert.assertEquals(Optional.ofNullable(Long.valueOf(200)), Optional.ofNullable(capacity.getTotalCapacity())); + Assert.assertEquals(CapacityVO.CAPACITY_TYPE_BACKUP_STORAGE, capacity.getCapacityType()); + } + + @Test + public void testCheckAndRemoveBackupOfferingBeforeExpunge() { + Long vmId = 1L; + Long zoneId = 2L; + Long offeringId = 3L; + String vmUuid = "uuid1"; + String instanceName = "i-2-1-VM"; + String backupExternalId = "backup-external-id"; + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vm.getId()).thenReturn(vmId); + when(vm.getUuid()).thenReturn(vmUuid); + when(vm.getBackupOfferingId()).thenReturn(offeringId); + when(vm.getInstanceName()).thenReturn(instanceName); + when(vm.getBackupExternalId()).thenReturn(backupExternalId); + when(vm.getDataCenterId()).thenReturn(zoneId); + Backup backup = mock(Backup.class); + when(backupDao.listByVmIdAndOffering(zoneId, vmId, offeringId)).thenReturn(List.of(backup)); + + CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, + () -> backupManager.checkAndRemoveBackupOfferingBeforeExpunge(vm)); + Assert.assertEquals("This Instance [uuid: uuid1, name: i-2-1-VM] has a " + + "Backup Offering [id: 3, external id: backup-external-id] with 1 backups. 
Please, remove the backup offering " + + "before proceeding to VM exclusion!", exception.getMessage()); + } + + @Test + public void testGetIpToNetworkMapFromBackup() { + Long networkId1 = 1L; + Long networkId2 = 2L; + String networkUuid1 = "network-uuid-1"; + String networkUuid2 = "network-uuid-2"; + String ip1 = "10.1.1.1"; + String ip2 = "10.1.1.2"; + String ipv61 = "2001:db8::1"; + String ipv62 = "2001:db8::2"; + String mac1 = "00:11:22:33:44:55"; + String mac2 = "00:11:22:33:44:56"; + + // Test case 1: Missing network information + Backup backup1 = mock(Backup.class); + List networkIds1 = new ArrayList<>(); + try { + backupManager.getIpToNetworkMapFromBackup(backup1, true, networkIds1); + fail("Expected CloudRuntimeException for missing network information"); + } catch (CloudRuntimeException e) { + assertEquals("Backup doesn't contain network information. Please specify at least one valid network while creating instance", e.getMessage()); + } + + // Test case 2: IP preservation enabled with IP information + Backup backup2 = mock(Backup.class); + String nicsJson = String.format("[{\"networkid\":\"%s\",\"ipaddress\":\"%s\",\"ip6address\":\"%s\",\"macaddress\":\"%s\"}," + + "{\"networkid\":\"%s\",\"ipaddress\":\"%s\",\"ip6address\":\"%s\",\"macaddress\":\"%s\"}]", + networkUuid1, ip1, ipv61, mac1, networkUuid2, ip2, ipv62, mac2); + when(backup2.getDetail(ApiConstants.NICS)).thenReturn(nicsJson); + + NetworkVO network1 = mock(NetworkVO.class); + NetworkVO network2 = mock(NetworkVO.class); + when(networkDao.findByUuid(networkUuid1)).thenReturn(network1); + when(networkDao.findByUuid(networkUuid2)).thenReturn(network2); + when(network1.getId()).thenReturn(networkId1); + when(network2.getId()).thenReturn(networkId2); + + Network.IpAddresses ipAddresses1 = mock(Network.IpAddresses.class); + Network.IpAddresses ipAddresses2 = mock(Network.IpAddresses.class); + when(networkService.getIpAddressesFromIps(ip1, ipv61, mac1)).thenReturn(ipAddresses1); + 
when(networkService.getIpAddressesFromIps(ip2, ipv62, mac2)).thenReturn(ipAddresses2); + + List networkIds2 = new ArrayList<>(); + Map result2 = backupManager.getIpToNetworkMapFromBackup(backup2, true, networkIds2); + + assertEquals(2, result2.size()); + assertEquals(ipAddresses1, result2.get(networkId1)); + assertEquals(ipAddresses2, result2.get(networkId2)); + assertEquals(2, networkIds2.size()); + assertTrue(networkIds2.contains(networkId1)); + assertTrue(networkIds2.contains(networkId2)); + + // Test case 3: IP preservation enabled but missing IP information + Backup backup3 = mock(Backup.class); + nicsJson = String.format("[{\"networkid\":\"%s\"}]", networkUuid1); + when(backup3.getDetail(ApiConstants.NICS)).thenReturn(nicsJson); + + List networkIds3 = new ArrayList<>(); + Map result3 = backupManager.getIpToNetworkMapFromBackup(backup3, true, networkIds3); + + assertEquals(1, result3.size()); + assertNull(result3.get(networkId1)); + assertEquals(1, networkIds3.size()); + assertTrue(networkIds3.contains(networkId1)); + + // Test case 4: IP preservation disabled + Backup backup4 = mock(Backup.class); + nicsJson = String.format("[{\"networkid\":\"%s\"}]", networkUuid1); + when(backup4.getDetail(ApiConstants.NICS)).thenReturn(nicsJson); + + List networkIds4 = new ArrayList<>(); + Map result4 = backupManager.getIpToNetworkMapFromBackup(backup4, false, networkIds4); + + assertEquals(1, result4.size()); + assertNull(result4.get(networkId1)); + assertEquals(1, networkIds4.size()); + assertTrue(networkIds4.contains(networkId1)); + } + + @Test + public void testDeleteBackupVmNotFound() { + Long backupId = 1L; + Long vmId = 2L; + Long zoneId = 3L; + Long accountId = 4L; + Long backupOfferingId = 5L; + String vmHostName = "vm1"; + String vmUuid = "uuid1"; + String resourceName = "Backup-" + vmHostName + "-" + vmUuid; + + BackupVO backup = mock(BackupVO.class); + when(backup.getId()).thenReturn(backupId); + when(backup.getVmId()).thenReturn(vmId); + 
when(backup.getZoneId()).thenReturn(zoneId); + when(backup.getAccountId()).thenReturn(accountId); + when(backup.getBackupOfferingId()).thenReturn(backupOfferingId); + when(backup.getSize()).thenReturn(100L); + + overrideBackupFrameworkConfigValue(); + + VMInstanceVO vm = mock(VMInstanceVO.class); + when(vm.getId()).thenReturn(vmId); + when(vm.getAccountId()).thenReturn(accountId); + when(vm.getBackupOfferingId()).thenReturn(10L); + when(vm.getDataCenterId()).thenReturn(zoneId); + when(vm.getHostName()).thenReturn(vmHostName); + when(vm.getUuid()).thenReturn(vmUuid); + when(backupDao.findByIdIncludingRemoved(backupId)).thenReturn(backup); + when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + when(backupDao.listByVmIdAndOffering(zoneId, vmId, backupOfferingId)).thenReturn(new ArrayList<>()); + + BackupOfferingVO offering = mock(BackupOfferingVO.class); + when(backupOfferingDao.findByIdIncludingRemoved(backupOfferingId)).thenReturn(offering); + + when(backupProvider.deleteBackup(backup, false)).thenReturn(true); + + when(backupDao.remove(backupId)).thenReturn(true); + + try (MockedStatic usageEventUtilsMocked = Mockito.mockStatic(UsageEventUtils.class)) { + boolean result = backupManager.deleteBackup(backupId, false); + + assertTrue(result); + verify(backupProvider).deleteBackup(backup, false); + verify(resourceLimitMgr).decrementResourceCount(accountId, Resource.ResourceType.backup); + verify(resourceLimitMgr).decrementResourceCount(accountId, Resource.ResourceType.backup_storage, backup.getSize()); + verify(backupDao).remove(backupId); + usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED, accountId, zoneId, vmId, resourceName, + backupOfferingId, null, null, Backup.class.getSimpleName(), vmUuid)); + } + } + + @Test + public void testNewBackupResponse() { + Long vmId = 1L; + Long accountId = 2L; + Long domainId = 3L; + Long zoneId = 4L; + Long vmOfferingId = 5L; + Long 
backupOfferingId = 6L; + Long backupId = 7L; + Long templateId = 8L; + String templateUuid = "template-uuid1"; + String serviceOfferingUuid = "service-offering-uuid1"; + + BackupVO backup = new BackupVO(); + ReflectionTestUtils.setField(backup, "id", backupId); + ReflectionTestUtils.setField(backup, "uuid", "backup-uuid"); + backup.setVmId(vmId); + backup.setAccountId(accountId); + backup.setDomainId(domainId); + backup.setZoneId(zoneId); + backup.setBackupOfferingId(backupOfferingId); + backup.setType("Full"); + backup.setBackupScheduleId(null); + + VMInstanceVO vm = new VMInstanceVO(vmId, 0L, "test-vm", "test-vm", VirtualMachine.Type.User, + 0L, Hypervisor.HypervisorType.Simulator, 0L, domainId, accountId, 0L, false); + vm.setDataCenterId(zoneId); + vm.setBackupOfferingId(vmOfferingId); + vm.setTemplateId(templateId); + + AccountVO account = new AccountVO(); + account.setUuid("account-uuid"); + account.setAccountName("test-account"); + + DomainVO domain = new DomainVO(); + domain.setUuid("domain-uuid"); + domain.setName("test-domain"); + + DataCenterVO zone = new DataCenterVO(1L, "test-zone", null, null, null, null, null, null, null, null, DataCenter.NetworkType.Advanced, null, null); + zone.setUuid("zone-uuid"); + + BackupOfferingVO offering = Mockito.mock(BackupOfferingVO.class); + Mockito.when(offering.getUuid()).thenReturn("offering-uuid"); + Mockito.when(offering.getName()).thenReturn("test-offering"); + + Mockito.when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); + Mockito.when(accountDao.findByIdIncludingRemoved(accountId)).thenReturn(account); + Mockito.when(domainDao.findByIdIncludingRemoved(domainId)).thenReturn(domain); + Mockito.when(dataCenterDao.findByIdIncludingRemoved(zoneId)).thenReturn(zone); + Mockito.when(backupOfferingDao.findByIdIncludingRemoved(backupOfferingId)).thenReturn(offering); + + VMTemplateVO template = mock(VMTemplateVO.class); + when(template.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); + 
when(template.getUuid()).thenReturn(templateUuid); + when(template.getName()).thenReturn("template1"); + when(vmTemplateDao.findByUuid(templateUuid)).thenReturn(template); + Map details = new HashMap<>(); + details.put(ApiConstants.TEMPLATE_ID, templateUuid); + + ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class); + when(serviceOffering.getUuid()).thenReturn(serviceOfferingUuid); + when(serviceOffering.getName()).thenReturn("service-offering1"); + when(serviceOfferingDao.findByUuid(serviceOfferingUuid)).thenReturn(serviceOffering); + details.put(ApiConstants.SERVICE_OFFERING_ID, serviceOfferingUuid); + + NetworkVO network = mock(NetworkVO.class); + when(network.getName()).thenReturn("network1"); + when(networkDao.findByUuid("network-uuid1")).thenReturn(network); + details.put(ApiConstants.NICS, "[{\"networkid\":\"network-uuid1\"}]"); + + Mockito.when(backupDetailsDao.listDetailsKeyPairs(backup.getId(), true)).thenReturn(details); + + BackupResponse response = backupManager.createBackupResponse(backup, true); + + Assert.assertEquals("backup-uuid", response.getId()); + Assert.assertEquals("test-vm", response.getVmName()); + Assert.assertEquals("account-uuid", response.getAccountId()); + Assert.assertEquals("test-account", response.getAccount()); + Assert.assertEquals("domain-uuid", response.getDomainId()); + Assert.assertEquals("test-domain", response.getDomain()); + Assert.assertEquals("zone-uuid", response.getZoneId()); + Assert.assertEquals("test-zone", response.getZone()); + Assert.assertEquals("offering-uuid", response.getBackupOfferingId()); + Assert.assertEquals("test-offering", response.getBackupOffering()); + Assert.assertEquals("MANUAL", response.getIntervalType()); + Assert.assertEquals("{serviceofferingid=service-offering-uuid1, isiso=false, hypervisor=Simulator, " + + "nics=[{\"networkid\":\"network-uuid1\",\"networkname\":\"network1\"}], serviceofferingname=service-offering1, " + + "templatename=template1, templateid=template-uuid1}", 
response.getVmDetails().toString()); + Assert.assertEquals(true, response.getVmOfferingRemoved()); + } + + @Test + public void validateAndGetDefaultBackupRetentionIfRequiredTestReturnZeroAsDefaultValue() { + int retention = backupManager.validateAndGetDefaultBackupRetentionIfRequired(null, backupOfferingVOMock, null); + assertEquals(0, retention); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateAndGetDefaultBackupRetentionIfRequiredTestThrowExceptionWhenBackupOfferingProviderIsVeeam() { + Mockito.when(backupOfferingVOMock.getProvider()).thenReturn("veeam"); + backupManager.validateAndGetDefaultBackupRetentionIfRequired(1, backupOfferingVOMock, vmInstanceVOMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateAndGetDefaultBackupRetentionIfRequiredTestThrowExceptionWhenMaxBackupsIsLessThanZero() { + backupManager.validateAndGetDefaultBackupRetentionIfRequired(-1, backupOfferingVOMock, vmInstanceVOMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateAndGetDefaultBackupRetentionIfRequiredTestThrowExceptionWhenMaxBackupsExceedsAccountLimit() { + int maxBackups = 6; + long accountId = 1L; + long accountLimit = 5L; + long domainId = 10L; + long domainLimit = -1L; + + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); + when(accountManager.getAccount(accountId)).thenReturn(accountVOMock); + when(resourceLimitMgr.findCorrectResourceLimitForAccount(accountVOMock, Resource.ResourceType.backup, null)).thenReturn(accountLimit); + when(accountVOMock.getDomainId()).thenReturn(domainId); + when(domainManager.getDomain(domainId)).thenReturn(domainMock); + when(resourceLimitMgr.findCorrectResourceLimitForDomain(domainMock, Resource.ResourceType.backup, null)).thenReturn(domainLimit); + when(accountVOMock.getId()).thenReturn(accountId); + when(accountManager.isRootAdmin(accountId)).thenReturn(false); + + 
backupManager.validateAndGetDefaultBackupRetentionIfRequired(maxBackups, backupOfferingVOMock, vmInstanceVOMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateAndGetDefaultBackupRetentionIfRequiredTestThrowExceptionWhenMaxBackupsExceedsDomainLimit() { + int maxBackups = 6; + long accountId = 1L; + long accountLimit = -1L; + long domainId = 10L; + long domainLimit = 5L; + + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); + when(accountManager.getAccount(accountId)).thenReturn(accountVOMock); + when(resourceLimitMgr.findCorrectResourceLimitForAccount(accountVOMock, Resource.ResourceType.backup, null)).thenReturn(accountLimit); + when(accountVOMock.getDomainId()).thenReturn(domainId); + when(domainManager.getDomain(domainId)).thenReturn(domainMock); + when(resourceLimitMgr.findCorrectResourceLimitForDomain(domainMock, Resource.ResourceType.backup, null)).thenReturn(domainLimit); + when(accountVOMock.getId()).thenReturn(accountId); + when(accountManager.isRootAdmin(accountId)).thenReturn(false); + + backupManager.validateAndGetDefaultBackupRetentionIfRequired(maxBackups, backupOfferingVOMock, vmInstanceVOMock); + } + + @Test + public void validateAndGetDefaultBackupRetentionIfRequiredTestIgnoreLimitCheckWhenAccountIsRootAdmin() { + int maxBackups = 6; + long accountId = 1L; + long accountLimit = 5L; + long domainId = 10L; + long domainLimit = 5L; + + when(vmInstanceVOMock.getAccountId()).thenReturn(accountId); + when(accountManager.getAccount(accountId)).thenReturn(accountVOMock); + when(resourceLimitMgr.findCorrectResourceLimitForAccount(accountVOMock, Resource.ResourceType.backup, null)).thenReturn(accountLimit); + when(accountVOMock.getDomainId()).thenReturn(domainId); + when(domainManager.getDomain(domainId)).thenReturn(domainMock); + when(resourceLimitMgr.findCorrectResourceLimitForDomain(domainMock, Resource.ResourceType.backup, null)).thenReturn(domainLimit); + 
when(accountVOMock.getId()).thenReturn(accountId); + when(accountManager.isRootAdmin(accountId)).thenReturn(true); + + int retention = backupManager.validateAndGetDefaultBackupRetentionIfRequired(maxBackups, backupOfferingVOMock, vmInstanceVOMock); + assertEquals(maxBackups, retention); + } + + @Test + public void getBackupScheduleTestReturnNullWhenBackupIsManual() { + String jobParams = "{}"; + when(asyncJobVOMock.getCmdInfo()).thenReturn(jobParams); + when(asyncJobVOMock.getId()).thenReturn(1L); + + Long backupScheduleId = backupManager.getBackupScheduleId(asyncJobVOMock); + assertNull(backupScheduleId); + } + + @Test + public void getBackupScheduleTestReturnBackupScheduleIdWhenBackupIsScheduled() { + Map params = Map.of( + ApiConstants.SCHEDULE_ID, "100" + ); + String jobParams = gson.toJson(params); + when(asyncJobVOMock.getCmdInfo()).thenReturn(jobParams); + when(asyncJobVOMock.getId()).thenReturn(1L); + + Long backupScheduleId = backupManager.getBackupScheduleId(asyncJobVOMock); + assertEquals(Long.valueOf("100"), backupScheduleId); + } + + @Test + public void getBackupScheduleTestReturnNullWhenSpecifiedBackupScheduleIdIsNotALongValue() { + Map params = Map.of( + ApiConstants.SCHEDULE_ID, "InvalidValue" + ); + String jobParams = gson.toJson(params); + when(asyncJobVOMock.getCmdInfo()).thenReturn(jobParams); + when(asyncJobVOMock.getId()).thenReturn(1L); + + Long backupScheduleId = backupManager.getBackupScheduleId(asyncJobVOMock); + assertNull(backupScheduleId); + } + + @Test + public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenBackupScheduleIsNotFound() { + backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L); + Mockito.verify(backupManager, Mockito.never()).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong()); + } + + @Test + public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenRetentionIsEqualToZero() { + Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock); + 
Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(0); + backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L); + Mockito.verify(backupManager, Mockito.never()).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong()); + } + + @Test + public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenAmountOfBackupsToBeDeletedIsLessThanOne() { + List backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class)); + Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock); + Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(2); + Mockito.when(backupDao.listBySchedule(1L)).thenReturn(backups); + backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L); + Mockito.verify(backupManager, Mockito.never()).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong()); + } + + @Test + public void deleteOldestBackupFromScheduleIfRequiredTestDeleteBackupsWhenRequired() { + List backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class)); + Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock); + Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(1); + Mockito.when(backupDao.listBySchedule(1L)).thenReturn(backups); + Mockito.doNothing().when(backupManager).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong()); + backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L); + Mockito.verify(backupManager).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong()); + } + + @Test + public void deleteExcessBackupsTestEnsureBackupsAreDeletedWhenMethodIsCalled() { + try (MockedStatic actionEventUtils = Mockito.mockStatic(ActionEventUtils.class)) { + List backups = List.of(Mockito.mock(BackupVO.class), + Mockito.mock(BackupVO.class), + Mockito.mock(BackupVO.class)); + + Mockito.when(backups.get(0).getId()).thenReturn(1L); + Mockito.when(backups.get(1).getId()).thenReturn(2L); + 
Mockito.when(backups.get(0).getAccountId()).thenReturn(1L); + Mockito.when(backups.get(1).getAccountId()).thenReturn(2L); + Mockito.doReturn(true).when(backupManager).deleteBackup(Mockito.anyLong(), Mockito.eq(false)); + + actionEventUtils.when(() -> ActionEventUtils.onStartedActionEvent( + Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), + Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(1L); + actionEventUtils.when(() -> ActionEventUtils.onCompletedActionEvent( + Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), + Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), + Mockito.anyString(), Mockito.anyInt())).thenReturn(2L); + + backupManager.deleteExcessBackups(backups, 2, 1L); + Mockito.verify(backupManager, times(2)).deleteBackup(Mockito.anyLong(), Mockito.eq(false)); + } + } + + @Test + public void sendExceededBackupLimitAlertTestSendAlertForBackupResourceType() { + String accountUuid = UUID.randomUUID().toString(); + String expectedMessage = "Failed to create backup: backup resource limit exceeded for account with ID: " + accountUuid + "."; + String expectedAlertDetails = expectedMessage + " Please, use the 'updateResourceLimit' API to increase the backup limit."; + + backupManager.sendExceededBackupLimitAlert(accountUuid, Resource.ResourceType.backup); + verify(alertManagerMock).sendAlert( + AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, + 0L, + 0L, + expectedMessage, + expectedAlertDetails + ); + } + + @Test + public void sendExceededBackupLimitAlertTestSendAlertForBackupStorageResourceType() { + String accountUuid = UUID.randomUUID().toString(); + String expectedMessage = "Failed to create backup: backup storage space resource limit exceeded for account with ID: " + accountUuid + "."; + String expectedAlertDetails = expectedMessage + " Please, use the 'updateResourceLimit' API to increase the backup limit."; + + 
backupManager.sendExceededBackupLimitAlert(accountUuid, Resource.ResourceType.backup_storage); + verify(alertManagerMock).sendAlert( + AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, + 0L, + 0L, + expectedMessage, + expectedAlertDetails + ); + } } diff --git a/server/src/test/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImplTest.java index 748fe19893a..ec7ef20d441 100644 --- a/server/src/test/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImplTest.java @@ -17,21 +17,32 @@ package org.apache.cloudstack.consoleproxy; import com.cloud.agent.AgentManager; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; import com.cloud.exception.PermissionDeniedException; import com.cloud.server.ManagementServer; import com.cloud.user.Account; import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; import com.cloud.utils.db.EntityManager; +import com.cloud.vm.ConsoleSessionVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleSessionDao; import com.cloud.vm.dao.VMInstanceDetailsDao; import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.user.consoleproxy.ListConsoleSessionsCmd; +import org.apache.cloudstack.api.response.ConsoleSessionResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.security.keys.KeysManager; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; @@ 
-66,6 +77,23 @@ public class ConsoleAccessManagerImplTest { @Mock Account account; + @Mock + private CallContext callContextMock; + @Mock + private DomainDao domainDaoMock; + @Mock + private DomainVO domainMock; + @Mock + private ConsoleSessionVO consoleSessionMock; + @Mock + private ConsoleSessionDao consoleSessionDaoMock; + @Mock + private ConsoleSessionResponse consoleSessionResponseMock; + @Mock + private ListConsoleSessionsCmd listConsoleSessionsCmdMock; + @Mock + private ResponseGenerator responseGeneratorMock; + @Test public void testCheckSessionPermissionAdminAccount() { Mockito.when(account.getId()).thenReturn(1L); @@ -106,4 +134,181 @@ public class ConsoleAccessManagerImplTest { Assert.assertFalse(consoleAccessManager.checkSessionPermission(virtualMachine, account)); } } + + @Test + public void listConsoleSessionsInternalTestNormalUsersShouldOnlyBeAllowedToListTheirOwnConsoleSessions() { + long callerDomainId = 5L; + long callerAccountId = 5L; + long callerUserId = 5L; + boolean isRecursive = false; + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(listConsoleSessionsCmdMock.getDomainId()).thenReturn(null); + Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); + Mockito.when(account.getDomainId()).thenReturn(callerDomainId); + Mockito.when(listConsoleSessionsCmdMock.isRecursive()).thenReturn(isRecursive); + Mockito.when(accountManager.isNormalUser(callerAccountId)).thenReturn(true); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(callerAccountId); + Mockito.when(callContextMock.getCallingUserId()).thenReturn(callerUserId); + + consoleAccessManager.listConsoleSessionsInternal(listConsoleSessionsCmdMock); + } + + Mockito.verify(consoleSessionDaoMock).listConsoleSessions( + Mockito.any(), Mockito.eq(List.of(callerDomainId)), Mockito.eq(callerAccountId), + Mockito.eq(callerUserId), 
Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any() + ); + } + + @Test + public void listConsoleSessionsInternalTestAdminsShouldBeAllowedToRetrieveOtherAccountsConsoleSessions() { + long callerDomainId = 5L; + long callerAccountId = 5L; + long callerUserId = 5L; + boolean isRecursive = false; + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(listConsoleSessionsCmdMock.getDomainId()).thenReturn(callerDomainId); + Mockito.doReturn(callerDomainId).when(consoleAccessManager).getBaseDomainIdToListConsoleSessions(callerDomainId); + Mockito.when(listConsoleSessionsCmdMock.getAccountId()).thenReturn(callerAccountId); + Mockito.when(listConsoleSessionsCmdMock.getUserId()).thenReturn(callerUserId); + Mockito.when(listConsoleSessionsCmdMock.isRecursive()).thenReturn(isRecursive); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(callerAccountId); + Mockito.when(accountManager.isNormalUser(callerAccountId)).thenReturn(false); + + consoleAccessManager.listConsoleSessionsInternal(listConsoleSessionsCmdMock); + } + + Mockito.verify(consoleSessionDaoMock).listConsoleSessions( + Mockito.any(), Mockito.eq(List.of(callerDomainId)), Mockito.eq(callerAccountId), + Mockito.eq(callerUserId), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any() + ); + } + + + @Test + public void listConsoleSessionsInternalTestShouldNotFetchConsoleSessionsRecursivelyWhenIsRecursiveIsFalse() { + long callerDomainId = 5L; + long callerAccountId = 5L; + long callerUserId = 5L; + boolean isRecursive = false; + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + 
callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(listConsoleSessionsCmdMock.getDomainId()).thenReturn(callerDomainId); + Mockito.doReturn(callerDomainId).when(consoleAccessManager).getBaseDomainIdToListConsoleSessions(callerDomainId); + Mockito.when(listConsoleSessionsCmdMock.getAccountId()).thenReturn(callerAccountId); + Mockito.when(listConsoleSessionsCmdMock.getUserId()).thenReturn(callerUserId); + Mockito.when(listConsoleSessionsCmdMock.isRecursive()).thenReturn(isRecursive); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(callerAccountId); + Mockito.when(accountManager.isNormalUser(callerAccountId)).thenReturn(false); + + consoleAccessManager.listConsoleSessionsInternal(listConsoleSessionsCmdMock); + } + + Mockito.verify(consoleSessionDaoMock).listConsoleSessions( + Mockito.any(), Mockito.eq(List.of(callerDomainId)), Mockito.eq(callerAccountId), + Mockito.eq(callerUserId), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any() + ); + } + + @Test + public void listConsoleSessionsInternalTestShouldFetchConsoleSessionsRecursivelyWhenIsRecursiveIsTrue() { + long callerDomainId = 5L; + long callerAccountId = 5L; + long callerUserId = 5L; + boolean isRecursive = true; + List domainIdsCallerHasAccessTo = List.of(callerDomainId, 6L, 7L); + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(listConsoleSessionsCmdMock.getDomainId()).thenReturn(callerDomainId); + Mockito.doReturn(callerDomainId).when(consoleAccessManager).getBaseDomainIdToListConsoleSessions(callerDomainId); + Mockito.when(listConsoleSessionsCmdMock.getAccountId()).thenReturn(callerAccountId); + Mockito.when(listConsoleSessionsCmdMock.getUserId()).thenReturn(callerUserId); + 
Mockito.when(listConsoleSessionsCmdMock.isRecursive()).thenReturn(isRecursive); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(callerAccountId); + Mockito.when(accountManager.isNormalUser(callerAccountId)).thenReturn(false); + Mockito.when(domainDaoMock.getDomainAndChildrenIds(callerDomainId)).thenReturn(domainIdsCallerHasAccessTo); + + consoleAccessManager.listConsoleSessionsInternal(listConsoleSessionsCmdMock); + } + + Mockito.verify(consoleSessionDaoMock).listConsoleSessions( + Mockito.any(), Mockito.eq(domainIdsCallerHasAccessTo), Mockito.eq(callerAccountId), + Mockito.eq(callerUserId), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.any(), Mockito.any() + ); + } + + @Test + public void listConsoleSessionsTestShouldCreateResponsesWithFullViewForRootAdmins() { + Mockito.doReturn(new Pair<>(List.of(consoleSessionMock), 1)) + .when(consoleAccessManager) + .listConsoleSessionsInternal(listConsoleSessionsCmdMock); + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(2L); + Mockito.when(accountManager.isRootAdmin(2L)).thenReturn(true); + Mockito.when(responseGeneratorMock.createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Full)).thenReturn(consoleSessionResponseMock); + + consoleAccessManager.listConsoleSessions(listConsoleSessionsCmdMock); + } + Mockito.verify(responseGeneratorMock).createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Full); + } + + @Test + public void listConsoleSessionsTestShouldCreateResponsesWithRestrictedViewForNonRootAdmins() { + Mockito.doReturn(new Pair<>(List.of(consoleSessionMock), 1)) + .when(consoleAccessManager) + .listConsoleSessionsInternal(listConsoleSessionsCmdMock); + + try (MockedStatic 
callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(callContextMock.getCallingAccountId()).thenReturn(2L); + Mockito.when(accountManager.isRootAdmin(2L)).thenReturn(false); + Mockito.when(responseGeneratorMock.createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Restricted)).thenReturn(consoleSessionResponseMock); + + consoleAccessManager.listConsoleSessions(listConsoleSessionsCmdMock); + } + + Mockito.verify(responseGeneratorMock).createConsoleSessionResponse(consoleSessionMock, ResponseObject.ResponseView.Restricted); + } + + @Test + public void getBaseDomainIdToListConsoleSessionsTestIfNoDomainIdIsProvidedReturnCallersDomainId() { + long callerDomainId = 5L; + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); + Mockito.when(account.getDomainId()).thenReturn(callerDomainId); + Assert.assertEquals(callerDomainId, consoleAccessManager.getBaseDomainIdToListConsoleSessions(null)); + } + } + + @Test + public void getBaseDomainIdToListConsoleSessionsTestPerformAccessValidationWhenDomainIsProvided() { + long domainId = 5L; + + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); + Mockito.when(domainDaoMock.findById(domainId)).thenReturn(domainMock); + Assert.assertEquals(domainId, consoleAccessManager.getBaseDomainIdToListConsoleSessions(domainId)); + Mockito.verify(accountManager).checkAccess(account, domainMock); + } + } + + @Test + public void listConsoleSessionByIdTestShouldCallDbLayer() { + consoleAccessManager.listConsoleSessionById(1L); + 
Mockito.verify(consoleSessionDaoMock).findByIdIncludingRemoved(1L); + } } diff --git a/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java index d214f04ddb4..c1552b9c445 100644 --- a/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java @@ -30,6 +30,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.cloudstack.api.command.admin.resource.PurgeExpungedResourcesCmd; +import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.jobs.dao.VmWorkJobDao; @@ -138,12 +140,14 @@ public class ResourceCleanupServiceImplTest { ConsoleSessionDao consoleSessionDao; @Mock ServiceOfferingDetailsDao serviceOfferingDetailsDao; + @Mock + BackupDao backupDao; @Spy @InjectMocks ResourceCleanupServiceImpl resourceCleanupService = Mockito.spy(new ResourceCleanupServiceImpl()); - List ids = List.of(1L, 2L); + List ids = List.of(1L, 2L, 3L); Long batchSize = 100L; private void overrideConfigValue(final ConfigKey configKey, final Object value) { @@ -372,44 +376,53 @@ public class ResourceCleanupServiceImplTest { @Test public void testGetFilteredVmIdsForSnapshots() { - Long skippedVmIds = ids.get(0); - Long notSkippedVmIds = ids.get(1); + List skippedVmIds = ids.subList(0, 2); + Long notSkippedVmIds = ids.get(2); VMSnapshotVO vmSnapshotVO = Mockito.mock(VMSnapshotVO.class); Mockito.when(vmSnapshotVO.getVmId()).thenReturn(1L); Mockito.when(vmSnapshotDao.searchByVms(Mockito.anyList())).thenReturn(List.of(vmSnapshotVO)); HashSet set = new HashSet<>(); set.add(1L); 
Mockito.doReturn(set).when(resourceCleanupService).getVmIdsWithActiveVolumeSnapshots(ids); - Pair, List> result = resourceCleanupService.getFilteredVmIdsForSnapshots(new ArrayList<>(ids)); + BackupVO backupVO = Mockito.mock(BackupVO.class); + Mockito.when(backupVO.getVmId()).thenReturn(2L); + Mockito.when(backupDao.searchByVmIds(Mockito.anyList())).thenReturn(List.of(backupVO)); + Pair, List> result = resourceCleanupService.getFilteredVmIdsForSnapshotsAndBackups(new ArrayList<>(ids)); Assert.assertEquals(1, result.first().size()); - Assert.assertEquals(1, result.second().size()); + Assert.assertEquals(2, result.second().size()); Assert.assertEquals(notSkippedVmIds, result.first().get(0)); - Assert.assertEquals(skippedVmIds, result.second().get(0)); + Assert.assertEquals(skippedVmIds, result.second()); } @Test - public void testGetVmIdsWithNoActiveSnapshots() { + public void testGetVmIdsWithNoActiveSnapshotsAndBackups() { VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm1.getId()).thenReturn(ids.get(0)); VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm2.getId()).thenReturn(ids.get(1)); + VMInstanceVO vm3 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm3.getId()).thenReturn(ids.get(2)); Mockito.when(vmInstanceDao.searchRemovedByRemoveDate(Mockito.any(), Mockito.any(), - Mockito.anyLong(), Mockito.anyList())).thenReturn(List.of(vm1, vm2)); - Long skippedVmIds = ids.get(0); - Long notSkippedVmIds = ids.get(1); + Mockito.anyLong(), Mockito.anyList())).thenReturn(List.of(vm1, vm2, vm3)); + List skippedVmIds = ids.subList(0, 2); + Long notSkippedVmIds = ids.get(2); VMSnapshotVO vmSnapshotVO = Mockito.mock(VMSnapshotVO.class); Mockito.when(vmSnapshotVO.getVmId()).thenReturn(1L); Mockito.when(vmSnapshotDao.searchByVms(Mockito.anyList())).thenReturn(List.of(vmSnapshotVO)); HashSet set = new HashSet<>(); set.add(1L); Mockito.doReturn(set).when(resourceCleanupService).getVmIdsWithActiveVolumeSnapshots(Mockito.anyList()); + BackupVO 
backupVO = Mockito.mock(BackupVO.class); + Mockito.when(backupVO.getVmId()).thenReturn(2L); + Mockito.when(backupDao.searchByVmIds(Mockito.anyList())).thenReturn(List.of(backupVO)); + Pair, List> result = - resourceCleanupService.getVmIdsWithNoActiveSnapshots(new Date(), new Date(), batchSize, + resourceCleanupService.getVmIdsWithNoActiveSnapshotsAndBackups(new Date(), new Date(), batchSize, new ArrayList<>()); Assert.assertEquals(1, result.first().size()); - Assert.assertEquals(1, result.second().size()); + Assert.assertEquals(2, result.second().size()); Assert.assertEquals(notSkippedVmIds, result.first().get(0)); - Assert.assertEquals(skippedVmIds, result.second().get(0)); + Assert.assertEquals(skippedVmIds, result.second()); } @Test @@ -422,7 +435,7 @@ public class ResourceCleanupServiceImplTest { @Test public void testPurgeVMEntities() { Mockito.doReturn(new Pair<>(ids, new ArrayList<>())).when(resourceCleanupService) - .getVmIdsWithNoActiveSnapshots(Mockito.any(), Mockito.any(), Mockito.anyLong(), Mockito.anyList()); + .getVmIdsWithNoActiveSnapshotsAndBackups(Mockito.any(), Mockito.any(), Mockito.anyLong(), Mockito.anyList()); Mockito.when(vmInstanceDao.expungeList(ids)).thenReturn(ids.size()); Assert.assertEquals(ids.size(), resourceCleanupService.purgeVMEntities(batchSize, new Date(), new Date())); } @@ -430,14 +443,14 @@ public class ResourceCleanupServiceImplTest { @Test public void testExpungeVMEntityFiltered() { Mockito.doReturn(new Pair<>(new ArrayList<>(), List.of(ids.get(0)))).when(resourceCleanupService) - .getFilteredVmIdsForSnapshots(Mockito.anyList()); + .getFilteredVmIdsForSnapshotsAndBackups(Mockito.anyList()); Assert.assertFalse(resourceCleanupService.purgeVMEntity(ids.get(0))); } @Test public void testPurgeVMEntityFiltered() { Mockito.doReturn(new Pair<>(List.of(ids.get(0)), new ArrayList<>())).when(resourceCleanupService) - .getFilteredVmIdsForSnapshots(Mockito.anyList()); + .getFilteredVmIdsForSnapshotsAndBackups(Mockito.anyList()); 
Mockito.doNothing().when(resourceCleanupService) .purgeLinkedVMEntities(Mockito.anyList(), Mockito.anyLong()); Mockito.when(vmInstanceDao.expunge(ids.get(0))).thenReturn(true); @@ -447,7 +460,7 @@ public class ResourceCleanupServiceImplTest { @Test public void testPurgeVMEntity() { Mockito.doReturn(new Pair<>(List.of(ids.get(0)), new ArrayList<>())).when(resourceCleanupService) - .getFilteredVmIdsForSnapshots(Mockito.anyList()); + .getFilteredVmIdsForSnapshotsAndBackups(Mockito.anyList()); Mockito.doNothing().when(resourceCleanupService) .purgeLinkedVMEntities(Mockito.anyList(), Mockito.anyLong()); Mockito.when(vmInstanceDao.expunge(ids.get(0))).thenReturn(true); diff --git a/server/src/test/java/org/apache/cloudstack/snapshot/SnapshotHelperTest.java b/server/src/test/java/org/apache/cloudstack/snapshot/SnapshotHelperTest.java index 1b0a8486e35..ac254ed1c5e 100644 --- a/server/src/test/java/org/apache/cloudstack/snapshot/SnapshotHelperTest.java +++ b/server/src/test/java/org/apache/cloudstack/snapshot/SnapshotHelperTest.java @@ -19,13 +19,15 @@ package org.apache.cloudstack.snapshot; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - +import com.cloud.api.query.dao.SnapshotJoinDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -42,12 +44,11 @@ import org.mockito.Mock; import org.mockito.Mockito; 
import org.mockito.junit.MockitoJUnitRunner; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.utils.exception.CloudRuntimeException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; @RunWith(MockitoJUnitRunner.class) public class SnapshotHelperTest { @@ -83,6 +84,8 @@ public class SnapshotHelperTest { @Mock VolumeVO volumeVoMock; + @Mock + SnapshotJoinDao snapshotJoinDao; List dataStoreRoles = Arrays.asList(DataStoreRole.values()); @@ -94,10 +97,16 @@ public class SnapshotHelperTest { snapshotHelperSpy.storageStrategyFactory = storageStrategyFactoryMock; snapshotHelperSpy.snapshotDao = snapshotDaoMock; snapshotHelperSpy.dataStorageManager = dataStoreManager; + snapshotHelperSpy.snapshotJoinDao = snapshotJoinDao; } @Test public void validateExpungeTemporarySnapshotNotAKvmSnapshotOnPrimaryStorageDoNothing() { + DataStore store = Mockito.mock(DataStore.class); + DataStoreDriver storeDriver = Mockito.mock(DataStoreDriver.class); + Mockito.when(snapshotInfoMock.getDataStore()).thenReturn(store); + Mockito.when(snapshotInfoMock.getDataStore().getId()).thenReturn(1L); + Mockito.when(snapshotInfoMock.getSnapshotId()).thenReturn(1L); snapshotHelperSpy.expungeTemporarySnapshot(false, snapshotInfoMock); Mockito.verifyNoInteractions(snapshotServiceMock, snapshotDataStoreDaoMock); } @@ -105,27 +114,26 @@ public class SnapshotHelperTest { @Test public void validateExpungeTemporarySnapshotKvmSnapshotOnPrimaryStorageExpungesSnapshot() { DataStore store = Mockito.mock(DataStore.class); + DataStoreDriver storeDriver = Mockito.mock(DataStoreDriver.class); + Mockito.when(store.getRole()).thenReturn(DataStoreRole.Image); Mockito.when(store.getId()).thenReturn(1L); 
Mockito.when(snapshotInfoMock.getDataStore()).thenReturn(store); - Mockito.doReturn(true).when(snapshotServiceMock).deleteSnapshot(Mockito.any()); - Mockito.doReturn(true).when(snapshotDataStoreDaoMock).expungeReferenceBySnapshotIdAndDataStoreRole(Mockito.anyLong(), Mockito.anyLong(), Mockito.any()); - snapshotHelperSpy.expungeTemporarySnapshot(true, snapshotInfoMock); - - Mockito.verify(snapshotServiceMock).deleteSnapshot(Mockito.any()); - Mockito.verify(snapshotDataStoreDaoMock).expungeReferenceBySnapshotIdAndDataStoreRole(Mockito.anyLong(), Mockito.anyLong(), Mockito.any()); } @Test public void validateIsKvmSnapshotOnlyInPrimaryStorageBackupToSecondaryTrue() { List hypervisorTypes = Arrays.asList(Hypervisor.HypervisorType.values()); - snapshotHelperSpy.backupSnapshotAfterTakingSnapshot = true; - hypervisorTypes.forEach(type -> { Mockito.doReturn(type).when(snapshotInfoMock).getHypervisorType(); dataStoreRoles.forEach(role -> { - Assert.assertFalse(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role)); + if (!role.equals(DataStoreRole.Primary)) { + Assert.assertFalse(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role, 1l)); + } else { + if (type.equals(HypervisorType.KVM)) + Assert.assertTrue(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role, 1l)); + } }); }); } @@ -139,9 +147,9 @@ public class SnapshotHelperTest { Mockito.doReturn(type).when(snapshotInfoMock).getHypervisorType(); dataStoreRoles.forEach(role -> { if (type == Hypervisor.HypervisorType.KVM && role == DataStoreRole.Primary) { - Assert.assertTrue(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role)); + Assert.assertTrue(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role, null)); } else { - Assert.assertFalse(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role)); + Assert.assertFalse(snapshotHelperSpy.isKvmSnapshotOnlyInPrimaryStorage(snapshotInfoMock, role, 
null)); } }); }); diff --git a/server/src/test/java/org/apache/cloudstack/storage/object/BucketApiServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/object/BucketApiServiceImplTest.java index 3ce855b504b..b630ddc69a7 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/object/BucketApiServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/object/BucketApiServiceImplTest.java @@ -110,6 +110,7 @@ public class BucketApiServiceImplTest { ObjectStoreVO objectStoreVO = Mockito.mock(ObjectStoreVO.class); Mockito.when(objectStoreVO.getId()).thenReturn(objectStoreId); + Mockito.when(objectStoreVO.getTotalSize()).thenReturn(2000000000L); Mockito.when(objectStoreDao.findById(poolId)).thenReturn(objectStoreVO); ObjectStoreEntity objectStore = Mockito.mock(ObjectStoreEntity.class); Mockito.when(dataStoreMgr.getDataStore(objectStoreId, DataStoreRole.Object)).thenReturn(objectStore); diff --git a/test/integration/plugins/storpool/MigrateVolumeToStorPool.py b/test/integration/plugins/storpool/MigrateVolumeToStorPool.py index 5babdca094e..4fc94d24383 100644 --- a/test/integration/plugins/storpool/MigrateVolumeToStorPool.py +++ b/test/integration/plugins/storpool/MigrateVolumeToStorPool.py @@ -84,6 +84,12 @@ class TestMigrateVolumeToAnotherPool(cloudstackTestCase): zone = config.zones[0] assert zone is not None + td = TestData() + cls.testdata = td.testdata + cls.helper = StorPoolHelper() + sp_pools = cls.helper.get_pool(zone) + assert sp_pools is not None + cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True) testClient = super(TestMigrateVolumeToAnotherPool, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() @@ -103,11 +109,8 @@ class TestMigrateVolumeToAnotherPool(cloudstackTestCase): # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) - td = TestData() - cls.testdata = td.testdata - cls.helper = StorPoolHelper() - 
storpool_primary_storage = cls.testdata[TestData.primaryStorage] - cls.template_name = storpool_primary_storage.get("name") + storpool_primary_storage = sp_pools[0] + cls.template_name = storpool_primary_storage["name"] storpool_service_offerings = cls.testdata[TestData.serviceOffering] nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary] @@ -283,12 +286,6 @@ class TestMigrateVolumeToAnotherPool(cloudstackTestCase): @classmethod def cleanUpCloudStack(cls): try: - if cls.nfs_storage_pool.state is not "Maintenance": - cls.nfs_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.nfs_storage_pool.id) - - if cls.ceph_storage_pool.state is not "Maintenance": - cls.ceph_storage_pool = StoragePool.enableMaintenance(cls.apiclient, cls.ceph_storage_pool.id) - cls.storage_pool = StoragePool.update(cls.apiclient, id=cls.storage_pool.id, tags = ["ssd"]) diff --git a/test/integration/plugins/storpool/TestEncryptedVolumes.py b/test/integration/plugins/storpool/TestEncryptedVolumes.py index eed64950ef5..ac049fb77a2 100644 --- a/test/integration/plugins/storpool/TestEncryptedVolumes.py +++ b/test/integration/plugins/storpool/TestEncryptedVolumes.py @@ -97,9 +97,18 @@ class TestEncryptedVolumes(cloudstackTestCase): zone = config.zones[0] assert zone is not None - cls.zone = list_zones(cls.apiclient, name=zone.name)[0] - cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__ + storage_pools = zone.primaryStorages + sp_pools = [] + for storage in storage_pools: + if storage['provider'] and "StorPool" in storage['provider']: + sp_pools.append(storage) + + if len(sp_pools) < 2: + cls.debug("Cannot perform the tests because there aren't the required count of StorPool storage pools %s" % sp_pools) + return + + cls.zone = list_zones(cls.apiclient, name=zone.name)[0] cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True) cls.helper = 
StorPoolHelper() @@ -118,48 +127,43 @@ class TestEncryptedVolumes(cloudstackTestCase): td = TestData() cls.testdata = td.testdata - cls.sp_template_1 = "ssd" - storpool_primary_storage = { - "name": cls.sp_template_1, - "zoneid": cls.zone.id, - "url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_1), - "scope": "zone", - "capacitybytes": 564325555333, - "capacityiops": 155466, - "hypervisor": "kvm", - "provider": "StorPool", - "tags": cls.sp_template_1 - } - - cls.storpool_primary_storage = storpool_primary_storage + storpool_primary_storage = sp_pools[0] + cls.template_name = storpool_primary_storage["name"] storage_pool = list_storage_pools( cls.apiclient, - name=storpool_primary_storage["name"] - ) - + name=cls.template_name + ) if storage_pool is None: - newTemplate = sptypes.VolumeTemplateCreateDesc(name=storpool_primary_storage["name"], placeAll="virtual", - placeTail="virtual", placeHead="virtual", replication=1) - template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) - storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) else: storage_pool = storage_pool[0] + cls.storage_pool = storage_pool + cls.helper.updateStoragePoolTags(cls.apiclient, cls.storage_pool.id, cls.testdata[TestData.sp_template_1]["tags"]) + + cls.debug(pprint.pformat(storage_pool)) + cls.primary_storage = storage_pool - storpool_service_offerings_ssd = { - "name": "ssd-encrypted", - "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)", - "cpunumber": 1, - "cpuspeed": 500, - "memory": 512, - "storagetype": "shared", - "customizediops": False, - "hypervisorsnapshotreserve": 200, - "encryptroot": True, - "tags": cls.sp_template_1 - } + storpool_primary_storage = sp_pools[1] + cls.template_name = storpool_primary_storage["name"] + + storage_pool = list_storage_pools( + cls.apiclient, + name=cls.template_name + ) + if storage_pool is None: + storage_pool = 
StoragePool.create(cls.apiclient, storpool_primary_storage) + else: + storage_pool = storage_pool[0] + cls.storage_pool = storage_pool + cls.helper.updateStoragePoolTags(cls.apiclient, cls.storage_pool.id, cls.testdata[TestData.sp_template_2]["tags"]) + + cls.debug(pprint.pformat(storage_pool)) + + cls.primary_storage2 = storage_pool + + storpool_service_offerings_ssd = cls.testdata[TestData.serviceOfferingEncrypted] service_offerings_ssd = list_service_offering( cls.apiclient, @@ -174,69 +178,6 @@ class TestEncryptedVolumes(cloudstackTestCase): cls.service_offering = service_offerings_ssd cls.debug(pprint.pformat(cls.service_offering)) - cls.sp_template_2 = "ssd2" - - storpool_primary_storage2 = { - "name": cls.sp_template_2, - "zoneid": cls.zone.id, - "url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_2), - "scope": "zone", - "capacitybytes": 564325555333, - "capacityiops": 1554, - "hypervisor": "kvm", - "provider": "StorPool", - "tags": cls.sp_template_2 - } - - cls.storpool_primary_storage2 = storpool_primary_storage2 - storage_pool = list_storage_pools( - cls.apiclient, - name=storpool_primary_storage2["name"] - ) - - if storage_pool is None: - newTemplate = sptypes.VolumeTemplateCreateDesc(name=storpool_primary_storage2["name"], placeAll="virtual", - placeTail="virtual", placeHead="virtual", replication=1) - template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) - storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2) - - else: - storage_pool = storage_pool[0] - cls.primary_storage2 = storage_pool - - storpool_service_offerings_ssd2 = { - "name": "ssd2-encrypted", - "displaytext": "SP_CO_2", - "cpunumber": 1, - "cpuspeed": 500, - "memory": 512, - "storagetype": "shared", - "customizediops": False, - "encryptroot": True, - "tags": cls.sp_template_2 - } - - service_offerings_ssd2 = list_service_offering( - cls.apiclient, - 
name=storpool_service_offerings_ssd2["name"] - ) - - if service_offerings_ssd2 is None: - service_offerings_ssd2 = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd2, encryptroot=True) - else: - service_offerings_ssd2 = service_offerings_ssd2[0] - - cls.service_offering2 = service_offerings_ssd2 - - cls.disk_offerings_ssd2_encrypted = list_disk_offering( - cls.apiclient, - name=cls.testdata[TestData.diskOfferingEncrypted2]["name"] - ) - if cls.disk_offerings_ssd2_encrypted is None: - cls.disk_offerings_ssd2_encrypted = DiskOffering.create(cls.apiclient, cls.testdata[TestData.diskOfferingEncrypted2], encrypt=True) - else: - cls.disk_offerings_ssd2_encrypted = cls.disk_offerings_ssd2_encrypted[0] - cls.disk_offering_ssd_encrypted = list_disk_offering( cls.apiclient, name=cls.testdata[TestData.diskOfferingEncrypted]["name"] diff --git a/test/integration/plugins/storpool/TestStorPoolVolumes.py b/test/integration/plugins/storpool/TestStorPoolVolumes.py index 70f8900df58..150fe830598 100644 --- a/test/integration/plugins/storpool/TestStorPoolVolumes.py +++ b/test/integration/plugins/storpool/TestStorPoolVolumes.py @@ -32,6 +32,7 @@ from marvin.lib.base import (Account, Volume, SecurityGroup, Role, + DiskOffering, ) from marvin.lib.common import (get_zone, get_domain, @@ -79,17 +80,24 @@ class TestStoragePool(cloudstackTestCase): def setUpCloudStack(cls): config = cls.getClsConfig() StorPoolHelper.logger = cls + cls.logger = StorPoolHelper.logger zone = config.zones[0] assert zone is not None + td = TestData() + cls.testdata = td.testdata + cls.helper = StorPoolHelper() + + sp_pools = cls.helper.get_pool(zone) + assert sp_pools is not None + cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True) testClient = super(TestStoragePool, cls).getClsTestClient() cls._cleanup = [] cls.apiclient = testClient.getApiClient() - cls.helper = StorPoolHelper() cls.unsupportedHypervisor = False cls.hypervisor = 
testClient.getHypervisorInfo() @@ -106,34 +114,11 @@ class TestStoragePool(cloudstackTestCase): cls.debug(list_zones(cls.apiclient, name=zone.name)) assert cls.zone is not None - cls.sp_template_1 = "ssd" - storpool_primary_storage = { - "name" : cls.sp_template_1, - "zoneid": cls.zone.id, - "url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_1), - "scope": "zone", - "capacitybytes": 564325555333, - "capacityiops": 155466, - "hypervisor": "kvm", - "provider": "StorPool", - "tags": cls.sp_template_1 - } + cls.sp_template_1 = cls.testdata[TestData.sp_template_1]["tags"] + storpool_primary_storage = sp_pools[0] - cls.storpool_primary_storage = storpool_primary_storage - - storage_pool = list_storage_pools( - cls.apiclient, - name=storpool_primary_storage["name"] - ) - - if storage_pool is None: - newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1) - template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) - - storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) - else: - storage_pool = storage_pool[0] - cls.primary_storage = storage_pool + cls.primary_storage = cls.create_pool_if_not_exists(storpool_primary_storage) + cls.helper.updateStoragePoolTags(cls.apiclient, cls.primary_storage.id, cls.sp_template_1) storpool_service_offerings_ssd = { @@ -150,7 +135,7 @@ class TestStoragePool(cloudstackTestCase): service_offerings_ssd = list_service_offering( cls.apiclient, - name=storpool_service_offerings_ssd["name"] + name=cls.sp_template_1 ) if service_offerings_ssd is None: @@ -162,36 +147,12 @@ class TestStoragePool(cloudstackTestCase): cls.debug(pprint.pformat(cls.service_offering)) - cls.sp_template_2 = "ssd2" + cls.sp_template_2 = cls.testdata[TestData.sp_template_2]["tags"] - storpool_primary_storage2 = { - "name" : cls.sp_template_2, - 
"zoneid": cls.zone.id, - "url": "SP_API_HTTP=%s:%s;SP_AUTH_TOKEN=%s;SP_TEMPLATE=%s" % (zone.spEndpoint, zone.spEndpointPort, zone.spAuthToken, cls.sp_template_2), - "scope": "zone", - "capacitybytes": 564325555333, - "capacityiops": 1554, - "hypervisor": "kvm", - "provider": "StorPool", - "tags": cls.sp_template_2 - } + storpool_primary_storage2 = sp_pools[1] - cls.storpool_primary_storage2 = storpool_primary_storage2 - storage_pool = list_storage_pools( - cls.apiclient, - name=storpool_primary_storage2["name"] - ) - - if storage_pool is None: - newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage2["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1) - - template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) - - storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2) - - else: - storage_pool = storage_pool[0] - cls.primary_storage2 = storage_pool + cls.primary_storage2 = cls.create_pool_if_not_exists(storpool_primary_storage2) + cls.helper.updateStoragePoolTags(cls.apiclient, cls.primary_storage2.id, cls.sp_template_2) storpool_service_offerings_ssd2 = { "name": cls.sp_template_2, @@ -216,25 +177,11 @@ class TestStoragePool(cloudstackTestCase): cls.service_offering2 = service_offerings_ssd2 - disk_offerings = list_disk_offering( - cls.apiclient, - name="Small" - ) + cls.disk_offerings = cls.create_do_if_not_exists(cls.testdata[TestData.diskOfferingSmall]) - disk_offering_20 = list_disk_offering( - cls.apiclient, - name="Medium" - ) + cls.disk_offering_20 = cls.create_do_if_not_exists(cls.testdata[TestData.diskOfferingMedium]) - disk_offering_100 = list_disk_offering( - cls.apiclient, - name="Large" - ) - - - cls.disk_offerings = disk_offerings[0] - cls.disk_offering_20 = disk_offering_20[0] - cls.disk_offering_100 = disk_offering_100[0] + cls.disk_offering_100 = cls.create_do_if_not_exists(cls.testdata[TestData.diskOfferingLarge]) #The version of CentOS has to be supported 
template = get_template( @@ -253,14 +200,14 @@ class TestStoragePool(cloudstackTestCase): cls.services["zoneid"] = cls.zone.id cls.services["diskofferingid"] = cls.disk_offerings.id - role = Role.list(cls.apiclient, name='Admin') + role = Role.list(cls.apiclient, name='Root Admin') # Create VMs, VMs etc cls.account = Account.create( cls.apiclient, cls.services["account"], domainid=cls.domain.id, - roleid = role[0].id + roleid = 1 ) securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0] @@ -271,7 +218,7 @@ class TestStoragePool(cloudstackTestCase): cls.apiclient, {"diskname":"StorPoolDisk-1" }, zoneid=cls.zone.id, - diskofferingid=disk_offerings[0].id, + diskofferingid=cls.disk_offerings.id, account=cls.account.name, domainid=cls.account.domainid, ) @@ -280,7 +227,7 @@ class TestStoragePool(cloudstackTestCase): cls.apiclient, {"diskname":"StorPoolDisk-2" }, zoneid=cls.zone.id, - diskofferingid=disk_offerings[0].id, + diskofferingid=cls.disk_offerings.id, account=cls.account.name, domainid=cls.account.domainid, ) @@ -289,7 +236,7 @@ class TestStoragePool(cloudstackTestCase): cls.apiclient, {"diskname":"StorPoolDisk-3" }, zoneid=cls.zone.id, - diskofferingid=disk_offerings[0].id, + diskofferingid=cls.disk_offerings.id, account=cls.account.name, domainid=cls.account.domainid, ) @@ -337,6 +284,34 @@ class TestStoragePool(cloudstackTestCase): cls.random_data = "random.data" return + @classmethod + def create_do_if_not_exists(cls, data): + disk_offerings = list_disk_offering( + cls.apiclient, + name=data["name"] + ) + if disk_offerings is None: + disk_offerings = DiskOffering.create(cls.apiclient, data) + else: + disk_offerings = disk_offerings[0] + return disk_offerings + + @classmethod + def create_pool_if_not_exists(cls, storpool_primary_storage): + storage_pool = list_storage_pools( + cls.apiclient, + name=storpool_primary_storage["name"] + ) + if storage_pool is None: + newTemplate = 
sptypes.VolumeTemplateCreateDesc(name=storpool_primary_storage["name"], placeAll="virtual", + placeTail="virtual", placeHead="virtual", replication=1) + template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) + + storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) + else: + storage_pool = storage_pool[0] + return storage_pool + @classmethod def tearDownClass(cls): cls.cleanUpCloudStack() @@ -406,6 +381,8 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) + virtual_machine.stop(self.apiclient, forced=True) + virtual_machine.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_02_snapshot_to_template_bypass_secondary(self): @@ -487,6 +464,8 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) + virtual_machine.stop(self.apiclient, forced=True) + virtual_machine.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_03_snapshot_volume_with_secondary(self): @@ -1016,6 +995,8 @@ class TestStoragePool(cloudstackTestCase): ) ssh_client = vm.get_ssh_client(reconnect=True) + vm.stop(self.apiclient, forced=True) + vm.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") @@ -1825,6 +1806,8 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(snapshot, "Snapshot is None") self.assertEqual(list_volumes_of_vm[0].id, snapshot.volumeid, "Snapshot is not for the same volume") + vm.stop(self.apiclient, forced=True) + vm.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") @@ -1860,6 +1843,8 @@ 
class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) + virtual_machine.stop(self.apiclient, forced=True) + virtual_machine.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_24_migrate_vm_to_another_storage(self): @@ -1892,6 +1877,8 @@ class TestStoragePool(cloudstackTestCase): self.assertFalse(hasattr(self.volume, 'virtualmachineid') , "Volume is not detached") self.assertFalse(hasattr(self.volume, 'storageid') , "Volume is not detached") + + self.helper.updateStoragePoolTags(apiclient=self.apiclient, poolId=self.primary_storage2.id, tags=self.testdata[TestData.sp_template_1]["tags"]) volume = Volume.migrate( self.apiclient, volumeid = self.volume.id, @@ -1901,6 +1888,8 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(volume, "Volume is None") self.assertEqual(volume.storageid, self.primary_storage2.id, "Storage is the same") + self.helper.updateStoragePoolTags(apiclient=self.apiclient, poolId=self.primary_storage2.id, tags=self.testdata[TestData.sp_template_2]["tags"]) + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_26_create_vm_on_another_storpool_storage(self): @@ -1916,6 +1905,8 @@ class TestStoragePool(cloudstackTestCase): rootdisksize=10 ) self.assertIsNotNone(virtual_machine, "Could not create virtual machine on another Storpool primary storage") + virtual_machine.stop(self.apiclient, forced=True) + virtual_machine.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") @@ -1958,6 +1949,8 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(volume, "Could not create volume from snapshot") self.assertIsInstance(volume, Volume, "Volume is not instance of Volume") + virtual_machine.stop(self.apiclient, forced=True) + 
virtual_machine.delete(self.apiclient, expunge=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_28_download_volume(self): @@ -2041,6 +2034,10 @@ class TestStoragePool(cloudstackTestCase): self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) + virtual_machine.stop(self.apiclient, forced=True) + virtual_machine.delete(self.apiclient, expunge=True) + virtual_machine2.stop(self.apiclient, forced=True) + virtual_machine2.delete(self.apiclient, expunge=True) @classmethod def create_volume(self, apiclient, zoneid=None, snapshotid=None, account=None, domainid=None): diff --git a/test/integration/plugins/storpool/TestTagsOnStorPool.py b/test/integration/plugins/storpool/TestTagsOnStorPool.py index ea5c2a4cc78..b813cd320c4 100644 --- a/test/integration/plugins/storpool/TestTagsOnStorPool.py +++ b/test/integration/plugins/storpool/TestTagsOnStorPool.py @@ -44,6 +44,7 @@ from marvin.lib.base import (Account, VmSnapshot, Volume, SecurityGroup, + DiskOffering, ) from marvin.lib.common import (get_zone, get_domain, @@ -79,10 +80,16 @@ class TestStoragePool(cloudstackTestCase): def setUpCloudStack(cls): config = cls.getClsConfig() StorPoolHelper.logger = cls + td = TestData() + cls.testdata = td.testdata + cls.helper = StorPoolHelper() zone = config.zones[0] assert zone is not None + sp_pools = cls.helper.get_pool(zone) + assert sp_pools is not None + cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True) testClient = super(TestStoragePool, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() @@ -98,16 +105,9 @@ class TestStoragePool(cloudstackTestCase): # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = list_zones(cls.apiclient, name=zone.name)[0] - cls.debug(cls.zone) - cls.debug(list_zones(cls.apiclient, name=zone.name)) - assert 
cls.zone is not None assert cls.zone is not None - td = TestData() - cls.testdata = td.testdata - cls.helper = StorPoolHelper() - cls.account = cls.helper.create_account( cls.apiclient, cls.services["account"], @@ -120,34 +120,39 @@ class TestStoragePool(cloudstackTestCase): securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0] cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id) - storpool_primary_storage = cls.testdata[TestData.primaryStorage] - - storpool_service_offerings = cls.testdata[TestData.serviceOffering] - - cls.template_name = storpool_primary_storage.get("name") - + storpool_primary_storage = sp_pools[0] + cls.template_name = storpool_primary_storage["name"] storage_pool = list_storage_pools( cls.apiclient, name=cls.template_name ) + if storage_pool is None: + storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) + else: + storage_pool = storage_pool[0] + cls.storage_pool = storage_pool + cls.helper.updateStoragePoolTags(cls.apiclient, cls.storage_pool.id, cls.testdata[TestData.sp_template_1]["tags"]) + + cls.debug(pprint.pformat(storage_pool)) + + + disk_offerings = list_disk_offering( + cls.apiclient, + name=cls.template_name + ) + if disk_offerings is None: + offering = cls.testdata[TestData.diskOfferingCustom] + cls.disk_offerings = DiskOffering.create(cls.apiclient, services=offering, custom=True) + else: + cls.disk_offerings = disk_offerings[0] + + storpool_service_offerings = cls.testdata[TestData.serviceOffering] service_offerings = list_service_offering( cls.apiclient, name=cls.template_name ) - disk_offerings = list_disk_offering( - cls.apiclient, - name="ssd" - ) - - cls.disk_offerings = disk_offerings[0] - if storage_pool is None: - storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) - else: - storage_pool = storage_pool[0] - cls.storage_pool = storage_pool - 
cls.debug(pprint.pformat(storage_pool)) if service_offerings is None: service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings) else: diff --git a/test/integration/plugins/storpool/TestVmSnapshots.py b/test/integration/plugins/storpool/TestVmSnapshots.py index ab35c076b4e..749e534f0da 100644 --- a/test/integration/plugins/storpool/TestVmSnapshots.py +++ b/test/integration/plugins/storpool/TestVmSnapshots.py @@ -29,6 +29,7 @@ from marvin.lib.base import (Account, User, Volume, SecurityGroup, + DiskOffering, ) from marvin.lib.common import (get_zone, get_domain, @@ -76,6 +77,8 @@ class TestVmSnapshot(cloudstackTestCase): cls.testdata = td.testdata cls.helper = StorPoolHelper() + sp_pools = cls.helper.get_pool(zone) + assert sp_pools is not None cls.services = testClient.getParsedTestDataConfig() # Get Zone, Domain and templates @@ -114,26 +117,29 @@ class TestVmSnapshot(cloudstackTestCase): securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0] cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id) - primarystorage = cls.testdata[TestData.primaryStorage] + primarystorage = sp_pools[0] serviceOffering = cls.testdata[TestData.serviceOffering] storage_pool = list_storage_pools( cls.apiclient, - name = primarystorage.get("name") + name = primarystorage["name"] ) cls.primary_storage = storage_pool[0] disk_offering = list_disk_offering( cls.apiclient, - name="ssd" + name=primarystorage["name"] ) - assert disk_offering is not None - + if disk_offering is None: + offering = cls.testdata[TestData.diskOfferingCustom] + cls.disk_offering = DiskOffering.create(cls.apiclient, services=offering, custom=True) + else: + cls.disk_offering = disk_offering[0] service_offering_only = list_service_offering( cls.apiclient, - name="ssd" + name=primarystorage["name"] ) if service_offering_only is not None: cls.service_offering_only = 
service_offering_only[0] @@ -143,8 +149,6 @@ class TestVmSnapshot(cloudstackTestCase): serviceOffering) assert cls.service_offering_only is not None - cls.disk_offering = disk_offering[0] - # Create 1 data volume_1 cls.volume = Volume.create( cls.apiclient, diff --git a/test/integration/plugins/storpool/sp_util.py b/test/integration/plugins/storpool/sp_util.py index 70f36609af5..084a57ee954 100644 --- a/test/integration/plugins/storpool/sp_util.py +++ b/test/integration/plugins/storpool/sp_util.py @@ -84,6 +84,10 @@ class TestData(): diskOfferingTier1Template = "diskOfferingTier1Template" diskOfferingTier2Template = "diskOfferingTier2Template" diskOfferingWithTagsAndTempl = "diskOfferingWithTagsAndTempl" + diskOfferingSmall = "diskOfferingSmall" + diskOfferingMedium = "diskOfferingMedium" + diskOfferingLarge = "diskOfferingLarge" + diskOfferingCustom = "diskOfferingCustom" domainId = "domainId" hypervisor = "hypervisor" login = "login" @@ -100,6 +104,7 @@ class TestData(): serviceOfferingsPrimary = "serviceOfferingsPrimary" serviceOfferingsIops = "serviceOfferingsIops" serviceOfferingsCeph = "serviceOfferingsCeph" + serviceOfferingEncrypted = "serviceOfferingEncrypted" scope = "scope" StorPool = "StorPool" storageTag = ["ssd", "ssd2"] @@ -114,6 +119,8 @@ class TestData(): volume_6 = "volume_6" volume_7 = "volume_7" zoneId = "zoneId" + sp_template_1 = 'sp_template_1' + sp_template_2 = 'sp_template_2' def __init__(self): sp_template_1 = 'ssd' @@ -221,6 +228,18 @@ class TestData(): "customizediops": True, "tags": sp_template_1, }, + TestData.serviceOfferingEncrypted: { + "name": "Test-encrypted", + "displaytext": "SP Encrypted", + "cpunumber": 1, + "cpuspeed": 500, + "memory": 512, + "storagetype": "shared", + "customizediops": False, + "hypervisorsnapshotreserve": 200, + "encryptroot": True, + "tags": sp_template_1 + }, TestData.diskOffering: { "name": "SP_DO_1", "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)", @@ -323,6 +342,38 @@ class TestData(): 
TestData.tags: sp_template_1, "storagetype": "shared" }, + TestData.diskOfferingSmall: { + "name": "Test-Small", + "displaytext": "Small Disk Offering", + "disksize" : 5, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingMedium: { + "name": "Test-Medium", + "displaytext": "Medium Disk Offering", + "disksize": 20, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingLarge: { + "name": "Test-Large", + "displaytext": "Large Disk Offering", + "disksize": 100, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingCustom: { + "name": "Test-Custom", + "displaytext": "Custom Disk Offering", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, TestData.volume_1: { TestData.diskName: "test-volume-1", }, @@ -344,6 +395,12 @@ class TestData(): TestData.volume_7: { TestData.diskName: "test-volume-7", }, + TestData.sp_template_1: { + "tags": "ssd" + }, + TestData.sp_template_2: { + "tags": "ssd2" + }, } class StorPoolHelper(): def setUpClass(cls): @@ -847,3 +904,80 @@ class StorPoolHelper(): break return destinationHost + + @classmethod + def updateStoragePoolTags(cls, apiclient, poolId, tags): + StoragePool.update( + apiclient, + id=poolId, + tags=tags + ) + + @classmethod + def get_pool(cls, zone): + storage_pools = zone.primaryStorages + sp_pools = [] + for storage in storage_pools: + if storage['provider'] and "StorPool" in storage['provider']: + sp_pools.append(storage) + + if len(sp_pools) < 2: + cls.debug("Cannot perform the tests because there aren't the required count of StorPool storage pools %s" % sp_pools) + return + return sp_pools + + @classmethod + def create_snapshot_template(cls, apiclient, services, snapshot_id, zone_id): + cmd = createTemplate.createTemplateCmd() + cmd.displaytext = 
"TemplateFromSnap" + name = "-".join([cmd.displaytext, random_gen()]) + cmd.name = name + if "ostypeid" in services: + cmd.ostypeid = services["ostypeid"] + elif "ostype" in services: + sub_cmd = listOsTypes.listOsTypesCmd() + sub_cmd.description = services["ostype"] + ostypes = apiclient.listOsTypes(sub_cmd) + + if not isinstance(ostypes, list): + cls.fail("Unable to find Ostype id with desc: %s" % + services["ostype"]) + cmd.ostypeid = ostypes[0].id + else: + cls.fail("Unable to find Ostype is required for creating template") + + cmd.isfeatured = True + cmd.ispublic = True + cmd.isextractable = False + + cmd.snapshotid = snapshot_id + cmd.zoneid = zone_id + apiclient.createTemplate(cmd) + templates = Template.list(apiclient, name=name, templatefilter="self") + if not isinstance(templates, list) and len(templates) < 0: + cls.fail("Unable to find created template with name %s" % name) + template = Template(templates[0].__dict__) + return template + + @classmethod + def verify_snapshot_copies(cls, userapiclient, snapshot_id, zone_ids): + snapshot_entries = Snapshot.list(userapiclient, id=snapshot_id, showunique=False) + if not isinstance(snapshot_entries, list): + cls.fail("Unable to list snapshot for multiple zones") + snapshots = set() + new_list = [] + for obj in snapshot_entries: + if obj.zoneid not in snapshots: + new_list.append(obj) + snapshots.add(obj.zoneid) + + if len(new_list) != len(zone_ids): + cls.fail("Undesired list snapshot size for multiple zones") + for zone_id in zone_ids: + zone_found = False + for entry in new_list: + if entry.zoneid == zone_id: + zone_found = True + break + if zone_found == False: + cls.fail("Unable to find snapshot entry for the zone ID: %s" % zone_id) diff --git a/test/integration/plugins/storpool/storpool.cfg b/test/integration/plugins/storpool/storpool.cfg new file mode 100644 index 00000000000..4eccc9d0f29 --- /dev/null +++ b/test/integration/plugins/storpool/storpool.cfg @@ -0,0 +1,154 @@ +# Licensed to the Apache 
Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +{ + "zones": [ + { + "name": "Zone-A", + "enabled": "True", + "physical_networks": [ + { + "broadcastdomainrange": "Zone", + "name": "physical_network", + "traffictypes": [ + { + "typ": "Guest" + }, + { + "typ": "Management" + } + ], + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + }, + { + "broadcastdomainrange": "Pod", + "name": "SecurityGroupProvider" + } + ] + } + ], + "primaryStorages": [ + { + "url": "SP_API_HTTP=1.1.1.1:81;SP_AUTH_TOKEN=11111111;SP_TEMPLATE=ssd", + "name": "ssd", + "provider": "StorPool", + "hypervisor": "KVM", + "path": "/dev/storpool/", + "protocol": "SharedMountPoint", + "capacitybytes": 300000000000, + "tags": "ssd" + }, + { + "url": "SP_API_HTTP=1.1.1.1:81;SP_AUTH_TOKEN=1111111;SP_TEMPLATE=ssd2", + "name": "", + "provider": "StorPool", + "hypervisor": "KVM", + "path": "/dev/storpool/", + "protocol": "SharedMountPoint", + "capacitybytes": 300000000000, + "tags": "" + }, + { + "url": "nfs://1.1.1.1/export/primary", + "name": "primary", + "hypervisor": "KVM", + "tags": "nfs" + }, + { + "url": "rbd://cloudstack:342343223==@1.1.1.1/cloudstack", + "hypervisor": "KVM", + "name": "ceph", + "tags": "ceph" + } + ], + "spEndpoint": [SP_API_HTTP_HOST], + 
"spEndpointPort": 81, + "spAuthToken": [SP_AUTH_TOKEN] + }, + { + "name": "Zone-B", + "enabled": "True", + "physical_networks": [ + { + "broadcastdomainrange": "Zone", + "name": "physical_network", + "traffictypes": [ + { + "typ": "Guest" + }, + { + "typ": "Management" + } + ], + "providers": [ + { + "broadcastdomainrange": "ZONE", + "name": "VirtualRouter" + }, + { + "broadcastdomainrange": "Pod", + "name": "SecurityGroupProvider" + } + ] + } + ], + "primaryStorages": [ + { + "url": "SP_API_HTTP=1.1.1.1:81;SP_AUTH_TOKEN=111111;SP_TEMPLATE=default", + "name": "", + "provider": "StorPool", + "hypervisor": "KVM", + "path": "/dev/storpool/", + "protocol": "SharedMountPoint", + "capacitybytes": 300000000000, + "tags": "" + }, + { + "url": "nfs://1.1.1.1/export/primary", + "name": "", + "hypervisor": "KVM", + "tags": "" + } + ], + "spEndpoint": [SP_API_HTTP_HOST], + "spEndpointPort": 81, + "spAuthToken": [SP_AUTH_TOKEN] + } + ], + "dbSvr": { + "dbSvr": "127.0.0.1", + "passwd": "cloud", + "db": "cloud", + "port": 3306, + "user": "cloud" + }, + "logger": { + "LogFolderPath": "/tmp/" + }, + "mgtSvr": [ + { + "mgtSvrIp": "", + "port": 8096, + "user": "", + "passwd": "", + "hypervisor": "kvm" + } + ] +} diff --git a/test/integration/plugins/storpool/test_snapshot_copy_on_primary_storage.py b/test/integration/plugins/storpool/test_snapshot_copy_on_primary_storage.py new file mode 100644 index 00000000000..4e627a58f3b --- /dev/null +++ b/test/integration/plugins/storpool/test_snapshot_copy_on_primary_storage.py @@ -0,0 +1,255 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time + +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (createSnapshot, + deleteSnapshot, + copySnapshot, + createVolume, + createTemplate, + listOsTypes) +from marvin.lib.utils import (cleanup_resources, + random_gen) +from marvin.lib.base import (Account, + Zone, + ServiceOffering, + DiskOffering, + VirtualMachine, + Volume, + Snapshot, + Template, + StoragePool) +from marvin.lib.common import (get_domain, + get_zone, + get_template) +from marvin.lib.decoratorGenerators import skipTestIf +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +import logging +from sp_util import (TestData, StorPoolHelper) +import math + +class TestSnapshotCopy(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestSnapshotCopy, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + + cls._cleanup = [] + cls.logger = logging.getLogger('TestSnapshotCopy') + cls.testsNotSupported = False + cls.zones = Zone.list(cls.apiclient) + cls.pools = StoragePool.list(cls.apiclient, status="Up") + enabled_core_zones = [] + if not isinstance(cls.zones, list): + cls.testsNotSupported = True + elif len(cls.zones) < 2: + cls.testsNotSupported = True + else: + for z in cls.zones: + if z.type == 'Core' and z.allocationstate == 'Enabled': + 
enabled_core_zones.append(z) + if len(enabled_core_zones) < 2: + cls.testsNotSupported = True + + if cls.testsNotSupported == True: + cls.logger.info("Unsupported") + return + + cls.additional_zone = None + for z in enabled_core_zones: + if z.id != cls.zone.id: + cls.additional_zone = z + + cls.storpool_pool = None + for pool in cls.pools: + if pool.provider == "StorPool" and pool.zoneid != cls.zone.id: + cls.storpool_pool = pool + break + + template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"]) + if template == FAILED: + assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + cls.services["iso"]["zoneid"] = cls.zone.id + + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id) + cls._cleanup.append(cls.account) + + cls.helper = StorPoolHelper() + + compute_offering_service = cls.services["service_offerings"]["tiny"].copy() + cls.service_offering = ServiceOffering.create( + cls.apiclient, + compute_offering_service) + cls._cleanup.append(cls.service_offering) + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = template.id + cls.virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["virtual_machine"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + mode=cls.services["mode"] + ) + cls._cleanup.append(cls.virtual_machine) + cls.volume = Volume.list( + cls.apiclient, + virtualmachineid=cls.virtual_machine.id, + type='ROOT', + listall=True + )[0] + + @classmethod + def tearDownClass(cls): + super(TestSnapshotCopy, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.userapiclient = self.testClient.getUserApiClient( + 
UserName=self.account.name, + DomainName=self.account.domain + ) + self.dbclient = self.testClient.getDbConnection() + self.snapshot_id = None + self.cleanup = [] + + def tearDown(self): + super(TestSnapshotCopy, self).tearDown() + + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_01_take_snapshot_multi_zone(self): + """Test to take volume snapshot in multiple StorPool primary storage pools + """ + + snapshot = Snapshot.create(self.userapiclient, volume_id=self.volume.id, zoneids=[str(self.additional_zone.id)], usestoragereplication=True) + self.snapshot_id = snapshot.id + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient) + return + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_02_copy_snapshot_multi_pools(self): + """Test to take volume snapshot on StorPool primary storage and then copy on StorPool primary storage in another pool + """ + + snapshot = Snapshot.create(self.userapiclient, volume_id=self.volume.id) + self.snapshot_id = snapshot.id + Snapshot.copy(self.userapiclient, self.snapshot_id, zone_ids=[str(self.additional_zone.id)], source_zone_id=self.zone.id, usestoragereplication=True) + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient) + return + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_03_take_snapshot_multi_pools_delete_single_zone(self): + """Test to take volume snapshot in multiple StorPool storages in diff zones and delete from one zone + """ + + snapshot = 
Snapshot.create(self.userapiclient, volume_id=self.volume.id, zoneids=[str(self.additional_zone.id)], usestoragereplication=True) + self.snapshot_id = snapshot.id + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient, self.zone.id) + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.additional_zone.id]) + self.cleanup.append(snapshot) + return + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_04_copy_snapshot_multi_zone_delete_all(self): + """Test to take volume snapshot on StorPool, copy in another StorPool primary storage in another zone and delete for all + """ + + snapshot = Snapshot.create(self.userapiclient, volume_id=self.volume.id) + self.snapshot_id = snapshot.id + Snapshot.copy(self.userapiclient, self.snapshot_id, zone_ids=[str(self.additional_zone.id)], source_zone_id=self.zone.id, usestoragereplication=True) + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient) + snapshot_entries = Snapshot.list(self.userapiclient, id=snapshot.id) + if snapshot_entries and isinstance(snapshot_entries, list) and len(snapshot_entries) > 0: + self.fail("Snapshot delete for all zones failed") + return + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_05_take_snapshot_multi_zone_create_volume_additional_zone(self): + """Test to take volume snapshot on StorPool in multiple zones and create a volume in one of the additional zones + """ + + snapshot = Snapshot.create(self.userapiclient,volume_id=self.volume.id, zoneids=[str(self.additional_zone.id)], usestoragereplication=True) + self.snapshot_id = 
snapshot.id + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + disk_offering_id = None + if snapshot.volumetype == 'ROOT': + service = self.services["disk_offering"] + service["disksize"] = math.ceil(snapshot.virtualsize/(1024*1024*1024)) + self.disk_offering = DiskOffering.create( + self.apiclient, + service + ) + self.cleanup.append(self.disk_offering) + disk_offering_id = self.disk_offering.id + + self.volume = Volume.create(self.userapiclient, {"diskname":"StorPoolDisk-1" }, snapshotid=self.snapshot_id, zoneid=self.zone.id, diskofferingid=disk_offering_id) + self.cleanup.append(self.volume) + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient) + if self.zone.id != self.volume.zoneid: + self.fail("Volume from snapshot not created in the additional zone") + return + + @skipTestIf("testsNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_06_take_snapshot_multi_zone_create_template_additional_zone(self): + """Test to take volume snapshot in multiple StorPool primary storages in diff zones and create a volume in one of the additional zones + """ + snapshot = Snapshot.create(self.userapiclient, volume_id=self.volume.id, zoneids=[str(self.additional_zone.id)], usestoragereplication=True) + self.snapshot_id = snapshot.id + self.helper.verify_snapshot_copies(self.userapiclient, self.snapshot_id, [self.zone.id, self.additional_zone.id]) + self.template = self.helper.create_snapshot_template(self.userapiclient, self.services, self.snapshot_id, self.additional_zone.id) + if self.additional_zone.id != self.template.zoneid: + self.fail("Template from snapshot not created in the additional zone") + time.sleep(420) + Snapshot.delete(snapshot, self.userapiclient) + self.cleanup.append(self.template) + return diff --git a/test/integration/plugins/storpool/test_storpool_tiers.py 
b/test/integration/plugins/storpool/test_storpool_tiers.py index f41059e9206..abea6941fb6 100644 --- a/test/integration/plugins/storpool/test_storpool_tiers.py +++ b/test/integration/plugins/storpool/test_storpool_tiers.py @@ -90,17 +90,17 @@ class TestStorPoolTiers(cloudstackTestCase): cls.qos = "SP_QOSCLASS" cls.spTemplate = "SP_TEMPLATE" - cls.disk_offerings_tier1_tags = cls.getDiskOffering(disk_offerings_tier1_tags, cls.qos, "ssd") + cls.disk_offerings_tier1_tags = cls.getDiskOffering(disk_offerings_tier1_tags, cls.qos, cls.testdata[TestData.sp_template_1]["tags"]) - cls.disk_offerings_tier2_tags = cls.getDiskOffering(disk_offerings_tier2_tags, cls.qos, "virtual") + cls.disk_offerings_tier2_tags = cls.getDiskOffering(disk_offerings_tier2_tags, cls.qos, cls.testdata[TestData.sp_template_2]["tags"]) - cls.disk_offerings_tier1_template = cls.getDiskOffering(disk_offerings_tier1_template, cls.spTemplate, "ssd") + cls.disk_offerings_tier1_template = cls.getDiskOffering(disk_offerings_tier1_template, cls.spTemplate, cls.testdata[TestData.sp_template_1]["tags"]) cls.disk_offerings_tier2_template = cls.getDiskOffering(disk_offerings_tier2_template, cls.spTemplate, - "virtual") + cls.testdata[TestData.sp_template_2]["tags"]) cls.disk_offerings_tier2_tags_template = cls.getDiskOffering(disk_offerings_tier2_tags_template, cls.spTemplate, - "virtual") - cls.resourceDetails(cls.qos, cls.disk_offerings_tier2_tags_template.id, "virtual") + cls.testdata[TestData.sp_template_2]["tags"]) + cls.resourceDetails(cls.qos, cls.disk_offerings_tier2_tags_template.id, cls.testdata[TestData.sp_template_1]["tags"]) cls.account = cls.helper.create_account( cls.apiclient, @@ -115,33 +115,35 @@ class TestStorPoolTiers(cloudstackTestCase): cls.helper.set_securityGroups(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid, id=securitygroup.id) - storpool_primary_storage = cls.testdata[TestData.primaryStorage] + sp_pools = cls.helper.get_pool(zone) + assert sp_pools is not 
None + + storpool_primary_storage = sp_pools[0] storpool_service_offerings = cls.testdata[TestData.serviceOffering] - cls.template_name = storpool_primary_storage.get("name") + cls.template_name = storpool_primary_storage["name"] storage_pool = list_storage_pools( cls.apiclient, name=cls.template_name ) - service_offerings = list_service_offering( - cls.apiclient, - name=cls.template_name - ) - - disk_offerings = list_disk_offering( - cls.apiclient, - name="ssd" - ) - if storage_pool is None: storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) else: storage_pool = storage_pool[0] cls.storage_pool = storage_pool + cls.helper.updateStoragePoolTags(cls.apiclient, cls.storage_pool.id, cls.testdata[TestData.sp_template_1]["tags"]) + cls.debug(pprint.pformat(storage_pool)) + + + service_offerings = list_service_offering( + cls.apiclient, + name=cls.template_name + ) + if service_offerings is None: service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings) else: diff --git a/test/integration/smoke/test_attach_multiple_volumes.py b/test/integration/smoke/test_attach_multiple_volumes.py index 81199bcfdfa..9357e4d6509 100644 --- a/test/integration/smoke/test_attach_multiple_volumes.py +++ b/test/integration/smoke/test_attach_multiple_volumes.py @@ -285,6 +285,8 @@ class TestMultipleVolumeAttach(cloudstackTestCase): self.query_async_job(vol3_jobId.jobid) self.query_async_job(vol4_jobId.jobid) + time.sleep(60) + # List all the volumes attached to the instance. Includes even the Root disk. 
list_volume_response = Volume.list( self.apiClient, @@ -337,6 +339,8 @@ class TestMultipleVolumeAttach(cloudstackTestCase): self.query_async_job(vol5_jobId.jobid) self.query_async_job(vol6_jobId.jobid) + time.sleep(60) + volumes = Volume.list(self.apiClient, virtualmachineid=self.virtual_machine.id, type="DATADISK", diff --git a/test/integration/smoke/test_backup_recovery_dummy.py b/test/integration/smoke/test_backup_recovery_dummy.py index b4789bd0f24..3e2f16f8c68 100644 --- a/test/integration/smoke/test_backup_recovery_dummy.py +++ b/test/integration/smoke/test_backup_recovery_dummy.py @@ -18,10 +18,11 @@ from marvin.cloudstackTestCase import cloudstackTestCase from marvin.lib.utils import (cleanup_resources) -from marvin.lib.base import (Account, ServiceOffering, VirtualMachine, BackupOffering, Configurations, Backup) +from marvin.lib.base import (Account, ServiceOffering, DiskOffering, VirtualMachine, BackupOffering, Configurations, Backup, Volume) from marvin.lib.common import (get_domain, get_zone, get_template) from nose.plugins.attrib import attr from marvin.codes import FAILED +import time class TestDummyBackupAndRecovery(cloudstackTestCase): @@ -44,33 +45,34 @@ class TestDummyBackupAndRecovery(cloudstackTestCase): cls._cleanup = [] # Check backup configuration values, set them to enable the dummy provider - backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled', zoneid=cls.zone.id) - backup_provider_cfg = Configurations.list(cls.api_client, name='backup.framework.provider.plugin', zoneid=cls.zone.id) + backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled') + backup_provider_cfg = Configurations.list(cls.api_client, name='backup.framework.provider.plugin') cls.backup_enabled = backup_enabled_cfg[0].value cls.backup_provider = backup_provider_cfg[0].value if cls.backup_enabled == "false": - Configurations.update(cls.api_client, 'backup.framework.enabled', value='true', zoneid=cls.zone.id) 
+ cls.skipTest(cls, reason="Test can be run only if the config backup.framework.enabled is true") if cls.backup_provider != "dummy": - Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value='dummy', zoneid=cls.zone.id) + Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value='dummy') if cls.hypervisor.lower() != 'simulator': return cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id) cls.offering = ServiceOffering.create(cls.api_client,cls.services["service_offerings"]["small"]) + cls.diskoffering = DiskOffering.create(cls.api_client, cls.services["disk_offering"]) cls.vm = VirtualMachine.create(cls.api_client, cls.services["small"], accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.offering.id, - mode=cls.services["mode"]) - cls._cleanup = [cls.offering, cls.account] + diskofferingid=cls.diskoffering.id, mode=cls.services["mode"]) + cls._cleanup = [cls.offering, cls.diskoffering, cls.account] # Import a dummy backup offering to use on tests cls.provider_offerings = BackupOffering.listExternal(cls.api_client, cls.zone.id) cls.debug("Importing backup offering %s - %s" % (cls.provider_offerings[0].externalid, cls.provider_offerings[0].name)) - cls.offering = BackupOffering.importExisting(cls.api_client, cls.zone.id, cls.provider_offerings[0].externalid, + cls.backup_offering = BackupOffering.importExisting(cls.api_client, cls.zone.id, cls.provider_offerings[0].externalid, cls.provider_offerings[0].name, cls.provider_offerings[0].description) - cls._cleanup.append(cls.offering) + cls._cleanup.append(cls.backup_offering) @classmethod def tearDownClass(cls): @@ -79,10 +81,8 @@ class TestDummyBackupAndRecovery(cloudstackTestCase): cleanup_resources(cls.api_client, cls._cleanup) # Restore original backup framework values values - if cls.backup_enabled == "false": - Configurations.update(cls.api_client, 'backup.framework.enabled', 
value=cls.backup_enabled, zoneid=cls.zone.id) if cls.backup_provider != "dummy": - Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value=cls.backup_provider, zoneid=cls.zone.id) + Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value=cls.backup_provider) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -139,7 +139,7 @@ class TestDummyBackupAndRecovery(cloudstackTestCase): self.assertEqual(backups, None, "There should not exist any backup for the VM") # Assign VM to offering and create ad-hoc backup - self.offering.assignOffering(self.apiclient, self.vm.id) + self.backup_offering.assignOffering(self.apiclient, self.vm.id) Backup.create(self.apiclient, self.vm.id) # Verify backup is created for the VM @@ -155,4 +155,68 @@ class TestDummyBackupAndRecovery(cloudstackTestCase): self.assertEqual(backups, None, "There should not exist any backup for the VM") # Remove VM from offering - self.offering.removeOffering(self.apiclient, self.vm.id) + self.backup_offering.removeOffering(self.apiclient, self.vm.id) + + @attr(tags=["advanced", "backup"], required_hardware="false") + def test_vm_backup_create_vm_from_backup(self): + """ + Test creating a new VM from a backup + """ + self.backup_offering.assignOffering(self.apiclient, self.vm.id) + + Backup.create(self.apiclient, self.vm.id, "backup1") + Backup.create(self.apiclient, self.vm.id, "backup2") + + # Verify backup is created for the VM + backups = Backup.list(self.apiclient, self.vm.id) + #self.cleanup.extend(backups) + #self.cleanup.append(backups[0]) + self.assertEqual(len(backups), 2, "There should exist two backups for the VM") + + # Remove VM from offering + self.backup_offering.removeOffering(self.apiclient, self.vm.id) + + # Verify no. 
of backups after removing the backup offering + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(len(backups), 2, "There should exist two backups for the VM") + + # Create a new VM from first backup + new_vm_name = "vm-from-backup1-" + str(int(time.time())) + new_vm = Backup.createVMFromBackup( + self.apiclient, + self.services["small"], + mode=self.services["mode"], + backupid=backups[0].id, + vmname=new_vm_name, + accountname=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + self.cleanup.append(new_vm) + + # Verify the new VM was created successfully + self.assertIsNotNone(new_vm, "Failed to create VM from backup") + self.assertEqual(new_vm.name, new_vm_name, "VM name does not match the requested name") + + # Verify the new VM is running + self.assertEqual(new_vm.state, "Running", "New VM should be in Running state") + + # Verify the new VM has the correct service offering + self.assertEqual(new_vm.serviceofferingid, self.offering.id, + "New VM should have the correct service offering") + + # Verify the new VM has the correct zone + self.assertEqual(new_vm.zoneid, self.zone.id, "New VM should be in the correct zone") + + # Verify the new VM has the correct number of volumes (ROOT + DATADISK) + volumes = Volume.list( + self.apiclient, + virtualmachineid=new_vm.id, + listall=True + ) + self.assertTrue(isinstance(volumes, list), "List volumes should return a valid list") + self.assertEqual(2, len(volumes), "The new VM should have 2 volumes (ROOT + DATADISK)") + + # Delete backups + Backup.delete(self.apiclient, backups[0].id) + Backup.delete(self.apiclient, backups[1].id) diff --git a/test/integration/smoke/test_backup_recovery_nas.py b/test/integration/smoke/test_backup_recovery_nas.py new file mode 100644 index 00000000000..ea7f1112cbe --- /dev/null +++ b/test/integration/smoke/test_backup_recovery_nas.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one +# or 
more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.utils import (cleanup_resources) +from marvin.lib.base import (Account, ServiceOffering, DiskOffering, VirtualMachine, BackupOffering, + BackupRepository, Backup, Configurations, Volume, StoragePool) +from marvin.lib.common import (get_domain, get_zone, get_template) +from nose.plugins.attrib import attr +from marvin.codes import FAILED +import time + +class TestNASBackupAndRecovery(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + # Setup + + cls.testClient = super(TestNASBackupAndRecovery, cls).getClsTestClient() + cls.api_client = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) + cls.services["mode"] = cls.zone.networktype + cls.hypervisor = cls.testClient.getHypervisorInfo() + cls.domain = get_domain(cls.api_client) + cls.template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"]) + if cls.template == FAILED: + assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = cls.template.id + cls._cleanup = [] + 
+ if cls.hypervisor.lower() != 'kvm': + cls.skipTest(cls, reason="Test can be run only on KVM hypervisor") + + cls.storage_pool = StoragePool.list(cls.api_client)[0] + if cls.storage_pool.type.lower() != 'networkfilesystem': + cls.skipTest(cls, reason="Test can be run only if the primary storage is of type NFS") + + # Check backup configuration values, set them to enable the nas provider + backup_enabled_cfg = Configurations.list(cls.api_client, name='backup.framework.enabled') + backup_provider_cfg = Configurations.list(cls.api_client, name='backup.framework.provider.plugin') + cls.backup_enabled = backup_enabled_cfg[0].value + cls.backup_provider = backup_provider_cfg[0].value + + if cls.backup_enabled == "false": + cls.skipTest(cls, reason="Test can be run only if the config backup.framework.enabled is true") + if cls.backup_provider != "nas": + Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value='nas') + + cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id) + + cls._cleanup = [cls.account] + + # Create NAS backup repository and offering. 
Use the same directory as the storage pool + cls.backup_repository = BackupRepository.add(cls.api_client, zoneid=cls.zone.id, name="Nas", + address=cls.storage_pool.ipaddress + ":" + cls.storage_pool.path, + provider="nas", type="nfs",) + cls._cleanup.append(cls.backup_repository) + cls.provider_offerings = BackupOffering.listExternal(cls.api_client, cls.zone.id) + cls.backup_offering = BackupOffering.importExisting(cls.api_client, cls.zone.id, cls.provider_offerings[0].externalid, + cls.provider_offerings[0].name, cls.provider_offerings[0].description) + cls._cleanup.append(cls.backup_offering) + + cls.offering = ServiceOffering.create(cls.api_client,cls.services["service_offerings"]["small"]) + cls.diskoffering = DiskOffering.create(cls.api_client, cls.services["disk_offering"]) + cls._cleanup.extend([cls.offering, cls.diskoffering, cls.account]) + cls.vm = VirtualMachine.create(cls.api_client, cls.services["small"], accountid=cls.account.name, + domainid=cls.account.domainid, serviceofferingid=cls.offering.id, + diskofferingid=cls.diskoffering.id, mode=cls.services["mode"]) + + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + + if cls.backup_provider != "nas": + Configurations.update(cls.api_client, 'backup.framework.provider.plugin', value=cls.backup_provider) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + if self.hypervisor.lower() != 'kvm': + raise self.skipTest("Skipping test cases which must only run for Simulator") + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + @attr(tags=["advanced", "backup"], required_hardware="true") + def test_vm_backup_lifecycle(self): + """ + Test 
VM backup lifecycle + """ + + # Verify there are no backups for the VM + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(backups, None, "There should not exist any backup for the VM") + + # Assign VM to offering and create ad-hoc backup + self.backup_offering.assignOffering(self.apiclient, self.vm.id) + Backup.create(self.apiclient, self.vm.id) + + # Verify backup is created for the VM + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(len(backups), 1, "There should exist only one backup for the VM") + backup = backups[0] + + # Delete backup + Backup.delete(self.apiclient, backup.id) + + # Verify backup is deleted + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(backups, None, "There should not exist any backup for the VM") + + # Remove VM from offering + self.backup_offering.removeOffering(self.apiclient, self.vm.id) + + @attr(tags=["advanced", "backup"], required_hardware="true") + def test_vm_backup_create_vm_from_backup(self): + """ + Test creating a new VM from a backup + """ + self.backup_offering.assignOffering(self.apiclient, self.vm.id) + + # Create a file and take backup + try: + ssh_client_vm = self.vm.get_ssh_client(reconnect=True) + ssh_client_vm.execute("touch test_backup_and_recovery.txt") + except Exception as err: + self.fail("SSH failed for Virtual machine: %s due to %s" % (self.vm.ipaddress, err)) + + time.sleep(5) + + Backup.create(self.apiclient, self.vm.id, "backup1") + Backup.create(self.apiclient, self.vm.id, "backup2") + + # Verify backup is created for the VM + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(len(backups), 2, "There should exist two backups for the VM") + + # Remove VM from offering + self.backup_offering.removeOffering(self.apiclient, self.vm.id) + + # Verify no. 
of backups after removing the backup offering + backups = Backup.list(self.apiclient, self.vm.id) + self.assertEqual(len(backups), 2, "There should exist two backups for the VM") + + # Create a new VM from first backup + new_vm_name = "vm-from-backup1-" + str(int(time.time())) + new_vm = Backup.createVMFromBackup( + self.apiclient, + self.services["small"], + mode=self.services["mode"], + backupid=backups[0].id, + vmname=new_vm_name, + accountname=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id + ) + self.cleanup.append(new_vm) + + # Verify the new VM was created successfully + self.assertIsNotNone(new_vm, "Failed to create VM from backup") + self.assertEqual(new_vm.name, new_vm_name, "VM name does not match the requested name") + + # Verify the new VM is running + self.assertEqual(new_vm.state, "Running", "New VM should be in Running state") + + # Verify the new VM has the correct service offering + self.assertEqual(new_vm.serviceofferingid, self.offering.id, + "New VM should have the correct service offering") + + # Verify the new VM has the correct zone + self.assertEqual(new_vm.zoneid, self.zone.id, "New VM should be in the correct zone") + + # Verify the new VM has the correct number of volumes (ROOT + DATADISK) + volumes = Volume.list( + self.apiclient, + virtualmachineid=new_vm.id, + listall=True + ) + self.assertTrue(isinstance(volumes, list), "List volumes should return a valid list") + self.assertEqual(2, len(volumes), "The new VM should have 2 volumes (ROOT + DATADISK)") + + # Verify that the file is present in the Instance created from backup + try: + ssh_client_new_vm = new_vm.get_ssh_client(reconnect=True) + result = ssh_client_new_vm.execute("ls test_backup_and_recovery.txt") + self.assertEqual(result[0], "test_backup_and_recovery.txt", + "Instance created from Backup should have the same file as the backup.") + except Exception as err: + self.fail("SSH failed for Virtual machine: %s due to %s" % (self.vm.ipaddress, err)) + 
+ # Delete backups + Backup.delete(self.apiclient, backups[0].id) + Backup.delete(self.apiclient, backups[1].id) diff --git a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py index d7994e3fe8d..131ea6195ab 100644 --- a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py +++ b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py @@ -58,7 +58,8 @@ class TestDeployvGPUenabledVM(cloudstackTestCase): cls.unsupportedHypervisor = True cls.skipTest("Skipping test because suitable hypervisor/host not present") hosts = list_hosts( - cls.apiclient + cls.apiclient, + type='routing' ) if hosts is None: cls.unsupportedHypervisor = True diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index bcaaca2e39a..ad72b4c7938 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -228,7 +228,8 @@ known_categories = { 'Rolling': 'Rolling Maintenance', 'importVsphereStoragePolicies' : 'vSphere storage policies', 'listVsphereStoragePolicies' : 'vSphere storage policies', - 'ConsoleEndpoint': 'Console Endpoint', + 'createConsoleEndpoint': 'Console Session', + 'listConsoleSessions': 'Console Session', 'importVm': 'Virtual Machine', 'revertToVMSnapshot': 'Virtual Machine', 'listQuarantinedIp': 'IP Quarantine', diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index 0c9843f4399..16b2467b63d 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -516,6 +516,43 @@ class VirtualMachine: virtual_machine.ssh_ip = nat_rule.ipaddress virtual_machine.public_ip = nat_rule.ipaddress + @classmethod + def program_ssh_access( + cls, apiclient, services, mode, networkids, virtual_machine, allow_egress=False, vpcid=None): + """ + Program SSH access to the VM + """ + # program ssh access over NAT via PF + retries = 5 + interval = 30 + while retries > 0: + try: + if mode.lower() == 'advanced': + cls.access_ssh_over_nat( + apiclient, + services, + virtual_machine, 
+ allow_egress=allow_egress, + networkid=networkids[0] if networkids else None, + vpcid=vpcid) + elif mode.lower() == 'basic': + if virtual_machine.publicip is not None: + # EIP/ELB (netscaler) enabled zone + vm_ssh_ip = virtual_machine.publicip + else: + # regular basic zone with security group + vm_ssh_ip = virtual_machine.nic[0].ipaddress + virtual_machine.ssh_ip = vm_ssh_ip + virtual_machine.public_ip = vm_ssh_ip + break + except Exception as e: + if retries >= 0: + retries = retries - 1 + time.sleep(interval) + continue + raise Exception( + "The following exception appeared while programming ssh access - %s" % e) + @classmethod def create(cls, apiclient, services, templateid=None, accountid=None, domainid=None, zoneid=None, networkids=None, @@ -716,36 +753,7 @@ class VirtualMachine: virtual_machine.public_ip = virtual_machine.nic[0].ipaddress return VirtualMachine(virtual_machine.__dict__, services) - # program ssh access over NAT via PF - retries = 5 - interval = 30 - while retries > 0: - time.sleep(interval) - try: - if mode.lower() == 'advanced': - cls.access_ssh_over_nat( - apiclient, - services, - virtual_machine, - allow_egress=allow_egress, - networkid=cmd.networkids[0] if cmd.networkids else None, - vpcid=vpcid) - elif mode.lower() == 'basic': - if virtual_machine.publicip is not None: - # EIP/ELB (netscaler) enabled zone - vm_ssh_ip = virtual_machine.publicip - else: - # regular basic zone with security group - vm_ssh_ip = virtual_machine.nic[0].ipaddress - virtual_machine.ssh_ip = vm_ssh_ip - virtual_machine.public_ip = vm_ssh_ip - break - except Exception as e: - if retries >= 0: - retries = retries - 1 - continue - raise Exception( - "The following exception appeared while programming ssh access - %s" % e) + cls.program_ssh_access(apiclient, services, mode, cmd.networkids, virtual_machine, allow_egress, vpcid) return VirtualMachine(virtual_machine.__dict__, services) @@ -1141,7 +1149,7 @@ class Volume: @classmethod def create(cls, apiclient, 
services, zoneid=None, account=None, - domainid=None, diskofferingid=None, projectid=None, size=None): + domainid=None, diskofferingid=None, projectid=None, size=None, snapshotid=None): """Create Volume""" cmd = createVolume.createVolumeCmd() cmd.name = "-".join([services["diskname"], random_gen()]) @@ -1172,6 +1180,9 @@ class Volume: if size: cmd.size = size + if snapshotid: + cmd.snapshotid = snapshotid + return Volume(apiclient.createVolume(cmd).__dict__) def update(self, apiclient, **kwargs): @@ -1387,7 +1398,7 @@ class Snapshot: @classmethod def create(cls, apiclient, volume_id, account=None, - domainid=None, projectid=None, locationtype=None, asyncbackup=None): + domainid=None, projectid=None, locationtype=None, asyncbackup=None, zoneids=None, pool_ids=None, usestoragereplication=None): """Create Snapshot""" cmd = createSnapshot.createSnapshotCmd() cmd.volumeid = volume_id @@ -1401,12 +1412,20 @@ class Snapshot: cmd.locationtype = locationtype if asyncbackup: cmd.asyncbackup = asyncbackup + if zoneids: + cmd.zoneids = zoneids + if pool_ids: + cmd.storageids = pool_ids + if usestoragereplication: + cmd.usestoragereplication = usestoragereplication return Snapshot(apiclient.createSnapshot(cmd).__dict__) - def delete(self, apiclient): + def delete(self, apiclient, zone_id=None): """Delete Snapshot""" cmd = deleteSnapshot.deleteSnapshotCmd() cmd.id = self.id + if zone_id: + cmd.zoneid = zone_id apiclient.deleteSnapshot(cmd) @classmethod @@ -1419,6 +1438,22 @@ class Snapshot: cmd.listall = True return (apiclient.listSnapshots(cmd)) + @classmethod + def copy(cls, apiclient, snapshotid, zone_ids=None, source_zone_id=None, pool_ids=None, usestoragereplication=None): + """ Copy snapshot to another zone or a primary storage in another zone""" + cmd = copySnapshot.copySnapshotCmd() + cmd.id = snapshotid + if source_zone_id: + cmd.sourcezoneid = source_zone_id + if zone_ids: + cmd.destzoneids = zone_ids + if pool_ids: + cmd.storageids = pool_ids + if 
usestoragereplication: + cmd.usestoragereplication = usestoragereplication + return Snapshot(apiclient.copySnapshot(cmd).__dict__) + + def validateState(self, apiclient, snapshotstate, timeout=600): """Check if snapshot is in required state returnValue: List[Result, Reason] @@ -1454,7 +1489,7 @@ class Template: @classmethod def create(cls, apiclient, services, volumeid=None, - account=None, domainid=None, projectid=None, randomise=True): + account=None, domainid=None, projectid=None, randomise=True, snapshotid=None, zoneid=None): """Create template from Volume""" # Create template from Virtual machine and Volume ID cmd = createTemplate.createTemplateCmd() @@ -1500,6 +1535,12 @@ class Template: if projectid: cmd.projectid = projectid + + if snapshotid: + cmd.snapshotid = snapshotid + + if zoneid: + cmd.zoneid = zoneid return Template(apiclient.createTemplate(cmd).__dict__) @classmethod @@ -6157,11 +6198,13 @@ class Backup: self.__dict__.update(items) @classmethod - def create(self, apiclient, vmid): + def create(cls, apiclient, vmid, name=None): """Create VM backup""" cmd = createBackup.createBackupCmd() cmd.virtualmachineid = vmid + if name: + cmd.name = name return Backup(apiclient.createBackup(cmd).__dict__) @classmethod @@ -6175,11 +6218,12 @@ class Backup: return (apiclient.deleteBackup(cmd)) @classmethod - def list(self, apiclient, vmid): + def list(self, apiclient, vmid=None): """List VM backups""" cmd = listBackups.listBackupsCmd() - cmd.virtualmachineid = vmid + if vmid: + cmd.virtualmachineid = vmid cmd.listall = True return (apiclient.listBackups(cmd)) @@ -6201,6 +6245,21 @@ class Backup: cmd.virtualmachineid = virtualmachineid return (apiclient.restoreVolumeFromBackupAndAttachToVM(cmd)) + @classmethod + def createVMFromBackup(cls, apiclient, services, mode, backupid, accountname, domainid, zoneid, vmname=None): + """Create new VM from backup + """ + cmd = createVMFromBackup.createVMFromBackupCmd() + cmd.backupid = backupid + cmd.account = accountname + 
cmd.domainid = domainid + cmd.zoneid = zoneid + if vmname: + cmd.name = vmname + response = apiclient.createVMFromBackup(cmd) + virtual_machine = VirtualMachine(response.__dict__, []) + VirtualMachine.program_ssh_access(apiclient, services, mode, cmd.networkids, virtual_machine) + return virtual_machine class BackupSchedule: @@ -6243,6 +6302,37 @@ class BackupSchedule: return (apiclient.updateBackupSchedule(cmd)) +class BackupRepository: + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def add(cls, apiclient, zoneid, name, address, provider, type): + """Add backup repository""" + + cmd = addBackupRepository.addBackupRepositoryCmd() + cmd.zoneid = zoneid + cmd.name = name + cmd.address = address + cmd.provider = provider + cmd.type = type + response = apiclient.addBackupRepository(cmd) + return BackupRepository(response.__dict__) + + def delete(self, apiclient): + """Delete backup repository""" + + cmd = deleteBackupRepository.deleteBackupRepositoryCmd() + cmd.id = self.id + return (apiclient.deleteBackupRepository(cmd)) + + def list(self, apiclient): + """List backup repository""" + + cmd = listBackupRepositories.listBackupRepositoriesCmd() + return (apiclient.listBackupRepository(cmd)) + class ProjectRole: def __init__(self, items): diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index d3a44789489..394de6ca6d2 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -509,6 +509,7 @@ "label.certificate.upload.failed": "Certificate upload failed", "label.certificate.upload.failed.description": "Failed to update SSL Certificate. 
Failed to pass certificate validation check.", "label.certificateid": "Certificate ID", +"label.chainsize": "Chain size", "label.change": "Change", "label.change.affinity": "Change affinity", "label.change.bgp.peers": "Change BGP peers", @@ -572,6 +573,7 @@ "label.communities": "Communities", "label.community": "Community", "label.complete": "Complete", +"label.completed": "Completed", "label.compute": "Compute", "label.compute.offerings": "Compute Offerings", "label.compute.offering.for.sharedfs.instance": "Compute Offering for Instance", @@ -581,6 +583,7 @@ "label.configuration": "Configuration", "label.configuration.details": "Configuration Details", "label.configure": "Configure", + "label.configure.instance": "Configure Instance", "label.configure.health.monitor": "Configure Health Monitor", "label.configure.app": "Configure the App", "label.configure.ldap": "Configure LDAP", @@ -1043,6 +1046,7 @@ "label.f5.ip.loadbalancer": "F5 BIG-IP load balancer.", "label.failed": "Failed", "label.featured": "Featured", +"label.fetch.from.backup": "Fetch from Backup", "label.fetch.instances": "Fetch Instances", "label.fetch.latest": "Fetch latest", "label.filename": "File Name", @@ -1243,6 +1247,7 @@ "label.instance": "Instance", "label.instance.conversion.support": "Instance Conversion Supported", "label.instance.groups": "Instance Groups", +"label.instance.metadata": "Instance metadata", "label.instance.name": "Instance name", "label.instancename": "Internal name", "label.instanceport": "Instance port", @@ -1590,6 +1595,7 @@ "label.more.access.dashboard.ui": "More about accessing dashboard UI", "label.mount.cks.iso.on.vr": "Use CKS packages from Virtual Router", "label.mount.sharedfs": "Mount Shared FileSystem via NFS", +"label.mountopts": "Mount options", "label.move.down.row": "Move down one row", "label.move.to.bottom": "Move to bottom", "label.move.to.top": "Move to top", @@ -1605,6 +1611,7 @@ "label.netmask": "Netmask", "label.netris": "Netris", "label.netristag": 
"Netris tag", +"label.netrisurl": "Netris URL", "label.netris.provider": "Netris Provider", "label.netris.provider.name": "Netris provider name", "label.netris.provider.username": "Netris provider username", @@ -2083,6 +2090,8 @@ "label.restartrequired": "Restart required", "label.restore": "Restore", "label.restore.volume.attach": "Restore volume and attach", +"label.use.backup.ip.address": "Use IP Addresses from Backup", +"label.use.backup.ip.address.tooltip": "Use the same IP/MAC addresses as stored in the backup metadata. The command will error out if the IP/MAC addresses are not available", "label.review": "Review", "label.role": "Role", "label.roleid": "Role", @@ -2150,6 +2159,7 @@ "label.scaleup.policy": "ScaleUp policy", "label.scaling": "Scaling", "label.schedule": "Schedule", +"label.scheduled": "Scheduled", "label.schedule.add": "Add schedule", "label.scheduled.backups": "Scheduled backups", "label.schedules": "Schedules", @@ -2189,7 +2199,8 @@ "label.select.root.disk": "Select the ROOT disk", "label.select.source.vcenter.datacenter": "Select the source VMware vCenter Datacenter", "label.select.tier": "Select Network Tier", -"label.select.zones": "Select Zones", +"label.select.zones": "Select zones", +"label.select.storagepools": "Select storage pools", "label.select.2fa.provider": "Select the provider", "label.selected.storage": "Selected storage", "label.self": "Mine", @@ -2291,6 +2302,9 @@ "label.srx": "SRX", "label.srx.firewall": "Juniper SRX firewall", "label.storageaccessgroups": "Storage Access Groups", +"label.storageallocated": "Allocated size", +"label.storagetotal": "Total size", +"label.storageused": "Used size", "label.clusterstorageaccessgroups": "Cluster Storage Access Groups", "label.podstorageaccessgroups": "Pod Storage Access Groups", "label.zonestorageaccessgroups": "Zone Storage Access Groups", @@ -2318,6 +2332,7 @@ "label.standard.us.keyboard": "Standard (US) keyboard", "label.start": "Start", "label.startasn": "Start AS Number", 
+"label.started": "Started", "label.start.date": "Start date", "label.start.date.and.time": "Start date and time", "label.start.ip": "Start IP", @@ -2368,6 +2383,7 @@ "label.storagemotionenabled": "Storage motion enabled", "label.storagepolicy": "Storage policy", "label.storagepool": "Storage pool", +"label.storagepools": "Storage pools", "label.storagepool.tooltip": "Destination Storage Pool. Volume should be located in this Storage Pool", "label.storagetags": "Storage tags", "label.storagetype": "Storage type", @@ -2823,6 +2839,7 @@ "label.cniconfiguration": "CNI Configuration", "label.cniconfigname": "Associated CNI Configuration", "label.cniconfigparams": "CNI Configuration parameters", +"label.create.instance.from.backup": "Create new instance from backup", "label.lease.enable": "Enable Lease", "label.lease.enable.tooltip": "The Instance Lease feature allows to set a lease duration (in days) for instances, after which they automatically expire. Upon expiry, the instance can either be stopped (powered off) or destroyed, based on the configured policy", "label.instance.lease": "Instance lease", @@ -2832,11 +2849,13 @@ "label.leaseexpiryaction": "Lease expiry action", "label.remainingdays": "Lease", "label.leased": "Leased", +"label.usestoragereplication": "Use primary storage replication", "message.acquire.ip.failed": "Failed to acquire IP.", "message.action.acquire.ip": "Please confirm that you want to acquire new IP.", "message.action.cancel.maintenance": "Your host has been successfully canceled for maintenance. This process can take up to several minutes.", "message.action.cancel.maintenance.mode": "Please confirm that you want to cancel this maintenance.", "message.action.create.snapshot.from.vmsnapshot": "Please confirm that you want to create Snapshot from Instance Snapshot", +"message.action.create.instance.from.backup": "Please confirm that you want to create a new Instance from the given Backup.
Click on configure to edit the parameters for the new Instance before creation.", "message.action.delete.asnrange": "Please confirm the AS range that you want to delete", "message.action.delete.autoscale.vmgroup": "Please confirm that you want to delete this autoscaling group.", "message.action.delete.backup.offering": "Please confirm that you want to delete this backup offering?", @@ -3041,7 +3060,7 @@ "message.autoscale.vm.networks": "Please choose at least one Network for Instances in the autoscaling group. The default Network must be an Isolated Network or VPC Network Tier which supports Instance AutoScaling and has load balancing rules.", "message.autoscale.vmprofile.update": "The autoscale Instance profile can be updated only when autoscaling group is DISABLED.", "message.backup.attach.restore": "Please confirm that you want to restore and attach the volume from the backup?", -"message.backup.create": "Are you sure you want create an Instance backup?", +"message.backup.create": "Are you sure you want to create an Instance backup?", "message.backup.offering.remove": "Are you sure you want to remove Instance from backup offering and delete the backup chain?", "message.backup.restore": "Please confirm that you want to restore the Instance backup?", "message.cancel.shutdown": "Please confirm that you would like to cancel the shutdown on this Management Server. It will resume accepting any new Async Jobs.", @@ -3138,12 +3157,14 @@ "message.confirm.unmanage.gpu.devices": "Please confirm that you want to unmanage the selected GPU devices?", "message.confirm.upgrade.router.newer.template": "Please confirm that you want to upgrade router to use newer Template.", "message.cpu.usage.info": "The CPU usage percentage can exceed 100% if the Instance has more than 1 vCPU or when CPU Cap is not enabled. 
This behavior happens according to the hypervisor being used (e.g: in KVM), due to how they account the stats", +"message.create.backup.failed": "Failed to create backup.", "message.create.bucket.failed": "Failed to create bucket.", "message.create.bucket.processing": "Bucket creation in progress", "message.create.compute.offering": "Compute Offering created", "message.create.sharedfs.failed": "Failed to create Shared FileSystem.", "message.create.sharedfs.processing": "Shared FileSystem creation in progress.", "message.create.tungsten.public.network": "Create Tungsten-Fabric public Network", +"message.create.instance.from.backup.prefill": "Data is prefilled using the configurations stored in the backup. Edit to change individual fields.", "message.create.internallb": "Creating internal LB", "message.create.internallb.failed": "Failed to create internal LB.", "message.create.internallb.processing": "Creation of internal LB is in progress", @@ -3697,6 +3718,7 @@ "message.shared.network.unsupported.for.nsx": "Shared networks aren't supported for NSX enabled Zones", "message.shutdown.triggered": "A shutdown has been triggered. CloudStack will not accept new jobs", "message.snapshot.additional.zones": "Snapshots will always be created in its native Zone - %x, here you can select additional zone(s) where it will be copied to at creation time", +"message.snapshot.desc": "Snapshot to create a ROOT disk from", "message.sourcenatip.change.warning": "WARNING: Changing the sourcenat IP address of the network will cause connectivity downtime for the Instances with NICs in the Network.", "message.sourcenatip.change.inhibited": "Changing the sourcenat to this IP of the Network to this address is inhibited as firewall rules are defined for it. 
This can include port forwarding or load balancing rules.\n - If this is an Isolated Network, please use updateNetwork/click the edit button.\n - If this is a VPC, first clear all other rules for this address.", "message.specify.tag.key": "Please specify a tag key.", @@ -3874,7 +3896,7 @@ "message.template.arch": "Please select a Template architecture.", "message.template.desc": "OS image that can be used to boot Instances.", "message.template.import.vm.temporary": "If a temporary Template is used, the reset Instance operation will not work after importing it.", -"message.template.iso": "Please select a Template or ISO to continue.", +"message.template.iso": "Please select a Template, ISO, volume or a snapshot to continue.", "message.template.type.change.warning": "WARNING: Changing the Template type to SYSTEM will disable further changes to the Template.", "message.tooltip.reserved.system.netmask": "The Network prefix that defines the Pod subnet. Uses CIDR notation.", "message.traffic.type.deleted": "Successfully deleted traffic type", @@ -3949,6 +3971,7 @@ "message.vnf.nic.move.down.fail": "Failed to move down this NIC", "message.vnf.no.credentials": "No credentials found for the VNF appliance.", "message.vnf.select.networks": "Please select the relevant network for each VNF NIC.", +"message.volume.desc": "Volume to use as a ROOT disk", "message.volume.state.allocated": "The volume is allocated but has not been created yet.", "message.volume.state.attaching": "The volume is attaching to a volume from Ready state.", "message.volume.state.copying": "The volume is being copied from the image store to primary storage, in case it's an uploaded volume.", diff --git a/ui/src/api/index.js b/ui/src/api/index.js index 1f532c36336..0c8e8e9696c 100644 --- a/ui/src/api/index.js +++ b/ui/src/api/index.js @@ -23,6 +23,19 @@ import { ACCESS_TOKEN } from '@/store/mutation-types' +const getAPICommandsRegex = /^(get|list|query|find)\w+$/i +const additionalGetAPICommandsList = [ + 
'isaccountallowedtocreateofferingswithtags', + 'readyforshutdown', + 'cloudianisenabled', + 'quotabalance', + 'quotasummary', + 'quotatarifflist', + 'quotaisenabled', + 'quotastatement', + 'verifyoauthcodeandgetuser' +] + export function getAPI (command, args = {}) { args.command = command args.response = 'json' @@ -64,6 +77,12 @@ export function postAPI (command, data = {}) { }) } +export function callAPI (command, args = {}) { + const isGetAPICommand = getAPICommandsRegex.test(command) || additionalGetAPICommandsList.includes(command.toLowerCase()) + const call = isGetAPICommand ? getAPI : postAPI + return call(command, args) +} + export function login (arg) { if (!sourceToken.checkExistSource()) { sourceToken.init() diff --git a/ui/src/components/header/ProjectMenu.vue b/ui/src/components/header/ProjectMenu.vue index 590a8a2fbd0..32a77723825 100644 --- a/ui/src/components/header/ProjectMenu.vue +++ b/ui/src/components/header/ProjectMenu.vue @@ -33,6 +33,7 @@ diff --git a/ui/src/components/view/DeployVMFromBackup.vue b/ui/src/components/view/DeployVMFromBackup.vue new file mode 100644 index 00000000000..8d929a1fed0 --- /dev/null +++ b/ui/src/components/view/DeployVMFromBackup.vue @@ -0,0 +1,2663 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + + + + + + + + diff --git a/ui/src/components/view/DetailsTab.vue b/ui/src/components/view/DetailsTab.vue index 3a4d0fa043b..7829121fdb2 100644 --- a/ui/src/components/view/DetailsTab.vue +++ b/ui/src/components/view/DetailsTab.vue @@ -64,7 +64,8 @@
- {{ volume.type }} - {{ volume.path }} ({{ parseFloat(volume.size / (1024.0 * 1024.0 * 1024.0)).toFixed(1) }} GB) + {{ volume.type }} - {{ volume.path }} + {{ volume.type }} - {{ volume.path }} ({{ parseFloat(volume.size / (1024.0 * 1024.0 * 1024.0)).toFixed(1) }} GB)
@@ -74,17 +75,22 @@
- {{ parseFloat(dataResource.size / (1024.0 * 1024.0 * 1024.0)).toFixed(2) }} GiB + {{ sizeInGiB(dataResource.size) }} GiB
- {{ parseFloat(dataResource.physicalsize / (1024.0 * 1024.0 * 1024.0)).toFixed(2) }} GiB + {{ sizeInGiB(dataResource.physicalsize) }} GiB
- {{ parseFloat(dataResource.virtualsize / (1024.0 * 1024.0 * 1024.0)).toFixed(2) }} GiB + {{ sizeInGiB(dataResource.virtualsize) }} GiB +
+
+
+
+ {{ sizeInGiB(dataResource.chainsize) }} GiB
@@ -472,6 +478,12 @@ export default { } return `label.${source}` + }, + sizeInGiB (sizeInBytes) { + if (!sizeInBytes || sizeInBytes === 0) { + return '0.00' + } + return parseFloat(sizeInBytes / (1024.0 * 1024.0 * 1024.0)).toFixed(2) } } } diff --git a/ui/src/components/view/EventsTab.vue b/ui/src/components/view/EventsTab.vue index c3a6ac5c17f..59080a4988a 100644 --- a/ui/src/components/view/EventsTab.vue +++ b/ui/src/components/view/EventsTab.vue @@ -52,6 +52,8 @@ import { getAPI } from '@/api' import { genericCompare } from '@/utils/sort.js' import ListView from '@/components/view/ListView' +const EVENTS_TAB_COLUMNS_KEY = 'events_tab_columns' + export default { name: 'EventsTab', components: { @@ -98,8 +100,7 @@ export default { } }, created () { - this.selectedColumnKeys = this.columnKeys - this.updateSelectedColumns('description') + this.setDefaultColumns() this.pageSize = this.pageSizeOptions[0] * 1 this.fetchData() }, @@ -111,6 +112,15 @@ export default { } }, methods: { + setDefaultColumns () { + const savedColumns = this.$localStorage.get(EVENTS_TAB_COLUMNS_KEY) + if (savedColumns && Array.isArray(savedColumns) && savedColumns.length > 0) { + this.selectedColumnKeys = savedColumns + } else { + this.selectedColumnKeys = this.columnKeys.filter(x => x !== 'description') + } + this.updateColumns() + }, fetchData () { this.fetchEvents() }, @@ -145,6 +155,7 @@ export default { } else { this.selectedColumnKeys.push(key) } + this.$localStorage.set(EVENTS_TAB_COLUMNS_KEY, this.selectedColumnKeys) this.updateColumns() }, updateColumns () { diff --git a/ui/src/components/view/GPUSummaryTab.vue b/ui/src/components/view/GPUSummaryTab.vue index 8b649e05662..52553f143d5 100644 --- a/ui/src/components/view/GPUSummaryTab.vue +++ b/ui/src/components/view/GPUSummaryTab.vue @@ -167,15 +167,7 @@ export default { Object.values(cardGroups).forEach(cardGroup => { const profileCount = Object.keys(cardGroup.profiles).length - // Filter devices for card summary calculation - // 
Exclude passthrough profile devices from aggregates if there are multiple profiles - let cardDevicesForSummary = cardGroup.devices - if (profileCount > 1) { - cardDevicesForSummary = cardGroup.devices.filter(device => !device.vgpuprofilename || device.vgpuprofilename.toLowerCase() !== 'passthrough' - ) - } - - const cardSummary = this.calculateSummary(cardDevicesForSummary) + const cardSummary = this.calculateSummary(cardGroup.devices) const cardKey = `card-${cardGroup.gpucardname}` const cardNode = { @@ -192,7 +184,6 @@ export default { expandedKeys.push(cardKey) cardNode.children = Object.values(cardGroup.profiles) - .filter(profile => profile.vgpuprofilename.toLowerCase() !== 'passthrough') .map(profile => { const profileSummary = this.calculateSummary(profile.devices) return { @@ -204,7 +195,6 @@ export default { } }) } - summaryTree.push(cardNode) }) @@ -222,6 +212,9 @@ export default { } devices.forEach(device => { + if (device.gpudevicetype === 'VGPUOnly') { + return + } summary.total++ if (device.virtualmachineid) { diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 205e340652d..f4acba595b9 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -342,7 +342,8 @@ }} - +
+ + {{ $t('label.fetch.from.backup') }} + + + {{ $t('label.clear') }} + +
@@ -219,28 +227,31 @@ export default { }, updateNetworkData (name, key, value) { this.formRef.value.validate().then(() => { - this.$emit('handler-error', false) - const index = this.networks.findIndex(item => item.key === key) - if (index === -1) { - const networkItem = {} - networkItem.key = key - networkItem[name] = value - this.networks.push(networkItem) - this.$emit('update-network-config', this.networks) - return - } - - this.networks.filter((item, index) => { - if (item.key === key) { - this.networks[index][name] = value - } - }) + this.updateNetworkDataWithoutValidation(name, key, value) this.$emit('update-network-config', this.networks) }).catch((error) => { this.formRef.value.scrollToField(error.errorFields[0].name) this.$emit('handler-error', true) }) }, + updateNetworkDataWithoutValidation (name, key, value) { + this.$emit('handler-error', false) + const index = this.networks.findIndex(item => item.key === key) + if (index === -1) { + const networkItem = {} + networkItem.key = key + networkItem[name] = value + this.networks.push(networkItem) + this.$emit('update-network-config', this.networks) + return + } + + this.networks.filter((item, index) => { + if (item.key === key) { + this.networks[index][name] = value + } + }) + }, removeItem (id) { this.dataItems = this.dataItems.filter(item => item.id !== id) if (this.selectedRowKeys.includes(id)) { @@ -250,6 +261,59 @@ export default { } } }, + handleFetchIpAddresses () { + if (!this.preFillContent.networkids) { + return + } + if (!this.preFillContent.ipAddresses && !this.preFillContent.macAddresses) { + return + } + + const networkIds = this.dataItems.map(item => item.id) + this.dataItems.forEach(record => { + const ipAddressKey = 'ipAddress' + record.id + const macAddressKey = 'macAddress' + record.id + this.form[ipAddressKey] = '' + this.form[macAddressKey] = '' + }) + + networkIds.forEach((networkId) => { + const backupIndex = this.preFillContent.networkids.findIndex(id => id === networkId) + if 
(backupIndex !== -1) { + if (this.preFillContent.ipAddresses && backupIndex < this.preFillContent.ipAddresses.length) { + const ipAddress = this.preFillContent.ipAddresses[backupIndex] + if (ipAddress) { + const ipAddressKey = 'ipAddress' + networkId + this.form[ipAddressKey] = ipAddress + this.updateNetworkDataWithoutValidation('ipAddress', networkId, ipAddress) + } + } + + if (this.preFillContent.macAddresses && backupIndex < this.preFillContent.macAddresses.length) { + const macAddress = this.preFillContent.macAddresses[backupIndex] + if (macAddress) { + const macAddressKey = 'macAddress' + networkId + this.form[macAddressKey] = macAddress + this.updateNetworkDataWithoutValidation('macAddress', networkId, macAddress) + } + } + } + }) + }, + handleClearIpAddresses () { + this.dataItems.forEach(record => { + const ipAddressKey = 'ipAddress' + record.id + const macAddressKey = 'macAddress' + record.id + this.form[ipAddressKey] = '' + this.form[macAddressKey] = '' + + this.updateNetworkDataWithoutValidation('ipAddress', record.id, '') + this.updateNetworkDataWithoutValidation('macAddress', record.id, '') + }) + + this.networks = [] + this.$emit('update-network-config', this.networks) + }, async validatorMacAddress (rule, value) { if (!value || value === '') { return Promise.resolve() diff --git a/ui/src/views/compute/wizard/NetworkSelection.vue b/ui/src/views/compute/wizard/NetworkSelection.vue index 3a4fd8c88f9..fffcbf7e3e7 100644 --- a/ui/src/views/compute/wizard/NetworkSelection.vue +++ b/ui/src/views/compute/wizard/NetworkSelection.vue @@ -260,8 +260,11 @@ export default { }) if (!this.loading) { if (this.preFillContent.networkids) { - this.selectedRowKeys = this.preFillContent.networkids - this.$emit('select-network-item', this.preFillContent.networkids) + const validNetworkIds = this.preFillContent.networkids.filter(networkId => + this.items.some(item => item.id === networkId) + ) + this.selectedRowKeys = validNetworkIds + this.$emit('select-network-item', 
validNetworkIds) } else { if (this.items && this.items.length > 0) { if (this.oldZoneId === this.zoneId) { diff --git a/ui/src/views/compute/wizard/OsBasedImageSelection.vue b/ui/src/views/compute/wizard/OsBasedImageSelection.vue index ca157384a9a..6a52eea207b 100644 --- a/ui/src/views/compute/wizard/OsBasedImageSelection.vue +++ b/ui/src/views/compute/wizard/OsBasedImageSelection.vue @@ -25,6 +25,8 @@ @change="emitChangeImageType()"> {{ $t('label.template') }} {{ $t('label.iso') }} + {{ $t('label.volume') }} + {{ $t('label.snapshot') }}
{{ $t('message.' + localSelectedImageType.replace('id', '') + '.desc') }} diff --git a/ui/src/views/compute/wizard/TemplateIsoSelection.vue b/ui/src/views/compute/wizard/TemplateIsoSelection.vue index 9393a7860de..4979068dac7 100644 --- a/ui/src/views/compute/wizard/TemplateIsoSelection.vue +++ b/ui/src/views/compute/wizard/TemplateIsoSelection.vue @@ -25,7 +25,7 @@ @@ -103,12 +103,19 @@ export default { deep: true, handler (items) { const key = this.inputDecorator.slice(0, -2) + if (this.pagination) { + return + } for (const filter of this.filterOpts) { - if (items[filter.id] && items[filter.id][key] && items[filter.id][key].length > 0) { - if (!this.pagination) { + if (this.preFillContent.templateid) { + if (items[filter.id]?.[key]?.some(item => item.id === this.preFillContent.templateid)) { this.filterType = filter.id this.checkedValue = items[filter.id][key][0].id + break } + } else if (items[filter.id]?.[key]?.length > 0) { + this.filterType = filter.id + this.checkedValue = items[filter.id][key][0].id break } } diff --git a/ui/src/views/compute/wizard/VolumeDiskOfferingSelectView.vue b/ui/src/views/compute/wizard/VolumeDiskOfferingSelectView.vue new file mode 100644 index 00000000000..4fc7c3fc972 --- /dev/null +++ b/ui/src/views/compute/wizard/VolumeDiskOfferingSelectView.vue @@ -0,0 +1,280 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + + + diff --git a/ui/src/views/dashboard/CapacityDashboard.vue b/ui/src/views/dashboard/CapacityDashboard.vue index 7e0b8180ac8..9b374628886 100644 --- a/ui/src/views/dashboard/CapacityDashboard.vue +++ b/ui/src/views/dashboard/CapacityDashboard.vue @@ -210,14 +210,14 @@ - +
-
+
{{ $t(ts[ctype]) }} @@ -377,6 +377,8 @@ export default { MEMORY: 'label.memory', PRIVATE_IP: 'label.management.ips', SECONDARY_STORAGE: 'label.secondary.storage', + BACKUP_STORAGE: 'label.backup.storage', + OBJECT_STORAGE: 'label.object.storage', STORAGE: 'label.primary.storage.used', STORAGE_ALLOCATED: 'label.primary.storage.allocated', VIRTUAL_NETWORK_PUBLIC_IP: 'label.public.ips', @@ -438,6 +440,8 @@ export default { case 'STORAGE': case 'STORAGE_ALLOCATED': case 'SECONDARY_STORAGE': + case 'BACKUP_STORAGE': + case 'OBJECT_STORAGE': case 'LOCAL_STORAGE': value = parseFloat(value / (1024 * 1024 * 1024.0), 10).toFixed(2) if (value >= 1024.0) { @@ -667,6 +671,13 @@ export default { min-height: 370px; } +.dashboard-storage { + width: 100%; + overflow-x:hidden; + overflow-y: scroll; + max-height: 370px; +} + .dashboard-event { width: 100%; overflow-x:hidden; diff --git a/ui/src/views/iam/DomainView.vue b/ui/src/views/iam/DomainView.vue index a865b4383b1..2d414950456 100644 --- a/ui/src/views/iam/DomainView.vue +++ b/ui/src/views/iam/DomainView.vue @@ -78,7 +78,7 @@ + + diff --git a/ui/src/views/storage/FormSchedule.vue b/ui/src/views/storage/FormSchedule.vue index ea8559016b0..acc12e8158b 100644 --- a/ui/src/views/storage/FormSchedule.vue +++ b/ui/src/views/storage/FormSchedule.vue @@ -139,7 +139,7 @@ - + @@ -169,6 +169,35 @@ + + + + + + + + + + + + {{ opt.name || opt.description }} + + + + +
{{ $t('label.tags') }}
@@ -224,6 +253,7 @@