Merge remote-tracking branch 'upstream/main' into fork/shapeblue/integration-veeam-kvm

This commit is contained in:
Abhisar Sinha 2026-04-14 23:30:58 +05:30
commit 07bca60fc4
225 changed files with 12101 additions and 2475 deletions

1
.github/CODEOWNERS vendored
View File

@ -17,6 +17,7 @@
/plugins/storage/volume/linstor @rp-
/plugins/storage/volume/storpool @slavkap
/plugins/storage/volume/ontap @rajiv1 @sandeeplocharla @piyush5 @suryag
.pre-commit-config.yaml @jbampton
/.github/linters/ @jbampton

View File

@ -465,3 +465,6 @@ iscsi.session.cleanup.enabled=false
# Instance conversion VIRT_V2V_TMPDIR env var
#convert.instance.env.virtv2v.tmpdir=
# Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
# incremental.snapshot.retry.rebase.wait=60

View File

@ -899,6 +899,11 @@ public class AgentProperties{
*/
public static final Property<Boolean> CREATE_FULL_CLONE = new Property<>("create.full.clone", false);
/**
* Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
* */
public static final Property<Integer> INCREMENTAL_SNAPSHOT_RETRY_REBASE_WAIT = new Property<>("incremental.snapshot.retry.rebase.wait", 60);
public static class Property <T>{
private String name;

View File

@ -26,10 +26,13 @@ public final class BucketTO {
private String secretKey;
private long accountId;
/**
 * Transfer-object constructor: copies the name, access key, secret key and
 * owning account id from the given {@code Bucket} domain object.
 */
public BucketTO(Bucket bucket) {
this.name = bucket.getName();
this.accessKey = bucket.getAccessKey();
this.secretKey = bucket.getSecretKey();
this.accountId = bucket.getAccountId();
}
public BucketTO(String name) {
@ -47,4 +50,8 @@ public final class BucketTO {
/** Returns the bucket's secret key. */
public String getSecretKey() {
    return secretKey;
}
/** Returns the id of the account that owns this bucket. */
public long getAccountId() {
    return accountId;
}
}

View File

@ -82,7 +82,7 @@ public interface ProjectService {
Project updateProject(long id, String name, String displayText, String newOwnerName, Long userId, Role newRole) throws ResourceAllocationException;
boolean addAccountToProject(long projectId, String accountName, String email, Long projectRoleId, Role projectRoleType);
boolean addAccountToProject(long projectId, String accountName, String email, Long projectRoleId, Role projectRoleType) throws ResourceAllocationException;
boolean deleteAccountFromProject(long projectId, String accountName);
@ -100,6 +100,6 @@ public interface ProjectService {
Project findByProjectAccountIdIncludingRemoved(long projectAccountId);
boolean addUserToProject(Long projectId, String username, String email, Long projectRoleId, Role projectRole);
boolean addUserToProject(Long projectId, String username, String email, Long projectRoleId, Role projectRole) throws ResourceAllocationException;
}

View File

@ -71,7 +71,6 @@ import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd;
import org.apache.cloudstack.api.command.user.vmgroup.UpdateVMGroupCmd;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.config.ConfigurationGroup;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.alert.Alert;
import com.cloud.capacity.Capacity;
@ -108,14 +107,6 @@ import com.cloud.vm.VirtualMachineProfile;
public interface ManagementService {
static final String Name = "management-server";
ConfigKey<Boolean> JsInterpretationEnabled = new ConfigKey<>("Hidden"
, Boolean.class
, "js.interpretation.enabled"
, "false"
, "Enable/Disable all JavaScript interpretation related functionalities to create or update Javascript rules."
, false
, ConfigKey.Scope.Global);
/**
* returns the a map of the names/values in the configuration table
*
@ -534,6 +525,4 @@ public interface ManagementService {
boolean removeManagementServer(RemoveManagementServerCmd cmd);
void checkJsInterpretationAllowedIfNeededForParameterValue(String paramName, boolean paramValue);
}

View File

@ -23,9 +23,10 @@ import org.apache.cloudstack.api.InternalIdentity;
public interface VMTemplateStorageResourceAssoc extends InternalIdentity {
public static enum Status {
UNKNOWN, DOWNLOAD_ERROR, NOT_DOWNLOADED, DOWNLOAD_IN_PROGRESS, DOWNLOADED, ABANDONED, UPLOADED, NOT_UPLOADED, UPLOAD_ERROR, UPLOAD_IN_PROGRESS, CREATING, CREATED, BYPASSED
UNKNOWN, DOWNLOAD_ERROR, NOT_DOWNLOADED, DOWNLOAD_IN_PROGRESS, DOWNLOADED, ABANDONED, LIMIT_REACHED, UPLOADED, NOT_UPLOADED, UPLOAD_ERROR, UPLOAD_IN_PROGRESS, CREATING, CREATED, BYPASSED
}
List<Status> ERROR_DOWNLOAD_STATES = List.of(Status.DOWNLOAD_ERROR, Status.ABANDONED, Status.LIMIT_REACHED, Status.UNKNOWN);
List<Status> PENDING_DOWNLOAD_STATES = List.of(Status.NOT_DOWNLOADED, Status.DOWNLOAD_IN_PROGRESS);
String getInstallPath();

View File

@ -30,6 +30,7 @@ import com.cloud.exception.ResourceAllocationException;
import com.cloud.offering.DiskOffering;
import com.cloud.offering.ServiceOffering;
import com.cloud.template.VirtualMachineTemplate;
import org.apache.cloudstack.resourcelimit.Reserver;
public interface ResourceLimitService {
@ -191,6 +192,7 @@ public interface ResourceLimitService {
*/
public void checkResourceLimit(Account account, ResourceCount.ResourceType type, long... count) throws ResourceAllocationException;
public void checkResourceLimitWithTag(Account account, ResourceCount.ResourceType type, String tag, long... count) throws ResourceAllocationException;
public void checkResourceLimitWithTag(Account account, Long domainId, boolean considerSystemAccount, ResourceCount.ResourceType type, String tag, long... count) throws ResourceAllocationException;
/**
* Gets the count of resources for a resource type and account
@ -251,12 +253,12 @@ public interface ResourceLimitService {
List<String> getResourceLimitStorageTags(DiskOffering diskOffering);
void updateTaggedResourceLimitsAndCountsForAccounts(List<AccountResponse> responses, String tag);
void updateTaggedResourceLimitsAndCountsForDomains(List<DomainResponse> responses, String tag);
void checkVolumeResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering) throws ResourceAllocationException;
void checkVolumeResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering, List<Reserver> reservations) throws ResourceAllocationException;
List<String> getResourceLimitStorageTagsForResourceCountOperation(Boolean display, DiskOffering diskOffering);
void checkVolumeResourceLimitForDiskOfferingChange(Account owner, Boolean display, Long currentSize, Long newSize,
DiskOffering currentOffering, DiskOffering newOffering) throws ResourceAllocationException;
DiskOffering currentOffering, DiskOffering newOffering, List<Reserver> reservations) throws ResourceAllocationException;
void checkPrimaryStorageResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering) throws ResourceAllocationException;
void checkPrimaryStorageResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering, List<Reserver> reservations) throws ResourceAllocationException;
void incrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
void decrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
@ -273,25 +275,23 @@ public interface ResourceLimitService {
void incrementVolumePrimaryStorageResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
void decrementVolumePrimaryStorageResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
void checkVmResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template) throws ResourceAllocationException;
void checkVmResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Reserver> reservations) throws ResourceAllocationException;
void incrementVmResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template);
void decrementVmResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template);
void checkVmResourceLimitsForServiceOfferingChange(Account owner, Boolean display, Long currentCpu, Long newCpu,
Long currentMemory, Long newMemory, ServiceOffering currentOffering, ServiceOffering newOffering, VirtualMachineTemplate template) throws ResourceAllocationException;
Long currentMemory, Long newMemory, ServiceOffering currentOffering, ServiceOffering newOffering, VirtualMachineTemplate template, List<Reserver> reservations) throws ResourceAllocationException;
void checkVmResourceLimitsForTemplateChange(Account owner, Boolean display, ServiceOffering offering,
VirtualMachineTemplate currentTemplate, VirtualMachineTemplate newTemplate) throws ResourceAllocationException;
VirtualMachineTemplate currentTemplate, VirtualMachineTemplate newTemplate, List<Reserver> reservations) throws ResourceAllocationException;
void checkVmCpuResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu) throws ResourceAllocationException;
void incrementVmCpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu);
void decrementVmCpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu);
void checkVmMemoryResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory) throws ResourceAllocationException;
void incrementVmMemoryResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory);
void decrementVmMemoryResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory);
void checkVmGpuResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu) throws ResourceAllocationException;
void incrementVmGpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu);
void decrementVmGpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu);
long recalculateDomainResourceCount(final long domainId, final ResourceType type, String tag);
}

View File

@ -127,8 +127,8 @@ public enum ApiCommandResourceType {
}
public static ApiCommandResourceType fromString(String value) {
if (StringUtils.isNotEmpty(value) && EnumUtils.isValidEnum(ApiCommandResourceType.class, value)) {
return valueOf(value);
if (StringUtils.isNotBlank(value) && EnumUtils.isValidEnumIgnoreCase(ApiCommandResourceType.class, value)) {
return EnumUtils.getEnumIgnoreCase(ApiCommandResourceType.class, value);
}
return null;
}

View File

@ -512,6 +512,7 @@ public class ApiConstants {
public static final String REPAIR = "repair";
public static final String REPETITION_ALLOWED = "repetitionallowed";
public static final String REQUIRES_HVM = "requireshvm";
public static final String RESERVED_RESOURCE_DETAILS = "reservedresourcedetails";
public static final String RESOURCES = "resources";
public static final String RESOURCE_COUNT = "resourcecount";
public static final String RESOURCE_NAME = "resourcename";

View File

@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.account;
import java.util.List;
import com.cloud.exception.ResourceAllocationException;
import org.apache.cloudstack.api.ApiArgValidator;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.BaseCmd;
@ -106,7 +107,7 @@ public class AddAccountToProjectCmd extends BaseAsyncCmd {
/////////////////////////////////////////////////////
@Override
public void execute() {
public void execute() throws ResourceAllocationException {
if (accountName == null && email == null) {
throw new InvalidParameterValueException("Either accountName or email is required");
}

View File

@ -17,6 +17,7 @@
package org.apache.cloudstack.api.command.user.account;
import com.cloud.exception.ResourceAllocationException;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiArgValidator;
@ -111,7 +112,7 @@ public class AddUserToProjectCmd extends BaseAsyncCmd {
/////////////////////////////////////////////////////
@Override
public void execute() {
public void execute() throws ResourceAllocationException {
validateInput();
boolean result = _projectService.addUserToProject(getProjectId(), getUsername(), getEmail(), getProjectRoleId(), getRoleType());
if (result) {

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.api.command.user.backup;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.ACL;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
@ -53,6 +54,7 @@ public class RestoreVolumeFromBackupAndAttachToVMCmd extends BaseAsyncCmd {
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@ACL
@Parameter(name = ApiConstants.BACKUP_ID,
type = CommandType.UUID,
entityType = BackupResponse.class,
@ -60,12 +62,14 @@ public class RestoreVolumeFromBackupAndAttachToVMCmd extends BaseAsyncCmd {
description = "ID of the Instance backup")
private Long backupId;
@ACL
@Parameter(name = ApiConstants.VOLUME_ID,
type = CommandType.STRING,
required = true,
description = "ID of the volume backed up")
private String volumeUuid;
@ACL
@Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID,
type = CommandType.UUID,
entityType = UserVmResponse.class,

View File

@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.job;
import java.util.Date;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiArgValidator;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListAccountResourcesCmd;
import org.apache.cloudstack.api.Parameter;
@ -40,6 +41,12 @@ public class ListAsyncJobsCmd extends BaseListAccountResourcesCmd {
@Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = CommandType.UUID, entityType = ManagementServerResponse.class, description = "The id of the management server", since="4.19")
private Long managementServerId;
@Parameter(name = ApiConstants.RESOURCE_ID, validations = {ApiArgValidator.UuidString}, type = CommandType.STRING, description = "the ID of the resource associated with the job", since="4.22.1")
private String resourceId;
@Parameter(name = ApiConstants.RESOURCE_TYPE, type = CommandType.STRING, description = "the type of the resource associated with the job", since="4.22.1")
private String resourceType;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -52,6 +59,14 @@ public class ListAsyncJobsCmd extends BaseListAccountResourcesCmd {
return managementServerId;
}
public String getResourceId() {
return resourceId;
}
public String getResourceType() {
return resourceType;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -16,8 +16,8 @@
// under the License.
package org.apache.cloudstack.api.command.user.job;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiArgValidator;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
@ -34,9 +34,15 @@ public class QueryAsyncJobResultCmd extends BaseCmd {
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, required = true, description = "The ID of the asynchronous job")
@Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, description = "The ID of the asynchronous job")
private Long id;
@Parameter(name = ApiConstants.RESOURCE_ID, validations = {ApiArgValidator.UuidString}, type = CommandType.STRING, description = "the ID of the resource associated with the job", since="4.22.1")
private String resourceId;
@Parameter(name = ApiConstants.RESOURCE_TYPE, type = CommandType.STRING, description = "the type of the resource associated with the job", since="4.22.1")
private String resourceType;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -45,6 +51,14 @@ public class QueryAsyncJobResultCmd extends BaseCmd {
return id;
}
public String getResourceId() {
return resourceId;
}
public String getResourceType() {
return resourceType;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -51,6 +51,7 @@ public class CreateVMFromBackupCmd extends BaseDeployVMCmd {
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@ACL
@Parameter(name = ApiConstants.BACKUP_ID,
type = CommandType.UUID,
entityType = BackupResponse.class,

View File

@ -113,7 +113,8 @@ public class CreateVolumeCmd extends BaseAsyncCreateCustomIdCmd implements UserC
@Parameter(name = ApiConstants.STORAGE_ID,
type = CommandType.UUID,
entityType = StoragePoolResponse.class,
description = "Storage pool ID to create the volume in. Exclusive with SnapshotId parameter.")
description = "Storage pool ID to create the volume in. Cannot be used with the snapshotid parameter.",
authorized = {RoleType.Admin})
private Long storageId;
/////////////////////////////////////////////////////

View File

@ -85,6 +85,12 @@ public class ExtensionResponse extends BaseResponse {
@Param(description = "Removal timestamp of the extension, if applicable")
private Date removed;
@SerializedName(ApiConstants.RESERVED_RESOURCE_DETAILS)
@Param(description = "Resource detail names as comma separated string that should be reserved and not visible " +
"to end users",
since = "4.22.1")
protected String reservedResourceDetails;
public ExtensionResponse(String id, String name, String description, String type) {
this.id = id;
this.name = name;
@ -179,4 +185,8 @@ public class ExtensionResponse extends BaseResponse {
public void setRemoved(Date removed) {
this.removed = removed;
}
public void setReservedResourceDetails(String reservedResourceDetails) {
this.reservedResourceDetails = reservedResourceDetails;
}
}

View File

@ -17,8 +17,11 @@
package org.apache.cloudstack.extension;
import java.util.List;
/**
 * Helper for resolving {@code Extension}s and their reserved resource details.
 */
public interface ExtensionHelper {
/** Returns the id of the extension associated with the given cluster — presumably {@code null} when none is mapped; TODO confirm. */
Long getExtensionIdForCluster(long clusterId);
/** Returns the extension with the given internal id. */
Extension getExtension(long id);
/** Returns the extension associated with the given cluster. */
Extension getExtensionForCluster(long clusterId);
/** Returns the resource-detail names reserved for the given extension (i.e. not exposed to end users — TODO confirm against ExtensionResponse usage). */
List<String> getExtensionReservedResourceDetails(long extensionId);
}

View File

@ -0,0 +1,30 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.resourcelimit;
/**
 * Interface implemented by <code>CheckedReservation</code>.
 * <br><br>
 * This is defined in <code>cloud-api</code> to allow methods declared in modules that do not depend on <code>cloud-server</code>
 * to receive <code>CheckedReservations</code> as parameters.
 */
public interface Reserver extends AutoCloseable {

    /**
     * Releases the reservation. Narrows {@link AutoCloseable#close()} so that
     * callers (e.g. try-with-resources blocks) need not handle a checked
     * {@link Exception}.
     */
    @Override
    void close();
}

View File

@ -37,7 +37,7 @@ public interface VolumeImportUnmanageService extends PluggableService, Configura
Arrays.asList(Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.VMware);
List<Storage.StoragePoolType> SUPPORTED_STORAGE_POOL_TYPES_FOR_KVM = Arrays.asList(Storage.StoragePoolType.NetworkFilesystem,
Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.RBD);
Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.RBD, Storage.StoragePoolType.SharedMountPoint);
ConfigKey<Boolean> AllowImportVolumeWithBackingFile = new ConfigKey<>(Boolean.class,
"allow.import.volume.with.backing.file",

View File

@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.api.command.test;
import com.cloud.exception.ResourceAllocationException;
import junit.framework.Assert;
import junit.framework.TestCase;
@ -149,6 +150,8 @@ public class AddAccountToProjectCmdTest extends TestCase {
addAccountToProjectCmd.execute();
} catch (InvalidParameterValueException exception) {
Assert.assertEquals("Either accountName or email is required", exception.getLocalizedMessage());
} catch (ResourceAllocationException exception) {
Assert.fail();
}
}

View File

@ -121,6 +121,11 @@
<artifactId>cloud-plugin-storage-volume-adaptive</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-ontap</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-volume-solidfire</artifactId>

View File

@ -140,7 +140,7 @@ public class DownloadAnswer extends Answer {
}
public Long getTemplateSize() {
return templateSize;
return templateSize == 0 ? templatePhySicalSize : templateSize;
}
public void setTemplatePhySicalSize(long templatePhySicalSize) {

View File

@ -21,6 +21,7 @@ package org.apache.cloudstack.direct.download;
import com.cloud.utils.UriUtils;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.utils.security.DigestHelper;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
@ -33,6 +34,7 @@ import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public abstract class DirectTemplateDownloaderImpl implements DirectTemplateDownloader {
@ -128,15 +130,14 @@ public abstract class DirectTemplateDownloaderImpl implements DirectTemplateDown
*/
protected File createTemporaryDirectoryAndFile(String downloadDir) {
createFolder(downloadDir);
return new File(downloadDir + File.separator + getFileNameFromUrl());
return new File(downloadDir + File.separator + getTemporaryFileName());
}
/**
* Return filename from url
* Return filename from the temporary download file
*/
public String getFileNameFromUrl() {
String[] urlParts = url.split("/");
return urlParts[urlParts.length - 1];
public String getTemporaryFileName() {
return String.format("%s.%s", UUID.randomUUID(), FilenameUtils.getExtension(url));
}
@Override

View File

@ -97,7 +97,7 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm
DirectTemplateDownloader urlDownloader = createDownloaderForMetalinks(getUrl(), getTemplateId(), getDestPoolPath(),
getChecksum(), headers, connectTimeout, soTimeout, null, temporaryDownloadPath);
try {
setDownloadedFilePath(downloadDir + File.separator + getFileNameFromUrl());
setDownloadedFilePath(downloadDir + File.separator + getTemporaryFileName());
File f = new File(getDownloadedFilePath());
if (f.exists()) {
f.delete();

View File

@ -69,7 +69,7 @@ public class NfsDirectTemplateDownloader extends DirectTemplateDownloaderImpl {
String mount = String.format(mountCommand, srcHost + ":" + srcPath, "/mnt/" + mountSrcUuid);
Script.runSimpleBashScript(mount);
String downloadDir = getDestPoolPath() + File.separator + getDirectDownloadTempPath(getTemplateId());
setDownloadedFilePath(downloadDir + File.separator + getFileNameFromUrl());
setDownloadedFilePath(downloadDir + File.separator + getTemporaryFileName());
Script.runSimpleBashScript("cp /mnt/" + mountSrcUuid + srcPath + " " + getDownloadedFilePath());
Script.runSimpleBashScript("umount /mnt/" + mountSrcUuid);
return new Pair<>(true, getDownloadedFilePath());

View File

@ -19,6 +19,9 @@
package org.apache.cloudstack.storage.command;
import com.cloud.configuration.Resource;
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
public class TemplateOrVolumePostUploadCommand {
long entityId;
@ -185,6 +188,11 @@ public class TemplateOrVolumePostUploadCommand {
this.description = description;
}
/**
 * Stores the secondary-storage limit, converting the given byte value to
 * gibibytes — except for the {@code Resource.RESOURCE_UNLIMITED} sentinel,
 * which is stored unchanged so the "unlimited" marker survives the unit
 * conversion.
 */
public void setDefaultMaxSecondaryStorageInBytes(long defaultMaxSecondaryStorageInBytes) {
this.defaultMaxSecondaryStorageInGB = defaultMaxSecondaryStorageInBytes != Resource.RESOURCE_UNLIMITED ?
ByteScaleUtils.bytesToGibibytes(defaultMaxSecondaryStorageInBytes) : Resource.RESOURCE_UNLIMITED;
}
public void setDefaultMaxSecondaryStorageInGB(long defaultMaxSecondaryStorageInGB) {
this.defaultMaxSecondaryStorageInGB = defaultMaxSecondaryStorageInGB;
}

View File

@ -28,6 +28,7 @@ public class UploadStatusCommand extends Command {
}
private String entityUuid;
private EntityType entityType;
private Boolean abort;
protected UploadStatusCommand() {
}
@ -37,6 +38,11 @@ public class UploadStatusCommand extends Command {
this.entityType = entityType;
}
public UploadStatusCommand(String entityUuid, EntityType entityType, Boolean abort) {
this(entityUuid, entityType);
this.abort = abort;
}
public String getEntityUuid() {
return entityUuid;
}
@ -45,6 +51,10 @@ public class UploadStatusCommand extends Command {
return entityType;
}
public Boolean getAbort() {
return abort;
}
@Override
public boolean executeInSequence() {
return false;

View File

@ -310,7 +310,7 @@ public interface NetworkOrchestrationService {
void removeDhcpServiceInSubnet(Nic nic);
boolean resourceCountNeedsUpdate(NetworkOffering ntwkOff, ACLType aclType);
boolean isResourceCountUpdateNeeded(NetworkOffering networkOffering);
void prepareAllNicsForMigration(VirtualMachineProfile vm, DeployDestination dest);

View File

@ -120,7 +120,7 @@ public interface VolumeOrchestrationService {
void destroyVolume(Volume volume);
DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
Account owner, Long deviceId);
Account owner, Long deviceId, boolean incrementResourceCount);
VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException;

View File

@ -21,6 +21,7 @@ import static org.apache.cloudstack.framework.config.ConfigKey.Scope.Cluster;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.component.Manager;
import com.cloud.vm.VMInstanceVO;
import org.apache.cloudstack.framework.config.ConfigKey;
@ -32,6 +33,8 @@ import java.util.List;
*/
public interface HighAvailabilityManager extends Manager {
List<StoragePoolType> LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT = List.of(StoragePoolType.NetworkFilesystem, StoragePoolType.SharedMountPoint);
ConfigKey<Boolean> ForceHA = new ConfigKey<>("Advanced", Boolean.class, "force.ha", "false",
"Force High-Availability to happen even if the VM says no.", true, Cluster);

View File

@ -309,7 +309,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.google.gson.Gson;
public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable {
public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName();
@ -593,7 +592,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
Long deviceId = dataDiskDeviceIds.get(index++);
String volumeName = deviceId == null ? "DATA-" + persistedVm.getId() : "DATA-" + persistedVm.getId() + "-" + String.valueOf(deviceId);
volumeMgr.allocateRawVolume(Type.DATADISK, volumeName, dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, deviceId);
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, deviceId, true);
}
}
if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) {
@ -603,7 +602,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024);
VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey());
volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + persistedVm.getId() + "-" + String.valueOf( diskNumber), diskOffering, diskOfferingSize, null, null,
persistedVm, dataDiskTemplate, owner, diskNumber);
persistedVm, dataDiskTemplate, owner, diskNumber, true);
diskNumber++;
}
}
@ -633,7 +632,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
String rootVolumeName = String.format("ROOT-%s", vm.getId());
if (template.getFormat() == ImageFormat.ISO) {
volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null);
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null, true);
} else if (Arrays.asList(ImageFormat.BAREMETAL, ImageFormat.EXTERNAL).contains(template.getFormat())) {
logger.debug("{} has format [{}]. Skipping ROOT volume [{}] allocation.", template, template.getFormat(), rootVolumeName);
} else {
@ -2224,7 +2223,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final boolean force, final boolean checkBeforeCleanup) {
final VirtualMachine vm = profile.getVirtualMachine();
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
StopCommand stpCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
updateStopCommandForExternalHypervisorType(vm.getHypervisorType(), profile, stpCmd);
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
@ -5283,9 +5281,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
private void saveCustomOfferingDetails(long vmId, ServiceOffering serviceOffering) {
Map<String, String> details = vmInstanceDetailsDao.listDetailsKeyPairs(vmId);
details.put(UsageEventVO.DynamicParameters.cpuNumber.name(), serviceOffering.getCpu().toString());
details.put(UsageEventVO.DynamicParameters.cpuSpeed.name(), serviceOffering.getSpeed().toString());
details.put(UsageEventVO.DynamicParameters.memory.name(), serviceOffering.getRamSize().toString());
// We need to restore only the customizable parameters. If we save a parameter that is not customizable and attempt
// to restore a VM snapshot, com.cloud.vm.UserVmManagerImpl.validateCustomParameters will fail.
ServiceOffering unfilledOffering = _serviceOfferingDao.findByIdIncludingRemoved(serviceOffering.getId());
if (unfilledOffering.getCpu() == null) {
details.put(UsageEventVO.DynamicParameters.cpuNumber.name(), serviceOffering.getCpu().toString());
}
if (unfilledOffering.getSpeed() == null) {
details.put(UsageEventVO.DynamicParameters.cpuSpeed.name(), serviceOffering.getSpeed().toString());
}
if (unfilledOffering.getRamSize() == null) {
details.put(UsageEventVO.DynamicParameters.memory.name(), serviceOffering.getRamSize().toString());
}
List<VMInstanceDetailVO> detailList = new ArrayList<>();
for (Map.Entry<String, String> entry: details.entrySet()) {
VMInstanceDetailVO detailVO = new VMInstanceDetailVO(vmId, entry.getKey(), entry.getValue(), true);

View File

@ -58,6 +58,7 @@ import org.apache.cloudstack.framework.messagebus.PublishScope;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.network.RoutedIpv4Manager;
import org.apache.cloudstack.network.dao.NetworkPermissionDao;
import org.apache.cloudstack.reservation.dao.ReservationDao;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.ObjectUtils;
@ -86,6 +87,7 @@ import com.cloud.api.query.dao.DomainRouterJoinDao;
import com.cloud.api.query.vo.DomainRouterJoinVO;
import com.cloud.bgp.BGPService;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.configuration.Resource;
import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.ASNumberVO;
import com.cloud.dc.ClusterVO;
@ -214,6 +216,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao;
import com.cloud.offerings.dao.NetworkOfferingDetailsDao;
import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
import com.cloud.resource.ResourceManager;
import com.cloud.resourcelimit.CheckedReservation;
import com.cloud.server.ManagementServer;
import com.cloud.user.Account;
import com.cloud.user.ResourceLimitService;
@ -447,6 +450,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
ClusterDao clusterDao;
@Inject
RoutedIpv4Manager routedIpv4Manager;
@Inject
private ReservationDao reservationDao;
protected StateMachine2<Network.State, Network.Event, Network> _stateMachine;
ScheduledExecutorService _executor;
@ -2752,12 +2757,6 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return null;
}
final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, aclType);
//check resource limits
if (updateResourceCount) {
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.network, isDisplayNetworkEnabled);
}
// Validate network offering
if (ntwkOff.getState() != NetworkOffering.State.Enabled) {
// see NetworkOfferingVO
@ -2776,6 +2775,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
boolean ipv6 = false;
try (CheckedReservation networkReservation = new CheckedReservation(owner, domainId, Resource.ResourceType.network, null, null, 1L, reservationDao, _resourceLimitMgr)) {
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
ipv6 = true;
}
@ -3115,8 +3116,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
}
if (updateResourceCount) {
_resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.network, isDisplayNetworkEnabled);
if (isResourceCountUpdateNeeded(ntwkOff)) {
changeAccountResourceCountOrRecalculateDomainResourceCount(owner.getAccountId(), domainId, isDisplayNetworkEnabled, true);
}
UsageEventUtils.publishNetworkCreation(network);
@ -3127,6 +3128,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
CallContext.current().setEventDetails("Network ID: " + network.getUuid());
CallContext.current().putContextParameter(Network.class, network.getUuid());
return network;
}
}
@Override
@ -3492,9 +3494,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
final NetworkOffering ntwkOff = _entityMgr.findById(NetworkOffering.class, networkFinal.getNetworkOfferingId());
final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, networkFinal.getAclType());
if (updateResourceCount) {
_resourceLimitMgr.decrementResourceCount(networkFinal.getAccountId(), ResourceType.network, networkFinal.getDisplayNetwork());
if (isResourceCountUpdateNeeded(ntwkOff)) {
changeAccountResourceCountOrRecalculateDomainResourceCount(networkFinal.getAccountId(), networkFinal.getDomainId(), networkFinal.getDisplayNetwork(), false);
}
}
return deletedVlans.second();
@ -3517,6 +3518,23 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
return success;
}
/**
 * If it is a shared network with {@link ACLType#Domain}, it will belong to account {@link Account#ACCOUNT_ID_SYSTEM} and the resources will not be incremented for the
* domain. Therefore, we force the recalculation of the domain's resource count in this case. Otherwise, it will change the count for the account owner.
 * @param incrementAccountResourceCount If true, the account resource count will be incremented by 1; otherwise, it will be decremented by 1.
*/
private void changeAccountResourceCountOrRecalculateDomainResourceCount(Long accountId, Long domainId, boolean displayNetwork, boolean incrementAccountResourceCount) {
if (Account.ACCOUNT_ID_SYSTEM == accountId && ObjectUtils.isNotEmpty(domainId)) {
_resourceLimitMgr.recalculateDomainResourceCount(domainId, ResourceType.network, null);
} else {
if (incrementAccountResourceCount) {
_resourceLimitMgr.incrementResourceCount(accountId, ResourceType.network, displayNetwork);
} else {
_resourceLimitMgr.decrementResourceCount(accountId, ResourceType.network, displayNetwork);
}
}
}
private void publishDeletedVlanRanges(List<VlanVO> deletedVlanRangeToPublish) {
if (CollectionUtils.isNotEmpty(deletedVlanRangeToPublish)) {
for (VlanVO vlan : deletedVlanRangeToPublish) {
@ -3526,10 +3544,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
@Override
public boolean resourceCountNeedsUpdate(final NetworkOffering ntwkOff, final ACLType aclType) {
//Update resource count only for Isolated account specific non-system networks
final boolean updateResourceCount = ntwkOff.getGuestType() == GuestType.Isolated && !ntwkOff.isSystemOnly() && aclType == ACLType.Account;
return updateResourceCount;
public boolean isResourceCountUpdateNeeded(NetworkOffering networkOffering) {
return !networkOffering.isSystemOnly();
}
protected Pair<Boolean, List<VlanVO>> deleteVlansInNetwork(final NetworkVO network, final long userId, final Account callerAccount) {

View File

@ -40,6 +40,7 @@ import javax.naming.ConfigurationException;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.resourcelimit.ReservationHelper;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
@ -82,6 +83,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.resourcelimit.Reserver;
import org.apache.cloudstack.secret.PassphraseVO;
import org.apache.cloudstack.secret.dao.PassphraseDao;
import org.apache.cloudstack.snapshot.SnapshotHelper;
@ -862,7 +864,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true)
@Override
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner,
Long deviceId) {
Long deviceId, boolean incrementResourceCount) {
if (size == null) {
size = offering.getDiskSize();
} else {
@ -901,7 +903,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
saveVolumeDetails(offering.getId(), vol.getId());
// Save usage event and update resource count for user vm volumes
if (vm.getType() == VirtualMachine.Type.User) {
if (vm.getType() == VirtualMachine.Type.User && incrementResourceCount) {
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size,
Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume());
_resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering);
@ -1938,14 +1940,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
template == null ? null : template.getSize(),
vol.getPassphraseId() != null);
if (newSize != vol.getSize()) {
DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
if (newSize == vol.getSize()) {
return;
}
DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
List<Reserver> reservations = new ArrayList<>();
try {
VMInstanceVO vm = vol.getInstanceId() != null ? vmInstanceDao.findById(vol.getInstanceId()) : null;
if (vm == null || vm.getType() == VirtualMachine.Type.User) {
// Update resource count for user vm volumes when volume is attached
if (newSize > vol.getSize()) {
_resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
vol.isDisplay(), newSize - vol.getSize(), diskOffering);
vol.isDisplay(), newSize - vol.getSize(), diskOffering, reservations);
_resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
newSize - vol.getSize(), diskOffering);
} else {
@ -1953,9 +1961,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
vol.getSize() - newSize, diskOffering);
}
}
vol.setSize(newSize);
_volsDao.persist(vol);
} finally {
ReservationHelper.closeAll(reservations);
}
vol.setSize(newSize);
_volsDao.persist(vol);
}
@Override

View File

@ -27,6 +27,6 @@ public interface AccountVlanMapDao extends GenericDao<AccountVlanMapVO, Long> {
public List<AccountVlanMapVO> listAccountVlanMapsByVlan(long vlanDbId);
public AccountVlanMapVO findAccountVlanMap(long accountId, long vlanDbId);
public AccountVlanMapVO findAccountVlanMap(Long accountId, long vlanDbId);
}

View File

@ -48,9 +48,9 @@ public class AccountVlanMapDaoImpl extends GenericDaoBase<AccountVlanMapVO, Long
}
@Override
public AccountVlanMapVO findAccountVlanMap(long accountId, long vlanDbId) {
public AccountVlanMapVO findAccountVlanMap(Long accountId, long vlanDbId) {
SearchCriteria<AccountVlanMapVO> sc = AccountVlanSearch.create();
sc.setParameters("accountId", accountId);
sc.setParametersIfNotNull("accountId", accountId);
sc.setParameters("vlanDbId", vlanDbId);
return findOneIncludingRemovedBy(sc);
}

View File

@ -24,5 +24,5 @@ import com.cloud.utils.db.GenericDao;
public interface DomainVlanMapDao extends GenericDao<DomainVlanMapVO, Long> {
public List<DomainVlanMapVO> listDomainVlanMapsByDomain(long domainId);
public List<DomainVlanMapVO> listDomainVlanMapsByVlan(long vlanDbId);
public DomainVlanMapVO findDomainVlanMap(long domainId, long vlanDbId);
public DomainVlanMapVO findDomainVlanMap(Long domainId, long vlanDbId);
}

View File

@ -46,9 +46,9 @@ public class DomainVlanMapDaoImpl extends GenericDaoBase<DomainVlanMapVO, Long>
}
@Override
public DomainVlanMapVO findDomainVlanMap(long domainId, long vlanDbId) {
public DomainVlanMapVO findDomainVlanMap(Long domainId, long vlanDbId) {
SearchCriteria<DomainVlanMapVO> sc = DomainVlanSearch.create();
sc.setParameters("domainId", domainId);
sc.setParametersIfNotNull("domainId", domainId);
sc.setParameters("vlanDbId", vlanDbId);
return findOneIncludingRemovedBy(sc);
}

View File

@ -45,4 +45,9 @@ public interface HostTagsDao extends GenericDao<HostTagVO, Long> {
HostTagResponse newHostTagResponse(HostTagVO hostTag);
List<HostTagVO> searchByIds(Long... hostTagIds);
/**
* List all host tags defined on hosts within a cluster
*/
List<String> listByClusterId(Long clusterId);
}

View File

@ -23,6 +23,7 @@ import org.apache.cloudstack.api.response.HostTagResponse;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
@ -43,9 +44,12 @@ public class HostTagsDaoImpl extends GenericDaoBase<HostTagVO, Long> implements
private final SearchBuilder<HostTagVO> stSearch;
private final SearchBuilder<HostTagVO> tagIdsearch;
private final SearchBuilder<HostTagVO> ImplicitTagsSearch;
private final GenericSearchBuilder<HostTagVO, String> tagSearch;
@Inject
private ConfigurationDao _configDao;
@Inject
private HostDao hostDao;
public HostTagsDaoImpl() {
HostSearch = createSearchBuilder();
@ -72,6 +76,11 @@ public class HostTagsDaoImpl extends GenericDaoBase<HostTagVO, Long> implements
ImplicitTagsSearch.and("hostId", ImplicitTagsSearch.entity().getHostId(), SearchCriteria.Op.EQ);
ImplicitTagsSearch.and("isImplicit", ImplicitTagsSearch.entity().getIsImplicit(), SearchCriteria.Op.EQ);
ImplicitTagsSearch.done();
tagSearch = createSearchBuilder(String.class);
tagSearch.selectFields(tagSearch.entity().getTag());
tagSearch.and("hostIdIN", tagSearch.entity().getHostId(), SearchCriteria.Op.IN);
tagSearch.done();
}
@Override
@ -235,4 +244,15 @@ public class HostTagsDaoImpl extends GenericDaoBase<HostTagVO, Long> implements
return tagList;
}
@Override
public List<String> listByClusterId(Long clusterId) {
List<Long> hostIds = hostDao.listIdsByClusterId(clusterId);
if (CollectionUtils.isEmpty(hostIds)) {
return new ArrayList<>();
}
SearchCriteria<String> sc = tagSearch.create();
sc.setParameters("hostIdIN", hostIds.toArray());
return customSearch(sc, null);
}
}

View File

@ -170,6 +170,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
CountSnapshotsByAccount.select(null, Func.COUNT, null);
CountSnapshotsByAccount.and("account", CountSnapshotsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
CountSnapshotsByAccount.and("status", CountSnapshotsByAccount.entity().getState(), SearchCriteria.Op.NIN);
CountSnapshotsByAccount.and("snapshotTypeNEQ", CountSnapshotsByAccount.entity().getSnapshotType(), SearchCriteria.Op.NIN);
CountSnapshotsByAccount.and("removed", CountSnapshotsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL);
CountSnapshotsByAccount.done();
@ -220,6 +221,7 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
SearchCriteria<Long> sc = CountSnapshotsByAccount.create();
sc.setParameters("account", accountId);
sc.setParameters("status", State.Error, State.Destroyed);
sc.setParameters("snapshotTypeNEQ", Snapshot.Type.GROUP.ordinal());
return customSearch(sc, null).get(0);
}

View File

@ -17,7 +17,12 @@
package com.cloud.upgrade.dao;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade42210to42300 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {
@ -42,4 +47,46 @@ public class Upgrade42210to42300 extends DbUpgradeAbstractImpl implements DbUpgr
return new InputStream[] {script};
}
@Override
public void performDataMigration(Connection conn) {
unhideJsInterpretationEnabled(conn);
}
protected void unhideJsInterpretationEnabled(Connection conn) {
String value = getJsInterpretationEnabled(conn);
if (value != null) {
updateJsInterpretationEnabledFields(conn, value);
}
}
protected String getJsInterpretationEnabled(Connection conn) {
String query = "SELECT value FROM cloud.configuration WHERE name = 'js.interpretation.enabled' AND category = 'Hidden';";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
ResultSet rs = pstmt.executeQuery();
if (rs.next()) {
return rs.getString("value");
}
logger.debug("Unable to retrieve value of hidden configuration 'js.interpretation.enabled'. The configuration may already be unhidden.");
return null;
} catch (SQLException e) {
throw new CloudRuntimeException("Error while retrieving value of hidden configuration 'js.interpretation.enabled'.", e);
}
}
protected void updateJsInterpretationEnabledFields(Connection conn, String encryptedValue) {
String query = "UPDATE cloud.configuration SET value = ?, category = 'System', component = 'JsInterpreter', is_dynamic = 1 WHERE name = 'js.interpretation.enabled';";
try (PreparedStatement pstmt = conn.prepareStatement(query)) {
String decryptedValue = DBEncryptionUtil.decrypt(encryptedValue);
logger.info("Updating setting 'js.interpretation.enabled' to decrypted value [{}], category 'System', component 'JsInterpreter', and is_dynamic '1'.", decryptedValue);
pstmt.setString(1, decryptedValue);
pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Error while unhiding configuration 'js.interpretation.enabled'.", e);
} catch (CloudRuntimeException e) {
logger.warn("Error while decrypting configuration 'js.interpretation.enabled'. The configuration may already be decrypted.");
}
}
}

View File

@ -21,6 +21,7 @@ import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.Pair;
@ -193,5 +194,9 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
List<VMInstanceVO> listByIdsIncludingRemoved(List<Long> ids);
List<VMInstanceVO> listDeleteProtectedVmsByAccountId(long accountId);
List<VMInstanceVO> listDeleteProtectedVmsByDomainIds(Set<Long> domainIds);
List<Long> listIdsByHostIdForVolumeStats(long hostIds);
}

View File

@ -25,11 +25,13 @@ import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;
@ -106,6 +108,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
protected SearchBuilder<VMInstanceVO> IdsPowerStateSelectSearch;
GenericSearchBuilder<VMInstanceVO, Integer> CountByOfferingId;
GenericSearchBuilder<VMInstanceVO, Integer> CountUserVmNotInDomain;
SearchBuilder<VMInstanceVO> DeleteProtectedVmSearchByAccount;
SearchBuilder<VMInstanceVO> DeleteProtectedVmSearchByDomainIds;
@Inject
ResourceTagDao tagsDao;
@ -368,6 +372,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
CountUserVmNotInDomain.and("domainIdsNotIn", CountUserVmNotInDomain.entity().getDomainId(), Op.NIN);
CountUserVmNotInDomain.done();
DeleteProtectedVmSearchByAccount = createSearchBuilder();
DeleteProtectedVmSearchByAccount.selectFields(DeleteProtectedVmSearchByAccount.entity().getUuid());
DeleteProtectedVmSearchByAccount.and(ApiConstants.ACCOUNT_ID, DeleteProtectedVmSearchByAccount.entity().getAccountId(), Op.EQ);
DeleteProtectedVmSearchByAccount.and(ApiConstants.DELETE_PROTECTION, DeleteProtectedVmSearchByAccount.entity().isDeleteProtection(), Op.EQ);
DeleteProtectedVmSearchByAccount.and(ApiConstants.REMOVED, DeleteProtectedVmSearchByAccount.entity().getRemoved(), Op.NULL);
DeleteProtectedVmSearchByAccount.done();
DeleteProtectedVmSearchByDomainIds = createSearchBuilder();
DeleteProtectedVmSearchByDomainIds.selectFields(DeleteProtectedVmSearchByDomainIds.entity().getUuid());
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.DOMAIN_IDS, DeleteProtectedVmSearchByDomainIds.entity().getDomainId(), Op.IN);
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.DELETE_PROTECTION, DeleteProtectedVmSearchByDomainIds.entity().isDeleteProtection(), Op.EQ);
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.REMOVED, DeleteProtectedVmSearchByDomainIds.entity().getRemoved(), Op.NULL);
DeleteProtectedVmSearchByDomainIds.done();
}
@Override
@ -1297,6 +1314,24 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
return listIncludingRemovedBy(sc);
}
@Override
public List<VMInstanceVO> listDeleteProtectedVmsByAccountId(long accountId) {
SearchCriteria<VMInstanceVO> sc = DeleteProtectedVmSearchByAccount.create();
sc.setParameters(ApiConstants.ACCOUNT_ID, accountId);
sc.setParameters(ApiConstants.DELETE_PROTECTION, true);
Filter filter = new Filter(VMInstanceVO.class, null, false, 0L, 10L);
return listBy(sc, filter);
}
@Override
public List<VMInstanceVO> listDeleteProtectedVmsByDomainIds(Set<Long> domainIds) {
SearchCriteria<VMInstanceVO> sc = DeleteProtectedVmSearchByDomainIds.create();
sc.setParameters(ApiConstants.DOMAIN_IDS, domainIds.toArray());
sc.setParameters(ApiConstants.DELETE_PROTECTION, true);
Filter filter = new Filter(VMInstanceVO.class, null, false, 0L, 10L);
return listBy(sc, filter);
}
@Override
public List<Long> listIdsByHostIdForVolumeStats(long hostId) {
GenericSearchBuilder<VMInstanceVO, Long> sb = createSearchBuilder(Long.class);

View File

@ -21,20 +21,14 @@ import java.util.Date;
import java.util.List;
import com.cloud.utils.DateUtil;
import org.apache.cloudstack.api.response.BackupScheduleResponse;
import org.apache.cloudstack.backup.BackupSchedule;
import org.apache.cloudstack.backup.BackupScheduleVO;
import com.cloud.utils.db.GenericDao;
public interface BackupScheduleDao extends GenericDao<BackupScheduleVO, Long> {
BackupScheduleVO findByVM(Long vmId);
List<BackupScheduleVO> listByVM(Long vmId);
BackupScheduleVO findByVMAndIntervalType(Long vmId, DateUtil.IntervalType intervalType);
List<BackupScheduleVO> getSchedulesToExecute(Date currentTimestamp);
BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule);
}

View File

@ -17,28 +17,23 @@
package org.apache.cloudstack.backup.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Date;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import com.cloud.utils.DateUtil;
import org.apache.cloudstack.api.response.BackupScheduleResponse;
import org.apache.cloudstack.backup.BackupSchedule;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.TransactionLegacy;
import org.apache.cloudstack.backup.BackupScheduleVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long> implements BackupScheduleDao {
@Inject
VMInstanceDao vmInstanceDao;
private SearchBuilder<BackupScheduleVO> backupScheduleSearch;
private SearchBuilder<BackupScheduleVO> executableSchedulesSearch;
@ -59,13 +54,6 @@ public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long
executableSchedulesSearch.done();
}
@Override
public BackupScheduleVO findByVM(Long vmId) {
SearchCriteria<BackupScheduleVO> sc = backupScheduleSearch.create();
sc.setParameters("vm_id", vmId);
return findOneBy(sc);
}
@Override
public List<BackupScheduleVO> listByVM(Long vmId) {
SearchCriteria<BackupScheduleVO> sc = backupScheduleSearch.create();
@ -88,21 +76,19 @@ public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long
return listBy(sc);
}
@DB
@Override
public BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule) {
VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(schedule.getVmId());
BackupScheduleResponse response = new BackupScheduleResponse();
response.setId(schedule.getUuid());
response.setVmId(vm.getUuid());
response.setVmName(vm.getHostName());
response.setIntervalType(schedule.getScheduleType());
response.setSchedule(schedule.getSchedule());
response.setTimezone(schedule.getTimezone());
response.setMaxBackups(schedule.getMaxBackups());
if (schedule.getQuiesceVM() != null) {
response.setQuiesceVM(schedule.getQuiesceVM());
public boolean remove(Long id) {
String sql = "UPDATE backups SET backup_schedule_id = NULL WHERE backup_schedule_id = ?";
TransactionLegacy transaction = TransactionLegacy.currentTxn();
try {
PreparedStatement preparedStatement = transaction.prepareAutoCloseStatement(sql);
preparedStatement.setLong(1, id);
preparedStatement.executeUpdate();
return super.remove(id);
} catch (SQLException e) {
logger.warn("Unable to clean up backup schedules references from the backups table.", e);
return false;
}
response.setObjectName("backupschedule");
return response;
}
}

View File

@ -51,6 +51,8 @@ StateDao<ObjectInDataStoreStateMachine.State, ObjectInDataStoreStateMachine.Even
SnapshotDataStoreVO findBySnapshotIdAndDataStoreRoleAndState(long snapshotId, DataStoreRole role, ObjectInDataStoreStateMachine.State state);
List<SnapshotDataStoreVO> listBySnapshotIdAndDataStoreRoleAndStateIn(long snapshotId, DataStoreRole role, ObjectInDataStoreStateMachine.State... state);
List<SnapshotDataStoreVO> listReadyByVolumeIdAndCheckpointPathNotNull(long volumeId);
SnapshotDataStoreVO findOneBySnapshotId(long snapshotId, long zoneId);

View File

@ -68,6 +68,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
protected SearchBuilder<SnapshotDataStoreVO> searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq;
private SearchBuilder<SnapshotDataStoreVO> stateSearch;
private SearchBuilder<SnapshotDataStoreVO> idStateNinSearch;
private SearchBuilder<SnapshotDataStoreVO> idEqRoleEqStateInSearch;
protected SearchBuilder<SnapshotVO> snapshotVOSearch;
private SearchBuilder<SnapshotDataStoreVO> snapshotCreatedSearch;
private SearchBuilder<SnapshotDataStoreVO> dataStoreAndInstallPathSearch;
@ -151,6 +152,11 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
idStateNinSearch.and(STATE, idStateNinSearch.entity().getState(), SearchCriteria.Op.NOTIN);
idStateNinSearch.done();
idEqRoleEqStateInSearch = createSearchBuilder();
idEqRoleEqStateInSearch.and(SNAPSHOT_ID, idEqRoleEqStateInSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
idEqRoleEqStateInSearch.and(STORE_ROLE, idEqRoleEqStateInSearch.entity().getRole(), SearchCriteria.Op.EQ);
idEqRoleEqStateInSearch.and(STATE, idEqRoleEqStateInSearch.entity().getState(), SearchCriteria.Op.IN);
snapshotVOSearch = snapshotDao.createSearchBuilder();
snapshotVOSearch.and(VOLUME_ID, snapshotVOSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
snapshotVOSearch.done();
@ -387,6 +393,15 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
return findOneBy(sc);
}
@Override
public List<SnapshotDataStoreVO> listBySnapshotIdAndDataStoreRoleAndStateIn(long snapshotId, DataStoreRole role, State... state) {
SearchCriteria<SnapshotDataStoreVO> sc = idEqRoleEqStateInSearch.create();
sc.setParameters(SNAPSHOT_ID, snapshotId);
sc.setParameters(STORE_ROLE, role);
sc.setParameters(STATE, (Object[])state);
return listBy(sc);
}
@Override
public SnapshotDataStoreVO findOneBySnapshotId(long snapshotId, long zoneId) {
try (TransactionLegacy transactionLegacy = TransactionLegacy.currentTxn()) {

View File

@ -687,22 +687,23 @@ CREATE TABLE IF NOT EXISTS `cloud`.`backup_details` (
UPDATE `cloud`.`backups` b
INNER JOIN `cloud`.`vm_instance` vm ON b.vm_id = vm.id
SET b.backed_volumes = (
SELECT CONCAT("[",
GROUP_CONCAT(
CONCAT(
"{\"uuid\":\"", v.uuid, "\",",
"\"type\":\"", v.volume_type, "\",",
"\"size\":", v.`size`, ",",
"\"path\":\"", IFNULL(v.path, 'null'), "\",",
"\"deviceId\":", IFNULL(v.device_id, 'null'), ",",
"\"diskOfferingId\":\"", doff.uuid, "\",",
"\"minIops\":", IFNULL(v.min_iops, 'null'), ",",
"\"maxIops\":", IFNULL(v.max_iops, 'null'),
"}"
)
SEPARATOR ","
SELECT COALESCE(
CAST(
JSON_ARRAYAGG(
JSON_OBJECT(
'uuid', v.uuid,
'type', v.volume_type,
'size', v.size,
'path', v.path,
'deviceId', v.device_id,
'diskOfferingId', doff.uuid,
'minIops', v.min_iops,
'maxIops', v.max_iops
)
) AS CHAR
),
"]")
'[]'
)
FROM `cloud`.`volumes` v
LEFT JOIN `cloud`.`disk_offering` doff ON v.disk_offering_id = doff.id
WHERE v.instance_id = vm.id
@ -711,22 +712,23 @@ SET b.backed_volumes = (
-- Add diskOfferingId, deviceId, minIops and maxIops to backup_volumes in vm_instance table
UPDATE `cloud`.`vm_instance` vm
SET vm.backup_volumes = (
SELECT CONCAT("[",
GROUP_CONCAT(
CONCAT(
"{\"uuid\":\"", v.uuid, "\",",
"\"type\":\"", v.volume_type, "\",",
"\"size\":", v.`size`, ",",
"\"path\":\"", IFNULL(v.path, 'null'), "\",",
"\"deviceId\":", IFNULL(v.device_id, 'null'), ",",
"\"diskOfferingId\":\"", doff.uuid, "\",",
"\"minIops\":", IFNULL(v.min_iops, 'null'), ",",
"\"maxIops\":", IFNULL(v.max_iops, 'null'),
"}"
)
SEPARATOR ","
SELECT COALESCE(
CAST(
JSON_ARRAYAGG(
JSON_OBJECT(
'uuid', v.uuid,
'type', v.volume_type,
'size', v.size,
'path', v.path,
'deviceId', v.device_id,
'diskOfferingId', doff.uuid,
'minIops', v.min_iops,
'maxIops', v.max_iops
)
) AS CHAR
),
"]")
'[]'
)
FROM `cloud`.`volumes` v
LEFT JOIN `cloud`.`disk_offering` doff ON v.disk_offering_id = doff.id
WHERE v.instance_id = vm.id

View File

@ -34,7 +34,22 @@ UPDATE `cloud`.`alert` SET type = 34 WHERE name = 'ALERT.VR.PRIVATE.IFACE.MTU';
-- Update configuration 'kvm.ssh.to.agent' description and is_dynamic fields
UPDATE `cloud`.`configuration` SET description = 'True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations', is_dynamic = 1 WHERE name = 'kvm.ssh.to.agent';
-- Sanitize legacy network-level addressing fields for Public networks
UPDATE `cloud`.`networks`
SET `broadcast_uri` = NULL,
`gateway` = NULL,
`cidr` = NULL,
`ip6_gateway` = NULL,
`ip6_cidr` = NULL
WHERE `traffic_type` = 'Public';
UPDATE `cloud`.`vm_template` SET guest_os_id = 99 WHERE name = 'kvm-default-vm-import-dummy-template';
-- Update existing vm_template records with NULL type to "USER"
UPDATE `cloud`.`vm_template` SET `type` = 'USER' WHERE `type` IS NULL;
-- remove unused config item
DELETE FROM `cloud`.`configuration` WHERE name = 'consoleproxy.cmd.port';
-- Drops the unused "backup_interval_type" column of the "cloud.backups" table
ALTER TABLE `cloud`.`backups` DROP COLUMN `backup_interval_type`;

View File

@ -80,6 +80,7 @@ SELECT
`vm_template`.`format` AS `template_format`,
`vm_template`.`display_text` AS `template_display_text`,
`vm_template`.`enable_password` AS `password_enabled`,
`vm_template`.`extension_id` AS `template_extension_id`,
`iso`.`id` AS `iso_id`,
`iso`.`uuid` AS `iso_uuid`,
`iso`.`name` AS `iso_name`,

View File

@ -120,7 +120,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
private final List<Snapshot.State> snapshotStatesAbleToDeleteSnapshot = Arrays.asList(Snapshot.State.Destroying, Snapshot.State.Destroyed, Snapshot.State.Error, Snapshot.State.Hidden);
public SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) {
List<SnapshotDataStoreVO> snaps = snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image);
List<SnapshotDataStoreVO> snaps = snapshotStoreDao.listBySnapshotIdAndDataStoreRoleAndStateIn(snapshotId, DataStoreRole.Image, State.Ready, State.Hidden);
for (SnapshotDataStoreVO ref : snaps) {
if (zoneId == dataStoreMgr.getStoreZoneId(ref.getDataStoreId(), ref.getRole())) {
return ref;

View File

@ -111,7 +111,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
FreezeThawVMAnswer freezeAnswer = null;
FreezeThawVMCommand thawCmd = null;
FreezeThawVMAnswer thawAnswer = null;
List<SnapshotInfo> forRollback = new ArrayList<>();
List<SnapshotInfo> snapshotsForRollback = new ArrayList<>();
long startFreeze = 0;
try {
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
@ -165,7 +165,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
logger.info("The virtual machine is frozen");
for (VolumeInfo vol : vinfos) {
long startSnapshtot = System.nanoTime();
SnapshotInfo snapInfo = createDiskSnapshot(vmSnapshot, forRollback, vol);
SnapshotInfo snapInfo = createDiskSnapshot(vmSnapshot, snapshotsForRollback, vol);
if (snapInfo == null) {
thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd);
@ -222,7 +222,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
}
}
if (!result) {
for (SnapshotInfo snapshotInfo : forRollback) {
for (SnapshotInfo snapshotInfo : snapshotsForRollback) {
rollbackDiskSnapshot(snapshotInfo);
}
try {
@ -388,10 +388,16 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
//Rollback if one of disks snapshot fails
/**
 * Rolls back a single disk snapshot after a failed group (VM) snapshot attempt
 * by deleting the partially-created snapshot through its strategy.
 * Null-safe on both the snapshot info and the DB row (either may already be gone).
 */
protected void rollbackDiskSnapshot(SnapshotInfo snapshotInfo) {
    if (snapshotInfo == null) {
        return;
    }
    Long snapshotID = snapshotInfo.getId();
    SnapshotVO snapshot = snapshotDao.findById(snapshotID);
    if (snapshot == null) {
        // Row already removed; nothing left to roll back.
        return;
    }
    deleteSnapshotByStrategy(snapshot);
    // Fix: the merge left both the old String-concat and the new parameterized
    // log call in place; keep only the parameterized (SLF4J-style) variant.
    logger.debug("Rollback is executed: deleting snapshot with id: {}", snapshotID);
}
protected void deleteSnapshotByStrategy(SnapshotVO snapshot) {
@ -434,7 +440,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
}
}
protected SnapshotInfo createDiskSnapshot(VMSnapshot vmSnapshot, List<SnapshotInfo> forRollback, VolumeInfo vol) {
protected SnapshotInfo createDiskSnapshot(VMSnapshot vmSnapshot, List<SnapshotInfo> snapshotsForRollback, VolumeInfo vol) {
String snapshotName = vmSnapshot.getId() + "_" + vol.getUuid();
SnapshotVO snapshot = new SnapshotVO(vol.getDataCenterId(), vol.getAccountId(), vol.getDomainId(), vol.getId(), vol.getDiskOfferingId(),
snapshotName, (short) Snapshot.Type.GROUP.ordinal(), Snapshot.Type.GROUP.name(), vol.getSize(), vol.getMinIops(), vol.getMaxIops(), Hypervisor.HypervisorType.KVM, null);
@ -448,6 +454,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
vol.addPayload(setPayload(vol, snapshot, quiescevm));
SnapshotInfo snapshotInfo = snapshotDataFactory.getSnapshot(snapshot.getId(), vol.getDataStore());
snapshotInfo.addPayload(vol.getpayload());
snapshotsForRollback.add(snapshotInfo);
SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshotInfo, SnapshotOperation.TAKE);
if (snapshotStrategy == null) {
throw new CloudRuntimeException("Could not find strategy for snapshot uuid:" + snapshotInfo.getUuid());
@ -455,8 +462,6 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
snapshotInfo = snapshotStrategy.takeSnapshot(snapshotInfo);
if (snapshotInfo == null) {
throw new CloudRuntimeException("Failed to create snapshot");
} else {
forRollback.add(snapshotInfo);
}
vmSnapshotDetailsDao.persist(new VMSnapshotDetailsVO(vmSnapshot.getId(), STORAGE_SNAPSHOT, String.valueOf(snapshot.getId()), true));
snapshotInfo.markBackedUp();

View File

@ -257,11 +257,6 @@ public class DefaultSnapshotStrategyTest {
@Test
public void testGetSnapshotImageStoreRefNull() {
    // The only store ref lives in zone 2 (getStoreZoneId stubbed to 2L) while
    // we query zone 1, so no matching image-store ref must be found.
    SnapshotDataStoreVO ref1 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(ref1.getDataStoreId()).thenReturn(1L);
    Mockito.when(ref1.getRole()).thenReturn(DataStoreRole.Image);
    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(Mockito.anyLong(), Mockito.any(DataStoreRole.class))).thenReturn(List.of(ref1));
    Mockito.when(dataStoreManager.getStoreZoneId(1L, DataStoreRole.Image)).thenReturn(2L);
    Assert.assertNull(defaultSnapshotStrategySpy.getSnapshotImageStoreRef(1L, 1L));
}
@ -270,7 +265,7 @@ public class DefaultSnapshotStrategyTest {
SnapshotDataStoreVO ref1 = Mockito.mock(SnapshotDataStoreVO.class);
Mockito.when(ref1.getDataStoreId()).thenReturn(1L);
Mockito.when(ref1.getRole()).thenReturn(DataStoreRole.Image);
Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(Mockito.anyLong(), Mockito.any(DataStoreRole.class))).thenReturn(List.of(ref1));
Mockito.when(snapshotDataStoreDao.listBySnapshotIdAndDataStoreRoleAndStateIn(Mockito.anyLong(), Mockito.any(DataStoreRole.class), Mockito.any(), Mockito.any())).thenReturn(List.of(ref1));
Mockito.when(dataStoreManager.getStoreZoneId(1L, DataStoreRole.Image)).thenReturn(1L);
Assert.assertNotNull(defaultSnapshotStrategySpy.getSnapshotImageStoreRef(1L, 1L));
}

View File

@ -155,7 +155,7 @@ public class VMSnapshotStrategyKVMTest extends TestCase{
@Test
public void testCreateDiskSnapshotBasedOnStrategy() throws Exception {
VMSnapshotVO vmSnapshot = Mockito.mock(VMSnapshotVO.class);
List<SnapshotInfo> forRollback = new ArrayList<>();
List<SnapshotInfo> snapshotsForRollback = new ArrayList<>();
VolumeInfo vol = Mockito.mock(VolumeInfo.class);
SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class);
SnapshotStrategy strategy = Mockito.mock(SnapshotStrategy.class);
@ -179,7 +179,7 @@ public class VMSnapshotStrategyKVMTest extends TestCase{
VMSnapshotDetailsVO vmDetails = new VMSnapshotDetailsVO(vmSnapshot.getId(), volUuid, String.valueOf(snapshot.getId()), false);
when(vmSnapshotDetailsDao.persist(any())).thenReturn(vmDetails);
info = vmStrategy.createDiskSnapshot(vmSnapshot, forRollback, vol);
info = vmStrategy.createDiskSnapshot(vmSnapshot, snapshotsForRollback, vol);
assertNotNull(info);
}

View File

@ -145,10 +145,10 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
storageType = "shared";
}
logger.debug(String.format(
"Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] and ID [%s], is a [%s] storage.",
logger.info(
"Filtering storage pools by capacity type [{}] as the first storage pool of the list, with name [{}] and ID [{}], is a [{}] storage.",
capacityType, storagePool.getName(), storagePool.getUuid(), storageType
));
);
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
List<Long> poolIdsByCapacity = result.first();
@ -185,7 +185,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long clusterId = plan.getClusterId();
List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
logger.debug("List of pools in ascending order of number of volumes for account [{}] is [{}].", account, poolIdsByVolCount);
// now filter the given list of Pools by this ordered list
Map<Long, StoragePool> poolMap = new HashMap<>();
@ -206,16 +206,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Override
public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
if (logger.isTraceEnabled()) {
logger.trace("reordering pools");
}
if (pools == null) {
logger.trace("There are no pools to reorder; returning null.");
logger.info("There are no pools to reorder.");
return null;
}
if (logger.isTraceEnabled()) {
logger.trace(String.format("reordering %d pools", pools.size()));
}
logger.info("Reordering [{}] pools", pools.size());
Account account = null;
if (vmProfile.getVirtualMachine() != null) {
account = vmProfile.getOwner();
@ -224,9 +219,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
if (vmProfile.getVirtualMachine() == null) {
if (logger.isTraceEnabled()) {
logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
}
logger.info("The VM is null, skipping pool reordering by disk provisioning type.");
return pools;
}
@ -240,14 +233,10 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
String volumeAllocationAlgorithm = VolumeOrchestrationService.VolumeAllocationAlgorithm.value();
logger.debug("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
logger.info("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
if (volumeAllocationAlgorithm.equals("random") || (account == null)) {
reorderRandomPools(pools);
} else if (StringUtils.equalsAny(volumeAllocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
if (logger.isTraceEnabled()) {
logger.trace("Using reordering algorithm {}", volumeAllocationAlgorithm);
}
if (volumeAllocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
} else {
@ -259,16 +248,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
/**
 * Shuffles the candidate storage pools in place so allocation does not always
 * probe them in the same order (used when the allocation algorithm is "random"
 * or no owning account is available).
 */
void reorderRandomPools(List<StoragePool> pools) {
    StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: ");
    // Fix: the merge left both the pre-change (isTraceEnabled-guarded / traceLog)
    // and post-change logging statements in the method; keep only the new ones.
    logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
    logger.debug("Pools to shuffle: [{}]", pools);
    Collections.shuffle(pools, secureRandom);
    logger.debug("Shuffled list of pools to choose from: [{}]", pools);
}
private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
logger.info("Reordering [{}] pools by disk provisioning type [{}].", pools.size(), diskProfile.getProvisioningType());
List<StoragePool> reorderedPools = new ArrayList<>();
int preferredIndex = 0;
for (StoragePool pool : pools) {
@ -282,22 +270,28 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
reorderedPools.add(preferredIndex++, pool);
}
}
logger.debug("Reordered list of pools by disk provisioning type [{}]: [{}]", diskProfile.getProvisioningType(), reorderedPools);
return reorderedPools;
} else {
if (diskProfile == null) {
logger.info("Reordering pools by disk provisioning type wasn't necessary, since no disk profile was found.");
} else {
logger.debug("Reordering pools by disk provisioning type wasn't necessary, since the provisioning type is [{}].", diskProfile.getProvisioningType());
}
return pools;
}
}
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh));
logger.debug("Checking if storage pool [{}] is suitable to disk [{}].", pool, dskCh);
if (avoid.shouldAvoid(pool)) {
logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh));
logger.debug("StoragePool [{}] is in avoid set, skipping this pool to allocation of disk [{}].", pool, dskCh);
return false;
}
if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType()));
logger.debug("Storage pool type '[{}]' doesn't support encryption required for volume, skipping this pool", pool.getPoolType());
}
return false;
}
@ -319,8 +313,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
}
if (!checkDiskProvisioningSupport(dskCh, pool)) {
logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType")));
logger.debug("Storage pool [{}] does not have support to disk provisioning of disk [{}].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"));
return false;
}
@ -332,7 +326,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
HostVO plannedHost = hostDao.findById(plan.getHostId());
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
logger.debug("StoragePool [{}] and host [{}] does not have matching storage access groups", pool, plannedHost);
}
return false;
}
@ -343,13 +337,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
if (!isTempVolume) {
volume = volumeDao.findById(dskCh.getVolumeId());
if (!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) {
logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume));
logger.debug("Pool [{}] is not compatible with volume [{}], skipping it.", pool, volume);
return false;
}
}
if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume));
logger.debug("Cannot allocate pool [{}] to volume [{}] because the max number of managed clustered filesystems has been exceeded.", pool, volume);
return false;
}
@ -358,13 +352,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh));
if (dskCh.getHypervisorType() == HypervisorType.VMware) {
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is a parent datastore cluster.", pool, volume);
return false;
}
if (pool.getParent() != 0L) {
StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent());
if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not in [{}] state.", datastoreCluster, volume, StoragePoolStatus.Up);
return false;
}
}
@ -374,11 +368,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
storageMgr.isStoragePoolCompliantWithStoragePolicy(dskCh.getDiskOfferingId(), pool) :
storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool);
if (!isStoragePoolStoragePolicyCompliance) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not compliant with the storage policy required by the volume.", pool, volume);
return false;
}
} catch (StorageUnavailableException e) {
logger.warn(String.format("Could not verify storage policy compliance against storage pool %s due to exception %s", pool.getUuid(), e.getMessage()));
logger.warn("Could not verify storage policy compliance against storage pool [{}] due to exception [{}]", pool.getUuid(), e.getMessage());
return false;
}
}
@ -427,19 +421,19 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
/**
 * Trace-logs any storage pools in the given scope that are skipped because
 * they are in the Disabled state. No-op when there are none.
 */
protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
    List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
    if (disabledPools != null && !disabledPools.isEmpty()) {
        // Fix: the merge left both the old String.format call and the new
        // parameterized call in place; keep only the parameterized variant.
        logger.trace("Ignoring pools [{}] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools));
    }
}
/**
 * Trace-logs the start of a storage-pool search with the full search context
 * (disk profile, VM profile, deployment plan, result cap, bypass flag).
 */
protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
        boolean bypassStorageTypeCheck) {
    // Fix: the merge left both the old String.format call and the new
    // parameterized call in the body; keep only the parameterized variant.
    logger.trace("[{}] is looking for storage pools that match the VM's disk profile [{}], virtual machine profile [{}] and "
            + "deployment plan [{}]. Returning up to [{}] and bypassStorageTypeCheck [{}].",
            this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
}
/**
 * Debug-logs the outcome of a storage-pool search: how many suitable pools
 * were found and which ones.
 */
protected void logEndOfSearch(List<StoragePool> storagePoolList) {
    // Fix: the merge left both the old String.format call and the new
    // parameterized call in the body; keep only the parameterized variant.
    logger.debug("[{}] is returning [{}] suitable storage pools [{}].", this.getClass().getSimpleName(), storagePoolList.size(),
            Arrays.toString(storagePoolList.toArray()));
}
}

View File

@ -230,8 +230,10 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
updateBuilder.setJobId(answer.getJobId());
updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
updateBuilder.setInstallPath(answer.getInstallPath());
updateBuilder.setSize(answer.getTemplateSize());
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
updateBuilder.setSize(answer.getTemplateSize());
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
}
_templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
// update size in vm_template table
VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
@ -241,8 +243,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
AsyncCompletionCallback<CreateCmdResult> caller = context.getParentCallback();
if (answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR ||
answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.ABANDONED || answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.UNKNOWN) {
if (VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
CreateCmdResult result = new CreateCmdResult(null, null);
result.setSuccess(false);
result.setResult(answer.getErrorString());
@ -285,19 +286,22 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
updateBuilder.setJobId(answer.getJobId());
updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
updateBuilder.setInstallPath(answer.getInstallPath());
updateBuilder.setSize(answer.getTemplateSize());
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
updateBuilder.setSize(answer.getTemplateSize());
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
}
_volumeStoreDao.update(volStoreVO.getId(), updateBuilder);
// update size in volume table
VolumeVO volUpdater = volumeDao.createForUpdate();
volUpdater.setSize(answer.getTemplateSize());
volumeDao.update(obj.getId(), volUpdater);
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
VolumeVO volUpdater = volumeDao.createForUpdate();
volUpdater.setSize(answer.getTemplateSize());
volumeDao.update(obj.getId(), volUpdater);
}
}
AsyncCompletionCallback<CreateCmdResult> caller = context.getParentCallback();
if (answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR ||
answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.ABANDONED || answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.UNKNOWN) {
if (VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
CreateCmdResult result = new CreateCmdResult(null, null);
result.setSuccess(false);
result.setResult(answer.getErrorString());

View File

@ -83,6 +83,12 @@ public class CreateExtensionCmd extends BaseCmd {
description = "Details in key/value pairs using format details[i].keyname=keyvalue. Example: details[0].endpoint.url=urlvalue")
protected Map details;
@Parameter(name = ApiConstants.RESERVED_RESOURCE_DETAILS, type = CommandType.STRING,
description = "Resource detail names as comma separated string that should be reserved and not visible " +
"to end users",
since = "4.22.1")
protected String reservedResourceDetails;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -115,6 +121,10 @@ public class CreateExtensionCmd extends BaseCmd {
return convertDetailsToMap(details);
}
/**
 * @return the comma-separated resource detail names to reserve (hidden from
 *         end users), exactly as supplied by the API caller; null when unset
 */
public String getReservedResourceDetails() {
    return reservedResourceDetails;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -78,6 +78,12 @@ public class UpdateExtensionCmd extends BaseCmd {
"if false or not set, no action)")
private Boolean cleanupDetails;
@Parameter(name = ApiConstants.RESERVED_RESOURCE_DETAILS, type = CommandType.STRING,
description = "Resource detail names as comma separated string that should be reserved and not visible " +
"to end users",
since = "4.22.1")
protected String reservedResourceDetails;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -106,6 +112,10 @@ public class UpdateExtensionCmd extends BaseCmd {
return cleanupDetails;
}
/**
 * @return the comma-separated resource detail names to reserve (hidden from
 *         end users), exactly as supplied by the API caller; null when unset
 */
public String getReservedResourceDetails() {
    return reservedResourceDetails;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -216,6 +216,11 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
@Inject
AccountService accountService;
// Map of in-built extension names and their reserved resource details that shouldn't be accessible to end-users
protected static final Map<String, List<String>> INBUILT_RESERVED_RESOURCE_DETAILS = Map.of(
"proxmox", List.of("proxmox_vmid")
);
private ScheduledExecutorService extensionPathStateCheckExecutor;
protected String getDefaultExtensionRelativePath(String name) {
@ -563,6 +568,25 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
updateExtensionPathReady(extension, true);
}
/**
 * For a non-user-defined (in-built) extension, appends that extension's
 * reserved resource-detail keys (from INBUILT_RESERVED_RESOURCE_DETAILS) to
 * the given list, skipping keys the list already contains. The match is a
 * case-insensitive substring match of the map key against the extension name;
 * only the first matching entry is applied. User-defined or missing
 * extensions are left untouched.
 */
protected void addInbuiltExtensionReservedResourceDetails(long extensionId, List<String> reservedResourceDetails) {
    ExtensionVO extension = extensionDao.findById(extensionId);
    if (extension == null || extension.isUserDefined()) {
        return;
    }
    String nameLowered = StringUtils.defaultString(extension.getName()).toLowerCase();
    for (Map.Entry<String, List<String>> inbuilt : INBUILT_RESERVED_RESOURCE_DETAILS.entrySet()) {
        if (!nameLowered.contains(inbuilt.getKey().toLowerCase())) {
            continue;
        }
        // De-duplicate against what the caller already collected.
        Set<String> alreadyPresent = new HashSet<>(reservedResourceDetails);
        for (String detailKey : inbuilt.getValue()) {
            if (alreadyPresent.add(detailKey)) {
                reservedResourceDetails.add(detailKey);
            }
        }
        // Mirror the original findFirst(): only the first match applies.
        break;
    }
}
@Override
public String getExtensionsPath() {
return externalProvisioner.getExtensionsPath();
@ -577,6 +601,7 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
String relativePath = cmd.getPath();
final Boolean orchestratorRequiresPrepareVm = cmd.isOrchestratorRequiresPrepareVm();
final String stateStr = cmd.getState();
final String reservedResourceDetails = cmd.getReservedResourceDetails();
ExtensionVO extensionByName = extensionDao.findByName(name);
if (extensionByName != null) {
throw new CloudRuntimeException("Extension by name already exists");
@ -624,6 +649,10 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, String.valueOf(orchestratorRequiresPrepareVm),
false));
}
if (StringUtils.isNotBlank(reservedResourceDetails)) {
detailsVOList.add(new ExtensionDetailsVO(extension.getId(),
ApiConstants.RESERVED_RESOURCE_DETAILS, reservedResourceDetails, false));
}
if (CollectionUtils.isNotEmpty(detailsVOList)) {
extensionDetailsDao.saveDetails(detailsVOList);
}
@ -704,6 +733,7 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
final String stateStr = cmd.getState();
final Map<String, String> details = cmd.getDetails();
final Boolean cleanupDetails = cmd.isCleanupDetails();
final String reservedResourceDetails = cmd.getReservedResourceDetails();
final ExtensionVO extensionVO = extensionDao.findById(id);
if (extensionVO == null) {
throw new InvalidParameterValueException("Failed to find the extension");
@ -732,7 +762,8 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
throw new CloudRuntimeException(String.format("Failed to updated the extension: %s",
extensionVO.getName()));
}
updateExtensionsDetails(cleanupDetails, details, orchestratorRequiresPrepareVm, id);
updateExtensionsDetails(cleanupDetails, details, orchestratorRequiresPrepareVm, reservedResourceDetails,
id);
return extensionVO;
});
if (StringUtils.isNotBlank(stateStr)) {
@ -748,9 +779,11 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
return result;
}
protected void updateExtensionsDetails(Boolean cleanupDetails, Map<String, String> details, Boolean orchestratorRequiresPrepareVm, long id) {
protected void updateExtensionsDetails(Boolean cleanupDetails, Map<String, String> details,
Boolean orchestratorRequiresPrepareVm, String reservedResourceDetails, long id) {
final boolean needToUpdateAllDetails = Boolean.TRUE.equals(cleanupDetails) || MapUtils.isNotEmpty(details);
if (!needToUpdateAllDetails && orchestratorRequiresPrepareVm == null) {
if (!needToUpdateAllDetails && orchestratorRequiresPrepareVm == null &&
StringUtils.isBlank(reservedResourceDetails)) {
return;
}
if (needToUpdateAllDetails) {
@ -761,6 +794,9 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
hiddenDetails.put(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
String.valueOf(orchestratorRequiresPrepareVm));
}
if (StringUtils.isNotBlank(reservedResourceDetails)) {
hiddenDetails.put(ApiConstants.RESERVED_RESOURCE_DETAILS, reservedResourceDetails);
}
if (MapUtils.isNotEmpty(hiddenDetails)) {
hiddenDetails.forEach((key, value) -> detailsVOList.add(
new ExtensionDetailsVO(id, key, value, false)));
@ -775,15 +811,29 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
extensionDetailsDao.removeDetails(id);
}
} else {
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM);
if (detailsVO == null) {
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
String.valueOf(orchestratorRequiresPrepareVm), false));
} else if (Boolean.parseBoolean(detailsVO.getValue()) != orchestratorRequiresPrepareVm) {
detailsVO.setValue(String.valueOf(orchestratorRequiresPrepareVm));
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
if (orchestratorRequiresPrepareVm != null) {
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM);
if (detailsVO == null) {
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
String.valueOf(orchestratorRequiresPrepareVm), false));
} else if (Boolean.parseBoolean(detailsVO.getValue()) != orchestratorRequiresPrepareVm) {
detailsVO.setValue(String.valueOf(orchestratorRequiresPrepareVm));
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
}
}
if (StringUtils.isNotBlank(reservedResourceDetails)) {
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
ApiConstants.RESERVED_RESOURCE_DETAILS);
if (detailsVO == null) {
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
ApiConstants.RESERVED_RESOURCE_DETAILS,
reservedResourceDetails, false));
} else if (!reservedResourceDetails.equals(detailsVO.getValue())) {
detailsVO.setValue(reservedResourceDetails);
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
}
}
}
}
@ -961,12 +1011,16 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
hiddenDetails = extensionDetails.second();
} else {
hiddenDetails = extensionDetailsDao.listDetailsKeyPairs(extension.getId(),
List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM));
List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
ApiConstants.RESERVED_RESOURCE_DETAILS));
}
if (hiddenDetails.containsKey(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)) {
response.setOrchestratorRequiresPrepareVm(Boolean.parseBoolean(
hiddenDetails.get(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)));
}
if (hiddenDetails.containsKey(ApiConstants.RESERVED_RESOURCE_DETAILS)) {
response.setReservedResourceDetails(hiddenDetails.get(ApiConstants.RESERVED_RESOURCE_DETAILS));
}
response.setObjectName(Extension.class.getSimpleName().toLowerCase());
return response;
}
@ -1605,6 +1659,24 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
return extensionDao.findById(extensionId);
}
/**
 * Returns the reserved resource-detail keys configured for an extension
 * (stored as a comma-separated hidden detail), trimmed and with blank entries
 * dropped, plus any in-built reserved keys for non-user-defined extensions.
 *
 * @param extensionId id of the extension to look up
 * @return mutable list of reserved detail keys; empty when none configured
 */
@Override
public List<String> getExtensionReservedResourceDetails(long extensionId) {
    ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(extensionId,
            ApiConstants.RESERVED_RESOURCE_DETAILS);
    // Idiom fix: '!StringUtils.isNotBlank(v)' is a double negation; use isBlank.
    if (detailsVO == null || StringUtils.isBlank(detailsVO.getValue())) {
        return Collections.emptyList();
    }
    List<String> reservedDetails = new ArrayList<>();
    for (String part : detailsVO.getValue().split(",")) {
        if (StringUtils.isNotBlank(part)) {
            reservedDetails.add(part.trim());
        }
    }
    addInbuiltExtensionReservedResourceDetails(extensionId, reservedDetails);
    return reservedDetails;
}
@Override
public boolean start() {
long pathStateCheckInterval = PathStateCheckInterval.value();

View File

@ -94,4 +94,18 @@ public class CreateExtensionCmdTest {
setField(cmd, "details", details);
assertTrue(MapUtils.isNotEmpty(cmd.getDetails()));
}
@Test
public void getReservedResourceDetailsReturnsValueWhenSet() {
    // The getter must return the raw comma-separated string unchanged --
    // no parsing or trimming happens at the command layer.
    setField(cmd, "reservedResourceDetails", "detail1,detail2,detail3");
    String result = cmd.getReservedResourceDetails();
    assertEquals("detail1,detail2,detail3", result);
}

@Test
public void getReservedResourceDetailsReturnsNullWhenNotSet() {
    // When the API parameter is not supplied, the getter must return null
    // (not an empty string) so callers can use blank-checks.
    setField(cmd, "reservedResourceDetails", null);
    String result = cmd.getReservedResourceDetails();
    assertNull(result);
}
}

View File

@ -26,6 +26,7 @@ import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.springframework.test.util.ReflectionTestUtils.setField;
import java.util.EnumSet;
import java.util.HashMap;
@ -134,6 +135,20 @@ public class UpdateExtensionCmdTest {
assertTrue(cmd.isCleanupDetails());
}
@Test
public void getReservedResourceDetailsReturnsValueWhenSet() {
setField(cmd, "reservedResourceDetails", "detail1,detail2,detail3");
String result = cmd.getReservedResourceDetails();
assertEquals("detail1,detail2,detail3", result);
}
@Test
public void getReservedResourceDetailsReturnsNullWhenNotSet() {
setField(cmd, "reservedResourceDetails", null);
String result = cmd.getReservedResourceDetails();
assertNull(result);
}
@Test
public void executeSetsExtensionResponseWhenManagerSucceeds() {
Extension extension = mock(Extension.class);

View File

@ -23,11 +23,13 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
@ -40,6 +42,7 @@ import static org.mockito.Mockito.when;
import java.io.File;
import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
@ -49,8 +52,6 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.user.AccountService;
import org.apache.cloudstack.acl.Role;
import org.apache.cloudstack.acl.RoleService;
import org.apache.cloudstack.acl.RoleType;
@ -85,9 +86,11 @@ import org.apache.cloudstack.framework.extensions.dao.ExtensionResourceMapDao;
import org.apache.cloudstack.framework.extensions.dao.ExtensionResourceMapDetailsDao;
import org.apache.cloudstack.framework.extensions.vo.ExtensionCustomActionDetailsVO;
import org.apache.cloudstack.framework.extensions.vo.ExtensionCustomActionVO;
import org.apache.cloudstack.framework.extensions.vo.ExtensionDetailsVO;
import org.apache.cloudstack.framework.extensions.vo.ExtensionResourceMapVO;
import org.apache.cloudstack.framework.extensions.vo.ExtensionVO;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.commons.collections.CollectionUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -113,6 +116,7 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.host.Host;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
@ -122,6 +126,7 @@ import com.cloud.org.Cluster;
import com.cloud.serializer.GsonHelper;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.user.AccountService;
import com.cloud.utils.Pair;
import com.cloud.utils.UuidUtils;
import com.cloud.utils.db.EntityManager;
@ -664,6 +669,8 @@ public class ExtensionsManagerImplTest {
when(cmd.getPath()).thenReturn(null);
when(cmd.isOrchestratorRequiresPrepareVm()).thenReturn(null);
when(cmd.getState()).thenReturn(null);
String reservedResourceDetails = "abc,xyz";
when(cmd.getReservedResourceDetails()).thenReturn(reservedResourceDetails);
when(extensionDao.findByName("ext1")).thenReturn(null);
when(extensionDao.persist(any())).thenAnswer(inv -> {
ExtensionVO extensionVO = inv.getArgument(0);
@ -671,11 +678,20 @@ public class ExtensionsManagerImplTest {
return extensionVO;
});
when(managementServerHostDao.listBy(any())).thenReturn(Collections.emptyList());
List<ExtensionDetailsVO> detailsList = new ArrayList<>();
doAnswer(inv -> {
List<ExtensionDetailsVO> detailsVO = inv.getArgument(0);
detailsList.addAll(detailsVO);
return null;
}).when(extensionDetailsDao).saveDetails(anyList());
Extension ext = extensionsManager.createExtension(cmd);
assertEquals("ext1", ext.getName());
verify(extensionDao).persist(any());
assertTrue(CollectionUtils.isNotEmpty(detailsList));
assertTrue(detailsList.stream()
.anyMatch(detail -> ApiConstants.RESERVED_RESOURCE_DETAILS.equals(detail.getName())
&& reservedResourceDetails.equals(detail.getValue())));
}
@Test
@ -938,14 +954,32 @@ public class ExtensionsManagerImplTest {
public void updateExtensionsDetails_SavesDetails_WhenDetailsProvided() {
long extensionId = 10L;
Map<String, String> details = Map.of("foo", "bar", "baz", "qux");
extensionsManager.updateExtensionsDetails(false, details, null, extensionId);
extensionsManager.updateExtensionsDetails(false, details, null, null, extensionId);
verify(extensionDetailsDao).saveDetails(any());
}
@Test
public void updateExtensionsDetails_PersistReservedDetail_WhenProvided() {
long extensionId = 10L;
// No existing RESERVED_RESOURCE_DETAILS row is stubbed for findDetail, so the
// manager is expected to create (persist) a new detail rather than update one.
when(extensionDetailsDao.persist(any())).thenReturn(mock(ExtensionDetailsVO.class));
extensionsManager.updateExtensionsDetails(false, null, null, "abc,xyz", extensionId);
verify(extensionDetailsDao).persist(any());
}
@Test
public void updateExtensionsDetails_UpdateReservedDetail_WhenProvided() {
long extensionId = 10L;
// An existing RESERVED_RESOURCE_DETAILS row is present, so the manager must
// update it in place instead of persisting a duplicate.
when(extensionDetailsDao.findDetail(anyLong(), eq(ApiConstants.RESERVED_RESOURCE_DETAILS)))
.thenReturn(mock(ExtensionDetailsVO.class));
when(extensionDetailsDao.update(anyLong(), any())).thenReturn(true);
extensionsManager.updateExtensionsDetails(false, null, null, "abc,xyz", extensionId);
verify(extensionDetailsDao).update(anyLong(), any());
}
@Test
public void updateExtensionsDetails_DoesNothing_WhenDetailsAndCleanupAreNull() {
long extensionId = 11L;
extensionsManager.updateExtensionsDetails(null, null, null, extensionId);
extensionsManager.updateExtensionsDetails(null, null, null, null, extensionId);
verify(extensionDetailsDao, never()).removeDetails(anyLong());
verify(extensionDetailsDao, never()).saveDetails(any());
}
@ -953,7 +987,7 @@ public class ExtensionsManagerImplTest {
@Test
public void updateExtensionsDetails_RemovesDetailsOnly_WhenCleanupIsTrue() {
long extensionId = 12L;
extensionsManager.updateExtensionsDetails(true, null, null, extensionId);
extensionsManager.updateExtensionsDetails(true, null, null, null, extensionId);
verify(extensionDetailsDao).removeDetails(extensionId);
verify(extensionDetailsDao, never()).saveDetails(any());
}
@ -961,7 +995,7 @@ public class ExtensionsManagerImplTest {
@Test
public void updateExtensionsDetails_PersistsOrchestratorFlag_WhenFlagIsNotNull() {
long extensionId = 13L;
extensionsManager.updateExtensionsDetails(false, null, true, extensionId);
extensionsManager.updateExtensionsDetails(false, null, true, null, extensionId);
verify(extensionDetailsDao).persist(any());
}
@ -970,7 +1004,7 @@ public class ExtensionsManagerImplTest {
long extensionId = 14L;
Map<String, String> details = Map.of("foo", "bar");
doThrow(CloudRuntimeException.class).when(extensionDetailsDao).saveDetails(any());
extensionsManager.updateExtensionsDetails(false, details, null, extensionId);
extensionsManager.updateExtensionsDetails(false, details, null, null, extensionId);
}
@Test
@ -1161,7 +1195,8 @@ public class ExtensionsManagerImplTest {
when(externalProvisioner.getExtensionPath("entry2.sh")).thenReturn("/some/path/entry2.sh");
Map<String, String> hiddenDetails = Map.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, "false");
when(extensionDetailsDao.listDetailsKeyPairs(2L, List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)))
when(extensionDetailsDao.listDetailsKeyPairs(2L, List.of(
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, ApiConstants.RESERVED_RESOURCE_DETAILS)))
.thenReturn(hiddenDetails);
EnumSet<ApiConstants.ExtensionDetails> viewDetails = EnumSet.noneOf(ApiConstants.ExtensionDetails.class);
@ -2069,4 +2104,118 @@ public class ExtensionsManagerImplTest {
}
}
@Test
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenDetailsNotFound() {
long extensionId = 1L;
// No stored detail row at all — the method must return an empty list, never null.
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(null);
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
assertNotNull(result);
assertTrue(result.isEmpty());
}
@Test
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenValueIsBlank() {
// A stored detail whose value is whitespace-only must yield an empty (non-null) list.
final long extId = 2L;
ExtensionDetailsVO blankDetail = mock(ExtensionDetailsVO.class);
when(blankDetail.getValue()).thenReturn(" ");
when(extensionDetailsDao.findDetail(extId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(blankDetail);
List<String> reserved = extensionsManager.getExtensionReservedResourceDetails(extId);
assertNotNull(reserved);
assertTrue(reserved.isEmpty());
}
@Test
public void getExtensionReservedResourceDetailsReturnsListOfTrimmedDetails() {
long extensionId = 3L;
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
// Deliberately padded entries — the parser must trim surrounding whitespace
// from each comma-separated segment while preserving order.
when(detailsVO.getValue()).thenReturn(" detail1 , detail2,detail3 ");
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
assertNotNull(result);
assertEquals(3, result.size());
assertEquals("detail1", result.get(0));
assertEquals("detail2", result.get(1));
assertEquals("detail3", result.get(2));
}
@Test
public void getExtensionReservedResourceDetailsHandlesEmptyPartsGracefully() {
long extensionId = 4L;
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
when(detailsVO.getValue()).thenReturn("detail1,,detail2, ,detail3");
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
assertNotNull(result);
assertEquals(3, result.size());
assertEquals("detail1", result.get(0));
assertEquals("detail2", result.get(1));
assertEquals("detail3", result.get(2));
}
@Test
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenSplitResultsInNoParts() {
long extensionId = 5L;
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
when(detailsVO.getValue()).thenReturn(",");
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
assertNotNull(result);
assertTrue(result.isEmpty());
}
@Test
public void addInbuiltExtensionReservedResourceDetailsDoesNothingWhenExtensionNotFound() {
when(extensionDao.findById(1L)).thenReturn(null);
List<String> reservedResourceDetails = new ArrayList<>();
extensionsManager.addInbuiltExtensionReservedResourceDetails(1L, reservedResourceDetails);
assertTrue(reservedResourceDetails.isEmpty());
}
@Test
public void addInbuiltExtensionReservedResourceDetailsDoesNothingForUserDefinedExtension() {
ExtensionVO extension = mock(ExtensionVO.class);
when(extension.isUserDefined()).thenReturn(true);
when(extensionDao.findById(2L)).thenReturn(extension);
List<String> reservedResourceDetails = new ArrayList<>();
reservedResourceDetails.add("existing-detail");
extensionsManager.addInbuiltExtensionReservedResourceDetails(2L, reservedResourceDetails);
assertEquals(1, reservedResourceDetails.size());
assertTrue(reservedResourceDetails.contains("existing-detail"));
}
@Test
public void addInbuiltExtensionReservedResourceDetailsDoesNothingWhenNoMatchFound() {
ExtensionVO extension = mock(ExtensionVO.class);
when(extension.isUserDefined()).thenReturn(false);
when(extension.getName()).thenReturn("no-such-inbuilt-key-expected");
when(extensionDao.findById(3L)).thenReturn(extension);
List<String> reservedResourceDetails = new ArrayList<>();
extensionsManager.addInbuiltExtensionReservedResourceDetails(3L, reservedResourceDetails);
assertTrue(reservedResourceDetails.isEmpty());
}
@Test
public void addInbuiltExtensionReservedResourceDetailsAddedDetails() {
ExtensionVO extension = mock(ExtensionVO.class);
// Only non-user-defined (inbuilt) extensions receive the inbuilt reserved details.
when(extension.isUserDefined()).thenReturn(false);
// Pick any real key/value pair from the manager's inbuilt map so the name matches.
Map.Entry<String, List<String>> entry =
ExtensionsManagerImpl.INBUILT_RESERVED_RESOURCE_DETAILS.entrySet().iterator().next();
when(extension.getName()).thenReturn(entry.getKey());
when(extensionDao.findById(3L)).thenReturn(extension);
List<String> reservedResourceDetails = new ArrayList<>();
extensionsManager.addInbuiltExtensionReservedResourceDetails(3L, reservedResourceDetails);
// The list must be populated with exactly the inbuilt details for that extension name.
assertFalse(reservedResourceDetails.isEmpty());
assertEquals(reservedResourceDetails.size(), entry.getValue().size());
assertTrue(reservedResourceDetails.containsAll(entry.getValue()));
}
}

View File

@ -23,12 +23,30 @@ import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import com.cloud.utils.db.GenericDao;
import javax.annotation.Nullable;
public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId);
List<AsyncJobVO> findInstancePendingAsyncJobs(String instanceType, Long accountId);
/**
* Finds async job matching the given parameters.
* Non-null parameters are added to search criteria.
* Returns the most recent job by creation date.
* <p>
* When searching by resourceId and resourceType, only one active job
* is expected per resource, so returning a single result is sufficient.
*
* @param id job ID
* @param resourceId resource ID (instanceId)
* @param resourceType resource type (instanceType)
* @return matching job or null
*/
@Nullable
AsyncJobVO findJob(Long id, Long resourceId, String resourceType);
AsyncJobVO findPseudoJob(long threadId, long msid);
void cleanupPseduoJobs(long msid);

View File

@ -22,6 +22,8 @@ import java.util.Date;
import java.util.List;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
import org.apache.cloudstack.jobs.JobInfo;
@ -45,6 +47,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
private final SearchBuilder<AsyncJobVO> expiringUnfinishedAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> expiringCompletedAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> failureMsidAsyncJobSearch;
private final SearchBuilder<AsyncJobVO> byIdResourceIdResourceTypeSearch;
private final GenericSearchBuilder<AsyncJobVO, Long> asyncJobTypeSearch;
private final GenericSearchBuilder<AsyncJobVO, Long> pendingNonPseudoAsyncJobsSearch;
@ -95,6 +98,12 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
failureMsidAsyncJobSearch.and("job_cmd", failureMsidAsyncJobSearch.entity().getCmd(), Op.IN);
failureMsidAsyncJobSearch.done();
byIdResourceIdResourceTypeSearch = createSearchBuilder();
byIdResourceIdResourceTypeSearch.and("id", byIdResourceIdResourceTypeSearch.entity().getId(), SearchCriteria.Op.EQ);
byIdResourceIdResourceTypeSearch.and("instanceId", byIdResourceIdResourceTypeSearch.entity().getInstanceId(), SearchCriteria.Op.EQ);
byIdResourceIdResourceTypeSearch.and("instanceType", byIdResourceIdResourceTypeSearch.entity().getInstanceType(), SearchCriteria.Op.EQ);
byIdResourceIdResourceTypeSearch.done();
asyncJobTypeSearch = createSearchBuilder(Long.class);
asyncJobTypeSearch.select(null, SearchCriteria.Func.COUNT, asyncJobTypeSearch.entity().getId());
asyncJobTypeSearch.and("job_info", asyncJobTypeSearch.entity().getCmdInfo(),Op.LIKE);
@ -140,6 +149,30 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
return listBy(sc);
}
@Override
public AsyncJobVO findJob(Long id, Long resourceId, String resourceType) {
    // Guard before building search criteria: with no filters set at all, the
    // query would match every job and return an arbitrary "most recent" one.
    // (Also avoids allocating the SearchCriteria needlessly, unlike the original order.)
    if (id == null && resourceId == null && StringUtils.isBlank(resourceType)) {
        logger.debug("findJob called with all null parameters");
        return null;
    }
    SearchCriteria<AsyncJobVO> sc = byIdResourceIdResourceTypeSearch.create();
    if (id != null) {
        sc.setParameters("id", id);
    }
    // resourceId and resourceType are only applied as a pair; a lone resourceId
    // (or a lone resourceType) is ignored rather than filtered on.
    if (resourceId != null && StringUtils.isNotBlank(resourceType)) {
        sc.setParameters("instanceType", resourceType);
        sc.setParameters("instanceId", resourceId);
    }
    // Order by creation date descending and fetch only the newest matching row,
    // including removed records (matches the interface contract of "most recent").
    Filter filter = new Filter(AsyncJobVO.class, "created", false, 0L, 1L);
    List<AsyncJobVO> result = searchIncludingRemoved(sc, filter, Boolean.FALSE, false);
    return CollectionUtils.isNotEmpty(result) ? result.get(0) : null;
}
@Override
public AsyncJobVO findPseudoJob(long threadId, long msid) {
SearchCriteria<AsyncJobVO> sc = pseudoJobSearch.create();

View File

@ -83,6 +83,8 @@ Requires: (iptables-services or iptables)
Requires: rng-tools
Requires: (qemu-img or qemu-tools)
Requires: python3-pip
Requires: python3-six
Requires: python3-protobuf
Requires: python3-setuptools
Requires: (libgcrypt > 1.8.3 or libgcrypt20)
Group: System Environment/Libraries
@ -336,11 +338,11 @@ cp -r ui/dist/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/
rm -f ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json
ln -sf /etc/%{name}/ui/config.json ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json
# Package mysql-connector-python
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/e9/93/4860cebd5ad3ff2664ad3c966490ccb46e3b88458b2095145bca11727ca4/setuptools-47.3.1-py3-none-any.whl
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/32/27/1141a8232723dcb10a595cc0ce4321dcbbd5215300bf4acfc142343205bf/protobuf-3.19.6-py2.py3-none-any.whl
# Package mysql-connector-python (bundled to avoid dependency on external community repo)
# Version 8.0.31 is the last version supporting Python 3.6 (EL8)
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl
# Version 8.3.0 supports Python 3.8 to 3.12 (EL9, EL10)
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/53/ed/26a4b8cacb8852c6fd97d2d58a7f2591c41989807ea82bd8d9725a4e6937/mysql_connector_python-8.3.0-py2.py3-none-any.whl
chmod 440 ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d/%{name}-management
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/mnt
@ -457,8 +459,13 @@ then
fi
%post management
# Install mysql-connector-python
pip3 install %{_datadir}/%{name}-management/setup/wheel/six-1.15.0-py2.py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/setuptools-47.3.1-py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/protobuf-3.19.6-py2.py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.0.31-py2.py3-none-any.whl
# Install mysql-connector-python wheel
# Detect Python version to install compatible wheel
if python3 -c 'import sys; sys.exit(0 if sys.version_info >= (3, 7) else 1)'; then
pip3 install %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.3.0-py2.py3-none-any.whl
else
pip3 install %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.0.31-py2.py3-none-any.whl
fi
/usr/bin/systemctl enable cloudstack-management > /dev/null 2>&1 || true
/usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true

View File

@ -49,6 +49,7 @@ import com.cloud.user.User;
import com.cloud.user.UserVO;
import com.cloud.utils.DateUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.command.QuotaBalanceCmd;
@ -163,12 +164,6 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder {
private Set<Account.Type> accountTypesThatCanListAllQuotaSummaries = Sets.newHashSet(Account.Type.ADMIN, Account.Type.DOMAIN_ADMIN);
protected void checkActivationRulesAllowed(String activationRule) {
if (!_quotaService.isJsInterpretationEnabled() && StringUtils.isNotEmpty(activationRule)) {
throw new PermissionDeniedException("Quota Tariff Activation Rule cannot be set, as Javascript interpretation is disabled in the configuration.");
}
}
@Override
public QuotaTariffResponse createQuotaTariffResponse(QuotaTariffVO tariff, boolean returnActivationRule) {
final QuotaTariffResponse response = new QuotaTariffResponse();
@ -501,6 +496,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder {
Integer position = cmd.getPosition();
warnQuotaTariffUpdateDeprecatedFields(cmd);
jsInterpreterHelper.ensureInterpreterEnabledIfParameterProvided(ApiConstants.ACTIVATION_RULE, StringUtils.isNotBlank(activationRule));
QuotaTariffVO currentQuotaTariff = _quotaTariffDao.findByName(name);
@ -508,8 +504,6 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder {
throw new InvalidParameterValueException(String.format("There is no quota tariffs with name [%s].", name));
}
checkActivationRulesAllowed(activationRule);
Date currentQuotaTariffStartDate = currentQuotaTariff.getEffectiveOn();
currentQuotaTariff.setRemoved(now);
@ -758,14 +752,14 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder {
String activationRule = cmd.getActivationRule();
Integer position = ObjectUtils.defaultIfNull(cmd.getPosition(), 1);
jsInterpreterHelper.ensureInterpreterEnabledIfParameterProvided(ApiConstants.ACTIVATION_RULE, StringUtils.isNotBlank(activationRule));
QuotaTariffVO currentQuotaTariff = _quotaTariffDao.findByName(name);
if (currentQuotaTariff != null) {
throw new InvalidParameterValueException(String.format("A quota tariff with name [%s] already exist.", name));
}
checkActivationRulesAllowed(activationRule);
if (startDate.compareTo(now) < 0) {
throw new InvalidParameterValueException(String.format("The value passed as Quota tariff's start date is in the past: [%s]. " +
"Please, inform a date in the future or do not pass the parameter to use the current date and time.", startDate));

View File

@ -40,6 +40,4 @@ public interface QuotaService extends PluggableService {
boolean saveQuotaAccount(AccountVO account, BigDecimal aggrUsage, Date endDate);
boolean isJsInterpretationEnabled();
}

View File

@ -64,7 +64,6 @@ import com.cloud.configuration.Config;
import com.cloud.domain.dao.DomainDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.server.ManagementService;
import com.cloud.user.Account;
import com.cloud.user.AccountVO;
import com.cloud.user.dao.AccountDao;
@ -95,8 +94,6 @@ public class QuotaServiceImpl extends ManagerBase implements QuotaService, Confi
private TimeZone _usageTimezone;
private boolean jsInterpretationEnabled = false;
public QuotaServiceImpl() {
super();
}
@ -108,8 +105,6 @@ public class QuotaServiceImpl extends ManagerBase implements QuotaService, Confi
String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT");
_usageTimezone = TimeZone.getTimeZone(timeZoneStr);
jsInterpretationEnabled = ManagementService.JsInterpretationEnabled.value();
return true;
}
@ -298,9 +293,4 @@ public class QuotaServiceImpl extends ManagerBase implements QuotaService, Confi
_quotaAcc.updateQuotaAccount(accountId, acc);
}
}
@Override
public boolean isJsInterpretationEnabled() {
return jsInterpretationEnabled;
}
}

View File

@ -106,7 +106,6 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
}
}
_resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template);
return template;
}

View File

@ -35,7 +35,6 @@ import com.cloud.agent.properties.AgentPropertiesFileHandler;
public class KVMHABase {
protected Logger logger = LogManager.getLogger(getClass());
private long _timeout = 60000; /* 1 minutes */
protected static String s_heartBeatPath;
protected long _heartBeatUpdateTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEARTBEAT_UPDATE_TIMEOUT);
protected long _heartBeatUpdateFreq = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_FREQUENCY);
protected long _heartBeatUpdateMaxTries = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_MAX_TRIES);

View File

@ -18,7 +18,7 @@ package com.cloud.hypervisor.kvm.resource;
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.utils.script.Script;
import org.libvirt.Connect;
import org.libvirt.LibvirtException;
@ -39,20 +39,15 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
private final String hostPrivateIp;
public KVMHAMonitor(HAStoragePool pool, String host, String scriptPath) {
public KVMHAMonitor(HAStoragePool pool, String host) {
if (pool != null) {
storagePool.put(pool.getPoolUUID(), pool);
}
hostPrivateIp = host;
configureHeartBeatPath(scriptPath);
rebootHostAndAlertManagementOnHeartbeatTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.REBOOT_HOST_AND_ALERT_MANAGEMENT_ON_HEARTBEAT_TIMEOUT);
}
private static synchronized void configureHeartBeatPath(String scriptPath) {
KVMHABase.s_heartBeatPath = scriptPath;
}
public void addStoragePool(HAStoragePool pool) {
synchronized (storagePool) {
storagePool.put(pool.getPoolUUID(), pool);
@ -86,8 +81,8 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
Set<String> removedPools = new HashSet<>();
for (String uuid : storagePool.keySet()) {
HAStoragePool primaryStoragePool = storagePool.get(uuid);
if (primaryStoragePool.getPool().getType() == StoragePoolType.NetworkFilesystem) {
checkForNotExistingPools(removedPools, uuid);
if (HighAvailabilityManager.LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT.contains(primaryStoragePool.getPool().getType())) {
checkForNotExistingLibvirtStoragePools(removedPools, uuid);
if (removedPools.contains(uuid)) {
continue;
}
@ -127,7 +122,7 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
return result;
}
private void checkForNotExistingPools(Set<String> removedPools, String uuid) {
private void checkForNotExistingLibvirtStoragePools(Set<String> removedPools, String uuid) {
try {
Connect conn = LibvirtConnection.getConnection();
StoragePool storage = conn.storagePoolLookupByUUIDString(uuid);

View File

@ -1086,11 +1086,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
throw new ConfigurationException("Unable to find patch.sh");
}
heartBeatPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
if (heartBeatPath == null) {
throw new ConfigurationException("Unable to find kvmheartbeat.sh");
}
createVmPath = Script.findScript(storageScriptsDir, "createvm.sh");
if (createVmPath == null) {
throw new ConfigurationException("Unable to find the createvm.sh");
@ -1359,7 +1354,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
final String[] info = NetUtils.getNetworkParams(privateNic);
kvmhaMonitor = new KVMHAMonitor(null, info[0], heartBeatPath);
kvmhaMonitor = new KVMHAMonitor(null, info[0]);
final Thread ha = new Thread(kvmhaMonitor);
ha.start();

View File

@ -79,28 +79,47 @@ public class LibvirtGpuDef {
gpuBuilder.append(" <driver name='vfio'/>\n");
gpuBuilder.append(" <source>\n");
// Parse the bus address (e.g., 00:02.0) into domain, bus, slot, function
String domain = "0x0000";
String bus = "0x00";
String slot = "0x00";
String function = "0x0";
// Parse the bus address into domain, bus, slot, function. Two input formats are accepted:
// - "dddd:bb:ss.f" full PCI address with domain (e.g. 0000:00:02.0)
// - "bb:ss.f" legacy short BDF; domain defaults to 0000
// Each segment is parsed as a hex integer and formatted with fixed widths
// (domain: 4 hex digits, bus/slot: 2 hex digits, function: 1 hex digit) to
// produce canonical libvirt XML values regardless of input casing or padding.
int domainVal = 0, busVal = 0, slotVal = 0, funcVal = 0;
if (busAddress != null && !busAddress.isEmpty()) {
String[] parts = busAddress.split(":");
if (parts.length > 1) {
bus = "0x" + parts[0];
String[] slotFunctionParts = parts[1].split("\\.");
if (slotFunctionParts.length > 0) {
slot = "0x" + slotFunctionParts[0];
if (slotFunctionParts.length > 1) {
function = "0x" + slotFunctionParts[1].trim();
}
try {
String slotFunction;
if (parts.length == 3) {
domainVal = Integer.parseInt(parts[0], 16);
busVal = Integer.parseInt(parts[1], 16);
slotFunction = parts[2];
} else if (parts.length == 2) {
busVal = Integer.parseInt(parts[0], 16);
slotFunction = parts[1];
} else {
throw new IllegalArgumentException("Invalid PCI bus address format: '" + busAddress + "'");
}
String[] sf = slotFunction.split("\\.");
if (sf.length == 2) {
slotVal = Integer.parseInt(sf[0], 16);
funcVal = Integer.parseInt(sf[1].trim(), 16);
} else {
throw new IllegalArgumentException("Invalid PCI bus address format: '" + busAddress + "'");
}
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid PCI bus address format: '" + busAddress + "'", e);
}
}
String domain = String.format("0x%04x", domainVal);
String bus = String.format("0x%02x", busVal);
String slot = String.format("0x%02x", slotVal);
String function = String.format("0x%x", funcVal);
gpuBuilder.append(" <address domain='").append(domain).append("' bus='").append(bus).append("' slot='")
.append(slot).append("' function='").append(function.trim()).append("'/>\n");
.append(slot).append("' function='").append(function).append("'/>\n");
gpuBuilder.append(" </source>\n");
gpuBuilder.append("</hostdev>\n");
}

View File

@ -48,7 +48,7 @@ public final class LibvirtCheckVMActivityOnStoragePoolCommandWrapper extends Com
KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid());
if (primaryPool.isPoolSupportHA()){
if (primaryPool.isPoolSupportHA()) {
final HAStoragePool nfspool = monitor.getStoragePool(pool.getUuid());
final KVMHAVMActivityChecker ha = new KVMHAVMActivityChecker(nfspool, command.getHost(), command.getVolumeList(), libvirtComputingResource.getVmActivityCheckPath(), command.getSuspectTimeInSeconds());
final Future<Boolean> future = executors.submit(ha);

View File

@ -45,11 +45,10 @@ public final class LibvirtCheckVirtualMachineCommandWrapper extends CommandWrapp
Integer vncPort = null;
if (state == PowerState.PowerOn) {
vncPort = libvirtComputingResource.getVncPort(conn, command.getVmName());
}
Domain vm = conn.domainLookupByName(command.getVmName());
if (state == PowerState.PowerOn && DomainInfo.DomainState.VIR_DOMAIN_PAUSED.equals(vm.getInfo().state)) {
return new CheckVirtualMachineAnswer(command, PowerState.PowerUnknown, vncPort);
Domain vm = conn.domainLookupByName(command.getVmName());
if (DomainInfo.DomainState.VIR_DOMAIN_PAUSED.equals(vm.getInfo().state)) {
return new CheckVirtualMachineAnswer(command, PowerState.PowerUnknown, vncPort);
}
}
return new CheckVirtualMachineAnswer(command, state, vncPort);

View File

@ -47,7 +47,10 @@ import java.util.Map;
@ResourceWrapper(handles = CheckVolumeCommand.class)
public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper<CheckVolumeCommand, Answer, LibvirtComputingResource> {
private static final List<Storage.StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED = Arrays.asList(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem);
private static final List<Storage.StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED = Arrays.asList(
Storage.StoragePoolType.Filesystem,
Storage.StoragePoolType.NetworkFilesystem,
Storage.StoragePoolType.SharedMountPoint);
@Override
public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {

View File

@ -52,7 +52,7 @@ import java.util.stream.Collectors;
public final class LibvirtGetVolumesOnStorageCommandWrapper extends CommandWrapper<GetVolumesOnStorageCommand, Answer, LibvirtComputingResource> {
static final List<StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED_BY_QEMU_IMG = Arrays.asList(StoragePoolType.NetworkFilesystem,
StoragePoolType.Filesystem, StoragePoolType.RBD);
StoragePoolType.Filesystem, StoragePoolType.RBD, StoragePoolType.SharedMountPoint);
@Override
public Answer execute(final GetVolumesOnStorageCommand command, final LibvirtComputingResource libvirtComputingResource) {

View File

@ -88,7 +88,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
List<PrimaryDataStoreTO> restoreVolumePools = command.getRestoreVolumePools();
List<String> restoreVolumePaths = command.getRestoreVolumePaths();
Integer mountTimeout = command.getMountTimeout() * 1000;
int timeout = command.getWait();
int timeout = command.getWait() > 0 ? command.getWait() * 1000 : serverResource.getCmdsTimeout();
KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();
List<String> backupFiles = command.getBackupFiles();
@ -270,7 +270,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
return replaceBlockDeviceWithBackup(storagePoolMgr, volumePool, volumePath, backupPath, timeout, createTargetVolume, size);
}
int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath));
int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath), timeout, false);
return exitValue == 0;
}
@ -278,7 +278,7 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid());
QemuImg qemu;
try {
qemu = new QemuImg(timeout * 1000, true, false);
qemu = new QemuImg(timeout, true, false);
String volumeUuid = getVolumeUuidFromPath(volumePath, volumePool);
KVMPhysicalDisk disk = null;
if (createTargetVolume) {

View File

@ -52,6 +52,7 @@ public class LibvirtTakeBackupCommandWrapper extends CommandWrapper<TakeBackupCo
List<PrimaryDataStoreTO> volumePools = command.getVolumePools();
final List<String> volumePaths = command.getVolumePaths();
KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
int timeout = command.getWait() > 0 ? command.getWait() * 1000 : libvirtComputingResource.getCmdsTimeout();
List<String> diskPaths = new ArrayList<>();
if (Objects.nonNull(volumePaths)) {
@ -81,7 +82,7 @@ public class LibvirtTakeBackupCommandWrapper extends CommandWrapper<TakeBackupCo
"-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths)
});
Pair<Integer, String> result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout());
Pair<Integer, String> result = Script.executePipedCommands(commands, timeout);
if (result.first() != 0) {
logger.debug("Failed to take VM backup: " + result.second());

View File

@ -289,6 +289,7 @@ public class KVMStoragePoolManager {
if (pool instanceof LibvirtStoragePool) {
addPoolDetails(uuid, (LibvirtStoragePool) pool);
((LibvirtStoragePool) pool).setType(type);
}
return pool;
@ -390,6 +391,9 @@ public class KVMStoragePoolManager {
private synchronized KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map<String, String> details, boolean primaryStorage) {
StorageAdaptor adaptor = getStorageAdaptor(type);
KVMStoragePool pool = adaptor.createStoragePool(name, host, port, path, userInfo, type, details, primaryStorage);
if (pool instanceof LibvirtStoragePool) {
((LibvirtStoragePool) pool).setType(type);
}
// LibvirtStorageAdaptor-specific statement
if (pool.isPoolSupportHA() && primaryStorage) {

View File

@ -185,6 +185,8 @@ public class KVMStorageProcessor implements StorageProcessor {
private int incrementalSnapshotTimeout;
private int incrementalSnapshotRetryRebaseWait;
private static final String CHECKPOINT_XML_TEMP_DIR = "/tmp/cloudstack/checkpointXMLs";
private static final String BACKUP_XML_TEMP_DIR = "/tmp/cloudstack/backupXMLs";
@ -252,6 +254,7 @@ public class KVMStorageProcessor implements StorageProcessor {
_cmdsTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.CMDS_TIMEOUT) * 1000;
incrementalSnapshotTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_TIMEOUT) * 1000;
incrementalSnapshotRetryRebaseWait = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_RETRY_REBASE_WAIT) * 1000;
return true;
}
@ -2093,8 +2096,25 @@ public class KVMStorageProcessor implements StorageProcessor {
QemuImg qemuImg = new QemuImg(wait);
qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false);
} catch (LibvirtException | QemuImgException e) {
logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e);
throw new CloudRuntimeException(e);
if (!StringUtils.contains(e.getMessage(), "Is another process using the image")) {
logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e);
throw new CloudRuntimeException(e);
}
retryRebase(snapshotName, wait, e, snapshotFile, parentSnapshotFile);
}
}
/**
 * Retries the incremental-snapshot rebase once after waiting
 * {@code incrementalSnapshotRetryRebaseWait} milliseconds, giving Libvirt time to
 * release its lock on the image.
 *
 * @param snapshotName       name of the snapshot being rebased (used for logging only)
 * @param wait               timeout passed to {@link QemuImg} for the rebase operation
 * @param e                  the original rebase failure; attached as a suppressed
 *                           exception if the retry also fails
 * @param snapshotFile       the snapshot image to rebase
 * @param parentSnapshotFile the new backing (parent) image
 * @throws CloudRuntimeException if the retry fails or the wait is interrupted; the
 *                               original exception {@code e} is suppressed inside it
 */
private void retryRebase(String snapshotName, int wait, Exception e, QemuImgFile snapshotFile, QemuImgFile parentSnapshotFile) {
    logger.warn("Libvirt still has not released the lock, will wait [{}] milliseconds and try again later.", incrementalSnapshotRetryRebaseWait);
    try {
        Thread.sleep(incrementalSnapshotRetryRebaseWait);
        QemuImg qemuImg = new QemuImg(wait);
        qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false);
    } catch (LibvirtException | QemuImgException | InterruptedException ex) {
        if (ex instanceof InterruptedException) {
            // Restore the interrupt flag so callers further up the stack can still
            // observe that this thread was interrupted.
            Thread.currentThread().interrupt();
        }
        logger.error("Unable to rebase snapshot [{}].", snapshotName, ex);
        CloudRuntimeException cre = new CloudRuntimeException(ex);
        cre.addSuppressed(e);
        throw cre;
    }
}

View File

@ -31,6 +31,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import com.cloud.agent.api.to.HostTO;
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
@ -320,13 +321,24 @@ public class LibvirtStoragePool implements KVMStoragePool {
@Override
public boolean isPoolSupportHA() {
return type == StoragePoolType.NetworkFilesystem;
return HighAvailabilityManager.LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT.contains(type);
}
public String getHearthBeatPath() {
if (type == StoragePoolType.NetworkFilesystem) {
if (StoragePoolType.NetworkFilesystem.equals(type)) {
String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
return Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
String scriptPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
if (scriptPath == null) {
throw new CloudRuntimeException("Unable to find heartbeat script 'kvmheartbeat.sh' in directory: " + kvmScriptsDir);
}
return scriptPath;
} else if (StoragePoolType.SharedMountPoint.equals(type)) {
String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
String scriptPath = Script.findScript(kvmScriptsDir, "kvmsmpheartbeat.sh");
if (scriptPath == null) {
throw new CloudRuntimeException("Unable to find heartbeat script 'kvmsmpheartbeat.sh' in directory: " + kvmScriptsDir);
}
return scriptPath;
}
return null;
}
@ -410,4 +422,8 @@ public class LibvirtStoragePool implements KVMStoragePool {
return true;
}
}
public void setType(StoragePoolType type) {
this.type = type;
}
}

View File

@ -115,6 +115,145 @@ public class LibvirtGpuDefTest extends TestCase {
assertTrue(gpuXml.contains("</hostdev>"));
}
@Test
public void testGpuDef_withFullPciAddressDomainZero() {
    // A fully qualified BDF with domain 0000 must map to the canonical libvirt address element.
    VgpuTypesInfo info = new VgpuTypesInfo(
            GpuDevice.DeviceType.PCI,
            "passthrough",
            "passthrough",
            "0000:00:02.0",
            "10de",
            "NVIDIA Corporation",
            "1b38",
            "Tesla T4"
    );
    LibvirtGpuDef gpu = new LibvirtGpuDef();
    gpu.defGpu(info);
    assertTrue(gpu.toString().contains("<address domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>"));
}
@Test
public void testGpuDef_withFullPciAddressNonZeroDomain() {
    // A non-zero PCI domain must be preserved in the generated address element.
    VgpuTypesInfo info = new VgpuTypesInfo(
            GpuDevice.DeviceType.PCI,
            "passthrough",
            "passthrough",
            "0001:65:00.0",
            "10de",
            "NVIDIA Corporation",
            "1b38",
            "Tesla T4"
    );
    LibvirtGpuDef gpu = new LibvirtGpuDef();
    gpu.defGpu(info);
    assertTrue(gpu.toString().contains("<address domain='0x0001' bus='0x65' slot='0x00' function='0x0'/>"));
}
@Test
public void testGpuDef_withNvidiaStyleEightDigitDomain() {
    // nvidia-smi reports PCI addresses with an 8-digit domain (e.g. "00000001:af:00.1");
    // generatePciXml must normalize it to the canonical 4-digit form "0x0001".
    VgpuTypesInfo info = new VgpuTypesInfo(
            GpuDevice.DeviceType.PCI,
            "passthrough",
            "passthrough",
            "00000001:af:00.1",
            "10de",
            "NVIDIA Corporation",
            "1b38",
            "Tesla T4"
    );
    LibvirtGpuDef gpu = new LibvirtGpuDef();
    gpu.defGpu(info);
    assertTrue(gpu.toString().contains("<address domain='0x0001' bus='0xaf' slot='0x00' function='0x1'/>"));
}
@Test
public void testGpuDef_withFullPciAddressVfNonZeroDomain() {
    // A VF profile (non-passthrough) with a non-zero domain.
    VgpuTypesInfo info = new VgpuTypesInfo(
            GpuDevice.DeviceType.PCI,
            "VF-Profile",
            "VF-Profile",
            "0002:81:00.3",
            "10de",
            "NVIDIA Corporation",
            "1eb8",
            "Tesla T4"
    );
    LibvirtGpuDef gpu = new LibvirtGpuDef();
    gpu.defGpu(info);
    String xml = gpu.toString();
    // Non-passthrough NVIDIA VFs should be unmanaged
    assertTrue(xml.contains("<hostdev mode='subsystem' type='pci' managed='no' display='off'>"));
    assertTrue(xml.contains("<address domain='0x0002' bus='0x81' slot='0x00' function='0x3'/>"));
}
@Test
public void testGpuDef_withLegacyShortBdfDefaultsDomainToZero() {
    // Backward compatibility: short BDF with no domain segment must still
    // produce a valid libvirt address with domain 0x0000.
    VgpuTypesInfo info = new VgpuTypesInfo(
            GpuDevice.DeviceType.PCI,
            "passthrough",
            "passthrough",
            "af:00.0",
            "10de",
            "NVIDIA Corporation",
            "1b38",
            "Tesla T4"
    );
    LibvirtGpuDef gpu = new LibvirtGpuDef();
    gpu.defGpu(info);
    assertTrue(gpu.toString().contains("<address domain='0x0000' bus='0xaf' slot='0x00' function='0x0'/>"));
}
@Test
public void testGpuDef_withInvalidBusAddressThrows() {
    // Each malformed address below must cause toString() to fail with an
    // IllegalArgumentException whose message names the offending address.
    String[] invalidAddresses = {
            "notahex:00.0", // non-hex bus
            "gg:00:02.0", // non-hex domain
            "00:02:03:04", // too many colon-separated parts
            "00", // missing slot/function
            "00:02", // missing function (no dot)
            "00:02.0.1", // extra dot in ss.f
    };
    for (String addr : invalidAddresses) {
        VgpuTypesInfo info = new VgpuTypesInfo(
                GpuDevice.DeviceType.PCI,
                "passthrough",
                "passthrough",
                addr,
                "10de",
                "NVIDIA Corporation",
                "1b38",
                "Tesla T4"
        );
        LibvirtGpuDef gpu = new LibvirtGpuDef();
        gpu.defGpu(info);
        try {
            String ignored = gpu.toString();
            fail("Expected IllegalArgumentException for address: " + addr + " but got: " + ignored);
        } catch (IllegalArgumentException e) {
            assertTrue("Exception message should contain the bad address",
                    e.getMessage().contains(addr));
        }
    }
}
@Test
public void testGpuDef_withNullDeviceType() {
LibvirtGpuDef gpuDef = new LibvirtGpuDef();

View File

@ -0,0 +1,191 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource.wrapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainInfo;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.LibvirtException;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
import com.cloud.agent.api.CheckVirtualMachineAnswer;
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.vm.VirtualMachine.PowerState;
/**
 * Unit tests for LibvirtCheckVirtualMachineCommandWrapper, covering the power-state
 * reporting paths (running, paused, off, unknown) and the failure paths where the
 * underlying libvirt calls throw LibvirtException.
 */
@RunWith(MockitoJUnitRunner.class)
public class LibvirtCheckVirtualMachineCommandWrapperTest {
// VM name used by every test command in this fixture.
private static final String VM_NAME = "i-2-3-VM";
@Mock
private LibvirtComputingResource libvirtComputingResource;
@Mock
private LibvirtUtilitiesHelper libvirtUtilitiesHelper;
@Mock
private Connect conn;
@Mock
private Domain domain;
// Wrapper under test and the command fed to it; recreated for each test in setUp().
private LibvirtCheckVirtualMachineCommandWrapper wrapper;
private CheckVirtualMachineCommand command;
@Before
public void setUp() throws LibvirtException {
// Common fixture: wire the mocked libvirt connection so execute() can resolve VM_NAME.
wrapper = new LibvirtCheckVirtualMachineCommandWrapper();
command = new CheckVirtualMachineCommand(VM_NAME);
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
when(libvirtUtilitiesHelper.getConnectionByVmName(VM_NAME)).thenReturn(conn);
}
// A running (non-paused) VM reports PowerOn together with its VNC port.
@Test
public void testExecuteVmPoweredOnReturnsStateAndVncPort() throws LibvirtException {
DomainInfo domainInfo = new DomainInfo();
domainInfo.state = DomainState.VIR_DOMAIN_RUNNING;
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5900);
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
when(domain.getInfo()).thenReturn(domainInfo);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
assertEquals(PowerState.PowerOn, answer.getState());
assertEquals(Integer.valueOf(5900), answer.getVncPort());
}
// A paused domain is reported as PowerUnknown even though libvirt considers it powered on.
@Test
public void testExecuteVmPausedReturnsPowerUnknown() throws LibvirtException {
DomainInfo domainInfo = new DomainInfo();
domainInfo.state = DomainState.VIR_DOMAIN_PAUSED;
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5901);
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
when(domain.getInfo()).thenReturn(domainInfo);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
assertEquals(PowerState.PowerUnknown, answer.getState());
assertEquals(Integer.valueOf(5901), answer.getVncPort());
}
// A powered-off VM skips the VNC-port lookup entirely, so the port stays null.
@Test
public void testExecuteVmPoweredOffReturnsStateWithNullVncPort() throws LibvirtException {
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOff);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
assertEquals(PowerState.PowerOff, answer.getState());
assertNull(answer.getVncPort());
}
// An unknown power state is passed through unchanged, with no VNC port.
@Test
public void testExecuteVmStateUnknownReturnsStateWithNullVncPort() throws LibvirtException {
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerUnknown);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
assertEquals(PowerState.PowerUnknown, answer.getState());
assertNull(answer.getVncPort());
}
// A running VM whose VNC port cannot be determined still answers PowerOn with a null port.
@Test
public void testExecuteVmPoweredOnWithNullVncPort() throws LibvirtException {
DomainInfo domainInfo = new DomainInfo();
domainInfo.state = DomainState.VIR_DOMAIN_RUNNING;
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(null);
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
when(domain.getInfo()).thenReturn(domainInfo);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
assertEquals(PowerState.PowerOn, answer.getState());
assertNull(answer.getVncPort());
}
// Failure to obtain the libvirt connection yields a failed answer carrying the exception message.
@Test
public void testExecuteLibvirtExceptionOnGetConnectionReturnsFailure() throws LibvirtException {
LibvirtException libvirtException = mock(LibvirtException.class);
when(libvirtException.getMessage()).thenReturn("Connection refused");
when(libvirtUtilitiesHelper.getConnectionByVmName(VM_NAME)).thenThrow(libvirtException);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertFalse(answer.getResult());
assertEquals("Connection refused", answer.getDetails());
}
// Failure during the VNC-port lookup yields a failed answer carrying the exception message.
@Test
public void testExecuteLibvirtExceptionOnGetVncPortReturnsFailure() throws LibvirtException {
LibvirtException libvirtException = mock(LibvirtException.class);
when(libvirtException.getMessage()).thenReturn("VNC port error");
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenThrow(libvirtException);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertFalse(answer.getResult());
assertEquals("VNC port error", answer.getDetails());
}
// Failure during the domain lookup (paused-state check) also yields a failed answer.
@Test
public void testExecuteLibvirtExceptionOnDomainLookupReturnsFailure() throws LibvirtException {
LibvirtException libvirtException = mock(LibvirtException.class);
when(libvirtException.getMessage()).thenReturn("Domain not found");
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5900);
when(conn.domainLookupByName(VM_NAME)).thenThrow(libvirtException);
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
assertFalse(answer.getResult());
assertEquals("Domain not found", answer.getDetails());
}
// Verifies execute() resolves the connection via the utilities helper by VM name.
@Test
public void testExecuteCallsGetLibvirtUtilitiesHelper() throws LibvirtException {
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOff);
wrapper.execute(command, libvirtComputingResource);
verify(libvirtComputingResource).getLibvirtUtilitiesHelper();
verify(libvirtUtilitiesHelper).getConnectionByVmName(VM_NAME);
}
}

View File

@ -391,7 +391,15 @@ public class LibvirtRestoreBackupCommandWrapperTest {
try (MockedStatic<Script> scriptMock = mockStatic(Script.class)) {
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString(), anyInt(), any(Boolean.class)))
.thenReturn(0); // Mount success
.thenAnswer(invocation -> {
String command = invocation.getArgument(0);
if (command.contains("mount")) {
return 0; // mount success
} else if (command.contains("rsync")) {
return 1; // Rsync failure
}
return 0; // Other commands success
});
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString()))
.thenAnswer(invocation -> {
String command = invocation.getArgument(0);
@ -399,8 +407,6 @@ public class LibvirtRestoreBackupCommandWrapperTest {
return 0; // File exists
} else if (command.contains("qemu-img check")) {
return 0; // File is valid
} else if (command.contains("rsync")) {
return 1; // Rsync failure
}
return 0; // Other commands success
});
@ -444,7 +450,15 @@ public class LibvirtRestoreBackupCommandWrapperTest {
try (MockedStatic<Script> scriptMock = mockStatic(Script.class)) {
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString(), anyInt(), any(Boolean.class)))
.thenReturn(0); // Mount success
.thenAnswer(invocation -> {
String command = invocation.getArgument(0);
if (command.contains("mount")) {
return 0; // Mount success
} else if (command.contains("rsync")) {
return 0; // Rsync success
}
return 0; // Other commands success
});
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString()))
.thenAnswer(invocation -> {
String command = invocation.getArgument(0);
@ -452,8 +466,6 @@ public class LibvirtRestoreBackupCommandWrapperTest {
return 0; // File exists
} else if (command.contains("qemu-img check")) {
return 0; // File is valid
} else if (command.contains("rsync")) {
return 0; // Rsync success
} else if (command.contains("virsh attach-disk")) {
return 1; // Attach failure
}
@ -539,10 +551,10 @@ public class LibvirtRestoreBackupCommandWrapperTest {
filesMock.when(() -> Files.createTempDirectory(anyString())).thenReturn(tempPath);
try (MockedStatic<Script> scriptMock = mockStatic(Script.class)) {
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString(), anyInt(), any(Boolean.class)))
.thenReturn(0); // Mount success
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString()))
.thenReturn(0); // All other commands success
.thenReturn(0); // All commands success
scriptMock.when(() -> Script.runSimpleBashScriptForExitValue(anyString(), anyInt(), any(Boolean.class)))
.thenReturn(0); // All commands success
filesMock.when(() -> Files.deleteIfExists(any(Path.class))).thenReturn(true);

View File

@ -130,6 +130,7 @@
<module>storage/volume/default</module>
<module>storage/volume/nexenta</module>
<module>storage/volume/sample</module>
<module>storage/volume/ontap</module>
<module>storage/volume/solidfire</module>
<module>storage/volume/scaleio</module>
<module>storage/volume/linstor</module>

View File

@ -24,6 +24,8 @@ import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
@ -98,6 +100,51 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
return String.format("%s-%s", ACS_PREFIX, account.getUuid());
}
/**
 * Rebuilds the account's MinIO canned policy from the buckets currently recorded for
 * this object store and account, then re-applies it to the account's MinIO user.
 *
 * @param storeId       id of the object store whose MinIO endpoint is updated
 * @param account       account whose policy is regenerated
 * @param excludeBucket bucket name to leave out of the policy (e.g. a bucket that is
 *                      being deleted); may be null to include every bucket
 * @throws CloudRuntimeException if the MinIO admin client fails to add or set the policy
 */
private void updateCannedPolicy(long storeId, Account account, String excludeBucket) {
List<BucketVO> buckets = _bucketDao.listByObjectStoreIdAndAccountId(storeId, account.getId());
// One S3 ARN per bucket (skipping the excluded one), joined into the JSON "Resource" array body.
String resources = buckets.stream()
.map(BucketVO::getName)
.filter(name -> !Objects.equals(name, excludeBucket))
.map(name -> "\"arn:aws:s3:::" + name + "/*\"")
.collect(Collectors.joining(",\n"));
String policy;
if (resources.isEmpty()) {
// Resource cannot be empty in a canned Policy so deny access to all resources if the user has no buckets
policy = " {\n" +
" \"Statement\": [\n" +
" {\n" +
" \"Action\": \"s3:*\",\n" +
" \"Effect\": \"Deny\",\n" +
" \"Resource\": [\"arn:aws:s3:::*\", \"arn:aws:s3:::*/*\"]\n" +
" }\n" +
" ],\n" +
" \"Version\": \"2012-10-17\"\n" +
" }";
} else {
// Grant full S3 access, but only on the account's own buckets.
policy = " {\n" +
" \"Statement\": [\n" +
" {\n" +
" \"Action\": \"s3:*\",\n" +
" \"Effect\": \"Allow\",\n" +
" \"Resource\": [" + resources + "]\n" +
" }\n" +
" ],\n" +
" \"Version\": \"2012-10-17\"\n" +
" }";
}
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
// Policy name and user name are both derived from the account's MinIO identity.
String policyName = getUserOrAccessKeyForAccount(account) + "-policy";
String userName = getUserOrAccessKeyForAccount(account);
try {
// addCannedPolicy overwrites any existing policy with the same name, then the
// (re)created policy is attached to the account's user.
minioAdminClient.addCannedPolicy(policyName, policy);
minioAdminClient.setPolicy(userName, false, policyName);
} catch (NoSuchAlgorithmException | IOException | InvalidKeyException e) {
throw new CloudRuntimeException(e);
}
}
@Override
public Bucket createBucket(Bucket bucket, boolean objectLock) {
//ToDo Client pool mgmt
@ -125,33 +172,8 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
throw new CloudRuntimeException(e);
}
List<BucketVO> buckets = _bucketDao.listByObjectStoreIdAndAccountId(storeId, accountId);
StringBuilder resources_builder = new StringBuilder();
for(BucketVO exitingBucket : buckets) {
resources_builder.append("\"arn:aws:s3:::"+exitingBucket.getName()+"/*\",\n");
}
resources_builder.append("\"arn:aws:s3:::"+bucketName+"/*\"\n");
updateCannedPolicy(storeId, account,null);
String policy = " {\n" +
" \"Statement\": [\n" +
" {\n" +
" \"Action\": \"s3:*\",\n" +
" \"Effect\": \"Allow\",\n" +
" \"Principal\": \"*\",\n" +
" \"Resource\": ["+resources_builder+"]" +
" }\n" +
" ],\n" +
" \"Version\": \"2012-10-17\"\n" +
" }";
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
String policyName = getUserOrAccessKeyForAccount(account) + "-policy";
String userName = getUserOrAccessKeyForAccount(account);
try {
minioAdminClient.addCannedPolicy(policyName, policy);
minioAdminClient.setPolicy(userName, false, policyName);
} catch (Exception e) {
throw new CloudRuntimeException(e);
}
String accessKey = _accountDetailsDao.findDetail(accountId, MINIO_ACCESS_KEY).getValue();
String secretKey = _accountDetailsDao.findDetail(accountId, MINIO_SECRET_KEY).getValue();
ObjectStoreVO store = _storeDao.findById(storeId);
@ -183,6 +205,8 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
@Override
public boolean deleteBucket(BucketTO bucket, long storeId) {
String bucketName = bucket.getName();
long accountId = bucket.getAccountId();
Account account = _accountDao.findById(accountId);
MinioClient minioClient = getMinIOClient(storeId);
try {
if(!minioClient.bucketExists(BucketExistsArgs.builder().bucket(bucketName).build())) {
@ -197,6 +221,9 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
} catch (Exception e) {
throw new CloudRuntimeException(e);
}
updateCannedPolicy(storeId, account, bucketName);
return true;
}

View File

@ -129,10 +129,15 @@ public class MinIOObjectStoreDriverImplTest {
@Test
public void testDeleteBucket() throws Exception {
String bucketName = "test-bucket";
BucketTO bucket = new BucketTO(bucketName);
BucketVO bucketVO = new BucketVO(1L, 1L, 1L, bucketName, 1, false, false, false, null);
BucketTO bucket = new BucketTO(bucketVO);
when(accountDao.findById(1L)).thenReturn(account);
when(account.getUuid()).thenReturn(UUID.randomUUID().toString());
when(bucketDao.listByObjectStoreIdAndAccountId(anyLong(), anyLong())).thenReturn(new ArrayList<BucketVO>());
doReturn(minioClient).when(minioObjectStoreDriverImpl).getMinIOClient(anyLong());
when(minioClient.bucketExists(BucketExistsArgs.builder().bucket(bucketName).build())).thenReturn(true);
doNothing().when(minioClient).removeBucket(RemoveBucketArgs.builder().bucket(bucketName).build());
doReturn(minioAdminClient).when(minioObjectStoreDriverImpl).getMinIOAdminClient(anyLong());
boolean success = minioObjectStoreDriverImpl.deleteBucket(bucket, 1L);
assertTrue(success);
verify(minioClient, times(1)).bucketExists(any());

View File

@ -27,6 +27,7 @@ import java.util.UUID;
import javax.inject.Inject;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.ha.HighAvailabilityManager;
import com.cloud.storage.VolumeVO;
import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
@ -587,7 +588,7 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
@Override
public boolean isStorageSupportHA(StoragePoolType type) {
return StoragePoolType.NetworkFilesystem == type;
return type != null && HighAvailabilityManager.LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT.contains(type);
}
@Override

View File

@ -0,0 +1,123 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Apache CloudStack - NetApp ONTAP Storage Plugin
## Overview
The NetApp ONTAP Storage Plugin provides integration between Apache CloudStack and NetApp ONTAP storage systems. This plugin enables CloudStack to provision and manage primary storage on ONTAP clusters, supporting both NAS (NFS) and SAN (iSCSI) protocols.
## Features
- **Primary Storage Support**: Provision and manage primary storage pools on NetApp ONTAP
- **Multiple Protocols**: Support for NFS 3.0 and iSCSI protocols
- **Unified Storage**: Integration with traditional ONTAP unified storage architecture
- **KVM Hypervisor Support**: Supports KVM hypervisor environments
- **Managed Storage**: Operates as managed storage with full lifecycle management
- **Flexible Scoping**: Support for Zone-wide and Cluster-scoped storage pools
## Architecture
### Component Structure
| Package | Description |
|---------|-------------------------------------------------------|
| `driver` | Primary datastore driver implementation |
| `feign` | REST API clients and data models for ONTAP operations |
| `lifecycle` | Storage pool lifecycle management |
| `listener` | Host connection event handlers |
| `provider` | Main provider and strategy factory |
| `service` | ONTAP Storage strategy implementations (NAS/SAN) |
| `utils` | Constants and helper utilities |
## Requirements
### ONTAP Requirements
- NetApp ONTAP 9.15.1 or higher
- Storage Virtual Machine (SVM) configured with appropriate protocols enabled
- Management LIF accessible from CloudStack management server
- Data LIF(s) accessible from hypervisor hosts, using IPv4 addresses
- Aggregates assigned to the SVM with sufficient capacity
### CloudStack Requirements
- The current Apache CloudStack release or later
- KVM hypervisor hosts
- For iSCSI: Hosts must have iSCSI initiator configured with valid IQN
- For NFS: Hosts must have NFS client packages installed
### Minimum Volume Size
ONTAP requires a minimum volume size of **1.56 GB** (1,677,721,600 bytes). The plugin will automatically adjust requested sizes below this threshold.
## Configuration
### Storage Pool Creation Parameters
When creating an ONTAP primary storage pool, provide the following details in the URL field (semicolon-separated key=value pairs):
| Parameter | Required | Description |
|-----------|----------|-------------|
| `username` | Yes | ONTAP cluster admin username |
| `password` | Yes | ONTAP cluster admin password |
| `svmName` | Yes | Storage Virtual Machine name |
| `protocol` | Yes | Storage protocol (`NFS3` or `ISCSI`) |
| `managementLIF` | Yes | ONTAP cluster management LIF IP address |
### Example URL Format
```
username=admin;password=secretpass;svmName=svm1;protocol=ISCSI;managementLIF=192.168.1.100
```
## Port Configuration
| Protocol | Default Port |
|----------|--------------|
| NFS | 2049 |
| iSCSI | 3260 |
| ONTAP Management API | 443 (HTTPS) |
## Limitations
- Supports only **KVM** hypervisor
- Supports only **Unified ONTAP** storage (disaggregated not supported)
- Supports only **NFS3** and **iSCSI** protocols
- IPv6 addresses and FQDN-based LIFs are not supported
## Troubleshooting
### Common Issues
1. **Connection Failures**
- Verify management LIF is reachable from CloudStack management server
- Check firewall rules for port 443
2. **Protocol Errors**
- Ensure the protocol (NFS/iSCSI) is enabled on the SVM
- Verify Data LIFs are configured for the protocol
3. **Capacity Errors**
- Check aggregate space availability
- Ensure requested volume size meets minimum requirements (1.56 GB)
4. **Host Connection Issues**
- For iSCSI: Verify host IQN is properly configured in host's storage URL
- For NFS: Ensure NFS client is installed and running

View File

@ -0,0 +1,169 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <artifactId>cloud-plugin-storage-volume-ontap</artifactId>
    <name>Apache CloudStack Plugin - Storage Volume ONTAP Provider</name>
    <parent>
        <groupId>org.apache.cloudstack</groupId>
        <artifactId>cloudstack-plugins</artifactId>
        <version>4.23.0.0-SNAPSHOT</version>
        <relativePath>../../../pom.xml</relativePath>
    </parent>
    <properties>
        <spring-cloud.version>2021.0.7</spring-cloud.version>
        <openfeign.version>11.0</openfeign.version>
        <httpclient.version>4.5.14</httpclient.version>
        <swagger-annotations.version>1.6.2</swagger-annotations.version>
        <maven-compiler-plugin.version>3.8.1</maven-compiler-plugin.version>
        <maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version>
        <jackson-databind.version>2.13.4</jackson-databind.version>
        <assertj.version>3.24.2</assertj.version>
        <junit-jupiter.version>5.8.1</junit-jupiter.version>
        <mockito.version>3.12.4</mockito.version>
        <!-- mockito-junit-jupiter must stay in lockstep with mockito-core: the 5.x
             MockitoExtension is binary-incompatible with a 3.x core on the test
             classpath (was pinned to 5.2.0, which breaks at test runtime). -->
        <mockito-junit-jupiter.version>${mockito.version}</mockito-junit-jupiter.version>
    </properties>
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>${spring-cloud.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-plugin-storage-volume-default</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.github.openfeign</groupId>
            <artifactId>feign-core</artifactId>
            <version>${openfeign.version}</version>
        </dependency>
        <dependency>
            <groupId>io.github.openfeign</groupId>
            <artifactId>feign-httpclient</artifactId>
            <version>${openfeign.version}</version>
        </dependency>
        <dependency>
            <groupId>io.github.openfeign</groupId>
            <artifactId>feign-jackson</artifactId>
            <version>${openfeign.version}</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
            <version>${jackson-databind.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
            <version>${httpclient.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.cloudstack</groupId>
            <artifactId>cloud-engine-storage-volume</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>io.swagger</groupId>
            <artifactId>swagger-annotations</artifactId>
            <version>${swagger-annotations.version}</version>
        </dependency>
        <!-- JUnit 5 -->
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter-engine</artifactId>
            <version>${junit-jupiter.version}</version>
            <scope>test</scope>
        </dependency>
        <!-- Mockito -->
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-core</artifactId>
            <version>${mockito.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-junit-jupiter</artifactId>
            <version>${mockito-junit-jupiter.version}</version>
            <scope>test</scope>
        </dependency>
        <!-- Mockito Inline (for static method mocking) -->
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-inline</artifactId>
            <version>${mockito.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.assertj</groupId>
            <artifactId>assertj-core</artifactId>
            <version>${assertj.version}</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
    <!-- NOTE(review): Maven Central is already the implicit default repository;
         this declaration is redundant but harmless - confirm before removing. -->
    <repositories>
        <repository>
            <id>central</id>
            <name>Maven Central</name>
            <url>https://repo.maven.apache.org/maven2</url>
        </repository>
    </repositories>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>${maven-compiler-plugin.version}</version>
                <configuration>
                    <source>11</source>
                    <target>11</target>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>${maven-surefire-plugin.version}</version>
                <configuration>
                    <skipTests>false</skipTests>
                    <includes>
                        <include>**/*Test.java</include>
                    </includes>
                </configuration>
                <!-- NOTE(review): surefire's test goal is already bound to the
                     "test" phase by default; this extra binding runs the same
                     unit tests a second time during integration-test - confirm
                     whether that is intended. -->
                <executions>
                    <execution>
                        <phase>integration-test</phase>
                        <goals>
                            <goal>test</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

View File

@ -0,0 +1,188 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.driver;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.DataTO;
import com.cloud.host.Host;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.utils.Pair;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.HashMap;
import java.util.Map;
/**
 * {@link PrimaryDataStoreDriver} implementation for NetApp ONTAP primary storage.
 *
 * <p>Most data-path operations are not yet implemented: creation/deletion/copy throw
 * {@link UnsupportedOperationException}, and the capability map advertises that
 * storage-system snapshots and volume-from-snapshot are unsupported. Methods that
 * return fixed values below are stubs; see the per-method notes.</p>
 */
public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
    private static final Logger logger = LogManager.getLogger(OntapPrimaryDatastoreDriver.class);

    /**
     * Reports this driver's capabilities to the storage framework.
     *
     * @return map declaring that storage-system snapshots and creating volumes
     *         from snapshots are both unsupported
     */
    @Override
    public Map<String, String> getCapabilities() {
        logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
        Map<String, String> mapCapabilities = new HashMap<>();
        mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString());
        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString());
        return mapCapabilities;
    }

    /** Not implemented; always returns {@code null}. */
    @Override
    public DataTO getTO(DataObject data) {
        return null;
    }

    /** Not implemented; always returns {@code null}. */
    @Override
    public DataStoreTO getStoreTO(DataStore store) {
        return null;
    }

    /** @throws UnsupportedOperationException always - create is not implemented yet. */
    @Override
    public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
        throw new UnsupportedOperationException("Create operation is not supported for ONTAP primary storage.");
    }

    /** @throws UnsupportedOperationException always - delete is not implemented yet. */
    @Override
    public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
        throw new UnsupportedOperationException("Delete operation is not supported for ONTAP primary storage.");
    }

    /** @throws UnsupportedOperationException always - copy is not implemented yet. */
    @Override
    public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) {
        throw new UnsupportedOperationException("Copy operation is not supported for ONTAP primary storage.");
    }

    /** @throws UnsupportedOperationException always - copy is not implemented yet. */
    @Override
    public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
        throw new UnsupportedOperationException("Copy operation is not supported for ONTAP primary storage.");
    }

    /** Copy is unsupported, so this always returns {@code false}. */
    @Override
    public boolean canCopy(DataObject srcData, DataObject destData) {
        return false;
    }

    // TODO(review): no-op that never invokes the completion callback; callers that
    // wait on the async result will hang. Complete the callback (or throw, like the
    // other unimplemented operations) once resize semantics are decided.
    @Override
    public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {}

    /** Not implemented; always returns {@code null} (no CHAP authentication info). */
    @Override
    public ChapInfo getChapInfo(DataObject dataObject) {
        return null;
    }

    /** Not implemented; always returns {@code false} (access is never granted here). */
    @Override
    public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
        return false;
    }

    /** @throws UnsupportedOperationException always - revoke is not implemented yet. */
    @Override
    public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
        throw new UnsupportedOperationException("Revoke access operation is not supported for ONTAP primary storage.");
    }

    /** Not implemented; returns 0 (no hypervisor snapshot reserve accounted). */
    @Override
    public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool storagePool) {
        return 0;
    }

    /** Not implemented; returns 0. */
    @Override
    public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
        return 0;
    }

    /** Not implemented; returns 0 used bytes. */
    @Override
    public long getUsedBytes(StoragePool storagePool) {
        return 0;
    }

    /** Not implemented; returns 0 used IOPS. */
    @Override
    public long getUsedIops(StoragePool storagePool) {
        return 0;
    }

    // TODO(review): no-op that never invokes the completion callback (see resize).
    @Override
    public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {}

    // TODO(review): no-op that never invokes the completion callback (see resize).
    @Override
    public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {}

    /** No QoS handling is performed during volume migration. */
    @Override
    public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {}

    /** Storage stats are not provided ({@link #getStorageStats} returns {@code null}). */
    @Override
    public boolean canProvideStorageStats() {
        return false;
    }

    /** Not implemented; always returns {@code null}. */
    @Override
    public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
        return null;
    }

    /**
     * Volume stats are not provided yet.
     *
     * <p>Fix: previously returned {@code true} while {@link #getVolumeStats} returns
     * {@code null}, so the framework was told stats were available and then handed a
     * null pair. Must return {@code false} until getVolumeStats is implemented.</p>
     */
    @Override
    public boolean canProvideVolumeStats() {
        return false;
    }

    /** Not implemented; always returns {@code null}. */
    @Override
    public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
        return null;
    }

    /** Assumes every host can access the pool; no connectivity check is performed yet. */
    @Override
    public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
        return true;
    }

    // NOTE(review): advertises that VM info is needed, but provideVmInfo is a no-op -
    // confirm whether this should be false until the hook is implemented.
    @Override
    public boolean isVmInfoNeeded() {
        return true;
    }

    /** No-op; VM info is currently ignored. */
    @Override
    public void provideVmInfo(long vmId, long volumeId) {}

    // NOTE(review): advertises that VM tags are needed, but provideVmTags is a no-op -
    // confirm whether this should be false until the hook is implemented.
    @Override
    public boolean isVmTagsNeeded(String tagKey) {
        return true;
    }

    /** No-op; VM tags are currently ignored. */
    @Override
    public void provideVmTags(long vmId, long volumeId, String tagValue) {}

    /** Declares HA support for all storage pool types backed by this driver. */
    @Override
    public boolean isStorageSupportHA(Storage.StoragePoolType type) {
        return true;
    }

    /** No-op; detach-from-all-nodes is not implemented for ONTAP yet. */
    @Override
    public void detachVolumeFromAllStorageNodes(Volume volume) {}
}

View File

@ -0,0 +1,45 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign;
import feign.Feign;
public class FeignClientFactory {
private final FeignConfiguration feignConfiguration;
public FeignClientFactory() {
this.feignConfiguration = new FeignConfiguration();
}
public FeignClientFactory(FeignConfiguration feignConfiguration) {
this.feignConfiguration = feignConfiguration;
}
public <T> T createClient(Class<T> clientClass, String baseURL) {
return Feign.builder()
.client(feignConfiguration.createClient())
.encoder(feignConfiguration.createEncoder())
.decoder(feignConfiguration.createDecoder())
.retryer(feignConfiguration.createRetryer())
.requestInterceptor(feignConfiguration.createRequestInterceptor())
.target(clientClass, baseURL);
}
}

View File

@ -0,0 +1,158 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign;
import com.fasterxml.jackson.databind.ObjectMapper;
import feign.RequestInterceptor;
import feign.Retryer;
import feign.Client;
import feign.httpclient.ApacheHttpClient;
import feign.codec.Decoder;
import feign.codec.Encoder;
import feign.Response;
import feign.codec.DecodeException;
import feign.codec.EncodeException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustAllStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.ssl.SSLContexts;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.TimeUnit;
/**
 * Supplies the building blocks for Feign clients targeting the ONTAP REST API:
 * a pooled Apache HTTP client, JSON encoder/decoder backed by a shared Jackson
 * {@link ObjectMapper}, a bounded retryer and a request-logging interceptor.
 */
public class FeignConfiguration {
    private static final Logger logger = LogManager.getLogger(FeignConfiguration.class);

    /** Maximum retry attempts used by {@link #createRetryer()}. */
    private final int retryMaxAttempt = 3;
    /** Upper bound, in seconds, on the retryer's back-off interval. */
    private final int retryMaxIntervalInSecs = 5;

    // Fix: these limits were previously String constants parsed at runtime inside
    // try/catch blocks that could never fail, with fallback defaults (20 / 2) that
    // disagreed with the constants themselves (80 / 20). Plain int constants express
    // the same configuration without the dead, misleading parsing code.
    /** Total connection-pool size for the shared HTTP client. */
    private static final int MAX_CONNECTIONS_TOTAL = 80;
    /** Per-route connection limit for the shared HTTP client. */
    private static final int MAX_CONNECTIONS_PER_ROUTE = 20;

    /** Shared, thread-safe mapper used by both the encoder and the decoder. */
    private final ObjectMapper objectMapper;

    public FeignConfiguration() {
        this.objectMapper = new ObjectMapper();
        // Tolerate fields added by newer ONTAP releases instead of failing deserialization.
        this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    }

    /**
     * Builds the Apache HTTP client used by all Feign clients.
     *
     * @return a Feign {@link Client} wrapping a pooled {@link CloseableHttpClient}
     */
    public Client createClient() {
        logger.debug("ontapFeignClient: maxConn={}, maxConnPerRoute={}", MAX_CONNECTIONS_TOTAL, MAX_CONNECTIONS_PER_ROUTE);
        // Returning 0 ms keep-alive means connections are not reused after a response -
        // presumably intentional to avoid stale connections to the filer; confirm.
        ConnectionKeepAliveStrategy keepAliveStrategy = (response, context) -> 0;
        CloseableHttpClient httpClient = HttpClientBuilder.create()
                .setMaxConnTotal(MAX_CONNECTIONS_TOTAL)
                .setMaxConnPerRoute(MAX_CONNECTIONS_PER_ROUTE)
                .setKeepAliveStrategy(keepAliveStrategy)
                .setSSLSocketFactory(getSSLSocketFactory())
                .setConnectionTimeToLive(60, TimeUnit.SECONDS)
                .build();
        return new ApacheHttpClient(httpClient);
    }

    /**
     * Builds an SSL socket factory that trusts all certificates and skips hostname
     * verification.
     *
     * <p>NOTE(review): trust-all + {@link NoopHostnameVerifier} disables TLS peer
     * authentication entirely, exposing the management channel to MITM. Consider
     * making certificate validation configurable per storage pool.</p>
     */
    private SSLConnectionSocketFactory getSSLSocketFactory() {
        try {
            SSLContext sslContext = SSLContexts.custom().loadTrustMaterial(null, new TrustAllStrategy()).build();
            return new SSLConnectionSocketFactory(sslContext, new NoopHostnameVerifier());
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    }

    /**
     * Builds an interceptor that logs each outgoing request (URL, method, headers,
     * body). Headers are logged at TRACE so the Authorization header is not emitted
     * at the default level.
     */
    public RequestInterceptor createRequestInterceptor() {
        return template -> {
            logger.info("Feign Request URL: {}", template.url());
            logger.info("HTTP Method: {}", template.method());
            logger.trace("Headers: {}", template.headers());
            if (template.body() != null) {
                logger.info("Body: {}", new String(template.body(), StandardCharsets.UTF_8));
            }
        };
    }

    /**
     * Builds the default retryer: starts at 1 s, backs off up to
     * {@code retryMaxIntervalInSecs}, at most {@code retryMaxAttempt} attempts.
     */
    public Retryer createRetryer() {
        return new Retryer.Default(1000L, retryMaxIntervalInSecs * 1000L, retryMaxAttempt);
    }

    /**
     * Builds a JSON encoder: serializes the request object with the shared mapper
     * and sets {@code Content-Type: application/json}. A {@code null} object yields
     * an empty body.
     */
    public Encoder createEncoder() {
        return new Encoder() {
            @Override
            public void encode(Object object, Type bodyType, feign.RequestTemplate template) throws EncodeException {
                if (object == null) {
                    template.body(null, StandardCharsets.UTF_8);
                    return;
                }
                try {
                    byte[] jsonBytes = objectMapper.writeValueAsBytes(object);
                    template.body(jsonBytes, StandardCharsets.UTF_8);
                    template.header("Content-Type", "application/json");
                } catch (JsonProcessingException e) {
                    throw new EncodeException("Error encoding object to JSON", e);
                }
            }
        };
    }

    /**
     * Builds a JSON decoder: reads the full response body as UTF-8 and deserializes
     * it to the method's declared return type. A {@code null} body decodes to
     * {@code null}; decoding failures are wrapped in {@link DecodeException} with
     * the HTTP status and raw body logged for diagnosis.
     */
    public Decoder createDecoder() {
        return new Decoder() {
            @Override
            public Object decode(Response response, Type type) throws IOException, DecodeException {
                if (response.body() == null) {
                    logger.debug("Response body is null, returning null");
                    return null;
                }
                String json = null;
                try (InputStream bodyStream = response.body().asInputStream()) {
                    json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8);
                    logger.debug("Decoding JSON response: {}", json);
                    return objectMapper.readValue(json, objectMapper.getTypeFactory().constructType(type));
                } catch (IOException e) {
                    logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e);
                    throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
                } catch (Exception e) {
                    logger.error("Unexpected error during decoding. Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e);
                    throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e);
                }
            }
        };
    }
}

View File

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign.client;
import org.apache.cloudstack.storage.feign.model.Aggregate;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
/**
 * Feign client for the ONTAP storage-aggregates REST endpoints
 * ({@code /api/storage/aggregates}).
 *
 * <p>The {@code authHeader} parameter on each method is interpolated into the
 * {@code Authorization} header (e.g. a pre-built Basic credentials value).</p>
 */
public interface AggregateFeignClient {
/**
 * Lists all aggregates visible to the authenticated user.
 *
 * @param authHeader value for the {@code Authorization} header
 * @return wrapper holding the aggregate records returned by ONTAP
 */
@RequestLine("GET /api/storage/aggregates")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Aggregate> getAggregateResponse(@Param("authHeader") String authHeader);
/**
 * Fetches a single aggregate by its UUID.
 *
 * @param authHeader value for the {@code Authorization} header
 * @param uuid       ONTAP UUID of the aggregate
 * @return the aggregate details
 */
@RequestLine("GET /api/storage/aggregates/{uuid}")
@Headers({"Authorization: {authHeader}"})
Aggregate getAggregateByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
}

View File

@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign.client;
import org.apache.cloudstack.storage.feign.model.Cluster;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
/**
 * Feign client for the ONTAP cluster REST endpoint ({@code /api/cluster}).
 */
public interface ClusterFeignClient {
/**
 * Fetches cluster-level information.
 *
 * @param authHeader    value for the {@code Authorization} header
 * @param returnRecords interpolated into a {@code return_records} HTTP header
 * @return the cluster details
 */
// NOTE(review): ONTAP's return_records is documented as a QUERY PARAMETER, but
// here it is sent as an HTTP header - confirm the filer actually honors it this
// way, otherwise move it to the request line / @QueryMap.
@RequestLine("GET /api/cluster")
@Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
Cluster getCluster(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords);
}

View File

@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign.client;
import org.apache.cloudstack.storage.feign.model.Job;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
/**
 * Feign client for the ONTAP asynchronous-job REST endpoint
 * ({@code /api/cluster/jobs}), used to poll the status of long-running operations.
 */
public interface JobFeignClient {
/**
 * Fetches a job's current state by its UUID.
 *
 * @param authHeader value for the {@code Authorization} header
 * @param uuid       ONTAP UUID of the job to poll
 * @return the job record, including its current state
 */
@RequestLine("GET /api/cluster/jobs/{uuid}")
@Headers({"Authorization: {authHeader}"})
Job getJobByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
}

View File

@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.feign.client;
import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
import org.apache.cloudstack.storage.feign.model.FileInfo;
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
import java.util.Map;
/**
 * Feign client for ONTAP NAS REST endpoints: file operations on a volume
 * ({@code /api/storage/volumes/{uuid}/files/...}) and NFS export-policy
 * management ({@code /api/protocols/nfs/export-policies}).
 *
 * <p>Parameters without a Feign annotation ({@code FileInfo}, {@code ExportPolicy})
 * are serialized as the JSON request body, per Feign's body-parameter convention.</p>
 *
 * <p>NOTE(review): ONTAP file paths can contain {@code /}; values interpolated into
 * the {@code {path}} template may need explicit URL encoding (%2F) before the call -
 * confirm against the callers.</p>
 */
public interface NASFeignClient {
// File Operations
/**
 * Fetches metadata for a file or directory on the given volume.
 *
 * @param authHeader value for the {@code Authorization} header
 * @param volumeUUID UUID of the containing ONTAP volume
 * @param filePath   path of the file within the volume
 */
@RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<FileInfo> getFileResponse(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);
/** Deletes a file on the given volume. */
@RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void deleteFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath);
/** Updates file attributes; {@code fileInfo} is sent as the JSON request body. */
@RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void updateFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath,
FileInfo fileInfo);
/** Creates a file or directory; {@code file} is sent as the JSON request body. */
@RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}")
@Headers({"Authorization: {authHeader}"})
void createFile(@Param("authHeader") String authHeader,
@Param("volumeUuid") String volumeUUID,
@Param("path") String filePath,
FileInfo file);
// Export Policy Operations
/** Creates an NFS export policy; {@code exportPolicy} is the JSON request body. */
@RequestLine("POST /api/protocols/nfs/export-policies")
@Headers({"Authorization: {authHeader}"})
void createExportPolicy(@Param("authHeader") String authHeader,
ExportPolicy exportPolicy);
/**
 * Lists export policies matching the given query parameters.
 *
 * @param queryMap filter parameters appended to the request as a query string
 */
@RequestLine("GET /api/protocols/nfs/export-policies")
@Headers({"Authorization: {authHeader}"})
OntapResponse<ExportPolicy> getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, Object> queryMap);
/** Fetches a single export policy by its identifier. */
@RequestLine("GET /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader,
@Param("id") String id);
/** Deletes an export policy by its identifier. */
@RequestLine("DELETE /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
void deleteExportPolicyById(@Param("authHeader") String authHeader,
@Param("id") String id);
/** Updates an export policy; {@code request} is sent as the JSON request body. */
@RequestLine("PATCH /api/protocols/nfs/export-policies/{id}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<ExportPolicy> updateExportPolicy(@Param("authHeader") String authHeader,
@Param("id") String id,
ExportPolicy request);
}

Some files were not shown because too many files have changed in this diff Show More