mirror of https://github.com/apache/cloudstack.git
Merge branch 'main' of https://github.com/apache/cloudstack into clvm-enhancements
This commit is contained in:
commit
0e2bac1a06
|
|
@ -457,3 +457,18 @@ iscsi.session.cleanup.enabled=false
|
|||
|
||||
# Instance conversion VIRT_V2V_TMPDIR env var
|
||||
#convert.instance.env.virtv2v.tmpdir=
|
||||
|
||||
# Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
|
||||
# incremental.snapshot.retry.rebase.wait=60
|
||||
|
||||
# Path to the VDDK library directory for VMware to KVM conversion via VDDK,
|
||||
# passed to virt-v2v as -io vddk-libdir=<path>
|
||||
#vddk.lib.dir=
|
||||
|
||||
# Ordered VDDK transport preference for VMware to KVM conversion via VDDK, passed as
|
||||
# -io vddk-transports=<value> to virt-v2v. Example: nbd:nbdssl
|
||||
#vddk.transports=
|
||||
|
||||
# Optional vCenter SHA1 thumbprint for VMware to KVM conversion via VDDK, passed as
|
||||
# -io vddk-thumbprint=<value>. If unset, CloudStack computes it on the KVM host via openssl.
|
||||
#vddk.thumbprint=
|
||||
|
|
|
|||
|
|
@ -808,6 +808,30 @@ public class AgentProperties{
|
|||
*/
|
||||
public static final Property<String> CONVERT_ENV_VIRTV2V_TMPDIR = new Property<>("convert.instance.env.virtv2v.tmpdir", null, String.class);
|
||||
|
||||
/**
|
||||
* Path to the VDDK library directory on the KVM conversion host, used when converting VMs from VMware to KVM via VDDK.
|
||||
* This directory is passed to virt-v2v as <code>-io vddk-libdir=<path></code>.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_LIB_DIR = new Property<>("vddk.lib.dir", null, String.class);
|
||||
|
||||
/**
|
||||
* Ordered list of VDDK transports for virt-v2v, passed as <code>-io vddk-transports=<value></code>.
|
||||
* Example: <code>nbd:nbdssl</code>.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_TRANSPORTS = new Property<>("vddk.transports", null, String.class);
|
||||
|
||||
/**
|
||||
* vCenter TLS certificate thumbprint used by virt-v2v VDDK mode, passed as <code>-io vddk-thumbprint=<value></code>.
|
||||
* If unset, the KVM host computes it at runtime from the vCenter endpoint.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_THUMBPRINT = new Property<>("vddk.thumbprint", null, String.class);
|
||||
|
||||
/**
|
||||
* BGP controll CIDR
|
||||
* Data type: String.<br>
|
||||
|
|
@ -885,6 +909,11 @@ public class AgentProperties{
|
|||
*/
|
||||
public static final Property<Boolean> CREATE_FULL_CLONE = new Property<>("create.full.clone", false);
|
||||
|
||||
/**
|
||||
* Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
|
||||
* */
|
||||
public static final Property<Integer> INCREMENTAL_SNAPSHOT_RETRY_REBASE_WAIT = new Property<>("incremental.snapshot.retry.rebase.wait", 60);
|
||||
|
||||
|
||||
public static class Property <T>{
|
||||
private String name;
|
||||
|
|
|
|||
|
|
@ -26,10 +26,13 @@ public final class BucketTO {
|
|||
|
||||
private String secretKey;
|
||||
|
||||
private long accountId;
|
||||
|
||||
public BucketTO(Bucket bucket) {
|
||||
this.name = bucket.getName();
|
||||
this.accessKey = bucket.getAccessKey();
|
||||
this.secretKey = bucket.getSecretKey();
|
||||
this.accountId = bucket.getAccountId();
|
||||
}
|
||||
|
||||
public BucketTO(String name) {
|
||||
|
|
@ -47,4 +50,8 @@ public final class BucketTO {
|
|||
public String getSecretKey() {
|
||||
return this.secretKey;
|
||||
}
|
||||
|
||||
public long getAccountId() {
|
||||
return this.accountId;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -36,13 +36,17 @@ public class RemoteInstanceTO implements Serializable {
|
|||
private String vcenterPassword;
|
||||
private String vcenterHost;
|
||||
private String datacenterName;
|
||||
private String clusterName;
|
||||
private String hostName;
|
||||
|
||||
public RemoteInstanceTO() {
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName) {
|
||||
public RemoteInstanceTO(String instanceName, String clusterName, String hostName) {
|
||||
this.hypervisorType = Hypervisor.HypervisorType.VMware;
|
||||
this.instanceName = instanceName;
|
||||
this.clusterName = clusterName;
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) {
|
||||
|
|
@ -55,6 +59,12 @@ public class RemoteInstanceTO implements Serializable {
|
|||
this.datacenterName = datacenterName;
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName, String clusterName, String hostName) {
|
||||
this(instanceName, instancePath, vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
|
||||
this.clusterName = clusterName;
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public Hypervisor.HypervisorType getHypervisorType() {
|
||||
return this.hypervisorType;
|
||||
}
|
||||
|
|
@ -82,4 +92,12 @@ public class RemoteInstanceTO implements Serializable {
|
|||
public String getDatacenterName() {
|
||||
return datacenterName;
|
||||
}
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public String getHostName() {
|
||||
return hostName;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -57,6 +57,9 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
|
|||
String HOST_UEFI_ENABLE = "host.uefi.enable";
|
||||
String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
|
||||
String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
|
||||
String HOST_VDDK_SUPPORT = "host.vddk.support";
|
||||
String HOST_VDDK_LIB_DIR = "vddk.lib.dir";
|
||||
String HOST_VDDK_VERSION = "host.vddk.version";
|
||||
String HOST_OVFTOOL_VERSION = "host.ovftool.version";
|
||||
String HOST_VIRTV2V_VERSION = "host.virtv2v.version";
|
||||
String HOST_SSH_PORT = "host.ssh.port";
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ public interface ProjectService {
|
|||
|
||||
Project updateProject(long id, String name, String displayText, String newOwnerName, Long userId, Role newRole) throws ResourceAllocationException;
|
||||
|
||||
boolean addAccountToProject(long projectId, String accountName, String email, Long projectRoleId, Role projectRoleType);
|
||||
boolean addAccountToProject(long projectId, String accountName, String email, Long projectRoleId, Role projectRoleType) throws ResourceAllocationException;
|
||||
|
||||
boolean deleteAccountFromProject(long projectId, String accountName);
|
||||
|
||||
|
|
@ -100,6 +100,6 @@ public interface ProjectService {
|
|||
|
||||
Project findByProjectAccountIdIncludingRemoved(long projectAccountId);
|
||||
|
||||
boolean addUserToProject(Long projectId, String username, String email, Long projectRoleId, Role projectRole);
|
||||
boolean addUserToProject(Long projectId, String username, String email, Long projectRoleId, Role projectRole) throws ResourceAllocationException;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,9 +23,10 @@ import org.apache.cloudstack.api.InternalIdentity;
|
|||
|
||||
public interface VMTemplateStorageResourceAssoc extends InternalIdentity {
|
||||
public static enum Status {
|
||||
UNKNOWN, DOWNLOAD_ERROR, NOT_DOWNLOADED, DOWNLOAD_IN_PROGRESS, DOWNLOADED, ABANDONED, UPLOADED, NOT_UPLOADED, UPLOAD_ERROR, UPLOAD_IN_PROGRESS, CREATING, CREATED, BYPASSED
|
||||
UNKNOWN, DOWNLOAD_ERROR, NOT_DOWNLOADED, DOWNLOAD_IN_PROGRESS, DOWNLOADED, ABANDONED, LIMIT_REACHED, UPLOADED, NOT_UPLOADED, UPLOAD_ERROR, UPLOAD_IN_PROGRESS, CREATING, CREATED, BYPASSED
|
||||
}
|
||||
|
||||
List<Status> ERROR_DOWNLOAD_STATES = List.of(Status.DOWNLOAD_ERROR, Status.ABANDONED, Status.LIMIT_REACHED, Status.UNKNOWN);
|
||||
List<Status> PENDING_DOWNLOAD_STATES = List.of(Status.NOT_DOWNLOADED, Status.DOWNLOAD_IN_PROGRESS);
|
||||
|
||||
String getInstallPath();
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ import com.cloud.exception.ResourceAllocationException;
|
|||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.offering.ServiceOffering;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import org.apache.cloudstack.resourcelimit.Reserver;
|
||||
|
||||
public interface ResourceLimitService {
|
||||
|
||||
|
|
@ -191,6 +192,7 @@ public interface ResourceLimitService {
|
|||
*/
|
||||
public void checkResourceLimit(Account account, ResourceCount.ResourceType type, long... count) throws ResourceAllocationException;
|
||||
public void checkResourceLimitWithTag(Account account, ResourceCount.ResourceType type, String tag, long... count) throws ResourceAllocationException;
|
||||
public void checkResourceLimitWithTag(Account account, Long domainId, boolean considerSystemAccount, ResourceCount.ResourceType type, String tag, long... count) throws ResourceAllocationException;
|
||||
|
||||
/**
|
||||
* Gets the count of resources for a resource type and account
|
||||
|
|
@ -251,12 +253,12 @@ public interface ResourceLimitService {
|
|||
List<String> getResourceLimitStorageTags(DiskOffering diskOffering);
|
||||
void updateTaggedResourceLimitsAndCountsForAccounts(List<AccountResponse> responses, String tag);
|
||||
void updateTaggedResourceLimitsAndCountsForDomains(List<DomainResponse> responses, String tag);
|
||||
void checkVolumeResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering) throws ResourceAllocationException;
|
||||
|
||||
void checkVolumeResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
List<String> getResourceLimitStorageTagsForResourceCountOperation(Boolean display, DiskOffering diskOffering);
|
||||
void checkVolumeResourceLimitForDiskOfferingChange(Account owner, Boolean display, Long currentSize, Long newSize,
|
||||
DiskOffering currentOffering, DiskOffering newOffering) throws ResourceAllocationException;
|
||||
DiskOffering currentOffering, DiskOffering newOffering, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
|
||||
void checkPrimaryStorageResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering) throws ResourceAllocationException;
|
||||
void checkPrimaryStorageResourceLimit(Account owner, Boolean display, Long size, DiskOffering diskOffering, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
|
||||
void incrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
|
||||
void decrementVolumeResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
|
||||
|
|
@ -273,25 +275,23 @@ public interface ResourceLimitService {
|
|||
|
||||
void incrementVolumePrimaryStorageResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
|
||||
void decrementVolumePrimaryStorageResourceCount(long accountId, Boolean display, Long size, DiskOffering diskOffering);
|
||||
void checkVmResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template) throws ResourceAllocationException;
|
||||
void checkVmResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
void incrementVmResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template);
|
||||
void decrementVmResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template);
|
||||
|
||||
void checkVmResourceLimitsForServiceOfferingChange(Account owner, Boolean display, Long currentCpu, Long newCpu,
|
||||
Long currentMemory, Long newMemory, ServiceOffering currentOffering, ServiceOffering newOffering, VirtualMachineTemplate template) throws ResourceAllocationException;
|
||||
Long currentMemory, Long newMemory, ServiceOffering currentOffering, ServiceOffering newOffering, VirtualMachineTemplate template, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
|
||||
void checkVmResourceLimitsForTemplateChange(Account owner, Boolean display, ServiceOffering offering,
|
||||
VirtualMachineTemplate currentTemplate, VirtualMachineTemplate newTemplate) throws ResourceAllocationException;
|
||||
VirtualMachineTemplate currentTemplate, VirtualMachineTemplate newTemplate, List<Reserver> reservations) throws ResourceAllocationException;
|
||||
|
||||
void checkVmCpuResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu) throws ResourceAllocationException;
|
||||
void incrementVmCpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu);
|
||||
void decrementVmCpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long cpu);
|
||||
void checkVmMemoryResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory) throws ResourceAllocationException;
|
||||
void incrementVmMemoryResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory);
|
||||
void decrementVmMemoryResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long memory);
|
||||
|
||||
void checkVmGpuResourceLimit(Account owner, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu) throws ResourceAllocationException;
|
||||
void incrementVmGpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu);
|
||||
void decrementVmGpuResourceCount(long accountId, Boolean display, ServiceOffering serviceOffering, VirtualMachineTemplate template, Long gpu);
|
||||
|
||||
long recalculateDomainResourceCount(final long domainId, final ResourceType type, String tag);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -127,8 +127,8 @@ public enum ApiCommandResourceType {
|
|||
}
|
||||
|
||||
public static ApiCommandResourceType fromString(String value) {
|
||||
if (StringUtils.isNotEmpty(value) && EnumUtils.isValidEnum(ApiCommandResourceType.class, value)) {
|
||||
return valueOf(value);
|
||||
if (StringUtils.isNotBlank(value) && EnumUtils.isValidEnumIgnoreCase(ApiCommandResourceType.class, value)) {
|
||||
return EnumUtils.getEnumIgnoreCase(ApiCommandResourceType.class, value);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -509,6 +509,7 @@ public class ApiConstants {
|
|||
public static final String REPAIR = "repair";
|
||||
public static final String REPETITION_ALLOWED = "repetitionallowed";
|
||||
public static final String REQUIRES_HVM = "requireshvm";
|
||||
public static final String RESERVED_RESOURCE_DETAILS = "reservedresourcedetails";
|
||||
public static final String RESOURCES = "resources";
|
||||
public static final String RESOURCE_COUNT = "resourcecount";
|
||||
public static final String RESOURCE_NAME = "resourcename";
|
||||
|
|
@ -525,7 +526,6 @@ public class ApiConstants {
|
|||
public static final String SCHEDULE = "schedule";
|
||||
public static final String SCHEDULE_ID = "scheduleid";
|
||||
public static final String SCOPE = "scope";
|
||||
public static final String USER_SECRET_KEY = "usersecretkey";
|
||||
public static final String SEARCH_BASE = "searchbase";
|
||||
public static final String SECONDARY_IP = "secondaryip";
|
||||
public static final String SECURITY_GROUP_IDS = "securitygroupids";
|
||||
|
|
@ -629,6 +629,8 @@ public class ApiConstants {
|
|||
public static final String USERNAME = "username";
|
||||
public static final String USER_CONFIGURABLE = "userconfigurable";
|
||||
public static final String USER_SECURITY_GROUP_LIST = "usersecuritygrouplist";
|
||||
public static final String USER_SECRET_KEY = "usersecretkey";
|
||||
public static final String USE_VDDK = "usevddk";
|
||||
public static final String USE_VIRTUAL_NETWORK = "usevirtualnetwork";
|
||||
public static final String USE_VIRTUAL_ROUTER_IP_RESOLVER = "userouteripresolver";
|
||||
public static final String UPDATE_IN_SEQUENCE = "updateinsequence";
|
||||
|
|
|
|||
|
|
@ -179,6 +179,14 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
|
|||
description = "(only for importing VMs from VMware to KVM) optional - the ID of the guest OS for the imported VM.")
|
||||
private Long guestOsId;
|
||||
|
||||
@Parameter(name = ApiConstants.USE_VDDK,
|
||||
type = CommandType.BOOLEAN,
|
||||
since = "4.22.1",
|
||||
description = "(only for importing VMs from VMware to KVM) optional - if true, uses VDDK on the KVM conversion host for converting the VM. " +
|
||||
"This parameter is mutually exclusive with " + ApiConstants.FORCE_MS_TO_IMPORT_VM_FILES + ".")
|
||||
private Boolean useVddk;
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -255,6 +263,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
|
|||
return storagePoolId;
|
||||
}
|
||||
|
||||
public boolean getUseVddk() {
|
||||
return BooleanUtils.toBooleanDefaultIfNull(useVddk, true);
|
||||
}
|
||||
|
||||
public String getTmpPath() {
|
||||
return tmpPath;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.account;
|
|||
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import org.apache.cloudstack.api.ApiArgValidator;
|
||||
import org.apache.cloudstack.api.ApiCommandResourceType;
|
||||
import org.apache.cloudstack.api.BaseCmd;
|
||||
|
|
@ -106,7 +107,7 @@ public class AddAccountToProjectCmd extends BaseAsyncCmd {
|
|||
/////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public void execute() {
|
||||
public void execute() throws ResourceAllocationException {
|
||||
if (accountName == null && email == null) {
|
||||
throw new InvalidParameterValueException("Either accountName or email is required");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
package org.apache.cloudstack.api.command.user.account;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiArgValidator;
|
||||
|
|
@ -111,7 +112,7 @@ public class AddUserToProjectCmd extends BaseAsyncCmd {
|
|||
/////////////////////////////////////////////////////
|
||||
|
||||
@Override
|
||||
public void execute() {
|
||||
public void execute() throws ResourceAllocationException {
|
||||
validateInput();
|
||||
boolean result = _projectService.addUserToProject(getProjectId(), getUsername(), getEmail(), getProjectRoleId(), getRoleType());
|
||||
if (result) {
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ package org.apache.cloudstack.api.command.user.backup;
|
|||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
import org.apache.cloudstack.api.ACL;
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ApiErrorCode;
|
||||
|
|
@ -53,6 +54,7 @@ public class RestoreVolumeFromBackupAndAttachToVMCmd extends BaseAsyncCmd {
|
|||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@ACL
|
||||
@Parameter(name = ApiConstants.BACKUP_ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = BackupResponse.class,
|
||||
|
|
@ -60,12 +62,14 @@ public class RestoreVolumeFromBackupAndAttachToVMCmd extends BaseAsyncCmd {
|
|||
description = "ID of the Instance backup")
|
||||
private Long backupId;
|
||||
|
||||
@ACL
|
||||
@Parameter(name = ApiConstants.VOLUME_ID,
|
||||
type = CommandType.STRING,
|
||||
required = true,
|
||||
description = "ID of the volume backed up")
|
||||
private String volumeUuid;
|
||||
|
||||
@ACL
|
||||
@Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = UserVmResponse.class,
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
package org.apache.cloudstack.api.command.user.bucket;
|
||||
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -82,7 +83,7 @@ public class DeleteBucketCmd extends BaseCmd {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void execute() throws ConcurrentOperationException {
|
||||
public void execute() throws ConcurrentOperationException, ResourceAllocationException {
|
||||
CallContext.current().setEventDetails("Bucket ID: " + getResourceUuid(ApiConstants.ID));
|
||||
boolean result = _bucketService.deleteBucket(id, CallContext.current().getCallingAccount());
|
||||
SuccessResponse response = new SuccessResponse(getCommandName());
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.job;
|
|||
import java.util.Date;
|
||||
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiArgValidator;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseListAccountResourcesCmd;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
|
|
@ -40,6 +41,12 @@ public class ListAsyncJobsCmd extends BaseListAccountResourcesCmd {
|
|||
@Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = CommandType.UUID, entityType = ManagementServerResponse.class, description = "The id of the management server", since="4.19")
|
||||
private Long managementServerId;
|
||||
|
||||
@Parameter(name = ApiConstants.RESOURCE_ID, validations = {ApiArgValidator.UuidString}, type = CommandType.STRING, description = "the ID of the resource associated with the job", since="4.22.1")
|
||||
private String resourceId;
|
||||
|
||||
@Parameter(name = ApiConstants.RESOURCE_TYPE, type = CommandType.STRING, description = "the type of the resource associated with the job", since="4.22.1")
|
||||
private String resourceType;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -52,6 +59,14 @@ public class ListAsyncJobsCmd extends BaseListAccountResourcesCmd {
|
|||
return managementServerId;
|
||||
}
|
||||
|
||||
public String getResourceId() {
|
||||
return resourceId;
|
||||
}
|
||||
|
||||
public String getResourceType() {
|
||||
return resourceType;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -16,8 +16,8 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.api.command.user.job;
|
||||
|
||||
|
||||
import org.apache.cloudstack.api.APICommand;
|
||||
import org.apache.cloudstack.api.ApiArgValidator;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.BaseCmd;
|
||||
import org.apache.cloudstack.api.Parameter;
|
||||
|
|
@ -34,9 +34,15 @@ public class QueryAsyncJobResultCmd extends BaseCmd {
|
|||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, required = true, description = "The ID of the asynchronous job")
|
||||
@Parameter(name = ApiConstants.JOB_ID, type = CommandType.UUID, entityType = AsyncJobResponse.class, description = "The ID of the asynchronous job")
|
||||
private Long id;
|
||||
|
||||
@Parameter(name = ApiConstants.RESOURCE_ID, validations = {ApiArgValidator.UuidString}, type = CommandType.STRING, description = "the ID of the resource associated with the job", since="4.22.1")
|
||||
private String resourceId;
|
||||
|
||||
@Parameter(name = ApiConstants.RESOURCE_TYPE, type = CommandType.STRING, description = "the type of the resource associated with the job", since="4.22.1")
|
||||
private String resourceType;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -45,6 +51,14 @@ public class QueryAsyncJobResultCmd extends BaseCmd {
|
|||
return id;
|
||||
}
|
||||
|
||||
public String getResourceId() {
|
||||
return resourceId;
|
||||
}
|
||||
|
||||
public String getResourceType() {
|
||||
return resourceType;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -51,6 +51,7 @@ public class CreateVMFromBackupCmd extends BaseDeployVMCmd {
|
|||
//////////////// API parameters /////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
@ACL
|
||||
@Parameter(name = ApiConstants.BACKUP_ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = BackupResponse.class,
|
||||
|
|
|
|||
|
|
@ -32,6 +32,7 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse;
|
|||
import org.apache.cloudstack.api.response.DomainResponse;
|
||||
import org.apache.cloudstack.api.response.ProjectResponse;
|
||||
import org.apache.cloudstack.api.response.SnapshotResponse;
|
||||
import org.apache.cloudstack.api.response.StoragePoolResponse;
|
||||
import org.apache.cloudstack.api.response.UserVmResponse;
|
||||
import org.apache.cloudstack.api.response.VolumeResponse;
|
||||
import org.apache.cloudstack.api.response.ZoneResponse;
|
||||
|
|
@ -109,6 +110,13 @@ public class CreateVolumeCmd extends BaseAsyncCreateCustomIdCmd implements UserC
|
|||
description = "The ID of the Instance; to be used with snapshot Id, Instance to which the volume gets attached after creation")
|
||||
private Long virtualMachineId;
|
||||
|
||||
@Parameter(name = ApiConstants.STORAGE_ID,
|
||||
type = CommandType.UUID,
|
||||
entityType = StoragePoolResponse.class,
|
||||
description = "Storage pool ID to create the volume in. Cannot be used with the snapshotid parameter.",
|
||||
authorized = {RoleType.Admin})
|
||||
private Long storageId;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -153,6 +161,13 @@ public class CreateVolumeCmd extends BaseAsyncCreateCustomIdCmd implements UserC
|
|||
return projectId;
|
||||
}
|
||||
|
||||
public Long getStorageId() {
|
||||
if (snapshotId != null && storageId != null) {
|
||||
throw new IllegalArgumentException("StorageId parameter cannot be specified with the SnapshotId parameter.");
|
||||
}
|
||||
return storageId;
|
||||
}
|
||||
|
||||
public Boolean getDisplayVolume() {
|
||||
return displayVolume;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -85,6 +85,12 @@ public class ExtensionResponse extends BaseResponse {
|
|||
@Param(description = "Removal timestamp of the extension, if applicable")
|
||||
private Date removed;
|
||||
|
||||
@SerializedName(ApiConstants.RESERVED_RESOURCE_DETAILS)
|
||||
@Param(description = "Resource detail names as comma separated string that should be reserved and not visible " +
|
||||
"to end users",
|
||||
since = "4.22.1")
|
||||
protected String reservedResourceDetails;
|
||||
|
||||
public ExtensionResponse(String id, String name, String description, String type) {
|
||||
this.id = id;
|
||||
this.name = name;
|
||||
|
|
@ -179,4 +185,8 @@ public class ExtensionResponse extends BaseResponse {
|
|||
public void setRemoved(Date removed) {
|
||||
this.removed = removed;
|
||||
}
|
||||
|
||||
public void setReservedResourceDetails(String reservedResourceDetails) {
|
||||
this.reservedResourceDetails = reservedResourceDetails;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -235,7 +235,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer
|
|||
* @param forced Indicates if backup will be force removed or not
|
||||
* @return returns operation success
|
||||
*/
|
||||
boolean deleteBackup(final Long backupId, final Boolean forced);
|
||||
boolean deleteBackup(final Long backupId, final Boolean forced) throws ResourceAllocationException;
|
||||
|
||||
void validateBackupForZone(Long zoneId);
|
||||
|
||||
|
|
|
|||
|
|
@ -17,8 +17,11 @@
|
|||
|
||||
package org.apache.cloudstack.extension;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public interface ExtensionHelper {
|
||||
Long getExtensionIdForCluster(long clusterId);
|
||||
Extension getExtension(long id);
|
||||
Extension getExtensionForCluster(long clusterId);
|
||||
List<String> getExtensionReservedResourceDetails(long extensionId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,30 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.resourcelimit;
|
||||
|
||||
/**
|
||||
* Interface implemented by <code>CheckedReservation</code>.
|
||||
* </br></br>
|
||||
* This is defined in <code>cloud-api</code> to allow methods declared in modules that do not depend on <code>cloud-server</code>
|
||||
* to receive <code>CheckedReservations</code> as parameters.
|
||||
*/
|
||||
public interface Reserver extends AutoCloseable {
|
||||
|
||||
void close();
|
||||
|
||||
}
|
||||
|
|
@ -95,7 +95,7 @@ public interface BucketApiService {
|
|||
*/
|
||||
Bucket createBucket(CreateBucketCmd cmd);
|
||||
|
||||
boolean deleteBucket(long bucketId, Account caller);
|
||||
boolean deleteBucket(long bucketId, Account caller) throws ResourceAllocationException;
|
||||
|
||||
boolean updateBucket(UpdateBucketCmd cmd, Account caller) throws ResourceAllocationException;
|
||||
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.api.command.test;
|
||||
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import junit.framework.Assert;
|
||||
import junit.framework.TestCase;
|
||||
|
||||
|
|
@ -149,6 +150,8 @@ public class AddAccountToProjectCmdTest extends TestCase {
|
|||
addAccountToProjectCmd.execute();
|
||||
} catch (InvalidParameterValueException exception) {
|
||||
Assert.assertEquals("Either accountName or email is required", exception.getLocalizedMessage());
|
||||
} catch (ResourceAllocationException exception) {
|
||||
Assert.fail();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,8 @@ package com.cloud.agent.api;
|
|||
|
||||
public class CheckConvertInstanceCommand extends Command {
|
||||
boolean checkWindowsGuestConversionSupport = false;
|
||||
boolean useVddk = false;
|
||||
String vddkLibDir;
|
||||
|
||||
public CheckConvertInstanceCommand() {
|
||||
}
|
||||
|
|
@ -26,6 +28,11 @@ public class CheckConvertInstanceCommand extends Command {
|
|||
this.checkWindowsGuestConversionSupport = checkWindowsGuestConversionSupport;
|
||||
}
|
||||
|
||||
public CheckConvertInstanceCommand(boolean checkWindowsGuestConversionSupport, boolean useVddk) {
|
||||
this.checkWindowsGuestConversionSupport = checkWindowsGuestConversionSupport;
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
|
|
@ -34,4 +41,20 @@ public class CheckConvertInstanceCommand extends Command {
|
|||
public boolean getCheckWindowsGuestConversionSupport() {
|
||||
return checkWindowsGuestConversionSupport;
|
||||
}
|
||||
|
||||
public boolean isUseVddk() {
|
||||
return useVddk;
|
||||
}
|
||||
|
||||
public void setUseVddk(boolean useVddk) {
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public void setVddkLibDir(String vddkLibDir) {
|
||||
this.vddkLibDir = vddkLibDir;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,6 +31,10 @@ public class ConvertInstanceCommand extends Command {
|
|||
private boolean exportOvfToConversionLocation;
|
||||
private int threadsCountToExportOvf = 0;
|
||||
private String extraParams;
|
||||
private boolean useVddk;
|
||||
private String vddkLibDir;
|
||||
private String vddkTransports;
|
||||
private String vddkThumbprint;
|
||||
|
||||
public ConvertInstanceCommand() {
|
||||
}
|
||||
|
|
@ -90,6 +94,38 @@ public class ConvertInstanceCommand extends Command {
|
|||
this.extraParams = extraParams;
|
||||
}
|
||||
|
||||
public boolean isUseVddk() {
|
||||
return useVddk;
|
||||
}
|
||||
|
||||
public void setUseVddk(boolean useVddk) {
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public void setVddkLibDir(String vddkLibDir) {
|
||||
this.vddkLibDir = vddkLibDir;
|
||||
}
|
||||
|
||||
public String getVddkTransports() {
|
||||
return vddkTransports;
|
||||
}
|
||||
|
||||
public void setVddkTransports(String vddkTransports) {
|
||||
this.vddkTransports = vddkTransports;
|
||||
}
|
||||
|
||||
public String getVddkThumbprint() {
|
||||
return vddkThumbprint;
|
||||
}
|
||||
|
||||
public void setVddkThumbprint(String vddkThumbprint) {
|
||||
this.vddkThumbprint = vddkThumbprint;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ import com.cloud.resource.ResourceState;
|
|||
public class PropagateResourceEventCommand extends Command {
|
||||
long hostId;
|
||||
ResourceState.Event event;
|
||||
boolean forced;
|
||||
boolean forceDeleteStorage;
|
||||
|
||||
protected PropagateResourceEventCommand() {
|
||||
|
||||
|
|
@ -34,6 +36,13 @@ public class PropagateResourceEventCommand extends Command {
|
|||
this.event = event;
|
||||
}
|
||||
|
||||
public PropagateResourceEventCommand(long hostId, ResourceState.Event event, boolean forced, boolean forceDeleteStorage) {
|
||||
this.hostId = hostId;
|
||||
this.event = event;
|
||||
this.forced = forced;
|
||||
this.forceDeleteStorage = forceDeleteStorage;
|
||||
}
|
||||
|
||||
public long getHostId() {
|
||||
return hostId;
|
||||
}
|
||||
|
|
@ -42,6 +51,14 @@ public class PropagateResourceEventCommand extends Command {
|
|||
return event;
|
||||
}
|
||||
|
||||
public boolean isForced() {
|
||||
return forced;
|
||||
}
|
||||
|
||||
public boolean isForceDeleteStorage() {
|
||||
return forceDeleteStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
// TODO Auto-generated method stub
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
public String lbStatsAuth = "admin1:AdMiN123";
|
||||
public String lbStatsUri = "/admin?stats";
|
||||
public String maxconn = "";
|
||||
public Long idleTimeout = 50000L; /* 0=infinite, >0 = timeout in milliseconds */
|
||||
public String lbProtocol;
|
||||
public boolean keepAliveEnabled = false;
|
||||
NicTO nic;
|
||||
|
|
@ -50,7 +51,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
}
|
||||
|
||||
public LoadBalancerConfigCommand(LoadBalancerTO[] loadBalancers, String publicIp, String guestIp, String privateIp, NicTO nic, Long vpcId, String maxconn,
|
||||
boolean keepAliveEnabled) {
|
||||
boolean keepAliveEnabled, Long idleTimeout) {
|
||||
this.loadBalancers = loadBalancers;
|
||||
this.lbStatsPublicIP = publicIp;
|
||||
this.lbStatsPrivateIP = privateIp;
|
||||
|
|
@ -59,6 +60,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
this.vpcId = vpcId;
|
||||
this.maxconn = maxconn;
|
||||
this.keepAliveEnabled = keepAliveEnabled;
|
||||
this.idleTimeout = idleTimeout;
|
||||
}
|
||||
|
||||
public NicTO getNic() {
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ public class DownloadAnswer extends Answer {
|
|||
}
|
||||
|
||||
public Long getTemplateSize() {
|
||||
return templateSize;
|
||||
return templateSize == 0 ? templatePhySicalSize : templateSize;
|
||||
}
|
||||
|
||||
public void setTemplatePhySicalSize(long templatePhySicalSize) {
|
||||
|
|
|
|||
|
|
@ -635,6 +635,19 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
|
|||
if (lbCmd.keepAliveEnabled) {
|
||||
dSection.set(7, "\tno option httpclose");
|
||||
}
|
||||
if (lbCmd.idleTimeout > 0) {
|
||||
dSection.set(9, "\ttimeout client " + Long.toString(lbCmd.idleTimeout));
|
||||
dSection.set(10, "\ttimeout server " + Long.toString(lbCmd.idleTimeout));
|
||||
} else if (lbCmd.idleTimeout == 0) {
|
||||
// .remove() is not allowed, only .set() operations are allowed as the list
|
||||
// is a fixed size. So lets just mark the entry as blank.
|
||||
dSection.set(9, "");
|
||||
dSection.set(10, "");
|
||||
} else {
|
||||
// Negative idleTimeout values are considered invalid; retain the
|
||||
// default HAProxy timeout values from defaultsSection for predictability.
|
||||
logger.warn("Negative idleTimeout ({}) configured; retaining default HAProxy timeouts.", lbCmd.idleTimeout);
|
||||
}
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
for (final String s : dSection) {
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ package org.apache.cloudstack.direct.download;
|
|||
import com.cloud.utils.UriUtils;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.utils.security.DigestHelper;
|
||||
import org.apache.commons.io.FilenameUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
|
@ -33,6 +34,7 @@ import java.security.NoSuchAlgorithmException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
public abstract class DirectTemplateDownloaderImpl implements DirectTemplateDownloader {
|
||||
|
||||
|
|
@ -128,15 +130,14 @@ public abstract class DirectTemplateDownloaderImpl implements DirectTemplateDown
|
|||
*/
|
||||
protected File createTemporaryDirectoryAndFile(String downloadDir) {
|
||||
createFolder(downloadDir);
|
||||
return new File(downloadDir + File.separator + getFileNameFromUrl());
|
||||
return new File(downloadDir + File.separator + getTemporaryFileName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Return filename from url
|
||||
* Return filename from the temporary download file
|
||||
*/
|
||||
public String getFileNameFromUrl() {
|
||||
String[] urlParts = url.split("/");
|
||||
return urlParts[urlParts.length - 1];
|
||||
public String getTemporaryFileName() {
|
||||
return String.format("%s.%s", UUID.randomUUID(), FilenameUtils.getExtension(url));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -97,7 +97,7 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm
|
|||
DirectTemplateDownloader urlDownloader = createDownloaderForMetalinks(getUrl(), getTemplateId(), getDestPoolPath(),
|
||||
getChecksum(), headers, connectTimeout, soTimeout, null, temporaryDownloadPath);
|
||||
try {
|
||||
setDownloadedFilePath(downloadDir + File.separator + getFileNameFromUrl());
|
||||
setDownloadedFilePath(downloadDir + File.separator + getTemporaryFileName());
|
||||
File f = new File(getDownloadedFilePath());
|
||||
if (f.exists()) {
|
||||
f.delete();
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ public class NfsDirectTemplateDownloader extends DirectTemplateDownloaderImpl {
|
|||
String mount = String.format(mountCommand, srcHost + ":" + srcPath, "/mnt/" + mountSrcUuid);
|
||||
Script.runSimpleBashScript(mount);
|
||||
String downloadDir = getDestPoolPath() + File.separator + getDirectDownloadTempPath(getTemplateId());
|
||||
setDownloadedFilePath(downloadDir + File.separator + getFileNameFromUrl());
|
||||
setDownloadedFilePath(downloadDir + File.separator + getTemporaryFileName());
|
||||
Script.runSimpleBashScript("cp /mnt/" + mountSrcUuid + srcPath + " " + getDownloadedFilePath());
|
||||
Script.runSimpleBashScript("umount /mnt/" + mountSrcUuid);
|
||||
return new Pair<>(true, getDownloadedFilePath());
|
||||
|
|
|
|||
|
|
@ -19,6 +19,9 @@
|
|||
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
import com.cloud.configuration.Resource;
|
||||
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
|
||||
|
||||
public class TemplateOrVolumePostUploadCommand {
|
||||
|
||||
long entityId;
|
||||
|
|
@ -185,6 +188,11 @@ public class TemplateOrVolumePostUploadCommand {
|
|||
this.description = description;
|
||||
}
|
||||
|
||||
public void setDefaultMaxSecondaryStorageInBytes(long defaultMaxSecondaryStorageInBytes) {
|
||||
this.defaultMaxSecondaryStorageInGB = defaultMaxSecondaryStorageInBytes != Resource.RESOURCE_UNLIMITED ?
|
||||
ByteScaleUtils.bytesToGibibytes(defaultMaxSecondaryStorageInBytes) : Resource.RESOURCE_UNLIMITED;
|
||||
}
|
||||
|
||||
public void setDefaultMaxSecondaryStorageInGB(long defaultMaxSecondaryStorageInGB) {
|
||||
this.defaultMaxSecondaryStorageInGB = defaultMaxSecondaryStorageInGB;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ public class UploadStatusCommand extends Command {
|
|||
}
|
||||
private String entityUuid;
|
||||
private EntityType entityType;
|
||||
private Boolean abort;
|
||||
|
||||
protected UploadStatusCommand() {
|
||||
}
|
||||
|
|
@ -37,6 +38,11 @@ public class UploadStatusCommand extends Command {
|
|||
this.entityType = entityType;
|
||||
}
|
||||
|
||||
public UploadStatusCommand(String entityUuid, EntityType entityType, Boolean abort) {
|
||||
this(entityUuid, entityType);
|
||||
this.abort = abort;
|
||||
}
|
||||
|
||||
public String getEntityUuid() {
|
||||
return entityUuid;
|
||||
}
|
||||
|
|
@ -45,6 +51,10 @@ public class UploadStatusCommand extends Command {
|
|||
return entityType;
|
||||
}
|
||||
|
||||
public Boolean getAbort() {
|
||||
return abort;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -235,7 +235,7 @@ public class ConfigHelperTest {
|
|||
lbs.toArray(arrayLbs);
|
||||
|
||||
final NicTO nic = new NicTO();
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false, 0L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
|
||||
|
|
|
|||
|
|
@ -779,7 +779,7 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
|
|||
final LoadBalancerTO[] arrayLbs = new LoadBalancerTO[lbs.size()];
|
||||
lbs.toArray(arrayLbs);
|
||||
final NicTO nic = new NicTO();
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false, 50000L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
return cmd;
|
||||
|
|
@ -795,7 +795,7 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
|
|||
lbs.toArray(arrayLbs);
|
||||
final NicTO nic = new NicTO();
|
||||
nic.setIp("10.1.10.2");
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, Long.valueOf(1), "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, Long.valueOf(1), "1000", false, 50000L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
return cmd;
|
||||
|
|
|
|||
|
|
@ -79,13 +79,14 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("keepalive disabled should result in 'option httpclose' in the resulting haproxy config", result.contains("\toption httpclose"));
|
||||
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true);
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 0L);
|
||||
result = genConfig(hpg, cmd);
|
||||
assertTrue("keepalive enabled should result in 'no option httpclose' in the resulting haproxy config", result.contains("\tno option httpclose"));
|
||||
|
||||
// TODO
|
||||
// create lb command
|
||||
// setup tests for
|
||||
|
|
@ -93,6 +94,27 @@ public class HAProxyConfiguratorTest {
|
|||
// httpmode
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link com.cloud.network.HAProxyConfigurator#generateConfiguration(com.cloud.agent.api.routing.LoadBalancerConfigCommand)}.
|
||||
*/
|
||||
@Test
|
||||
public void testGenerateConfigurationLoadBalancerIdleTimeoutConfigCommand() {
|
||||
LoadBalancerTO lb = new LoadBalancerTO("1", "10.2.0.1", 80, "http", "bla", false, false, false, null);
|
||||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("idleTimeout of 0 should not generate 'timeout server' in the resulting haproxy config", !result.contains("\ttimeout server"));
|
||||
assertTrue("idleTimeout of 0 should not generate 'timeout client' in the resulting haproxy config", !result.contains("\ttimeout client"));
|
||||
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 1234L);
|
||||
result = genConfig(hpg, cmd);
|
||||
assertTrue("idleTimeout of 1234 should result in 'timeout server 1234' in the resulting haproxy config", result.contains("\ttimeout server 1234"));
|
||||
assertTrue("idleTimeout of 1234 should result in 'timeout client 1234' in the resulting haproxy config", result.contains("\ttimeout client 1234"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link com.cloud.network.HAProxyConfigurator#generateConfiguration(com.cloud.agent.api.routing.LoadBalancerConfigCommand)}.
|
||||
*/
|
||||
|
|
@ -106,7 +128,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("'send-proxy' should result if protocol is 'tcp-proxy'", result.contains("send-proxy"));
|
||||
}
|
||||
|
|
@ -118,7 +140,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
Assert.assertTrue(result.contains("acl network_allowed src 1.1.1.1 2.2.2.2/24 \n\ttcp-request connection reject if !network_allowed"));
|
||||
}
|
||||
|
|
@ -131,7 +153,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
Assert.assertTrue(result.contains("bind 10.2.0.1:443 ssl crt /etc/cloudstack/ssl/10_2_0_1-443.pem"));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -122,6 +122,14 @@ public interface NetworkOrchestrationService {
|
|||
"Load Balancer(haproxy) maximum number of concurrent connections(global max)",
|
||||
true,
|
||||
Scope.Global);
|
||||
ConfigKey<Long> NETWORK_LB_HAPROXY_IDLE_TIMEOUT = new ConfigKey<>(
|
||||
"Network",
|
||||
Long.class,
|
||||
"network.loadbalancer.haproxy.idle.timeout",
|
||||
"50000",
|
||||
"Load Balancer(haproxy) idle timeout in milliseconds. Use 0 for infinite.",
|
||||
true,
|
||||
Scope.Global);
|
||||
|
||||
List<? extends Network> setupNetwork(Account owner, NetworkOffering offering, DeploymentPlan plan, String name, String displayText, boolean isDefault)
|
||||
throws ConcurrentOperationException;
|
||||
|
|
@ -310,7 +318,7 @@ public interface NetworkOrchestrationService {
|
|||
|
||||
void removeDhcpServiceInSubnet(Nic nic);
|
||||
|
||||
boolean resourceCountNeedsUpdate(NetworkOffering ntwkOff, ACLType aclType);
|
||||
boolean isResourceCountUpdateNeeded(NetworkOffering networkOffering);
|
||||
|
||||
void prepareAllNicsForMigration(VirtualMachineProfile vm, DeployDestination dest);
|
||||
|
||||
|
|
|
|||
|
|
@ -120,7 +120,7 @@ public interface VolumeOrchestrationService {
|
|||
void destroyVolume(Volume volume);
|
||||
|
||||
DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
|
||||
Account owner, Long deviceId);
|
||||
Account owner, Long deviceId, boolean incrementResourceCount);
|
||||
|
||||
VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException;
|
||||
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import static org.apache.cloudstack.framework.config.ConfigKey.Scope.Cluster;
|
|||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.Status;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.utils.component.Manager;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
|
|
@ -32,6 +33,8 @@ import java.util.List;
|
|||
*/
|
||||
public interface HighAvailabilityManager extends Manager {
|
||||
|
||||
List<StoragePoolType> LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT = List.of(StoragePoolType.NetworkFilesystem, StoragePoolType.SharedMountPoint);
|
||||
|
||||
ConfigKey<Boolean> ForceHA = new ConfigKey<>("Advanced", Boolean.class, "force.ha", "false",
|
||||
"Force High-Availability to happen even if the VM says no.", true, Cluster);
|
||||
|
||||
|
|
|
|||
|
|
@ -122,6 +122,8 @@ public interface ResourceManager extends ResourceService, Configurable {
|
|||
|
||||
public boolean executeUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException;
|
||||
|
||||
boolean executeUserRequest(long hostId, ResourceState.Event event, boolean isForced, boolean isForceDeleteStorage) throws AgentUnavailableException;
|
||||
|
||||
boolean resourceStateTransitTo(Host host, Event event, long msId) throws NoTransitionException;
|
||||
|
||||
boolean umanageHost(long hostId);
|
||||
|
|
|
|||
|
|
@ -805,8 +805,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
|
|||
String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
|
||||
String virtv2vVersion = detailsMap.get(Host.HOST_VIRTV2V_VERSION);
|
||||
String ovftoolVersion = detailsMap.get(Host.HOST_OVFTOOL_VERSION);
|
||||
String vddkSupport = detailsMap.get(Host.HOST_VDDK_SUPPORT);
|
||||
String vddkLibDir = detailsMap.get(Host.HOST_VDDK_LIB_DIR);
|
||||
String vddkVersion = detailsMap.get(Host.HOST_VDDK_VERSION);
|
||||
logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host);
|
||||
if (ObjectUtils.anyNotNull(uefiEnabled, virtv2vVersion, ovftoolVersion)) {
|
||||
if (ObjectUtils.anyNotNull(uefiEnabled, virtv2vVersion, ovftoolVersion, vddkSupport, vddkLibDir, vddkVersion)) {
|
||||
_hostDao.loadDetails(host);
|
||||
boolean updateNeeded = false;
|
||||
if (StringUtils.isNotBlank(uefiEnabled) && !uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
|
||||
|
|
@ -821,6 +824,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
|
|||
host.getDetails().put(Host.HOST_OVFTOOL_VERSION, ovftoolVersion);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkSupport) && !vddkSupport.equals(host.getDetails().get(Host.HOST_VDDK_SUPPORT))) {
|
||||
host.getDetails().put(Host.HOST_VDDK_SUPPORT, vddkSupport);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (!StringUtils.defaultString(vddkLibDir).equals(StringUtils.defaultString(host.getDetails().get(Host.HOST_VDDK_LIB_DIR)))) {
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
host.getDetails().remove(Host.HOST_VDDK_LIB_DIR);
|
||||
} else {
|
||||
host.getDetails().put(Host.HOST_VDDK_LIB_DIR, vddkLibDir);
|
||||
}
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (!StringUtils.defaultString(vddkVersion).equals(StringUtils.defaultString(host.getDetails().get(Host.HOST_VDDK_VERSION)))) {
|
||||
if (StringUtils.isBlank(vddkVersion)) {
|
||||
host.getDetails().remove(Host.HOST_VDDK_VERSION);
|
||||
} else {
|
||||
host.getDetails().put(Host.HOST_VDDK_VERSION, vddkVersion);
|
||||
}
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (updateNeeded) {
|
||||
_hostDao.saveDetails(host);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1306,11 +1306,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
|
||||
boolean result;
|
||||
try {
|
||||
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent());
|
||||
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent(), cmd.isForced(), cmd.isForceDeleteStorage());
|
||||
logger.debug("Result is {}", result);
|
||||
} catch (final AgentUnavailableException ex) {
|
||||
logger.warn("Agent is unavailable", ex);
|
||||
return null;
|
||||
} catch (final RuntimeException ex) {
|
||||
logger.error(String.format("Failed to execute propagated event %s for host %d", cmd.getEvent().name(), cmd.getHostId()), ex);
|
||||
final Answer[] answers = new Answer[1];
|
||||
String details = ex.getMessage();
|
||||
if (details == null || details.isEmpty()) {
|
||||
details = ex.toString();
|
||||
}
|
||||
answers[0] = new Answer(cmd, false, details);
|
||||
return _gson.toJson(answers);
|
||||
}
|
||||
|
||||
final Answer[] answers = new Answer[1];
|
||||
|
|
|
|||
|
|
@ -50,7 +50,6 @@ import javax.inject.Inject;
|
|||
import javax.naming.ConfigurationException;
|
||||
import javax.persistence.EntityExistsException;
|
||||
|
||||
|
||||
import com.cloud.agent.api.PostMigrationCommand;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
|
|
@ -314,7 +313,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO;
|
|||
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
|
||||
import com.google.gson.Gson;
|
||||
|
||||
|
||||
public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable {
|
||||
|
||||
public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName();
|
||||
|
|
@ -596,7 +594,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
Long deviceId = dataDiskDeviceIds.get(index++);
|
||||
String volumeName = deviceId == null ? "DATA-" + persistedVm.getId() : "DATA-" + persistedVm.getId() + "-" + String.valueOf(deviceId);
|
||||
volumeMgr.allocateRawVolume(Type.DATADISK, volumeName, dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
|
||||
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, deviceId);
|
||||
dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), persistedVm, template, owner, deviceId, true);
|
||||
}
|
||||
}
|
||||
if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) {
|
||||
|
|
@ -606,7 +604,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024);
|
||||
VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey());
|
||||
volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + persistedVm.getId() + "-" + String.valueOf( diskNumber), diskOffering, diskOfferingSize, null, null,
|
||||
persistedVm, dataDiskTemplate, owner, diskNumber);
|
||||
persistedVm, dataDiskTemplate, owner, diskNumber, true);
|
||||
diskNumber++;
|
||||
}
|
||||
}
|
||||
|
|
@ -636,7 +634,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
String rootVolumeName = String.format("ROOT-%s", vm.getId());
|
||||
if (template.getFormat() == ImageFormat.ISO) {
|
||||
volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
|
||||
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null);
|
||||
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null, true);
|
||||
} else if (Arrays.asList(ImageFormat.BAREMETAL, ImageFormat.EXTERNAL).contains(template.getFormat())) {
|
||||
logger.debug("{} has format [{}]. Skipping ROOT volume [{}] allocation.", template, template.getFormat(), rootVolumeName);
|
||||
} else {
|
||||
|
|
@ -2227,7 +2225,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final boolean force, final boolean checkBeforeCleanup) {
|
||||
final VirtualMachine vm = profile.getVirtualMachine();
|
||||
Map<String, Boolean> vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId());
|
||||
|
||||
StopCommand stpCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
|
||||
updateStopCommandForExternalHypervisorType(vm.getHypervisorType(), profile, stpCmd);
|
||||
if (MapUtils.isNotEmpty(vlanToPersistenceMap)) {
|
||||
|
|
@ -5340,9 +5337,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
|
||||
private void saveCustomOfferingDetails(long vmId, ServiceOffering serviceOffering) {
|
||||
Map<String, String> details = vmInstanceDetailsDao.listDetailsKeyPairs(vmId);
|
||||
details.put(UsageEventVO.DynamicParameters.cpuNumber.name(), serviceOffering.getCpu().toString());
|
||||
details.put(UsageEventVO.DynamicParameters.cpuSpeed.name(), serviceOffering.getSpeed().toString());
|
||||
details.put(UsageEventVO.DynamicParameters.memory.name(), serviceOffering.getRamSize().toString());
|
||||
|
||||
// We need to restore only the customizable parameters. If we save a parameter that is not customizable and attempt
|
||||
// to restore a VM snapshot, com.cloud.vm.UserVmManagerImpl.validateCustomParameters will fail.
|
||||
ServiceOffering unfilledOffering = _serviceOfferingDao.findByIdIncludingRemoved(serviceOffering.getId());
|
||||
if (unfilledOffering.getCpu() == null) {
|
||||
details.put(UsageEventVO.DynamicParameters.cpuNumber.name(), serviceOffering.getCpu().toString());
|
||||
}
|
||||
if (unfilledOffering.getSpeed() == null) {
|
||||
details.put(UsageEventVO.DynamicParameters.cpuSpeed.name(), serviceOffering.getSpeed().toString());
|
||||
}
|
||||
if (unfilledOffering.getRamSize() == null) {
|
||||
details.put(UsageEventVO.DynamicParameters.memory.name(), serviceOffering.getRamSize().toString());
|
||||
}
|
||||
|
||||
List<VMInstanceDetailVO> detailList = new ArrayList<>();
|
||||
for (Map.Entry<String, String> entry: details.entrySet()) {
|
||||
VMInstanceDetailVO detailVO = new VMInstanceDetailVO(vmId, entry.getKey(), entry.getValue(), true);
|
||||
|
|
|
|||
|
|
@ -58,6 +58,7 @@ import org.apache.cloudstack.framework.messagebus.PublishScope;
|
|||
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
|
||||
import org.apache.cloudstack.network.RoutedIpv4Manager;
|
||||
import org.apache.cloudstack.network.dao.NetworkPermissionDao;
|
||||
import org.apache.cloudstack.reservation.dao.ReservationDao;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.lang3.BooleanUtils;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
|
|
@ -86,6 +87,7 @@ import com.cloud.api.query.dao.DomainRouterJoinDao;
|
|||
import com.cloud.api.query.vo.DomainRouterJoinVO;
|
||||
import com.cloud.bgp.BGPService;
|
||||
import com.cloud.configuration.ConfigurationManager;
|
||||
import com.cloud.configuration.Resource;
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import com.cloud.dc.ASNumberVO;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
|
|
@ -214,6 +216,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao;
|
|||
import com.cloud.offerings.dao.NetworkOfferingDetailsDao;
|
||||
import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.resourcelimit.CheckedReservation;
|
||||
import com.cloud.server.ManagementServer;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.ResourceLimitService;
|
||||
|
|
@ -447,6 +450,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
ClusterDao clusterDao;
|
||||
@Inject
|
||||
RoutedIpv4Manager routedIpv4Manager;
|
||||
@Inject
|
||||
private ReservationDao reservationDao;
|
||||
|
||||
protected StateMachine2<Network.State, Network.Event, Network> _stateMachine;
|
||||
ScheduledExecutorService _executor;
|
||||
|
|
@ -2752,12 +2757,6 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
return null;
|
||||
}
|
||||
|
||||
final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, aclType);
|
||||
//check resource limits
|
||||
if (updateResourceCount) {
|
||||
_resourceLimitMgr.checkResourceLimit(owner, ResourceType.network, isDisplayNetworkEnabled);
|
||||
}
|
||||
|
||||
// Validate network offering
|
||||
if (ntwkOff.getState() != NetworkOffering.State.Enabled) {
|
||||
// see NetworkOfferingVO
|
||||
|
|
@ -2776,218 +2775,219 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
|
||||
boolean ipv6 = false;
|
||||
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
ipv6 = true;
|
||||
}
|
||||
// Validate zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
// In Basic zone the network should have aclType=Domain, domainId=1, subdomainAccess=true
|
||||
if (aclType == null || aclType != ACLType.Domain) {
|
||||
throw new InvalidParameterValueException("Only AclType=Domain can be specified for network creation in Basic zone");
|
||||
try (CheckedReservation networkReservation = new CheckedReservation(owner, domainId, Resource.ResourceType.network, null, null, 1L, reservationDao, _resourceLimitMgr)) {
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
ipv6 = true;
|
||||
}
|
||||
// Validate zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
// In Basic zone the network should have aclType=Domain, domainId=1, subdomainAccess=true
|
||||
if (aclType == null || aclType != ACLType.Domain) {
|
||||
throw new InvalidParameterValueException("Only AclType=Domain can be specified for network creation in Basic zone");
|
||||
}
|
||||
|
||||
// Only one guest network is supported in Basic zone
|
||||
final List<NetworkVO> guestNetworks = _networksDao.listByZoneAndTrafficType(zone.getId(), TrafficType.Guest);
|
||||
if (!guestNetworks.isEmpty()) {
|
||||
throw new InvalidParameterValueException("Can't have more than one Guest network in zone with network type " + NetworkType.Basic);
|
||||
}
|
||||
// Only one guest network is supported in Basic zone
|
||||
final List<NetworkVO> guestNetworks = _networksDao.listByZoneAndTrafficType(zone.getId(), TrafficType.Guest);
|
||||
if (!guestNetworks.isEmpty()) {
|
||||
throw new InvalidParameterValueException("Can't have more than one Guest network in zone with network type " + NetworkType.Basic);
|
||||
}
|
||||
|
||||
// if zone is basic, only Shared network offerings w/o source nat service are allowed
|
||||
if (!(ntwkOff.getGuestType() == GuestType.Shared && !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat))) {
|
||||
throw new InvalidParameterValueException("For zone of type " + NetworkType.Basic + " only offerings of " + "guestType " + GuestType.Shared + " with disabled "
|
||||
+ Service.SourceNat.getName() + " service are allowed");
|
||||
}
|
||||
// if zone is basic, only Shared network offerings w/o source nat service are allowed
|
||||
if (!(ntwkOff.getGuestType() == GuestType.Shared && !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat))) {
|
||||
throw new InvalidParameterValueException("For zone of type " + NetworkType.Basic + " only offerings of " + "guestType " + GuestType.Shared + " with disabled "
|
||||
+ Service.SourceNat.getName() + " service are allowed");
|
||||
}
|
||||
|
||||
if (domainId == null || domainId != Domain.ROOT_DOMAIN) {
|
||||
throw new InvalidParameterValueException("Guest network in Basic zone should be dedicated to ROOT domain");
|
||||
}
|
||||
if (domainId == null || domainId != Domain.ROOT_DOMAIN) {
|
||||
throw new InvalidParameterValueException("Guest network in Basic zone should be dedicated to ROOT domain");
|
||||
}
|
||||
|
||||
if (subdomainAccess == null) {
|
||||
subdomainAccess = true;
|
||||
} else if (!subdomainAccess) {
|
||||
throw new InvalidParameterValueException("Subdomain access should be set to true for the" + " guest network in the Basic zone");
|
||||
}
|
||||
if (subdomainAccess == null) {
|
||||
subdomainAccess = true;
|
||||
} else if (!subdomainAccess) {
|
||||
throw new InvalidParameterValueException("Subdomain access should be set to true for the" + " guest network in the Basic zone");
|
||||
}
|
||||
|
||||
if (vlanId == null) {
|
||||
vlanId = Vlan.UNTAGGED;
|
||||
} else {
|
||||
if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
throw new InvalidParameterValueException("Only vlan " + Vlan.UNTAGGED + " can be created in " + "the zone of type " + NetworkType.Basic);
|
||||
if (vlanId == null) {
|
||||
vlanId = Vlan.UNTAGGED;
|
||||
} else {
|
||||
if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
throw new InvalidParameterValueException("Only vlan " + Vlan.UNTAGGED + " can be created in " + "the zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (zone.getNetworkType() == NetworkType.Advanced) {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
if (isolatedPvlan != null) {
|
||||
throw new InvalidParameterValueException("Isolated Private VLAN is not supported with security group!");
|
||||
}
|
||||
// Only Account specific Isolated network with sourceNat service disabled are allowed in security group
|
||||
// enabled zone
|
||||
if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
|
||||
throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
|
||||
}
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
|
||||
throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");
|
||||
}
|
||||
}
|
||||
|
||||
//don't allow eip/elb networks in Advance zone
|
||||
if (ntwkOff.isElasticIp() || ntwkOff.isElasticLb()) {
|
||||
throw new InvalidParameterValueException("Elastic IP and Elastic LB services are supported in zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (zone.getNetworkType() == NetworkType.Advanced) {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
if (isolatedPvlan != null) {
|
||||
throw new InvalidParameterValueException("Isolated Private VLAN is not supported with security group!");
|
||||
}
|
||||
// Only Account specific Isolated network with sourceNat service disabled are allowed in security group
|
||||
// enabled zone
|
||||
if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
|
||||
throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
|
||||
}
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
|
||||
throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");
|
||||
if (ipv6 && !GuestType.Shared.equals(ntwkOff.getGuestType())) {
|
||||
_networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr);
|
||||
}
|
||||
|
||||
//TODO(VXLAN): Support VNI specified
|
||||
// VlanId can be specified only when network offering supports it
|
||||
final boolean vlanSpecified = vlanId != null;
|
||||
if (vlanSpecified != ntwkOff.isSpecifyVlan()) {
|
||||
if (vlanSpecified) {
|
||||
if (!isSharedNetworkWithoutSpecifyVlan(ntwkOff) && !isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
throw new InvalidParameterValueException("Can't specify vlan; corresponding offering says specifyVlan=false");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidParameterValueException("Vlan has to be specified; corresponding offering says specifyVlan=true");
|
||||
}
|
||||
}
|
||||
|
||||
//don't allow eip/elb networks in Advance zone
|
||||
if (ntwkOff.isElasticIp() || ntwkOff.isElasticLb()) {
|
||||
throw new InvalidParameterValueException("Elastic IP and Elastic LB services are supported in zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
if (ipv6 && !GuestType.Shared.equals(ntwkOff.getGuestType())) {
|
||||
_networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr);
|
||||
}
|
||||
|
||||
//TODO(VXLAN): Support VNI specified
|
||||
// VlanId can be specified only when network offering supports it
|
||||
final boolean vlanSpecified = vlanId != null;
|
||||
if (vlanSpecified != ntwkOff.isSpecifyVlan()) {
|
||||
if (vlanSpecified) {
|
||||
if (!isSharedNetworkWithoutSpecifyVlan(ntwkOff) && !isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
throw new InvalidParameterValueException("Can't specify vlan; corresponding offering says specifyVlan=false");
|
||||
URI uri = encodeVlanIdIntoBroadcastUri(vlanId, pNtwk);
|
||||
// Aux: generate secondary URI for secondary VLAN ID (if provided) for performing checks
|
||||
URI secondaryUri = StringUtils.isNotBlank(isolatedPvlan) ? BroadcastDomainType.fromString(isolatedPvlan) : null;
|
||||
if (isSharedNetworkWithoutSpecifyVlan(ntwkOff) || isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
bypassVlanOverlapCheck = true;
|
||||
}
|
||||
} else {
|
||||
throw new InvalidParameterValueException("Vlan has to be specified; corresponding offering says specifyVlan=true");
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanSpecified) {
|
||||
URI uri = encodeVlanIdIntoBroadcastUri(vlanId, pNtwk);
|
||||
// Aux: generate secondary URI for secondary VLAN ID (if provided) for performing checks
|
||||
URI secondaryUri = StringUtils.isNotBlank(isolatedPvlan) ? BroadcastDomainType.fromString(isolatedPvlan) : null;
|
||||
if (isSharedNetworkWithoutSpecifyVlan(ntwkOff) || isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
bypassVlanOverlapCheck = true;
|
||||
}
|
||||
//don't allow to specify vlan tag used by physical network for dynamic vlan allocation
|
||||
if (!(bypassVlanOverlapCheck && (ntwkOff.getGuestType() == GuestType.Shared || isPrivateNetwork))
|
||||
&& _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(uri)).size() > 0) {
|
||||
throw new InvalidParameterValueException("The VLAN tag to use for new guest network, " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone "
|
||||
+ zone.getName());
|
||||
}
|
||||
if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) &&
|
||||
_dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
}
|
||||
if (!UuidUtils.isUuid(vlanId)) {
|
||||
// For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone
|
||||
if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) {
|
||||
if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
vlanId, zone));
|
||||
} else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
} else {
|
||||
final List<DataCenterVnetVO> dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri));
|
||||
//for the network that is created as part of private gateway,
|
||||
//the vnet is not coming from the data center vnet table, so the list can be empty
|
||||
if (!dcVnets.isEmpty()) {
|
||||
final DataCenterVnetVO dcVnet = dcVnets.get(0);
|
||||
// Fail network creation if specified vlan is dedicated to a different account
|
||||
if (dcVnet.getAccountGuestVlanMapId() != null) {
|
||||
final Long accountGuestVlanMapId = dcVnet.getAccountGuestVlanMapId();
|
||||
final AccountGuestVlanMapVO map = _accountGuestVlanMapDao.findById(accountGuestVlanMapId);
|
||||
if (map.getAccountId() != owner.getAccountId()) {
|
||||
throw new InvalidParameterValueException("Vlan " + vlanId + " is dedicated to a different account");
|
||||
}
|
||||
// Fail network creation if owner has a dedicated range of vlans but the specified vlan belongs to the system pool
|
||||
} else {
|
||||
final List<AccountGuestVlanMapVO> maps = _accountGuestVlanMapDao.listAccountGuestVlanMapsByAccount(owner.getAccountId());
|
||||
if (maps != null && !maps.isEmpty()) {
|
||||
final int vnetsAllocatedToAccount = _datacenterVnetDao.countVnetsAllocatedToAccount(zoneId, owner.getAccountId());
|
||||
final int vnetsDedicatedToAccount = _datacenterVnetDao.countVnetsDedicatedToAccount(zoneId, owner.getAccountId());
|
||||
if (vnetsAllocatedToAccount < vnetsDedicatedToAccount) {
|
||||
throw new InvalidParameterValueException("Specified vlan " + vlanId + " doesn't belong" + " to the vlan range dedicated to the owner "
|
||||
+ owner.getAccountName());
|
||||
//don't allow to specify vlan tag used by physical network for dynamic vlan allocation
|
||||
if (!(bypassVlanOverlapCheck && (ntwkOff.getGuestType() == GuestType.Shared || isPrivateNetwork))
|
||||
&& _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(uri)).size() > 0) {
|
||||
throw new InvalidParameterValueException("The VLAN tag to use for new guest network, " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone "
|
||||
+ zone.getName());
|
||||
}
|
||||
if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) &&
|
||||
_dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
}
|
||||
if (!UuidUtils.isUuid(vlanId)) {
|
||||
// For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone
|
||||
if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) {
|
||||
if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
vlanId, zone));
|
||||
} else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
} else {
|
||||
final List<DataCenterVnetVO> dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri));
|
||||
//for the network that is created as part of private gateway,
|
||||
//the vnet is not coming from the data center vnet table, so the list can be empty
|
||||
if (!dcVnets.isEmpty()) {
|
||||
final DataCenterVnetVO dcVnet = dcVnets.get(0);
|
||||
// Fail network creation if specified vlan is dedicated to a different account
|
||||
if (dcVnet.getAccountGuestVlanMapId() != null) {
|
||||
final Long accountGuestVlanMapId = dcVnet.getAccountGuestVlanMapId();
|
||||
final AccountGuestVlanMapVO map = _accountGuestVlanMapDao.findById(accountGuestVlanMapId);
|
||||
if (map.getAccountId() != owner.getAccountId()) {
|
||||
throw new InvalidParameterValueException("Vlan " + vlanId + " is dedicated to a different account");
|
||||
}
|
||||
// Fail network creation if owner has a dedicated range of vlans but the specified vlan belongs to the system pool
|
||||
} else {
|
||||
final List<AccountGuestVlanMapVO> maps = _accountGuestVlanMapDao.listAccountGuestVlanMapsByAccount(owner.getAccountId());
|
||||
if (maps != null && !maps.isEmpty()) {
|
||||
final int vnetsAllocatedToAccount = _datacenterVnetDao.countVnetsAllocatedToAccount(zoneId, owner.getAccountId());
|
||||
final int vnetsDedicatedToAccount = _datacenterVnetDao.countVnetsDedicatedToAccount(zoneId, owner.getAccountId());
|
||||
if (vnetsAllocatedToAccount < vnetsDedicatedToAccount) {
|
||||
throw new InvalidParameterValueException("Specified vlan " + vlanId + " doesn't belong" + " to the vlan range dedicated to the owner "
|
||||
+ owner.getAccountName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or
|
||||
// shared network with same Vlan ID in the zone
|
||||
if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or
|
||||
// shared network with same Vlan ID in the zone
|
||||
if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If networkDomain is not specified, take it from the global configuration
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.Dns)) {
|
||||
final Map<Network.Capability, String> dnsCapabilities = _networkModel.getNetworkOfferingServiceCapabilities(_entityMgr.findById(NetworkOffering.class, networkOfferingId),
|
||||
Service.Dns);
|
||||
final String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification);
|
||||
if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) {
|
||||
if (networkDomain != null) {
|
||||
// TBD: NetworkOfferingId and zoneId. Send uuids instead.
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone));
|
||||
"Domain name change is not supported by network offering id=%d in zone %s",
|
||||
networkOfferingId, zone));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If networkDomain is not specified, take it from the global configuration
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.Dns)) {
|
||||
final Map<Network.Capability, String> dnsCapabilities = _networkModel.getNetworkOfferingServiceCapabilities(_entityMgr.findById(NetworkOffering.class, networkOfferingId),
|
||||
Service.Dns);
|
||||
final String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification);
|
||||
if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) {
|
||||
if (networkDomain != null) {
|
||||
// TBD: NetworkOfferingId and zoneId. Send uuids instead.
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Domain name change is not supported by network offering id=%d in zone %s",
|
||||
networkOfferingId, zone));
|
||||
}
|
||||
} else {
|
||||
if (networkDomain == null) {
|
||||
// 1) Get networkDomain from the corresponding account/domain/zone
|
||||
if (aclType == ACLType.Domain) {
|
||||
networkDomain = _networkModel.getDomainNetworkDomain(domainId, zoneId);
|
||||
} else if (aclType == ACLType.Account) {
|
||||
networkDomain = _networkModel.getAccountNetworkDomain(owner.getId(), zoneId);
|
||||
}
|
||||
|
||||
// 2) If null, generate networkDomain using domain suffix from the global config variables
|
||||
if (networkDomain == null) {
|
||||
networkDomain = "cs" + Long.toHexString(owner.getId()) + GuestDomainSuffix.valueIn(zoneId);
|
||||
}
|
||||
|
||||
} else {
|
||||
// validate network domain
|
||||
if (!NetUtils.verifyDomainName(networkDomain)) {
|
||||
throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. Each domain "
|
||||
+ "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', the digits '0' through '9', "
|
||||
+ "and the hyphen ('-'); can't start or end with \"-\"");
|
||||
if (networkDomain == null) {
|
||||
// 1) Get networkDomain from the corresponding account/domain/zone
|
||||
if (aclType == ACLType.Domain) {
|
||||
networkDomain = _networkModel.getDomainNetworkDomain(domainId, zoneId);
|
||||
} else if (aclType == ACLType.Account) {
|
||||
networkDomain = _networkModel.getAccountNetworkDomain(owner.getId(), zoneId);
|
||||
}
|
||||
|
||||
// 2) If null, generate networkDomain using domain suffix from the global config variables
|
||||
if (networkDomain == null) {
|
||||
networkDomain = "cs" + Long.toHexString(owner.getId()) + GuestDomainSuffix.valueIn(zoneId);
|
||||
}
|
||||
|
||||
} else {
|
||||
// validate network domain
|
||||
if (!NetUtils.verifyDomainName(networkDomain)) {
|
||||
throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. Each domain "
|
||||
+ "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', the digits '0' through '9', "
|
||||
+ "and the hyphen ('-'); can't start or end with \"-\"");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In Advance zone Cidr for Shared networks and Isolated networks w/o source nat service can't be NULL - 2.2.x
|
||||
// limitation, remove after we introduce support for multiple ip ranges
|
||||
// with different Cidrs for the same Shared network
|
||||
final boolean cidrRequired = zone.getNetworkType() == NetworkType.Advanced
|
||||
&& ntwkOff.getTrafficType() == TrafficType.Guest
|
||||
&& (ntwkOff.getGuestType() == GuestType.Shared || (ntwkOff.getGuestType() == GuestType.Isolated
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.Gateway)));
|
||||
if (cidr == null && ip6Cidr == null && cidrRequired) {
|
||||
if (ntwkOff.getGuestType() == GuestType.Shared) {
|
||||
throw new InvalidParameterValueException(String.format("Gateway/netmask are required when creating %s networks.", Network.GuestType.Shared));
|
||||
} else {
|
||||
throw new InvalidParameterValueException("gateway/netmask are required when create network of" + " type " + GuestType.Isolated + " with service " + Service.SourceNat.getName() + " disabled");
|
||||
// In Advance zone Cidr for Shared networks and Isolated networks w/o source nat service can't be NULL - 2.2.x
|
||||
// limitation, remove after we introduce support for multiple ip ranges
|
||||
// with different Cidrs for the same Shared network
|
||||
final boolean cidrRequired = zone.getNetworkType() == NetworkType.Advanced
|
||||
&& ntwkOff.getTrafficType() == TrafficType.Guest
|
||||
&& (ntwkOff.getGuestType() == GuestType.Shared || (ntwkOff.getGuestType() == GuestType.Isolated
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.Gateway)));
|
||||
if (cidr == null && ip6Cidr == null && cidrRequired) {
|
||||
if (ntwkOff.getGuestType() == GuestType.Shared) {
|
||||
throw new InvalidParameterValueException(String.format("Gateway/netmask are required when creating %s networks.", Network.GuestType.Shared));
|
||||
} else {
|
||||
throw new InvalidParameterValueException("gateway/netmask are required when create network of" + " type " + GuestType.Isolated + " with service " + Service.SourceNat.getName() + " disabled");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkL2OfferingServices(ntwkOff);
|
||||
checkL2OfferingServices(ntwkOff);
|
||||
|
||||
// No cidr can be specified in Basic zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic && cidr != null) {
|
||||
throw new InvalidParameterValueException("StartIp/endIp/gateway/netmask can't be specified for zone of type " + NetworkType.Basic);
|
||||
}
|
||||
// No cidr can be specified in Basic zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic && cidr != null) {
|
||||
throw new InvalidParameterValueException("StartIp/endIp/gateway/netmask can't be specified for zone of type " + NetworkType.Basic);
|
||||
}
|
||||
|
||||
// Check if cidr is RFC1918 compliant if the network is Guest Isolated for IPv4
|
||||
if (cidr != null && (ntwkOff.getGuestType() == Network.GuestType.Isolated && ntwkOff.getTrafficType() == TrafficType.Guest) &&
|
||||
!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) {
|
||||
// Check if cidr is RFC1918 compliant if the network is Guest Isolated for IPv4
|
||||
if (cidr != null && (ntwkOff.getGuestType() == Network.GuestType.Isolated && ntwkOff.getTrafficType() == TrafficType.Guest) &&
|
||||
!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) {
|
||||
throw new InvalidParameterValueException("Virtual Guest Cidr " + cidr + " is not RFC 1918 or 6598 compliant");
|
||||
}
|
||||
}
|
||||
|
||||
final String networkDomainFinal = networkDomain;
|
||||
final String vlanIdFinal = vlanId;
|
||||
|
|
@ -3003,75 +3003,75 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
final NetworkVO userNetwork = new NetworkVO();
|
||||
userNetwork.setNetworkDomain(networkDomainFinal);
|
||||
|
||||
if (cidr != null && gateway != null) {
|
||||
userNetwork.setCidr(cidr);
|
||||
userNetwork.setGateway(gateway);
|
||||
}
|
||||
if (cidr != null && gateway != null) {
|
||||
userNetwork.setCidr(cidr);
|
||||
userNetwork.setGateway(gateway);
|
||||
}
|
||||
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
userNetwork.setIp6Cidr(ip6Cidr);
|
||||
userNetwork.setIp6Gateway(ip6Gateway);
|
||||
}
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
userNetwork.setIp6Cidr(ip6Cidr);
|
||||
userNetwork.setIp6Gateway(ip6Gateway);
|
||||
}
|
||||
|
||||
if (externalId != null) {
|
||||
userNetwork.setExternalId(externalId);
|
||||
}
|
||||
if (externalId != null) {
|
||||
userNetwork.setExternalId(externalId);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(routerIp)) {
|
||||
userNetwork.setRouterIp(routerIp);
|
||||
}
|
||||
if (StringUtils.isNotBlank(routerIp)) {
|
||||
userNetwork.setRouterIp(routerIp);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(routerIpv6)) {
|
||||
userNetwork.setRouterIpv6(routerIpv6);
|
||||
}
|
||||
if (StringUtils.isNotBlank(routerIpv6)) {
|
||||
userNetwork.setRouterIpv6(routerIpv6);
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs != null) {
|
||||
if (vrIfaceMTUs.first() != null && vrIfaceMTUs.first() > 0) {
|
||||
userNetwork.setPublicMtu(vrIfaceMTUs.first());
|
||||
if (vrIfaceMTUs != null) {
|
||||
if (vrIfaceMTUs.first() != null && vrIfaceMTUs.first() > 0) {
|
||||
userNetwork.setPublicMtu(vrIfaceMTUs.first());
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs.second() != null && vrIfaceMTUs.second() > 0) {
|
||||
userNetwork.setPrivateMtu(vrIfaceMTUs.second());
|
||||
} else {
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs.second() != null && vrIfaceMTUs.second() > 0) {
|
||||
userNetwork.setPrivateMtu(vrIfaceMTUs.second());
|
||||
} else {
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (!GuestType.L2.equals(userNetwork.getGuestType())) {
|
||||
if (StringUtils.isNotBlank(ip4Dns1)) {
|
||||
userNetwork.setDns1(ip4Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip4Dns2)) {
|
||||
userNetwork.setDns2(ip4Dns2);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns1)) {
|
||||
userNetwork.setIp6Dns1(ip6Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns2)) {
|
||||
userNetwork.setIp6Dns2(ip6Dns2);
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanIdFinal != null) {
|
||||
if (isolatedPvlan == null) {
|
||||
URI uri = null;
|
||||
if (UuidUtils.isUuid(vlanIdFinal)) {
|
||||
//Logical router's UUID provided as VLAN_ID
|
||||
userNetwork.setVlanIdAsUUID(vlanIdFinal); //Set transient field
|
||||
} else {
|
||||
uri = encodeVlanIdIntoBroadcastUri(vlanIdFinal, pNtwk);
|
||||
if (!GuestType.L2.equals(userNetwork.getGuestType())) {
|
||||
if (StringUtils.isNotBlank(ip4Dns1)) {
|
||||
userNetwork.setDns1(ip4Dns1);
|
||||
}
|
||||
|
||||
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network pvlans in zone %s",
|
||||
vlanIdFinal, zone));
|
||||
if (StringUtils.isNotBlank(ip4Dns2)) {
|
||||
userNetwork.setDns2(ip4Dns2);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns1)) {
|
||||
userNetwork.setIp6Dns1(ip6Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns2)) {
|
||||
userNetwork.setIp6Dns2(ip6Dns2);
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanIdFinal != null) {
|
||||
if (isolatedPvlan == null) {
|
||||
URI uri = null;
|
||||
if (UuidUtils.isUuid(vlanIdFinal)) {
|
||||
//Logical router's UUID provided as VLAN_ID
|
||||
userNetwork.setVlanIdAsUUID(vlanIdFinal); //Set transient field
|
||||
} else {
|
||||
uri = encodeVlanIdIntoBroadcastUri(vlanIdFinal, pNtwk);
|
||||
}
|
||||
|
||||
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network pvlans in zone %s",
|
||||
vlanIdFinal, zone));
|
||||
}
|
||||
|
||||
userNetwork.setBroadcastUri(uri);
|
||||
if (!vlanIdFinal.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
|
|
@ -3115,8 +3115,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
}
|
||||
}
|
||||
|
||||
if (updateResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.network, isDisplayNetworkEnabled);
|
||||
if (isResourceCountUpdateNeeded(ntwkOff)) {
|
||||
changeAccountResourceCountOrRecalculateDomainResourceCount(owner.getAccountId(), domainId, isDisplayNetworkEnabled, true);
|
||||
}
|
||||
UsageEventUtils.publishNetworkCreation(network);
|
||||
|
||||
|
|
@ -3127,6 +3127,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
CallContext.current().setEventDetails("Network ID: " + network.getUuid());
|
||||
CallContext.current().putContextParameter(Network.class, network.getUuid());
|
||||
return network;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -3492,9 +3493,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
}
|
||||
|
||||
final NetworkOffering ntwkOff = _entityMgr.findById(NetworkOffering.class, networkFinal.getNetworkOfferingId());
|
||||
final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, networkFinal.getAclType());
|
||||
if (updateResourceCount) {
|
||||
_resourceLimitMgr.decrementResourceCount(networkFinal.getAccountId(), ResourceType.network, networkFinal.getDisplayNetwork());
|
||||
if (isResourceCountUpdateNeeded(ntwkOff)) {
|
||||
changeAccountResourceCountOrRecalculateDomainResourceCount(networkFinal.getAccountId(), networkFinal.getDomainId(), networkFinal.getDisplayNetwork(), false);
|
||||
}
|
||||
}
|
||||
return deletedVlans.second();
|
||||
|
|
@ -3517,6 +3517,23 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
return success;
|
||||
}
|
||||
|
||||
/**
|
||||
* If it is a shared network with {@link ACLType#Domain}, it will belong to account {@link Account#ACCOUNT_ID_SYSTEM} and the resources will be not incremented for the
|
||||
* domain. Therefore, we force the recalculation of the domain's resource count in this case. Otherwise, it will change the count for the account owner.
|
||||
* @param incrementAccountResourceCount If true, the account resource count will be incremented by 1; otherwise, it will decremented by 1.
|
||||
*/
|
||||
private void changeAccountResourceCountOrRecalculateDomainResourceCount(Long accountId, Long domainId, boolean displayNetwork, boolean incrementAccountResourceCount) {
|
||||
if (Account.ACCOUNT_ID_SYSTEM == accountId && ObjectUtils.isNotEmpty(domainId)) {
|
||||
_resourceLimitMgr.recalculateDomainResourceCount(domainId, ResourceType.network, null);
|
||||
} else {
|
||||
if (incrementAccountResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(accountId, ResourceType.network, displayNetwork);
|
||||
} else {
|
||||
_resourceLimitMgr.decrementResourceCount(accountId, ResourceType.network, displayNetwork);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void publishDeletedVlanRanges(List<VlanVO> deletedVlanRangeToPublish) {
|
||||
if (CollectionUtils.isNotEmpty(deletedVlanRangeToPublish)) {
|
||||
for (VlanVO vlan : deletedVlanRangeToPublish) {
|
||||
|
|
@ -3526,10 +3543,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean resourceCountNeedsUpdate(final NetworkOffering ntwkOff, final ACLType aclType) {
|
||||
//Update resource count only for Isolated account specific non-system networks
|
||||
final boolean updateResourceCount = ntwkOff.getGuestType() == GuestType.Isolated && !ntwkOff.isSystemOnly() && aclType == ACLType.Account;
|
||||
return updateResourceCount;
|
||||
public boolean isResourceCountUpdateNeeded(NetworkOffering networkOffering) {
|
||||
return !networkOffering.isSystemOnly();
|
||||
}
|
||||
|
||||
protected Pair<Boolean, List<VlanVO>> deleteVlansInNetwork(final NetworkVO network, final long userId, final Account callerAccount) {
|
||||
|
|
@ -4924,6 +4939,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
return new ConfigKey<?>[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, DeniedRoutes,
|
||||
GuestDomainSuffix, NetworkThrottlingRate, MinVRVersion,
|
||||
PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RollingRestartEnabled,
|
||||
TUNGSTEN_ENABLED, NSX_ENABLED, NETRIS_ENABLED, NETWORK_LB_HAPROXY_MAX_CONN};
|
||||
TUNGSTEN_ENABLED, NSX_ENABLED, NETRIS_ENABLED, NETWORK_LB_HAPROXY_MAX_CONN,
|
||||
NETWORK_LB_HAPROXY_IDLE_TIMEOUT};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@ import com.cloud.agent.AgentManager;
|
|||
import com.cloud.deploy.DeploymentClusterPlanner;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import com.cloud.resourcelimit.ReservationHelper;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
|
|
@ -84,6 +85,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
|
|||
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
|
||||
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
|
||||
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
|
||||
import org.apache.cloudstack.resourcelimit.Reserver;
|
||||
import org.apache.cloudstack.secret.PassphraseVO;
|
||||
import org.apache.cloudstack.secret.dao.PassphraseDao;
|
||||
import org.apache.cloudstack.snapshot.SnapshotHelper;
|
||||
|
|
@ -994,7 +996,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
@ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true)
|
||||
@Override
|
||||
public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner,
|
||||
Long deviceId) {
|
||||
Long deviceId, boolean incrementResourceCount) {
|
||||
if (size == null) {
|
||||
size = offering.getDiskSize();
|
||||
} else {
|
||||
|
|
@ -1033,7 +1035,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
saveVolumeDetails(offering.getId(), vol.getId());
|
||||
|
||||
// Save usage event and update resource count for user vm volumes
|
||||
if (vm.getType() == VirtualMachine.Type.User) {
|
||||
if (vm.getType() == VirtualMachine.Type.User && incrementResourceCount) {
|
||||
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size,
|
||||
Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume());
|
||||
_resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering);
|
||||
|
|
@ -2107,14 +2109,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
template == null ? null : template.getSize(),
|
||||
vol.getPassphraseId() != null);
|
||||
|
||||
if (newSize != vol.getSize()) {
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
|
||||
if (newSize == vol.getSize()) {
|
||||
return;
|
||||
}
|
||||
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
|
||||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
VMInstanceVO vm = vol.getInstanceId() != null ? vmInstanceDao.findById(vol.getInstanceId()) : null;
|
||||
if (vm == null || vm.getType() == VirtualMachine.Type.User) {
|
||||
// Update resource count for user vm volumes when volume is attached
|
||||
if (newSize > vol.getSize()) {
|
||||
_resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
|
||||
vol.isDisplay(), newSize - vol.getSize(), diskOffering);
|
||||
vol.isDisplay(), newSize - vol.getSize(), diskOffering, reservations);
|
||||
_resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
|
||||
newSize - vol.getSize(), diskOffering);
|
||||
} else {
|
||||
|
|
@ -2122,9 +2130,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
vol.getSize() - newSize, diskOffering);
|
||||
}
|
||||
}
|
||||
vol.setSize(newSize);
|
||||
_volsDao.persist(vol);
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
}
|
||||
vol.setSize(newSize);
|
||||
_volsDao.persist(vol);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -27,6 +27,6 @@ public interface AccountVlanMapDao extends GenericDao<AccountVlanMapVO, Long> {
|
|||
|
||||
public List<AccountVlanMapVO> listAccountVlanMapsByVlan(long vlanDbId);
|
||||
|
||||
public AccountVlanMapVO findAccountVlanMap(long accountId, long vlanDbId);
|
||||
public AccountVlanMapVO findAccountVlanMap(Long accountId, long vlanDbId);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -48,9 +48,9 @@ public class AccountVlanMapDaoImpl extends GenericDaoBase<AccountVlanMapVO, Long
|
|||
}
|
||||
|
||||
@Override
|
||||
public AccountVlanMapVO findAccountVlanMap(long accountId, long vlanDbId) {
|
||||
public AccountVlanMapVO findAccountVlanMap(Long accountId, long vlanDbId) {
|
||||
SearchCriteria<AccountVlanMapVO> sc = AccountVlanSearch.create();
|
||||
sc.setParameters("accountId", accountId);
|
||||
sc.setParametersIfNotNull("accountId", accountId);
|
||||
sc.setParameters("vlanDbId", vlanDbId);
|
||||
return findOneIncludingRemovedBy(sc);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,5 +24,5 @@ import com.cloud.utils.db.GenericDao;
|
|||
public interface DomainVlanMapDao extends GenericDao<DomainVlanMapVO, Long> {
|
||||
public List<DomainVlanMapVO> listDomainVlanMapsByDomain(long domainId);
|
||||
public List<DomainVlanMapVO> listDomainVlanMapsByVlan(long vlanDbId);
|
||||
public DomainVlanMapVO findDomainVlanMap(long domainId, long vlanDbId);
|
||||
public DomainVlanMapVO findDomainVlanMap(Long domainId, long vlanDbId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -46,9 +46,9 @@ public class DomainVlanMapDaoImpl extends GenericDaoBase<DomainVlanMapVO, Long>
|
|||
}
|
||||
|
||||
@Override
|
||||
public DomainVlanMapVO findDomainVlanMap(long domainId, long vlanDbId) {
|
||||
public DomainVlanMapVO findDomainVlanMap(Long domainId, long vlanDbId) {
|
||||
SearchCriteria<DomainVlanMapVO> sc = DomainVlanSearch.create();
|
||||
sc.setParameters("domainId", domainId);
|
||||
sc.setParametersIfNotNull("domainId", domainId);
|
||||
sc.setParameters("vlanDbId", vlanDbId);
|
||||
return findOneIncludingRemovedBy(sc);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
// under the License.
|
||||
package com.cloud.upgrade;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
|
@ -96,7 +97,9 @@ public final class DatabaseVersionHierarchy {
|
|||
// we cannot find the version specified, so get the
|
||||
// most recent one immediately before this version
|
||||
if (!contains(fromVersion)) {
|
||||
return getPath(getRecentVersion(fromVersion), toVersion);
|
||||
DbUpgrade[] dbUpgrades = getPath(getRecentVersion(fromVersion), toVersion);
|
||||
return Arrays.stream(dbUpgrades).filter(up -> CloudStackVersion.compare(up.getUpgradedVersion(), fromVersion.toString()) > 0)
|
||||
.toArray(DbUpgrade[]::new);
|
||||
}
|
||||
|
||||
final Predicate<? super VersionNode> predicate;
|
||||
|
|
|
|||
|
|
@ -57,8 +57,4 @@ public class Upgrade42020to42030 extends DbUpgradeAbstractImpl implements DbUpgr
|
|||
public InputStream[] getCleanupScripts() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateSystemVmTemplates(Connection conn) {
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import java.util.Date;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.utils.Pair;
|
||||
|
|
@ -192,4 +193,8 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
int getVmCountByOfferingNotInDomain(Long serviceOfferingId, List<Long> domainIds);
|
||||
|
||||
List<VMInstanceVO> listByIdsIncludingRemoved(List<Long> ids);
|
||||
|
||||
List<VMInstanceVO> listDeleteProtectedVmsByAccountId(long accountId);
|
||||
|
||||
List<VMInstanceVO> listDeleteProtectedVmsByDomainIds(Set<Long> domainIds);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,11 +25,13 @@ import java.util.Date;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
|
|
@ -106,6 +108,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
protected SearchBuilder<VMInstanceVO> IdsPowerStateSelectSearch;
|
||||
GenericSearchBuilder<VMInstanceVO, Integer> CountByOfferingId;
|
||||
GenericSearchBuilder<VMInstanceVO, Integer> CountUserVmNotInDomain;
|
||||
SearchBuilder<VMInstanceVO> DeleteProtectedVmSearchByAccount;
|
||||
SearchBuilder<VMInstanceVO> DeleteProtectedVmSearchByDomainIds;
|
||||
|
||||
@Inject
|
||||
ResourceTagDao tagsDao;
|
||||
|
|
@ -368,6 +372,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
CountUserVmNotInDomain.and("domainIdsNotIn", CountUserVmNotInDomain.entity().getDomainId(), Op.NIN);
|
||||
CountUserVmNotInDomain.done();
|
||||
|
||||
DeleteProtectedVmSearchByAccount = createSearchBuilder();
|
||||
DeleteProtectedVmSearchByAccount.selectFields(DeleteProtectedVmSearchByAccount.entity().getUuid());
|
||||
DeleteProtectedVmSearchByAccount.and(ApiConstants.ACCOUNT_ID, DeleteProtectedVmSearchByAccount.entity().getAccountId(), Op.EQ);
|
||||
DeleteProtectedVmSearchByAccount.and(ApiConstants.DELETE_PROTECTION, DeleteProtectedVmSearchByAccount.entity().isDeleteProtection(), Op.EQ);
|
||||
DeleteProtectedVmSearchByAccount.and(ApiConstants.REMOVED, DeleteProtectedVmSearchByAccount.entity().getRemoved(), Op.NULL);
|
||||
DeleteProtectedVmSearchByAccount.done();
|
||||
|
||||
DeleteProtectedVmSearchByDomainIds = createSearchBuilder();
|
||||
DeleteProtectedVmSearchByDomainIds.selectFields(DeleteProtectedVmSearchByDomainIds.entity().getUuid());
|
||||
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.DOMAIN_IDS, DeleteProtectedVmSearchByDomainIds.entity().getDomainId(), Op.IN);
|
||||
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.DELETE_PROTECTION, DeleteProtectedVmSearchByDomainIds.entity().isDeleteProtection(), Op.EQ);
|
||||
DeleteProtectedVmSearchByDomainIds.and(ApiConstants.REMOVED, DeleteProtectedVmSearchByDomainIds.entity().getRemoved(), Op.NULL);
|
||||
DeleteProtectedVmSearchByDomainIds.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -1296,4 +1313,22 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("ids", ids.toArray());
|
||||
return listIncludingRemovedBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listDeleteProtectedVmsByAccountId(long accountId) {
|
||||
SearchCriteria<VMInstanceVO> sc = DeleteProtectedVmSearchByAccount.create();
|
||||
sc.setParameters(ApiConstants.ACCOUNT_ID, accountId);
|
||||
sc.setParameters(ApiConstants.DELETE_PROTECTION, true);
|
||||
Filter filter = new Filter(VMInstanceVO.class, null, false, 0L, 10L);
|
||||
return listBy(sc, filter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMInstanceVO> listDeleteProtectedVmsByDomainIds(Set<Long> domainIds) {
|
||||
SearchCriteria<VMInstanceVO> sc = DeleteProtectedVmSearchByDomainIds.create();
|
||||
sc.setParameters(ApiConstants.DOMAIN_IDS, domainIds.toArray());
|
||||
sc.setParameters(ApiConstants.DELETE_PROTECTION, true);
|
||||
Filter filter = new Filter(VMInstanceVO.class, null, false, 0L, 10L);
|
||||
return listBy(sc, filter);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,20 +21,14 @@ import java.util.Date;
|
|||
import java.util.List;
|
||||
|
||||
import com.cloud.utils.DateUtil;
|
||||
import org.apache.cloudstack.api.response.BackupScheduleResponse;
|
||||
import org.apache.cloudstack.backup.BackupSchedule;
|
||||
import org.apache.cloudstack.backup.BackupScheduleVO;
|
||||
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
public interface BackupScheduleDao extends GenericDao<BackupScheduleVO, Long> {
|
||||
BackupScheduleVO findByVM(Long vmId);
|
||||
|
||||
List<BackupScheduleVO> listByVM(Long vmId);
|
||||
|
||||
BackupScheduleVO findByVMAndIntervalType(Long vmId, DateUtil.IntervalType intervalType);
|
||||
|
||||
List<BackupScheduleVO> getSchedulesToExecute(Date currentTimestamp);
|
||||
|
||||
BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,28 +17,23 @@
|
|||
|
||||
package org.apache.cloudstack.backup.dao;
|
||||
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.utils.DateUtil;
|
||||
import org.apache.cloudstack.api.response.BackupScheduleResponse;
|
||||
import org.apache.cloudstack.backup.BackupSchedule;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import org.apache.cloudstack.backup.BackupScheduleVO;
|
||||
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long> implements BackupScheduleDao {
|
||||
|
||||
@Inject
|
||||
VMInstanceDao vmInstanceDao;
|
||||
|
||||
private SearchBuilder<BackupScheduleVO> backupScheduleSearch;
|
||||
private SearchBuilder<BackupScheduleVO> executableSchedulesSearch;
|
||||
|
||||
|
|
@ -59,13 +54,6 @@ public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long
|
|||
executableSchedulesSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
public BackupScheduleVO findByVM(Long vmId) {
|
||||
SearchCriteria<BackupScheduleVO> sc = backupScheduleSearch.create();
|
||||
sc.setParameters("vm_id", vmId);
|
||||
return findOneBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BackupScheduleVO> listByVM(Long vmId) {
|
||||
SearchCriteria<BackupScheduleVO> sc = backupScheduleSearch.create();
|
||||
|
|
@ -88,21 +76,19 @@ public class BackupScheduleDaoImpl extends GenericDaoBase<BackupScheduleVO, Long
|
|||
return listBy(sc);
|
||||
}
|
||||
|
||||
@DB
|
||||
@Override
|
||||
public BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule) {
|
||||
VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(schedule.getVmId());
|
||||
BackupScheduleResponse response = new BackupScheduleResponse();
|
||||
response.setId(schedule.getUuid());
|
||||
response.setVmId(vm.getUuid());
|
||||
response.setVmName(vm.getHostName());
|
||||
response.setIntervalType(schedule.getScheduleType());
|
||||
response.setSchedule(schedule.getSchedule());
|
||||
response.setTimezone(schedule.getTimezone());
|
||||
response.setMaxBackups(schedule.getMaxBackups());
|
||||
if (schedule.getQuiesceVM() != null) {
|
||||
response.setQuiesceVM(schedule.getQuiesceVM());
|
||||
public boolean remove(Long id) {
|
||||
String sql = "UPDATE backups SET backup_schedule_id = NULL WHERE backup_schedule_id = ?";
|
||||
TransactionLegacy transaction = TransactionLegacy.currentTxn();
|
||||
try {
|
||||
PreparedStatement preparedStatement = transaction.prepareAutoCloseStatement(sql);
|
||||
preparedStatement.setLong(1, id);
|
||||
preparedStatement.executeUpdate();
|
||||
return super.remove(id);
|
||||
} catch (SQLException e) {
|
||||
logger.warn("Unable to clean up backup schedules references from the backups table.", e);
|
||||
return false;
|
||||
}
|
||||
response.setObjectName("backupschedule");
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account', 'api_key_access', 'boolean
|
|||
CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" ');
|
||||
|
||||
-- Create a new group for Usage Server related configurations
|
||||
INSERT INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9);
|
||||
INSERT IGNORE INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9);
|
||||
UPDATE `cloud`.`configuration_subgroup` set `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server'), `precedence` = 1 WHERE `name`='Usage';
|
||||
UPDATE `cloud`.`configuration` SET `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server') where `subgroup_id` = (SELECT `id` FROM `cloud`.`configuration_subgroup` WHERE `name` = 'Usage');
|
||||
|
||||
|
|
|
|||
|
|
@ -34,7 +34,28 @@ UPDATE `cloud`.`alert` SET type = 34 WHERE name = 'ALERT.VR.PRIVATE.IFACE.MTU';
|
|||
-- Update configuration 'kvm.ssh.to.agent' description and is_dynamic fields
|
||||
UPDATE `cloud`.`configuration` SET description = 'True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations', is_dynamic = 1 WHERE name = 'kvm.ssh.to.agent';
|
||||
|
||||
-- Sanitize legacy network-level addressing fields for Public networks
|
||||
UPDATE `cloud`.`networks`
|
||||
SET `broadcast_uri` = NULL,
|
||||
`gateway` = NULL,
|
||||
`cidr` = NULL,
|
||||
`ip6_gateway` = NULL,
|
||||
`ip6_cidr` = NULL
|
||||
WHERE `traffic_type` = 'Public';
|
||||
|
||||
UPDATE `cloud`.`vm_template` SET guest_os_id = 99 WHERE name = 'kvm-default-vm-import-dummy-template';
|
||||
|
||||
-- Update existing vm_template records with NULL type to "USER"
|
||||
UPDATE `cloud`.`vm_template` SET `type` = 'USER' WHERE `type` IS NULL;
|
||||
|
||||
-- remove unused config item
|
||||
DELETE FROM `cloud`.`configuration` WHERE name = 'consoleproxy.cmd.port';
|
||||
|
||||
-- Drops the unused "backup_interval_type" column of the "cloud.backups" table
|
||||
ALTER TABLE `cloud`.`backups` DROP COLUMN `backup_interval_type`;
|
||||
|
||||
-- Update `user.password.reset.mail.template` configuration value to match new logic
|
||||
UPDATE `cloud`.`configuration`
|
||||
SET value = CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', '{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team')
|
||||
WHERE name = 'user.password.reset.mail.template'
|
||||
AND value IN (CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', 'http://{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team'), CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', '{{{domainUrl}}}{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team'));
|
||||
|
|
|
|||
|
|
@ -79,6 +79,7 @@ SELECT
|
|||
`vm_template`.`format` AS `template_format`,
|
||||
`vm_template`.`display_text` AS `template_display_text`,
|
||||
`vm_template`.`enable_password` AS `password_enabled`,
|
||||
`vm_template`.`extension_id` AS `template_extension_id`,
|
||||
`iso`.`id` AS `iso_id`,
|
||||
`iso`.`uuid` AS `iso_uuid`,
|
||||
`iso`.`name` AS `iso_name`,
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import com.cloud.upgrade.dao.Upgrade41120to41130;
|
|||
import com.cloud.upgrade.dao.Upgrade41120to41200;
|
||||
import com.cloud.upgrade.dao.Upgrade41510to41520;
|
||||
import com.cloud.upgrade.dao.Upgrade41610to41700;
|
||||
import com.cloud.upgrade.dao.Upgrade42010to42100;
|
||||
import com.cloud.upgrade.dao.Upgrade452to453;
|
||||
import com.cloud.upgrade.dao.Upgrade453to460;
|
||||
import com.cloud.upgrade.dao.Upgrade460to461;
|
||||
|
|
@ -380,4 +381,23 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertFalse("DatabaseUpgradeChecker should not be a standalone component", checker.isStandalone());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCalculateUpgradePath42010to42100() {
|
||||
|
||||
final CloudStackVersion dbVersion = CloudStackVersion.parse("4.20.1.0");
|
||||
assertNotNull(dbVersion);
|
||||
|
||||
final CloudStackVersion currentVersion = CloudStackVersion.parse("4.21.0.0");
|
||||
assertNotNull(currentVersion);
|
||||
|
||||
final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker();
|
||||
final DbUpgrade[] upgrades = checker.calculateUpgradePath(dbVersion, currentVersion);
|
||||
|
||||
assertNotNull(upgrades);
|
||||
assertEquals(1, upgrades.length);
|
||||
assertTrue(upgrades[0] instanceof Upgrade42010to42100);
|
||||
|
||||
assertArrayEquals(new String[]{"4.20.1.0", "4.21.0.0"}, upgrades[0].getUpgradableVersionRange());
|
||||
assertEquals(currentVersion.toString(), upgrades[0].getUpgradedVersion());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
FreezeThawVMAnswer freezeAnswer = null;
|
||||
FreezeThawVMCommand thawCmd = null;
|
||||
FreezeThawVMAnswer thawAnswer = null;
|
||||
List<SnapshotInfo> forRollback = new ArrayList<>();
|
||||
List<SnapshotInfo> snapshotsForRollback = new ArrayList<>();
|
||||
long startFreeze = 0;
|
||||
try {
|
||||
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
|
||||
|
|
@ -165,7 +165,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
logger.info("The virtual machine is frozen");
|
||||
for (VolumeInfo vol : vinfos) {
|
||||
long startSnapshtot = System.nanoTime();
|
||||
SnapshotInfo snapInfo = createDiskSnapshot(vmSnapshot, forRollback, vol);
|
||||
SnapshotInfo snapInfo = createDiskSnapshot(vmSnapshot, snapshotsForRollback, vol);
|
||||
|
||||
if (snapInfo == null) {
|
||||
thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd);
|
||||
|
|
@ -222,7 +222,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
}
|
||||
}
|
||||
if (!result) {
|
||||
for (SnapshotInfo snapshotInfo : forRollback) {
|
||||
for (SnapshotInfo snapshotInfo : snapshotsForRollback) {
|
||||
rollbackDiskSnapshot(snapshotInfo);
|
||||
}
|
||||
try {
|
||||
|
|
@ -395,10 +395,16 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
|
||||
//Rollback if one of disks snapshot fails
|
||||
protected void rollbackDiskSnapshot(SnapshotInfo snapshotInfo) {
|
||||
if (snapshotInfo == null) {
|
||||
return;
|
||||
}
|
||||
Long snapshotID = snapshotInfo.getId();
|
||||
SnapshotVO snapshot = snapshotDao.findById(snapshotID);
|
||||
if (snapshot == null) {
|
||||
return;
|
||||
}
|
||||
deleteSnapshotByStrategy(snapshot);
|
||||
logger.debug("Rollback is executed: deleting snapshot with id:" + snapshotID);
|
||||
logger.debug("Rollback is executed: deleting snapshot with id: {}", snapshotID);
|
||||
}
|
||||
|
||||
protected void deleteSnapshotByStrategy(SnapshotVO snapshot) {
|
||||
|
|
@ -441,7 +447,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
}
|
||||
}
|
||||
|
||||
protected SnapshotInfo createDiskSnapshot(VMSnapshot vmSnapshot, List<SnapshotInfo> forRollback, VolumeInfo vol) {
|
||||
protected SnapshotInfo createDiskSnapshot(VMSnapshot vmSnapshot, List<SnapshotInfo> snapshotsForRollback, VolumeInfo vol) {
|
||||
String snapshotName = vmSnapshot.getId() + "_" + vol.getUuid();
|
||||
SnapshotVO snapshot = new SnapshotVO(vol.getDataCenterId(), vol.getAccountId(), vol.getDomainId(), vol.getId(), vol.getDiskOfferingId(),
|
||||
snapshotName, (short) Snapshot.Type.GROUP.ordinal(), Snapshot.Type.GROUP.name(), vol.getSize(), vol.getMinIops(), vol.getMaxIops(), Hypervisor.HypervisorType.KVM, null);
|
||||
|
|
@ -455,6 +461,7 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
vol.addPayload(setPayload(vol, snapshot, quiescevm));
|
||||
SnapshotInfo snapshotInfo = snapshotDataFactory.getSnapshot(snapshot.getId(), vol.getDataStore());
|
||||
snapshotInfo.addPayload(vol.getpayload());
|
||||
snapshotsForRollback.add(snapshotInfo);
|
||||
SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshotInfo, SnapshotOperation.TAKE);
|
||||
if (snapshotStrategy == null) {
|
||||
throw new CloudRuntimeException("Could not find strategy for snapshot uuid:" + snapshotInfo.getUuid());
|
||||
|
|
@ -462,8 +469,6 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
snapshotInfo = snapshotStrategy.takeSnapshot(snapshotInfo);
|
||||
if (snapshotInfo == null) {
|
||||
throw new CloudRuntimeException("Failed to create snapshot");
|
||||
} else {
|
||||
forRollback.add(snapshotInfo);
|
||||
}
|
||||
vmSnapshotDetailsDao.persist(new VMSnapshotDetailsVO(vmSnapshot.getId(), STORAGE_SNAPSHOT, String.valueOf(snapshot.getId()), true));
|
||||
snapshotInfo.markBackedUp();
|
||||
|
|
|
|||
|
|
@ -155,7 +155,7 @@ public class VMSnapshotStrategyKVMTest extends TestCase{
|
|||
@Test
|
||||
public void testCreateDiskSnapshotBasedOnStrategy() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = Mockito.mock(VMSnapshotVO.class);
|
||||
List<SnapshotInfo> forRollback = new ArrayList<>();
|
||||
List<SnapshotInfo> snapshotsForRollback = new ArrayList<>();
|
||||
VolumeInfo vol = Mockito.mock(VolumeInfo.class);
|
||||
SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class);
|
||||
SnapshotStrategy strategy = Mockito.mock(SnapshotStrategy.class);
|
||||
|
|
@ -179,7 +179,7 @@ public class VMSnapshotStrategyKVMTest extends TestCase{
|
|||
VMSnapshotDetailsVO vmDetails = new VMSnapshotDetailsVO(vmSnapshot.getId(), volUuid, String.valueOf(snapshot.getId()), false);
|
||||
when(vmSnapshotDetailsDao.persist(any())).thenReturn(vmDetails);
|
||||
|
||||
info = vmStrategy.createDiskSnapshot(vmSnapshot, forRollback, vol);
|
||||
info = vmStrategy.createDiskSnapshot(vmSnapshot, snapshotsForRollback, vol);
|
||||
assertNotNull(info);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -145,10 +145,10 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
storageType = "shared";
|
||||
}
|
||||
|
||||
logger.debug(String.format(
|
||||
"Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] and ID [%s], is a [%s] storage.",
|
||||
logger.info(
|
||||
"Filtering storage pools by capacity type [{}] as the first storage pool of the list, with name [{}] and ID [{}], is a [{}] storage.",
|
||||
capacityType, storagePool.getName(), storagePool.getUuid(), storageType
|
||||
));
|
||||
);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
|
||||
List<Long> poolIdsByCapacity = result.first();
|
||||
|
|
@ -185,7 +185,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
Long clusterId = plan.getClusterId();
|
||||
|
||||
List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
|
||||
logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
|
||||
logger.debug("List of pools in ascending order of number of volumes for account [{}] is [{}].", account, poolIdsByVolCount);
|
||||
|
||||
// now filter the given list of Pools by this ordered list
|
||||
Map<Long, StoragePool> poolMap = new HashMap<>();
|
||||
|
|
@ -206,16 +206,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
|
||||
@Override
|
||||
public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("reordering pools");
|
||||
}
|
||||
if (pools == null) {
|
||||
logger.trace("There are no pools to reorder; returning null.");
|
||||
logger.info("There are no pools to reorder.");
|
||||
return null;
|
||||
}
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(String.format("reordering %d pools", pools.size()));
|
||||
}
|
||||
logger.info("Reordering [{}] pools", pools.size());
|
||||
Account account = null;
|
||||
if (vmProfile.getVirtualMachine() != null) {
|
||||
account = vmProfile.getOwner();
|
||||
|
|
@ -224,9 +219,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
|
||||
|
||||
if (vmProfile.getVirtualMachine() == null) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
|
||||
}
|
||||
logger.info("The VM is null, skipping pool reordering by disk provisioning type.");
|
||||
return pools;
|
||||
}
|
||||
|
||||
|
|
@ -240,14 +233,10 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
|
||||
List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
|
||||
String volumeAllocationAlgorithm = VolumeOrchestrationService.VolumeAllocationAlgorithm.value();
|
||||
logger.debug("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
|
||||
logger.info("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
|
||||
if (volumeAllocationAlgorithm.equals("random") || (account == null)) {
|
||||
reorderRandomPools(pools);
|
||||
} else if (StringUtils.equalsAny(volumeAllocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Using reordering algorithm {}", volumeAllocationAlgorithm);
|
||||
}
|
||||
|
||||
if (volumeAllocationAlgorithm.equals("userdispersing")) {
|
||||
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
} else {
|
||||
|
|
@ -259,16 +248,15 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
|
||||
void reorderRandomPools(List<StoragePool> pools) {
|
||||
StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: ");
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
|
||||
}
|
||||
StorageUtil.traceLogStoragePools(pools, logger, "pools to shuffle: ");
|
||||
logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
|
||||
logger.debug("Pools to shuffle: [{}]", pools);
|
||||
Collections.shuffle(pools, secureRandom);
|
||||
StorageUtil.traceLogStoragePools(pools, logger, "shuffled list of pools to choose from: ");
|
||||
logger.debug("Shuffled list of pools to choose from: [{}]", pools);
|
||||
}
|
||||
|
||||
private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
|
||||
if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
|
||||
logger.info("Reordering [{}] pools by disk provisioning type [{}].", pools.size(), diskProfile.getProvisioningType());
|
||||
List<StoragePool> reorderedPools = new ArrayList<>();
|
||||
int preferredIndex = 0;
|
||||
for (StoragePool pool : pools) {
|
||||
|
|
@ -282,22 +270,28 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
reorderedPools.add(preferredIndex++, pool);
|
||||
}
|
||||
}
|
||||
logger.debug("Reordered list of pools by disk provisioning type [{}]: [{}]", diskProfile.getProvisioningType(), reorderedPools);
|
||||
return reorderedPools;
|
||||
} else {
|
||||
if (diskProfile == null) {
|
||||
logger.info("Reordering pools by disk provisioning type wasn't necessary, since no disk profile was found.");
|
||||
} else {
|
||||
logger.debug("Reordering pools by disk provisioning type wasn't necessary, since the provisioning type is [{}].", diskProfile.getProvisioningType());
|
||||
}
|
||||
return pools;
|
||||
}
|
||||
}
|
||||
|
||||
protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
|
||||
logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh));
|
||||
logger.debug("Checking if storage pool [{}] is suitable to disk [{}].", pool, dskCh);
|
||||
if (avoid.shouldAvoid(pool)) {
|
||||
logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh));
|
||||
logger.debug("StoragePool [{}] is in avoid set, skipping this pool to allocation of disk [{}].", pool, dskCh);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType()));
|
||||
logger.debug("Storage pool type '[{}]' doesn't support encryption required for volume, skipping this pool", pool.getPoolType());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
@ -319,8 +313,8 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
}
|
||||
|
||||
if (!checkDiskProvisioningSupport(dskCh, pool)) {
|
||||
logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
|
||||
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType")));
|
||||
logger.debug("Storage pool [{}] does not have support to disk provisioning of disk [{}].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
|
||||
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"));
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -332,7 +326,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
HostVO plannedHost = hostDao.findById(plan.getHostId());
|
||||
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
|
||||
logger.debug("StoragePool [{}] and host [{}] does not have matching storage access groups", pool, plannedHost);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
@ -343,13 +337,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
if (!isTempVolume) {
|
||||
volume = volumeDao.findById(dskCh.getVolumeId());
|
||||
if (!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) {
|
||||
logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume));
|
||||
logger.debug("Pool [{}] is not compatible with volume [{}], skipping it.", pool, volume);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
|
||||
logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume));
|
||||
logger.debug("Cannot allocate pool [{}] to volume [{}] because the max number of managed clustered filesystems has been exceeded.", pool, volume);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -358,13 +352,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh));
|
||||
if (dskCh.getHypervisorType() == HypervisorType.VMware) {
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) {
|
||||
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume));
|
||||
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is a parent datastore cluster.", pool, volume);
|
||||
return false;
|
||||
}
|
||||
if (pool.getParent() != 0L) {
|
||||
StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent());
|
||||
if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) {
|
||||
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up));
|
||||
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not in [{}] state.", datastoreCluster, volume, StoragePoolStatus.Up);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
@ -374,11 +368,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
storageMgr.isStoragePoolCompliantWithStoragePolicy(dskCh.getDiskOfferingId(), pool) :
|
||||
storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool);
|
||||
if (!isStoragePoolStoragePolicyCompliance) {
|
||||
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume));
|
||||
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not compliant with the storage policy required by the volume.", pool, volume);
|
||||
return false;
|
||||
}
|
||||
} catch (StorageUnavailableException e) {
|
||||
logger.warn(String.format("Could not verify storage policy compliance against storage pool %s due to exception %s", pool.getUuid(), e.getMessage()));
|
||||
logger.warn("Could not verify storage policy compliance against storage pool [{}] due to exception [{}]", pool.getUuid(), e.getMessage());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
@ -427,19 +421,19 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
|
||||
List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
|
||||
if (disabledPools != null && !disabledPools.isEmpty()) {
|
||||
logger.trace(String.format("Ignoring pools [%s] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools)));
|
||||
logger.trace("Ignoring pools [{}] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools));
|
||||
}
|
||||
}
|
||||
|
||||
protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
|
||||
boolean bypassStorageTypeCheck){
|
||||
logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and "
|
||||
+ "deployment plan [%s]. Returning up to [%d] and bypassStorageTypeCheck [%s].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck));
|
||||
logger.trace("[{}] is looking for storage pools that match the VM's disk profile [{}], virtual machine profile [{}] and "
|
||||
+ "deployment plan [{}]. Returning up to [{}] and bypassStorageTypeCheck [{}].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);
|
||||
}
|
||||
|
||||
protected void logEndOfSearch(List<StoragePool> storagePoolList) {
|
||||
logger.debug(String.format("%s is returning [%s] suitable storage pools [%s].", this.getClass().getSimpleName(), storagePoolList.size(),
|
||||
Arrays.toString(storagePoolList.toArray())));
|
||||
logger.debug("[{}] is returning [{}] suitable storage pools [{}].", this.getClass().getSimpleName(), storagePoolList.size(),
|
||||
Arrays.toString(storagePoolList.toArray()));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -230,8 +230,10 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
|||
updateBuilder.setJobId(answer.getJobId());
|
||||
updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
|
||||
updateBuilder.setInstallPath(answer.getInstallPath());
|
||||
updateBuilder.setSize(answer.getTemplateSize());
|
||||
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
|
||||
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
|
||||
updateBuilder.setSize(answer.getTemplateSize());
|
||||
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
|
||||
}
|
||||
_templateStoreDao.update(tmpltStoreVO.getId(), updateBuilder);
|
||||
// update size in vm_template table
|
||||
VMTemplateVO tmlptUpdater = _templateDao.createForUpdate();
|
||||
|
|
@ -241,8 +243,7 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
|||
|
||||
AsyncCompletionCallback<CreateCmdResult> caller = context.getParentCallback();
|
||||
|
||||
if (answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR ||
|
||||
answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.ABANDONED || answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.UNKNOWN) {
|
||||
if (VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
|
||||
CreateCmdResult result = new CreateCmdResult(null, null);
|
||||
result.setSuccess(false);
|
||||
result.setResult(answer.getErrorString());
|
||||
|
|
@ -285,19 +286,22 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
|
|||
updateBuilder.setJobId(answer.getJobId());
|
||||
updateBuilder.setLocalDownloadPath(answer.getDownloadPath());
|
||||
updateBuilder.setInstallPath(answer.getInstallPath());
|
||||
updateBuilder.setSize(answer.getTemplateSize());
|
||||
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
|
||||
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
|
||||
updateBuilder.setSize(answer.getTemplateSize());
|
||||
updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize());
|
||||
}
|
||||
_volumeStoreDao.update(volStoreVO.getId(), updateBuilder);
|
||||
// update size in volume table
|
||||
VolumeVO volUpdater = volumeDao.createForUpdate();
|
||||
volUpdater.setSize(answer.getTemplateSize());
|
||||
volumeDao.update(obj.getId(), volUpdater);
|
||||
if (!VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
|
||||
VolumeVO volUpdater = volumeDao.createForUpdate();
|
||||
volUpdater.setSize(answer.getTemplateSize());
|
||||
volumeDao.update(obj.getId(), volUpdater);
|
||||
}
|
||||
}
|
||||
|
||||
AsyncCompletionCallback<CreateCmdResult> caller = context.getParentCallback();
|
||||
|
||||
if (answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR ||
|
||||
answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.ABANDONED || answer.getDownloadStatus() == VMTemplateStorageResourceAssoc.Status.UNKNOWN) {
|
||||
if (VMTemplateStorageResourceAssoc.ERROR_DOWNLOAD_STATES.contains(answer.getDownloadStatus())) {
|
||||
CreateCmdResult result = new CreateCmdResult(null, null);
|
||||
result.setSuccess(false);
|
||||
result.setResult(answer.getErrorString());
|
||||
|
|
|
|||
|
|
@ -83,6 +83,12 @@ public class CreateExtensionCmd extends BaseCmd {
|
|||
description = "Details in key/value pairs using format details[i].keyname=keyvalue. Example: details[0].endpoint.url=urlvalue")
|
||||
protected Map details;
|
||||
|
||||
@Parameter(name = ApiConstants.RESERVED_RESOURCE_DETAILS, type = CommandType.STRING,
|
||||
description = "Resource detail names as comma separated string that should be reserved and not visible " +
|
||||
"to end users",
|
||||
since = "4.22.1")
|
||||
protected String reservedResourceDetails;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -115,6 +121,10 @@ public class CreateExtensionCmd extends BaseCmd {
|
|||
return convertDetailsToMap(details);
|
||||
}
|
||||
|
||||
public String getReservedResourceDetails() {
|
||||
return reservedResourceDetails;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -78,6 +78,12 @@ public class UpdateExtensionCmd extends BaseCmd {
|
|||
"if false or not set, no action)")
|
||||
private Boolean cleanupDetails;
|
||||
|
||||
@Parameter(name = ApiConstants.RESERVED_RESOURCE_DETAILS, type = CommandType.STRING,
|
||||
description = "Resource detail names as comma separated string that should be reserved and not visible " +
|
||||
"to end users",
|
||||
since = "4.22.1")
|
||||
protected String reservedResourceDetails;
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -106,6 +112,10 @@ public class UpdateExtensionCmd extends BaseCmd {
|
|||
return cleanupDetails;
|
||||
}
|
||||
|
||||
public String getReservedResourceDetails() {
|
||||
return reservedResourceDetails;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////// API Implementation///////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -216,6 +216,11 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
@Inject
|
||||
AccountService accountService;
|
||||
|
||||
// Map of in-built extension names and their reserved resource details that shouldn't be accessible to end-users
|
||||
protected static final Map<String, List<String>> INBUILT_RESERVED_RESOURCE_DETAILS = Map.of(
|
||||
"proxmox", List.of("proxmox_vmid")
|
||||
);
|
||||
|
||||
private ScheduledExecutorService extensionPathStateCheckExecutor;
|
||||
|
||||
protected String getDefaultExtensionRelativePath(String name) {
|
||||
|
|
@ -563,6 +568,25 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
updateExtensionPathReady(extension, true);
|
||||
}
|
||||
|
||||
protected void addInbuiltExtensionReservedResourceDetails(long extensionId, List<String> reservedResourceDetails) {
|
||||
ExtensionVO vo = extensionDao.findById(extensionId);
|
||||
if (vo == null || vo.isUserDefined()) {
|
||||
return;
|
||||
}
|
||||
String lowerName = StringUtils.defaultString(vo.getName()).toLowerCase();
|
||||
Optional<Map.Entry<String, List<String>>> match = INBUILT_RESERVED_RESOURCE_DETAILS.entrySet().stream()
|
||||
.filter(e -> lowerName.contains(e.getKey().toLowerCase()))
|
||||
.findFirst();
|
||||
if (match.isPresent()) {
|
||||
Set<String> existing = new HashSet<>(reservedResourceDetails);
|
||||
for (String detailKey : match.get().getValue()) {
|
||||
if (existing.add(detailKey)) {
|
||||
reservedResourceDetails.add(detailKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getExtensionsPath() {
|
||||
return externalProvisioner.getExtensionsPath();
|
||||
|
|
@ -577,6 +601,7 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
String relativePath = cmd.getPath();
|
||||
final Boolean orchestratorRequiresPrepareVm = cmd.isOrchestratorRequiresPrepareVm();
|
||||
final String stateStr = cmd.getState();
|
||||
final String reservedResourceDetails = cmd.getReservedResourceDetails();
|
||||
ExtensionVO extensionByName = extensionDao.findByName(name);
|
||||
if (extensionByName != null) {
|
||||
throw new CloudRuntimeException("Extension by name already exists");
|
||||
|
|
@ -624,6 +649,10 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, String.valueOf(orchestratorRequiresPrepareVm),
|
||||
false));
|
||||
}
|
||||
if (StringUtils.isNotBlank(reservedResourceDetails)) {
|
||||
detailsVOList.add(new ExtensionDetailsVO(extension.getId(),
|
||||
ApiConstants.RESERVED_RESOURCE_DETAILS, reservedResourceDetails, false));
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(detailsVOList)) {
|
||||
extensionDetailsDao.saveDetails(detailsVOList);
|
||||
}
|
||||
|
|
@ -704,6 +733,7 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
final String stateStr = cmd.getState();
|
||||
final Map<String, String> details = cmd.getDetails();
|
||||
final Boolean cleanupDetails = cmd.isCleanupDetails();
|
||||
final String reservedResourceDetails = cmd.getReservedResourceDetails();
|
||||
final ExtensionVO extensionVO = extensionDao.findById(id);
|
||||
if (extensionVO == null) {
|
||||
throw new InvalidParameterValueException("Failed to find the extension");
|
||||
|
|
@ -732,7 +762,8 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
throw new CloudRuntimeException(String.format("Failed to updated the extension: %s",
|
||||
extensionVO.getName()));
|
||||
}
|
||||
updateExtensionsDetails(cleanupDetails, details, orchestratorRequiresPrepareVm, id);
|
||||
updateExtensionsDetails(cleanupDetails, details, orchestratorRequiresPrepareVm, reservedResourceDetails,
|
||||
id);
|
||||
return extensionVO;
|
||||
});
|
||||
if (StringUtils.isNotBlank(stateStr)) {
|
||||
|
|
@ -748,9 +779,11 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
return result;
|
||||
}
|
||||
|
||||
protected void updateExtensionsDetails(Boolean cleanupDetails, Map<String, String> details, Boolean orchestratorRequiresPrepareVm, long id) {
|
||||
protected void updateExtensionsDetails(Boolean cleanupDetails, Map<String, String> details,
|
||||
Boolean orchestratorRequiresPrepareVm, String reservedResourceDetails, long id) {
|
||||
final boolean needToUpdateAllDetails = Boolean.TRUE.equals(cleanupDetails) || MapUtils.isNotEmpty(details);
|
||||
if (!needToUpdateAllDetails && orchestratorRequiresPrepareVm == null) {
|
||||
if (!needToUpdateAllDetails && orchestratorRequiresPrepareVm == null &&
|
||||
StringUtils.isBlank(reservedResourceDetails)) {
|
||||
return;
|
||||
}
|
||||
if (needToUpdateAllDetails) {
|
||||
|
|
@ -761,6 +794,9 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
hiddenDetails.put(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
|
||||
String.valueOf(orchestratorRequiresPrepareVm));
|
||||
}
|
||||
if (StringUtils.isNotBlank(reservedResourceDetails)) {
|
||||
hiddenDetails.put(ApiConstants.RESERVED_RESOURCE_DETAILS, reservedResourceDetails);
|
||||
}
|
||||
if (MapUtils.isNotEmpty(hiddenDetails)) {
|
||||
hiddenDetails.forEach((key, value) -> detailsVOList.add(
|
||||
new ExtensionDetailsVO(id, key, value, false)));
|
||||
|
|
@ -775,15 +811,29 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
extensionDetailsDao.removeDetails(id);
|
||||
}
|
||||
} else {
|
||||
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
|
||||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM);
|
||||
if (detailsVO == null) {
|
||||
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
|
||||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
|
||||
String.valueOf(orchestratorRequiresPrepareVm), false));
|
||||
} else if (Boolean.parseBoolean(detailsVO.getValue()) != orchestratorRequiresPrepareVm) {
|
||||
detailsVO.setValue(String.valueOf(orchestratorRequiresPrepareVm));
|
||||
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
|
||||
if (orchestratorRequiresPrepareVm != null) {
|
||||
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
|
||||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM);
|
||||
if (detailsVO == null) {
|
||||
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
|
||||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
|
||||
String.valueOf(orchestratorRequiresPrepareVm), false));
|
||||
} else if (Boolean.parseBoolean(detailsVO.getValue()) != orchestratorRequiresPrepareVm) {
|
||||
detailsVO.setValue(String.valueOf(orchestratorRequiresPrepareVm));
|
||||
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
|
||||
}
|
||||
}
|
||||
if (StringUtils.isNotBlank(reservedResourceDetails)) {
|
||||
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(id,
|
||||
ApiConstants.RESERVED_RESOURCE_DETAILS);
|
||||
if (detailsVO == null) {
|
||||
extensionDetailsDao.persist(new ExtensionDetailsVO(id,
|
||||
ApiConstants.RESERVED_RESOURCE_DETAILS,
|
||||
reservedResourceDetails, false));
|
||||
} else if (!reservedResourceDetails.equals(detailsVO.getValue())) {
|
||||
detailsVO.setValue(reservedResourceDetails);
|
||||
extensionDetailsDao.update(detailsVO.getId(), detailsVO);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -961,12 +1011,16 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
hiddenDetails = extensionDetails.second();
|
||||
} else {
|
||||
hiddenDetails = extensionDetailsDao.listDetailsKeyPairs(extension.getId(),
|
||||
List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM));
|
||||
List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM,
|
||||
ApiConstants.RESERVED_RESOURCE_DETAILS));
|
||||
}
|
||||
if (hiddenDetails.containsKey(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)) {
|
||||
response.setOrchestratorRequiresPrepareVm(Boolean.parseBoolean(
|
||||
hiddenDetails.get(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)));
|
||||
}
|
||||
if (hiddenDetails.containsKey(ApiConstants.RESERVED_RESOURCE_DETAILS)) {
|
||||
response.setReservedResourceDetails(hiddenDetails.get(ApiConstants.RESERVED_RESOURCE_DETAILS));
|
||||
}
|
||||
response.setObjectName(Extension.class.getSimpleName().toLowerCase());
|
||||
return response;
|
||||
}
|
||||
|
|
@ -1605,6 +1659,24 @@ public class ExtensionsManagerImpl extends ManagerBase implements ExtensionsMana
|
|||
return extensionDao.findById(extensionId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getExtensionReservedResourceDetails(long extensionId) {
|
||||
ExtensionDetailsVO detailsVO = extensionDetailsDao.findDetail(extensionId,
|
||||
ApiConstants.RESERVED_RESOURCE_DETAILS);
|
||||
if (detailsVO == null || !StringUtils.isNotBlank(detailsVO.getValue())) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
List<String> reservedDetails = new ArrayList<>();
|
||||
String[] parts = detailsVO.getValue().split(",");
|
||||
for (String part : parts) {
|
||||
if (StringUtils.isNotBlank(part)) {
|
||||
reservedDetails.add(part.trim());
|
||||
}
|
||||
}
|
||||
addInbuiltExtensionReservedResourceDetails(extensionId, reservedDetails);
|
||||
return reservedDetails;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean start() {
|
||||
long pathStateCheckInterval = PathStateCheckInterval.value();
|
||||
|
|
|
|||
|
|
@ -94,4 +94,18 @@ public class CreateExtensionCmdTest {
|
|||
setField(cmd, "details", details);
|
||||
assertTrue(MapUtils.isNotEmpty(cmd.getDetails()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getReservedResourceDetailsReturnsValueWhenSet() {
|
||||
setField(cmd, "reservedResourceDetails", "detail1,detail2,detail3");
|
||||
String result = cmd.getReservedResourceDetails();
|
||||
assertEquals("detail1,detail2,detail3", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getReservedResourceDetailsReturnsNullWhenNotSet() {
|
||||
setField(cmd, "reservedResourceDetails", null);
|
||||
String result = cmd.getReservedResourceDetails();
|
||||
assertNull(result);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import static org.mockito.Mockito.doNothing;
|
|||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
import static org.springframework.test.util.ReflectionTestUtils.setField;
|
||||
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
|
|
@ -134,6 +135,20 @@ public class UpdateExtensionCmdTest {
|
|||
assertTrue(cmd.isCleanupDetails());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getReservedResourceDetailsReturnsValueWhenSet() {
|
||||
setField(cmd, "reservedResourceDetails", "detail1,detail2,detail3");
|
||||
String result = cmd.getReservedResourceDetails();
|
||||
assertEquals("detail1,detail2,detail3", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getReservedResourceDetailsReturnsNullWhenNotSet() {
|
||||
setField(cmd, "reservedResourceDetails", null);
|
||||
String result = cmd.getReservedResourceDetails();
|
||||
assertNull(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void executeSetsExtensionResponseWhenManagerSucceeds() {
|
||||
Extension extension = mock(Extension.class);
|
||||
|
|
|
|||
|
|
@ -23,11 +23,13 @@ import static org.junit.Assert.assertNotNull;
|
|||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.anyList;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.anyBoolean;
|
||||
import static org.mockito.Mockito.anyLong;
|
||||
import static org.mockito.Mockito.anyString;
|
||||
import static org.mockito.Mockito.atLeastOnce;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
|
|
@ -40,6 +42,7 @@ import static org.mockito.Mockito.when;
|
|||
|
||||
import java.io.File;
|
||||
import java.security.InvalidParameterException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
|
|
@ -49,8 +52,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.user.AccountService;
|
||||
import org.apache.cloudstack.acl.Role;
|
||||
import org.apache.cloudstack.acl.RoleService;
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
|
|
@ -85,9 +86,11 @@ import org.apache.cloudstack.framework.extensions.dao.ExtensionResourceMapDao;
|
|||
import org.apache.cloudstack.framework.extensions.dao.ExtensionResourceMapDetailsDao;
|
||||
import org.apache.cloudstack.framework.extensions.vo.ExtensionCustomActionDetailsVO;
|
||||
import org.apache.cloudstack.framework.extensions.vo.ExtensionCustomActionVO;
|
||||
import org.apache.cloudstack.framework.extensions.vo.ExtensionDetailsVO;
|
||||
import org.apache.cloudstack.framework.extensions.vo.ExtensionResourceMapVO;
|
||||
import org.apache.cloudstack.framework.extensions.vo.ExtensionVO;
|
||||
import org.apache.cloudstack.utils.identity.ManagementServerNode;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
|
@ -113,6 +116,7 @@ import com.cloud.dc.dao.ClusterDao;
|
|||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.host.dao.HostDetailsDao;
|
||||
|
|
@ -122,6 +126,7 @@ import com.cloud.org.Cluster;
|
|||
import com.cloud.serializer.GsonHelper;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.AccountService;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.UuidUtils;
|
||||
import com.cloud.utils.db.EntityManager;
|
||||
|
|
@ -664,6 +669,8 @@ public class ExtensionsManagerImplTest {
|
|||
when(cmd.getPath()).thenReturn(null);
|
||||
when(cmd.isOrchestratorRequiresPrepareVm()).thenReturn(null);
|
||||
when(cmd.getState()).thenReturn(null);
|
||||
String reservedResourceDetails = "abc,xyz";
|
||||
when(cmd.getReservedResourceDetails()).thenReturn(reservedResourceDetails);
|
||||
when(extensionDao.findByName("ext1")).thenReturn(null);
|
||||
when(extensionDao.persist(any())).thenAnswer(inv -> {
|
||||
ExtensionVO extensionVO = inv.getArgument(0);
|
||||
|
|
@ -671,11 +678,20 @@ public class ExtensionsManagerImplTest {
|
|||
return extensionVO;
|
||||
});
|
||||
when(managementServerHostDao.listBy(any())).thenReturn(Collections.emptyList());
|
||||
|
||||
List<ExtensionDetailsVO> detailsList = new ArrayList<>();
|
||||
doAnswer(inv -> {
|
||||
List<ExtensionDetailsVO> detailsVO = inv.getArgument(0);
|
||||
detailsList.addAll(detailsVO);
|
||||
return null;
|
||||
}).when(extensionDetailsDao).saveDetails(anyList());
|
||||
Extension ext = extensionsManager.createExtension(cmd);
|
||||
|
||||
assertEquals("ext1", ext.getName());
|
||||
verify(extensionDao).persist(any());
|
||||
assertTrue(CollectionUtils.isNotEmpty(detailsList));
|
||||
assertTrue(detailsList.stream()
|
||||
.anyMatch(detail -> ApiConstants.RESERVED_RESOURCE_DETAILS.equals(detail.getName())
|
||||
&& reservedResourceDetails.equals(detail.getValue())));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -938,14 +954,32 @@ public class ExtensionsManagerImplTest {
|
|||
public void updateExtensionsDetails_SavesDetails_WhenDetailsProvided() {
|
||||
long extensionId = 10L;
|
||||
Map<String, String> details = Map.of("foo", "bar", "baz", "qux");
|
||||
extensionsManager.updateExtensionsDetails(false, details, null, extensionId);
|
||||
extensionsManager.updateExtensionsDetails(false, details, null, null, extensionId);
|
||||
verify(extensionDetailsDao).saveDetails(any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateExtensionsDetails_PersistReservedDetail_WhenProvided() {
|
||||
long extensionId = 10L;
|
||||
when(extensionDetailsDao.persist(any())).thenReturn(mock(ExtensionDetailsVO.class));
|
||||
extensionsManager.updateExtensionsDetails(false, null, null, "abc,xyz", extensionId);
|
||||
verify(extensionDetailsDao).persist(any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateExtensionsDetails_UpdateReservedDetail_WhenProvided() {
|
||||
long extensionId = 10L;
|
||||
when(extensionDetailsDao.findDetail(anyLong(), eq(ApiConstants.RESERVED_RESOURCE_DETAILS)))
|
||||
.thenReturn(mock(ExtensionDetailsVO.class));
|
||||
when(extensionDetailsDao.update(anyLong(), any())).thenReturn(true);
|
||||
extensionsManager.updateExtensionsDetails(false, null, null, "abc,xyz", extensionId);
|
||||
verify(extensionDetailsDao).update(anyLong(), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void updateExtensionsDetails_DoesNothing_WhenDetailsAndCleanupAreNull() {
|
||||
long extensionId = 11L;
|
||||
extensionsManager.updateExtensionsDetails(null, null, null, extensionId);
|
||||
extensionsManager.updateExtensionsDetails(null, null, null, null, extensionId);
|
||||
verify(extensionDetailsDao, never()).removeDetails(anyLong());
|
||||
verify(extensionDetailsDao, never()).saveDetails(any());
|
||||
}
|
||||
|
|
@ -953,7 +987,7 @@ public class ExtensionsManagerImplTest {
|
|||
@Test
|
||||
public void updateExtensionsDetails_RemovesDetailsOnly_WhenCleanupIsTrue() {
|
||||
long extensionId = 12L;
|
||||
extensionsManager.updateExtensionsDetails(true, null, null, extensionId);
|
||||
extensionsManager.updateExtensionsDetails(true, null, null, null, extensionId);
|
||||
verify(extensionDetailsDao).removeDetails(extensionId);
|
||||
verify(extensionDetailsDao, never()).saveDetails(any());
|
||||
}
|
||||
|
|
@ -961,7 +995,7 @@ public class ExtensionsManagerImplTest {
|
|||
@Test
|
||||
public void updateExtensionsDetails_PersistsOrchestratorFlag_WhenFlagIsNotNull() {
|
||||
long extensionId = 13L;
|
||||
extensionsManager.updateExtensionsDetails(false, null, true, extensionId);
|
||||
extensionsManager.updateExtensionsDetails(false, null, true, null, extensionId);
|
||||
verify(extensionDetailsDao).persist(any());
|
||||
}
|
||||
|
||||
|
|
@ -970,7 +1004,7 @@ public class ExtensionsManagerImplTest {
|
|||
long extensionId = 14L;
|
||||
Map<String, String> details = Map.of("foo", "bar");
|
||||
doThrow(CloudRuntimeException.class).when(extensionDetailsDao).saveDetails(any());
|
||||
extensionsManager.updateExtensionsDetails(false, details, null, extensionId);
|
||||
extensionsManager.updateExtensionsDetails(false, details, null, null, extensionId);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -1161,7 +1195,8 @@ public class ExtensionsManagerImplTest {
|
|||
when(externalProvisioner.getExtensionPath("entry2.sh")).thenReturn("/some/path/entry2.sh");
|
||||
|
||||
Map<String, String> hiddenDetails = Map.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, "false");
|
||||
when(extensionDetailsDao.listDetailsKeyPairs(2L, List.of(ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM)))
|
||||
when(extensionDetailsDao.listDetailsKeyPairs(2L, List.of(
|
||||
ApiConstants.ORCHESTRATOR_REQUIRES_PREPARE_VM, ApiConstants.RESERVED_RESOURCE_DETAILS)))
|
||||
.thenReturn(hiddenDetails);
|
||||
|
||||
EnumSet<ApiConstants.ExtensionDetails> viewDetails = EnumSet.noneOf(ApiConstants.ExtensionDetails.class);
|
||||
|
|
@ -2069,4 +2104,118 @@ public class ExtensionsManagerImplTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenDetailsNotFound() {
|
||||
long extensionId = 1L;
|
||||
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(null);
|
||||
|
||||
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
|
||||
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenValueIsBlank() {
|
||||
long extensionId = 2L;
|
||||
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
|
||||
when(detailsVO.getValue()).thenReturn(" ");
|
||||
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
|
||||
|
||||
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
|
||||
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtensionReservedResourceDetailsReturnsListOfTrimmedDetails() {
|
||||
long extensionId = 3L;
|
||||
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
|
||||
when(detailsVO.getValue()).thenReturn(" detail1 , detail2,detail3 ");
|
||||
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
|
||||
|
||||
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
|
||||
|
||||
assertNotNull(result);
|
||||
assertEquals(3, result.size());
|
||||
assertEquals("detail1", result.get(0));
|
||||
assertEquals("detail2", result.get(1));
|
||||
assertEquals("detail3", result.get(2));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtensionReservedResourceDetailsHandlesEmptyPartsGracefully() {
|
||||
long extensionId = 4L;
|
||||
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
|
||||
when(detailsVO.getValue()).thenReturn("detail1,,detail2, ,detail3");
|
||||
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
|
||||
|
||||
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
|
||||
|
||||
assertNotNull(result);
|
||||
assertEquals(3, result.size());
|
||||
assertEquals("detail1", result.get(0));
|
||||
assertEquals("detail2", result.get(1));
|
||||
assertEquals("detail3", result.get(2));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getExtensionReservedResourceDetailsReturnsEmptyListWhenSplitResultsInNoParts() {
|
||||
long extensionId = 5L;
|
||||
ExtensionDetailsVO detailsVO = mock(ExtensionDetailsVO.class);
|
||||
when(detailsVO.getValue()).thenReturn(",");
|
||||
when(extensionDetailsDao.findDetail(extensionId, ApiConstants.RESERVED_RESOURCE_DETAILS)).thenReturn(detailsVO);
|
||||
|
||||
List<String> result = extensionsManager.getExtensionReservedResourceDetails(extensionId);
|
||||
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addInbuiltExtensionReservedResourceDetailsDoesNothingWhenExtensionNotFound() {
|
||||
when(extensionDao.findById(1L)).thenReturn(null);
|
||||
List<String> reservedResourceDetails = new ArrayList<>();
|
||||
extensionsManager.addInbuiltExtensionReservedResourceDetails(1L, reservedResourceDetails);
|
||||
assertTrue(reservedResourceDetails.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addInbuiltExtensionReservedResourceDetailsDoesNothingForUserDefinedExtension() {
|
||||
ExtensionVO extension = mock(ExtensionVO.class);
|
||||
when(extension.isUserDefined()).thenReturn(true);
|
||||
when(extensionDao.findById(2L)).thenReturn(extension);
|
||||
List<String> reservedResourceDetails = new ArrayList<>();
|
||||
reservedResourceDetails.add("existing-detail");
|
||||
extensionsManager.addInbuiltExtensionReservedResourceDetails(2L, reservedResourceDetails);
|
||||
assertEquals(1, reservedResourceDetails.size());
|
||||
assertTrue(reservedResourceDetails.contains("existing-detail"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addInbuiltExtensionReservedResourceDetailsDoesNothingWhenNoMatchFound() {
|
||||
ExtensionVO extension = mock(ExtensionVO.class);
|
||||
when(extension.isUserDefined()).thenReturn(false);
|
||||
when(extension.getName()).thenReturn("no-such-inbuilt-key-expected");
|
||||
when(extensionDao.findById(3L)).thenReturn(extension);
|
||||
List<String> reservedResourceDetails = new ArrayList<>();
|
||||
extensionsManager.addInbuiltExtensionReservedResourceDetails(3L, reservedResourceDetails);
|
||||
assertTrue(reservedResourceDetails.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addInbuiltExtensionReservedResourceDetailsAddedDetails() {
|
||||
ExtensionVO extension = mock(ExtensionVO.class);
|
||||
when(extension.isUserDefined()).thenReturn(false);
|
||||
Map.Entry<String, List<String>> entry =
|
||||
ExtensionsManagerImpl.INBUILT_RESERVED_RESOURCE_DETAILS.entrySet().iterator().next();
|
||||
when(extension.getName()).thenReturn(entry.getKey());
|
||||
when(extensionDao.findById(3L)).thenReturn(extension);
|
||||
List<String> reservedResourceDetails = new ArrayList<>();
|
||||
extensionsManager.addInbuiltExtensionReservedResourceDetails(3L, reservedResourceDetails);
|
||||
assertFalse(reservedResourceDetails.isEmpty());
|
||||
assertEquals(reservedResourceDetails.size(), entry.getValue().size());
|
||||
assertTrue(reservedResourceDetails.containsAll(entry.getValue()));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,12 +23,30 @@ import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
|
|||
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
|
||||
public interface AsyncJobDao extends GenericDao<AsyncJobVO, Long> {
|
||||
|
||||
AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instanceId);
|
||||
|
||||
List<AsyncJobVO> findInstancePendingAsyncJobs(String instanceType, Long accountId);
|
||||
|
||||
/**
|
||||
* Finds async job matching the given parameters.
|
||||
* Non-null parameters are added to search criteria.
|
||||
* Returns the most recent job by creation date.
|
||||
* <p>
|
||||
* When searching by resourceId and resourceType, only one active job
|
||||
* is expected per resource, so returning a single result is sufficient.
|
||||
*
|
||||
* @param id job ID
|
||||
* @param resourceId resource ID (instanceId)
|
||||
* @param resourceType resource type (instanceType)
|
||||
* @return matching job or null
|
||||
*/
|
||||
@Nullable
|
||||
AsyncJobVO findJob(Long id, Long resourceId, String resourceType);
|
||||
|
||||
AsyncJobVO findPseudoJob(long threadId, long msid);
|
||||
|
||||
void cleanupPseduoJobs(long msid);
|
||||
|
|
|
|||
|
|
@ -22,6 +22,8 @@ import java.util.Date;
|
|||
import java.util.List;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
|
||||
import org.apache.cloudstack.jobs.JobInfo;
|
||||
|
|
@ -45,6 +47,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
|
|||
private final SearchBuilder<AsyncJobVO> expiringUnfinishedAsyncJobSearch;
|
||||
private final SearchBuilder<AsyncJobVO> expiringCompletedAsyncJobSearch;
|
||||
private final SearchBuilder<AsyncJobVO> failureMsidAsyncJobSearch;
|
||||
private final SearchBuilder<AsyncJobVO> byIdResourceIdResourceTypeSearch;
|
||||
private final GenericSearchBuilder<AsyncJobVO, Long> asyncJobTypeSearch;
|
||||
private final GenericSearchBuilder<AsyncJobVO, Long> pendingNonPseudoAsyncJobsSearch;
|
||||
|
||||
|
|
@ -95,6 +98,12 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
|
|||
failureMsidAsyncJobSearch.and("job_cmd", failureMsidAsyncJobSearch.entity().getCmd(), Op.IN);
|
||||
failureMsidAsyncJobSearch.done();
|
||||
|
||||
byIdResourceIdResourceTypeSearch = createSearchBuilder();
|
||||
byIdResourceIdResourceTypeSearch.and("id", byIdResourceIdResourceTypeSearch.entity().getId(), SearchCriteria.Op.EQ);
|
||||
byIdResourceIdResourceTypeSearch.and("instanceId", byIdResourceIdResourceTypeSearch.entity().getInstanceId(), SearchCriteria.Op.EQ);
|
||||
byIdResourceIdResourceTypeSearch.and("instanceType", byIdResourceIdResourceTypeSearch.entity().getInstanceType(), SearchCriteria.Op.EQ);
|
||||
byIdResourceIdResourceTypeSearch.done();
|
||||
|
||||
asyncJobTypeSearch = createSearchBuilder(Long.class);
|
||||
asyncJobTypeSearch.select(null, SearchCriteria.Func.COUNT, asyncJobTypeSearch.entity().getId());
|
||||
asyncJobTypeSearch.and("job_info", asyncJobTypeSearch.entity().getCmdInfo(),Op.LIKE);
|
||||
|
|
@ -140,6 +149,30 @@ public class AsyncJobDaoImpl extends GenericDaoBase<AsyncJobVO, Long> implements
|
|||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public AsyncJobVO findJob(Long id, Long resourceId, String resourceType) {
|
||||
SearchCriteria<AsyncJobVO> sc = byIdResourceIdResourceTypeSearch.create();
|
||||
|
||||
if (id == null && resourceId == null && StringUtils.isBlank(resourceType)) {
|
||||
logger.debug("findJob called with all null parameters");
|
||||
return null;
|
||||
}
|
||||
|
||||
if (id != null) {
|
||||
sc.setParameters("id", id);
|
||||
}
|
||||
if (resourceId != null && StringUtils.isNotBlank(resourceType)) {
|
||||
sc.setParameters("instanceType", resourceType);
|
||||
sc.setParameters("instanceId", resourceId);
|
||||
}
|
||||
Filter filter = new Filter(AsyncJobVO.class, "created", false, 0L, 1L);
|
||||
List<AsyncJobVO> result = searchIncludingRemoved(sc, filter, Boolean.FALSE, false);
|
||||
if (CollectionUtils.isNotEmpty(result)) {
|
||||
return result.get(0);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AsyncJobVO findPseudoJob(long threadId, long msid) {
|
||||
SearchCriteria<AsyncJobVO> sc = pseudoJobSearch.create();
|
||||
|
|
|
|||
|
|
@ -83,6 +83,8 @@ Requires: (iptables-services or iptables)
|
|||
Requires: rng-tools
|
||||
Requires: (qemu-img or qemu-tools)
|
||||
Requires: python3-pip
|
||||
Requires: python3-six
|
||||
Requires: python3-protobuf
|
||||
Requires: python3-setuptools
|
||||
Requires: (libgcrypt > 1.8.3 or libgcrypt20)
|
||||
Group: System Environment/Libraries
|
||||
|
|
@ -334,11 +336,11 @@ cp -r ui/dist/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/
|
|||
rm -f ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json
|
||||
ln -sf /etc/%{name}/ui/config.json ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json
|
||||
|
||||
# Package mysql-connector-python
|
||||
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl
|
||||
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/e9/93/4860cebd5ad3ff2664ad3c966490ccb46e3b88458b2095145bca11727ca4/setuptools-47.3.1-py3-none-any.whl
|
||||
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/32/27/1141a8232723dcb10a595cc0ce4321dcbbd5215300bf4acfc142343205bf/protobuf-3.19.6-py2.py3-none-any.whl
|
||||
# Package mysql-connector-python (bundled to avoid dependency on external community repo)
|
||||
# Version 8.0.31 is the last version supporting Python 3.6 (EL8)
|
||||
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl
|
||||
# Version 8.3.0 supports Python 3.8 to 3.12 (EL9, EL10)
|
||||
wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/53/ed/26a4b8cacb8852c6fd97d2d58a7f2591c41989807ea82bd8d9725a4e6937/mysql_connector_python-8.3.0-py2.py3-none-any.whl
|
||||
|
||||
chmod 440 ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d/%{name}-management
|
||||
chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/mnt
|
||||
|
|
@ -455,8 +457,13 @@ then
|
|||
fi
|
||||
|
||||
%post management
|
||||
# Install mysql-connector-python
|
||||
pip3 install %{_datadir}/%{name}-management/setup/wheel/six-1.15.0-py2.py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/setuptools-47.3.1-py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/protobuf-3.19.6-py2.py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.0.31-py2.py3-none-any.whl
|
||||
# Install mysql-connector-python wheel
|
||||
# Detect Python version to install compatible wheel
|
||||
if python3 -c 'import sys; sys.exit(0 if sys.version_info >= (3, 7) else 1)'; then
|
||||
pip3 install %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.3.0-py2.py3-none-any.whl
|
||||
else
|
||||
pip3 install %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.0.31-py2.py3-none-any.whl
|
||||
fi
|
||||
|
||||
/usr/bin/systemctl enable cloudstack-management > /dev/null 2>&1 || true
|
||||
/usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true
|
||||
|
|
|
|||
|
|
@ -106,7 +106,6 @@ public class BareMetalTemplateAdapter extends TemplateAdapterBase implements Tem
|
|||
}
|
||||
}
|
||||
|
||||
_resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template);
|
||||
return template;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -35,7 +35,6 @@ import com.cloud.agent.properties.AgentPropertiesFileHandler;
|
|||
public class KVMHABase {
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
private long _timeout = 60000; /* 1 minutes */
|
||||
protected static String s_heartBeatPath;
|
||||
protected long _heartBeatUpdateTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEARTBEAT_UPDATE_TIMEOUT);
|
||||
protected long _heartBeatUpdateFreq = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_FREQUENCY);
|
||||
protected long _heartBeatUpdateMaxTries = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_MAX_TRIES);
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ package com.cloud.hypervisor.kvm.resource;
|
|||
|
||||
import com.cloud.agent.properties.AgentProperties;
|
||||
import com.cloud.agent.properties.AgentPropertiesFileHandler;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.ha.HighAvailabilityManager;
|
||||
import com.cloud.utils.script.Script;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
|
@ -39,20 +39,15 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
|
|||
|
||||
private final String hostPrivateIp;
|
||||
|
||||
public KVMHAMonitor(HAStoragePool pool, String host, String scriptPath) {
|
||||
public KVMHAMonitor(HAStoragePool pool, String host) {
|
||||
if (pool != null) {
|
||||
storagePool.put(pool.getPoolUUID(), pool);
|
||||
}
|
||||
hostPrivateIp = host;
|
||||
configureHeartBeatPath(scriptPath);
|
||||
|
||||
rebootHostAndAlertManagementOnHeartbeatTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.REBOOT_HOST_AND_ALERT_MANAGEMENT_ON_HEARTBEAT_TIMEOUT);
|
||||
}
|
||||
|
||||
private static synchronized void configureHeartBeatPath(String scriptPath) {
|
||||
KVMHABase.s_heartBeatPath = scriptPath;
|
||||
}
|
||||
|
||||
public void addStoragePool(HAStoragePool pool) {
|
||||
synchronized (storagePool) {
|
||||
storagePool.put(pool.getPoolUUID(), pool);
|
||||
|
|
@ -86,8 +81,8 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
|
|||
Set<String> removedPools = new HashSet<>();
|
||||
for (String uuid : storagePool.keySet()) {
|
||||
HAStoragePool primaryStoragePool = storagePool.get(uuid);
|
||||
if (primaryStoragePool.getPool().getType() == StoragePoolType.NetworkFilesystem) {
|
||||
checkForNotExistingPools(removedPools, uuid);
|
||||
if (HighAvailabilityManager.LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT.contains(primaryStoragePool.getPool().getType())) {
|
||||
checkForNotExistingLibvirtStoragePools(removedPools, uuid);
|
||||
if (removedPools.contains(uuid)) {
|
||||
continue;
|
||||
}
|
||||
|
|
@ -127,7 +122,7 @@ public class KVMHAMonitor extends KVMHABase implements Runnable {
|
|||
return result;
|
||||
}
|
||||
|
||||
private void checkForNotExistingPools(Set<String> removedPools, String uuid) {
|
||||
private void checkForNotExistingLibvirtStoragePools(Set<String> removedPools, String uuid) {
|
||||
try {
|
||||
Connect conn = LibvirtConnection.getConnection();
|
||||
StoragePool storage = conn.storagePoolLookupByUUIDString(uuid);
|
||||
|
|
|
|||
|
|
@ -18,6 +18,9 @@ package com.cloud.hypervisor.kvm.resource;
|
|||
|
||||
import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION;
|
||||
import static com.cloud.host.Host.HOST_OVFTOOL_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VDDK_LIB_DIR;
|
||||
import static com.cloud.host.Host.HOST_VDDK_SUPPORT;
|
||||
import static com.cloud.host.Host.HOST_VDDK_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VIRTV2V_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION;
|
||||
import static org.apache.cloudstack.utils.linux.KVMHostInfo.isHostS390x;
|
||||
|
|
@ -365,6 +368,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
public static final String WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "rpm -qa | grep -i virtio-win";
|
||||
public static final String UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "dpkg -l virtio-win";
|
||||
public static final String UBUNTU_NBDKIT_PKG_CHECK_CMD = "dpkg -l nbdkit";
|
||||
public static final String VDDK_AUTODETECT_PATH_CMD = "find / -type d -name 'vmware-vix-disklib-distrib' 2>/dev/null | head -n 1";
|
||||
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MIN = 2;
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MAX = 262144;
|
||||
|
|
@ -885,10 +889,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
private boolean convertInstanceVerboseMode = false;
|
||||
private Map<String, String> convertInstanceEnv = null;
|
||||
private String vddkLibDir = null;
|
||||
private static final String libguestfsBackend = "direct";
|
||||
protected boolean dpdkSupport = false;
|
||||
protected String dpdkOvsPath;
|
||||
protected String directDownloadTemporaryDownloadPath;
|
||||
protected String cachePath;
|
||||
private String vddkTransports = null;
|
||||
private String vddkThumbprint = null;
|
||||
private String vddkVersion = null;
|
||||
private String detectedPasswordFileOption = null;
|
||||
protected String javaTempDir = System.getProperty("java.io.tmpdir");
|
||||
|
||||
private String getEndIpFromStartIp(final String startIp, final int numIps) {
|
||||
|
|
@ -953,6 +963,26 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return convertInstanceEnv;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public String getLibguestfsBackend() {
|
||||
return libguestfsBackend;
|
||||
}
|
||||
|
||||
public String getVddkTransports() {
|
||||
return vddkTransports;
|
||||
}
|
||||
|
||||
public String getVddkThumbprint() {
|
||||
return vddkThumbprint;
|
||||
}
|
||||
|
||||
public String getVddkVersion() {
|
||||
return vddkVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines resource's public and private network interface according to what is configured in agent.properties.
|
||||
*/
|
||||
|
|
@ -1065,11 +1095,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
throw new ConfigurationException("Unable to find patch.sh");
|
||||
}
|
||||
|
||||
heartBeatPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
|
||||
if (heartBeatPath == null) {
|
||||
throw new ConfigurationException("Unable to find kvmheartbeat.sh");
|
||||
}
|
||||
|
||||
createVmPath = Script.findScript(storageScriptsDir, "createvm.sh");
|
||||
if (createVmPath == null) {
|
||||
throw new ConfigurationException("Unable to find the createvm.sh");
|
||||
|
|
@ -1158,6 +1183,37 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
setConvertInstanceEnv(convertEnvTmpDir, convertEnvVirtv2vTmpDir);
|
||||
|
||||
vddkLibDir = StringUtils.trimToNull(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_LIB_DIR));
|
||||
if (StringUtils.isNotBlank(vddkLibDir) && !isVddkLibDirValid(vddkLibDir)) {
|
||||
LOGGER.warn("Configured VDDK library dir [{}] is invalid (missing lib64/libvixDiskLib.so), attempting auto-detection", vddkLibDir);
|
||||
vddkLibDir = null;
|
||||
}
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
vddkLibDir = detectVddkLibDir();
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkLibDir)) {
|
||||
LOGGER.info("Detected VDDK library dir: {}", vddkLibDir);
|
||||
} else {
|
||||
LOGGER.warn("Could not detect a valid VDDK library dir; VDDK conversion will be unavailable");
|
||||
}
|
||||
|
||||
vddkVersion = detectVddkVersion();
|
||||
if (StringUtils.isNotBlank(vddkVersion)) {
|
||||
LOGGER.info("Detected nbdkit VDDK plugin version: {}", vddkVersion);
|
||||
}
|
||||
|
||||
vddkTransports = StringUtils.trimToNull(
|
||||
AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_TRANSPORTS));
|
||||
vddkThumbprint = StringUtils.trimToNull(
|
||||
AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_THUMBPRINT));
|
||||
|
||||
detectedPasswordFileOption = detectPasswordFileOption();
|
||||
if (StringUtils.isNotBlank(detectedPasswordFileOption)) {
|
||||
LOGGER.info("Detected virt-v2v password option: {}", detectedPasswordFileOption);
|
||||
} else {
|
||||
LOGGER.warn("Could not detect virt-v2v password option, VDDK conversions may fail");
|
||||
}
|
||||
|
||||
pool = (String)params.get("pool");
|
||||
if (pool == null) {
|
||||
pool = "/root";
|
||||
|
|
@ -1332,7 +1388,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
final String[] info = NetUtils.getNetworkParams(privateNic);
|
||||
|
||||
kvmhaMonitor = new KVMHAMonitor(null, info[0], heartBeatPath);
|
||||
kvmhaMonitor = new KVMHAMonitor(null, info[0]);
|
||||
final Thread ha = new Thread(kvmhaMonitor);
|
||||
ha.start();
|
||||
|
||||
|
|
@ -4235,6 +4291,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
cmd.setHostTags(getHostTags());
|
||||
boolean instanceConversionSupported = hostSupportsInstanceConversion();
|
||||
cmd.getHostDetails().put(HOST_INSTANCE_CONVERSION, String.valueOf(instanceConversionSupported));
|
||||
cmd.getHostDetails().put(HOST_VDDK_SUPPORT, String.valueOf(hostSupportsVddk()));
|
||||
if (StringUtils.isNotBlank(vddkLibDir)) {
|
||||
cmd.getHostDetails().put(HOST_VDDK_LIB_DIR, vddkLibDir);
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkVersion)) {
|
||||
cmd.getHostDetails().put(HOST_VDDK_VERSION, vddkVersion);
|
||||
}
|
||||
if (instanceConversionSupported) {
|
||||
cmd.getHostDetails().put(HOST_VIRTV2V_VERSION, getHostVirtV2vVersion());
|
||||
}
|
||||
|
|
@ -5956,6 +6019,66 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return exitValue == 0;
|
||||
}
|
||||
|
||||
public boolean hostSupportsVddk() {
|
||||
return hostSupportsVddk(null);
|
||||
}
|
||||
|
||||
public boolean hostSupportsVddk(String overriddenVddkLibDir) {
|
||||
String effectiveVddkLibDir = StringUtils.trimToNull(overriddenVddkLibDir);
|
||||
if (StringUtils.isBlank(effectiveVddkLibDir)) {
|
||||
effectiveVddkLibDir = StringUtils.trimToNull(vddkLibDir);
|
||||
}
|
||||
if (StringUtils.isBlank(effectiveVddkLibDir) || !isVddkLibDirValid(effectiveVddkLibDir)) {
|
||||
effectiveVddkLibDir = detectVddkLibDir();
|
||||
}
|
||||
return hostSupportsInstanceConversion() && isVddkLibDirValid(effectiveVddkLibDir) && StringUtils.isNotBlank(detectVddkVersion());
|
||||
}
|
||||
|
||||
protected boolean isVddkLibDirValid(String path) {
|
||||
if (StringUtils.isBlank(path)) {
|
||||
return false;
|
||||
}
|
||||
File libDir = new File(path, "lib64");
|
||||
if (!libDir.isDirectory()) {
|
||||
return false;
|
||||
}
|
||||
File[] libs = libDir.listFiles((dir, name) -> name.startsWith("libvixDiskLib.so"));
|
||||
return libs != null && libs.length > 0;
|
||||
}
|
||||
|
||||
protected String detectVddkLibDir() {
|
||||
String detectedPath = StringUtils.trimToNull(Script.runSimpleBashScript(VDDK_AUTODETECT_PATH_CMD));
|
||||
if (StringUtils.isNotBlank(detectedPath) && isVddkLibDirValid(detectedPath)) {
|
||||
return detectedPath;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected String detectVddkVersion() {
|
||||
try {
|
||||
ProcessBuilder pb = new ProcessBuilder("nbdkit", "vddk", "--version");
|
||||
Process process = pb.start();
|
||||
|
||||
String output = new String(process.getInputStream().readAllBytes());
|
||||
process.waitFor();
|
||||
|
||||
if (StringUtils.isBlank(output)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (String line : output.split("\\R")) {
|
||||
String trimmed = StringUtils.trimToEmpty(line);
|
||||
if (trimmed.startsWith("vddk ")) {
|
||||
return StringUtils.trimToNull(trimmed.substring("vddk ".length()));
|
||||
}
|
||||
}
|
||||
return null;
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to detect vddk version: {}", e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean hostSupportsWindowsGuestConversion() {
|
||||
if (isUbuntuOrDebianHost()) {
|
||||
int exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD);
|
||||
|
|
@ -5970,6 +6093,40 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return exitValue == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect which password option virt-v2v supports by examining its --help output
|
||||
* @return "-ip" if supported (virt-v2v >= 2.8.1), "--password-file" if older version, or null if detection fails
|
||||
*/
|
||||
protected String detectPasswordFileOption() {
|
||||
try {
|
||||
ProcessBuilder pb = new ProcessBuilder("virt-v2v", "--help");
|
||||
Process process = pb.start();
|
||||
|
||||
String output = new String(process.getInputStream().readAllBytes());
|
||||
process.waitFor();
|
||||
|
||||
if (output.contains("-ip <filename>")) {
|
||||
return "-ip";
|
||||
} else if (output.contains("--password-file")) {
|
||||
return "--password-file";
|
||||
} else {
|
||||
LOGGER.error("virt-v2v does not support -ip or --password-file");
|
||||
return null;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to detect virt-v2v password option: {}", e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Get the detected password file option for virt-v2v.
 * Returns the cached value of the {@code detectedPasswordFileOption} field
 * (presumably populated by {@code detectPasswordFileOption()} — see that method).
 * @return the password option ("-ip" or "--password-file") or null if not detected
 */
public String getDetectedPasswordFileOption() {
    return detectedPasswordFileOption;
}
|
||||
|
||||
public String getHostVirtV2vVersion() {
|
||||
if (!hostSupportsInstanceConversion()) {
|
||||
return "";
|
||||
|
|
|
|||
|
|
@ -30,7 +30,15 @@ public class LibvirtCheckConvertInstanceCommandWrapper extends CommandWrapper<Ch
|
|||
|
||||
@Override
|
||||
public Answer execute(CheckConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
|
||||
if (!serverResource.hostSupportsInstanceConversion()) {
|
||||
if (cmd.isUseVddk()) {
|
||||
if (!serverResource.hostSupportsVddk(cmd.getVddkLibDir())) {
|
||||
String msg = String.format("Cannot convert the instance from VMware using VDDK on host %s. " +
|
||||
"Please make sure virt-v2v%s, nbdkit-vddk and a valid VDDK library directory are available on the host.",
|
||||
serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost() ? ", nbdkit" : "");
|
||||
logger.info(msg);
|
||||
return new CheckConvertInstanceAnswer(cmd, false, msg);
|
||||
}
|
||||
} else if (!serverResource.hostSupportsInstanceConversion()) {
|
||||
String msg = String.format("Cannot convert the instance from VMware as the virt-v2v binary is not found on host %s. " +
|
||||
"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
|
||||
logger.info(msg);
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ public final class LibvirtCheckVMActivityOnStoragePoolCommandWrapper extends Com
|
|||
|
||||
KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid());
|
||||
|
||||
if (primaryPool.isPoolSupportHA()){
|
||||
if (primaryPool.isPoolSupportHA()) {
|
||||
final HAStoragePool nfspool = monitor.getStoragePool(pool.getUuid());
|
||||
final KVMHAVMActivityChecker ha = new KVMHAVMActivityChecker(nfspool, command.getHost(), command.getVolumeList(), libvirtComputingResource.getVmActivityCheckPath(), command.getSuspectTimeInSeconds());
|
||||
final Future<Boolean> future = executors.submit(ha);
|
||||
|
|
|
|||
|
|
@ -45,11 +45,10 @@ public final class LibvirtCheckVirtualMachineCommandWrapper extends CommandWrapp
|
|||
Integer vncPort = null;
|
||||
if (state == PowerState.PowerOn) {
|
||||
vncPort = libvirtComputingResource.getVncPort(conn, command.getVmName());
|
||||
}
|
||||
|
||||
Domain vm = conn.domainLookupByName(command.getVmName());
|
||||
if (state == PowerState.PowerOn && DomainInfo.DomainState.VIR_DOMAIN_PAUSED.equals(vm.getInfo().state)) {
|
||||
return new CheckVirtualMachineAnswer(command, PowerState.PowerUnknown, vncPort);
|
||||
Domain vm = conn.domainLookupByName(command.getVmName());
|
||||
if (DomainInfo.DomainState.VIR_DOMAIN_PAUSED.equals(vm.getInfo().state)) {
|
||||
return new CheckVirtualMachineAnswer(command, PowerState.PowerUnknown, vncPort);
|
||||
}
|
||||
}
|
||||
|
||||
return new CheckVirtualMachineAnswer(command, state, vncPort);
|
||||
|
|
|
|||
|
|
@ -20,10 +20,17 @@ package com.cloud.hypervisor.kvm.resource.wrapper;
|
|||
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.attribute.PosixFilePermission;
|
||||
import java.util.Locale;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.commons.collections4.MapUtils;
|
||||
|
|
@ -51,6 +58,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
|
||||
private static final List<Hypervisor.HypervisorType> supportedInstanceConvertSourceHypervisors =
|
||||
List.of(Hypervisor.HypervisorType.VMware);
|
||||
private static final Pattern SHA1_FINGERPRINT_PATTERN = Pattern.compile("(?i)(?:SHA1\\s+)?Fingerprint\\s*=\\s*([0-9A-F:]+)");
|
||||
|
||||
@Override
|
||||
public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
|
||||
|
|
@ -61,7 +69,8 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
DataStoreTO conversionTemporaryLocation = cmd.getConversionTemporaryLocation();
|
||||
long timeout = (long) cmd.getWait() * 1000;
|
||||
String extraParams = cmd.getExtraParams();
|
||||
String originalVMName = cmd.getOriginalVMName(); // For logging purposes, as the sourceInstance may have been cloned
|
||||
boolean useVddk = cmd.isUseVddk();
|
||||
String originalVMName = cmd.getOriginalVMName();
|
||||
|
||||
if (cmd.getCheckConversionSupport() && !serverResource.hostSupportsInstanceConversion()) {
|
||||
String msg = String.format("Cannot convert the instance %s from VMware as the virt-v2v binary is not found. " +
|
||||
|
|
@ -84,61 +93,75 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
logger.info(String.format("(%s) Attempting to convert the instance %s from %s to KVM",
|
||||
originalVMName, sourceInstanceName, sourceHypervisorType));
|
||||
final String temporaryConvertPath = temporaryStoragePool.getLocalPath();
|
||||
|
||||
String ovfTemplateDirOnConversionLocation;
|
||||
String sourceOVFDirPath;
|
||||
boolean ovfExported = false;
|
||||
if (cmd.getExportOvfToConversionLocation()) {
|
||||
String exportInstanceOVAUrl = getExportInstanceOVAUrl(sourceInstance, originalVMName);
|
||||
if (StringUtils.isBlank(exportInstanceOVAUrl)) {
|
||||
String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
|
||||
int noOfThreads = cmd.getThreadsCountToExportOvf();
|
||||
if (noOfThreads > 1 && !serverResource.ovfExportToolSupportsParallelThreads()) {
|
||||
noOfThreads = 0;
|
||||
}
|
||||
ovfTemplateDirOnConversionLocation = UUID.randomUUID().toString();
|
||||
temporaryStoragePool.createFolder(ovfTemplateDirOnConversionLocation);
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
ovfExported = exportOVAFromVMOnVcenter(exportInstanceOVAUrl, sourceOVFDirPath, noOfThreads, originalVMName, timeout);
|
||||
if (!ovfExported) {
|
||||
String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
|
||||
} else {
|
||||
ovfTemplateDirOnConversionLocation = cmd.getTemplateDirOnConversionLocation();
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
}
|
||||
|
||||
logger.info(String.format("(%s) Attempting to convert the OVF %s of the instance %s from %s to KVM",
|
||||
originalVMName, ovfTemplateDirOnConversionLocation, sourceInstanceName, sourceHypervisorType));
|
||||
|
||||
final String temporaryConvertUuid = UUID.randomUUID().toString();
|
||||
boolean verboseModeEnabled = serverResource.isConvertInstanceVerboseModeEnabled();
|
||||
|
||||
boolean cleanupSecondaryStorage = false;
|
||||
boolean ovfExported = false;
|
||||
String ovfTemplateDirOnConversionLocation = null;
|
||||
|
||||
try {
|
||||
boolean result = performInstanceConversion(originalVMName, sourceOVFDirPath, temporaryConvertPath, temporaryConvertUuid,
|
||||
timeout, verboseModeEnabled, extraParams, serverResource);
|
||||
boolean result;
|
||||
if (useVddk) {
|
||||
logger.info("({}) Using VDDK-based conversion (direct from VMware)", originalVMName);
|
||||
String vddkLibDir = resolveVddkSetting(cmd.getVddkLibDir(), serverResource.getVddkLibDir());
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
String err = String.format("VDDK lib dir is not configured on the host. " +
|
||||
"Set '%s' in agent.properties or in details parameter of the import api call to use VDDK-based conversion.", "vddk.lib.dir");
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
String vddkTransports = resolveVddkSetting(cmd.getVddkTransports(), serverResource.getVddkTransports());
|
||||
String configuredVddkThumbprint = resolveVddkSetting(cmd.getVddkThumbprint(), serverResource.getVddkThumbprint());
|
||||
String passwordOption = serverResource.getDetectedPasswordFileOption();
|
||||
result = performInstanceConversionUsingVddk(sourceInstance, originalVMName, temporaryConvertPath,
|
||||
vddkLibDir, serverResource.getLibguestfsBackend(), vddkTransports, configuredVddkThumbprint,
|
||||
timeout, verboseModeEnabled, extraParams, temporaryConvertUuid, passwordOption);
|
||||
} else {
|
||||
logger.info("({}) Using OVF-based conversion (export + local convert)", originalVMName);
|
||||
String sourceOVFDirPath;
|
||||
if (cmd.getExportOvfToConversionLocation()) {
|
||||
String exportInstanceOVAUrl = getExportInstanceOVAUrl(sourceInstance, originalVMName);
|
||||
|
||||
if (StringUtils.isBlank(exportInstanceOVAUrl)) {
|
||||
String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
|
||||
int noOfThreads = cmd.getThreadsCountToExportOvf();
|
||||
if (noOfThreads > 1 && !serverResource.ovfExportToolSupportsParallelThreads()) {
|
||||
noOfThreads = 0;
|
||||
}
|
||||
ovfTemplateDirOnConversionLocation = UUID.randomUUID().toString();
|
||||
temporaryStoragePool.createFolder(ovfTemplateDirOnConversionLocation);
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
ovfExported = exportOVAFromVMOnVcenter(exportInstanceOVAUrl, sourceOVFDirPath, noOfThreads, originalVMName, timeout);
|
||||
|
||||
if (!ovfExported) {
|
||||
String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
|
||||
} else {
|
||||
ovfTemplateDirOnConversionLocation = cmd.getTemplateDirOnConversionLocation();
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
}
|
||||
|
||||
result = performInstanceConversion(originalVMName, sourceOVFDirPath, temporaryConvertPath, temporaryConvertUuid,
|
||||
timeout, verboseModeEnabled, extraParams, serverResource);
|
||||
}
|
||||
|
||||
if (!result) {
|
||||
String err = String.format(
|
||||
"The virt-v2v conversion for the OVF %s failed. Please check the agent logs " +
|
||||
"for the virt-v2v output. Please try on a different kvm host which " +
|
||||
"has a different virt-v2v version.",
|
||||
ovfTemplateDirOnConversionLocation);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
String err = String.format("Instance conversion failed for VM %s. Please check virt-v2v logs.", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
return new ConvertInstanceAnswer(cmd, temporaryConvertUuid);
|
||||
} catch (Exception e) {
|
||||
String error = String.format("Error converting instance %s from %s, due to: %s",
|
||||
sourceInstanceName, sourceHypervisorType, e.getMessage());
|
||||
logger.error(String.format("(%s) %s", originalVMName, error), e);
|
||||
String error = String.format("Error converting instance %s from %s, due to: %s", sourceInstanceName, sourceHypervisorType, e.getMessage());
|
||||
logger.error("({}) {}", originalVMName, error, e);
|
||||
cleanupSecondaryStorage = true;
|
||||
return new Answer(cmd, false, error);
|
||||
} finally {
|
||||
|
|
@ -275,4 +298,198 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
protected String encodeUsername(String username) {
|
||||
return URLEncoder.encode(username, Charset.defaultCharset());
|
||||
}
|
||||
|
||||
private String resolveVddkSetting(String commandValue, String agentValue) {
|
||||
return StringUtils.defaultIfBlank(StringUtils.trimToNull(commandValue), StringUtils.trimToNull(agentValue));
|
||||
}
|
||||
|
||||
/**
 * Converts a VMware instance directly from vCenter into a local qcow2 image using
 * virt-v2v with the VDDK input transport ({@code -it vddk}).
 *
 * The vCenter password is written to a temporary file (virt-v2v reads it via the
 * detected password option), the virt-v2v command line is assembled, executed via
 * {@code bash -c}, and the password file is removed in the finally block.
 *
 * @param vmwareInstance           source instance descriptor (vCenter host, credentials, VM name)
 * @param originalVMName           original VM name, used only as a log prefix
 * @param temporaryConvertFolder   destination directory for the converted qcow2 ({@code -os})
 * @param vddkLibDir               VDDK library directory ({@code -io vddk-libdir})
 * @param libguestfsBackend        value exported as LIBGUESTFS_BACKEND for virt-v2v
 * @param vddkTransports           optional transports list ({@code -io vddk-transports}), skipped when blank
 * @param configuredVddkThumbprint optional pre-configured vCenter SHA1 thumbprint; fetched via openssl when blank
 * @param timeout                  command timeout in milliseconds for the Script runner
 * @param verboseModeEnabled       whether to pass {@code -v} to virt-v2v
 * @param extraParams              extra raw virt-v2v parameters, appended verbatim when non-blank
 * @param temporaryConvertUuid     output image name ({@code -on})
 * @param passwordOption           virt-v2v password flag ("-ip" or "--password-file"); failure when blank
 * @return true if virt-v2v exited with code 0, false on any failure
 */
protected boolean performInstanceConversionUsingVddk(RemoteInstanceTO vmwareInstance, String originalVMName,
                                                     String temporaryConvertFolder, String vddkLibDir,
                                                     String libguestfsBackend, String vddkTransports,
                                                     String configuredVddkThumbprint,
                                                     long timeout, boolean verboseModeEnabled, String extraParams,
                                                     String temporaryConvertUuid, String passwordOption) {

    String vcenterPassword = vmwareInstance.getVcenterPassword();
    if (StringUtils.isBlank(vcenterPassword)) {
        logger.error("({}) Could not determine vCenter password for {}", originalVMName, vmwareInstance.getVcenterHost());
        return false;
    }

    // Random suffix avoids collisions between concurrent conversions against the same vCenter.
    String passwordFilePath = String.format("/tmp/v2v.pass.cloud.%s.%s",
            StringUtils.defaultIfBlank(vmwareInstance.getVcenterHost(), "unknown"),
            UUID.randomUUID());
    try {
        // NOTE(review): the file is written before its permissions are tightened, so there is
        // a brief window where it carries the default umask permissions — consider creating
        // the file with owner-only permissions atomically.
        Files.writeString(Path.of(passwordFilePath), vcenterPassword);
        Files.setPosixFilePermissions(Path.of(passwordFilePath), Set.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE));
        logger.debug("({}) Written vCenter password to {}", originalVMName, passwordFilePath);
    } catch (Exception e) {
        logger.error("({}) Failed to write vCenter password file {}: {}", originalVMName, passwordFilePath, e.getMessage());
        return false;
    }

    try {
        String vpxUrl = buildVpxUrl(vmwareInstance);

        // NOTE(review): values below are interpolated into a `bash -c` string without shell
        // quoting; this assumes the configured paths/transports and the VM name contain no
        // shell metacharacters — verify against allowed inputs.
        StringBuilder cmd = new StringBuilder();

        cmd.append("export LIBGUESTFS_BACKEND=").append(libguestfsBackend).append(" && ");

        cmd.append("virt-v2v ");
        cmd.append("--root first ");
        cmd.append("-ic '").append(vpxUrl).append("' ");
        if (StringUtils.isBlank(passwordOption)) {
            logger.error("({}) Could not determine supported password file option for virt-v2v", originalVMName);
            return false;
        }

        cmd.append(passwordOption).append(" ").append(passwordFilePath).append(" ");
        cmd.append("-it vddk ");
        cmd.append("-io vddk-libdir=").append(vddkLibDir).append(" ");
        // Thumbprint preference: explicitly configured value first, otherwise fetch it
        // from the vCenter endpoint via openssl.
        String vddkThumbprint = StringUtils.trimToNull(configuredVddkThumbprint);
        if (StringUtils.isBlank(vddkThumbprint)) {
            vddkThumbprint = getVcenterThumbprint(vmwareInstance.getVcenterHost(), timeout, originalVMName);
        }
        if (StringUtils.isBlank(vddkThumbprint)) {
            logger.error("({}) Could not determine vCenter thumbprint for {}", originalVMName, vmwareInstance.getVcenterHost());
            return false;
        }
        cmd.append("-io vddk-thumbprint=").append(vddkThumbprint).append(" ");
        if (StringUtils.isNotBlank(vddkTransports)) {
            cmd.append("-io vddk-transports=").append(vddkTransports).append(" ");
        }
        cmd.append(vmwareInstance.getInstanceName()).append(" ");
        cmd.append("-o local ");
        cmd.append("-os ").append(temporaryConvertFolder).append(" ");
        cmd.append("-of qcow2 ");
        cmd.append("-on ").append(temporaryConvertUuid).append(" ");

        if (verboseModeEnabled) {
            cmd.append("-v ");
        }

        if (StringUtils.isNotBlank(extraParams)) {
            cmd.append(extraParams).append(" ");
        }

        Script script = new Script("/bin/bash", timeout, logger);
        script.add("-c");
        script.add(cmd.toString());

        // Stream virt-v2v output into the agent log line by line, tagged per VM.
        String logPrefix = String.format("(%s) virt-v2v vddk import", originalVMName);
        OutputInterpreter.LineByLineOutputLogger outputLogger =
                new OutputInterpreter.LineByLineOutputLogger(logger, logPrefix);

        logger.info("({}) Starting virt-v2v VDDK conversion", originalVMName);
        script.execute(outputLogger);

        int exitValue = script.getExitValue();
        if (exitValue != 0) {
            logger.error("({}) virt-v2v failed with exit code {}", originalVMName, exitValue);
        }

        return exitValue == 0;
    } finally {
        // Always remove the password file, whether the conversion succeeded or not.
        try {
            Files.deleteIfExists(Path.of(passwordFilePath));
            logger.debug("({}) Deleted password file {}", originalVMName, passwordFilePath);
        } catch (Exception e) {
            logger.warn("({}) Failed to delete password file {}: {}", originalVMName, passwordFilePath, e.getMessage());
        }
    }
}
|
||||
|
||||
protected String getVcenterThumbprint(String vcenterHost, long timeout, String originalVMName) {
|
||||
if (StringUtils.isBlank(vcenterHost)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
String endpoint = String.format("%s:443", vcenterHost);
|
||||
String command = String.format("openssl s_client -connect '%s' </dev/null 2>/dev/null | " +
|
||||
"openssl x509 -fingerprint -sha1 -noout", endpoint);
|
||||
|
||||
Script script = new Script("/bin/bash", timeout, logger);
|
||||
script.add("-c");
|
||||
script.add(command);
|
||||
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
script.execute(parser);
|
||||
|
||||
String output = parser.getLines();
|
||||
if (script.getExitValue() != 0) {
|
||||
logger.error("({}) Failed to fetch vCenter thumbprint for {}", originalVMName, vcenterHost);
|
||||
return null;
|
||||
}
|
||||
|
||||
String thumbprint = extractSha1Fingerprint(output);
|
||||
if (StringUtils.isBlank(thumbprint)) {
|
||||
logger.error("({}) Failed to parse vCenter thumbprint from output for {}", originalVMName, vcenterHost);
|
||||
return null;
|
||||
}
|
||||
return thumbprint;
|
||||
}
|
||||
|
||||
private String extractSha1Fingerprint(String output) {
|
||||
String parsedOutput = StringUtils.trimToEmpty(output);
|
||||
if (StringUtils.isBlank(parsedOutput)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (String line : parsedOutput.split("\\R")) {
|
||||
String trimmedLine = StringUtils.trimToEmpty(line);
|
||||
if (StringUtils.isBlank(trimmedLine)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Matcher matcher = SHA1_FINGERPRINT_PATTERN.matcher(trimmedLine);
|
||||
if (matcher.find()) {
|
||||
return matcher.group(1).toUpperCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
// Fallback for raw fingerprint-only output.
|
||||
if (trimmedLine.matches("(?i)[0-9a-f]{2}(:[0-9a-f]{2})+")) {
|
||||
return trimmedLine.toUpperCase(Locale.ROOT);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build vpx:// URL for virt-v2v
|
||||
*
|
||||
* Format:
|
||||
* vpx://user@vcenter/DC/cluster/host?no_verify=1
|
||||
*/
|
||||
private String buildVpxUrl(RemoteInstanceTO vmwareInstance) {
|
||||
|
||||
String vmName = vmwareInstance.getInstanceName();
|
||||
String vcenter = vmwareInstance.getVcenterHost();
|
||||
String username = vmwareInstance.getVcenterUsername();
|
||||
String datacenter = vmwareInstance.getDatacenterName();
|
||||
String cluster = vmwareInstance.getClusterName();
|
||||
String host = vmwareInstance.getHostName();
|
||||
|
||||
String encodedUsername = encodeUsername(username);
|
||||
|
||||
StringBuilder url = new StringBuilder();
|
||||
url.append("vpx://")
|
||||
.append(encodedUsername)
|
||||
.append("@")
|
||||
.append(vcenter)
|
||||
.append("/")
|
||||
.append(datacenter);
|
||||
|
||||
if (StringUtils.isNotBlank(cluster)) {
|
||||
url.append("/").append(cluster);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(host)) {
|
||||
url.append("/").append(host);
|
||||
}
|
||||
|
||||
url.append("?no_verify=1");
|
||||
|
||||
logger.info("({}) Using VPX URL: {}", vmName, url);
|
||||
return url.toString();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -259,6 +259,12 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
|
||||
boolean isMigrateDowntimeSet = false;
|
||||
|
||||
final int migrateWait = libvirtComputingResource.getMigrateWait();
|
||||
logger.info("vm.migrate.wait value set to: {} secs for VM: {}", migrateWait, vmName);
|
||||
|
||||
final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
|
||||
logger.info("vm.migrate.pauseafter value set to: {} ms for VM: {}", migratePauseAfter, vmName);
|
||||
|
||||
while (!executor.isTerminated()) {
|
||||
Thread.sleep(100);
|
||||
sleeptime += 100;
|
||||
|
|
@ -278,8 +284,6 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
|
||||
// abort the vm migration if the job is executed more than vm.migrate.wait
|
||||
final int migrateWait = libvirtComputingResource.getMigrateWait();
|
||||
logger.info("vm.migrate.wait value set to: {}for VM: {}", migrateWait, vmName);
|
||||
if (migrateWait > 0 && sleeptime > migrateWait * 1000) {
|
||||
DomainState state = null;
|
||||
try {
|
||||
|
|
@ -306,8 +310,6 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
}
|
||||
|
||||
// pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
|
||||
final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
|
||||
logger.info("vm.migrate.pauseafter value set to: {} for VM: {}", migratePauseAfter, vmName);
|
||||
if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
|
||||
DomainState state = null;
|
||||
try {
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
|||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.script.Script;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
@ResourceWrapper(handles = ReadyCommand.class)
|
||||
public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyCommand, Answer, LibvirtComputingResource> {
|
||||
|
|
@ -50,6 +51,9 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyComman
|
|||
if (libvirtComputingResource.hostSupportsInstanceConversion()) {
|
||||
hostDetails.put(Host.HOST_VIRTV2V_VERSION, libvirtComputingResource.getHostVirtV2vVersion());
|
||||
}
|
||||
hostDetails.put(Host.HOST_VDDK_SUPPORT, Boolean.toString(libvirtComputingResource.hostSupportsVddk()));
|
||||
hostDetails.put(Host.HOST_VDDK_LIB_DIR, StringUtils.defaultString(libvirtComputingResource.getVddkLibDir()));
|
||||
hostDetails.put(Host.HOST_VDDK_VERSION, StringUtils.defaultString(libvirtComputingResource.getVddkVersion()));
|
||||
|
||||
if (libvirtComputingResource.hostSupportsOvfExport()) {
|
||||
hostDetails.put(Host.HOST_OVFTOOL_VERSION, libvirtComputingResource.getHostOvfToolVersion());
|
||||
|
|
|
|||
|
|
@ -291,6 +291,7 @@ public class KVMStoragePoolManager {
|
|||
LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
|
||||
addPoolDetails(uuid, libvirtPool);
|
||||
|
||||
((LibvirtStoragePool) pool).setType(type);
|
||||
updatePoolTypeIfApplicable(libvirtPool, pool, type, uuid);
|
||||
}
|
||||
|
||||
|
|
@ -412,6 +413,9 @@ public class KVMStoragePoolManager {
|
|||
private synchronized KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map<String, String> details, boolean primaryStorage) {
|
||||
StorageAdaptor adaptor = getStorageAdaptor(type);
|
||||
KVMStoragePool pool = adaptor.createStoragePool(name, host, port, path, userInfo, type, details, primaryStorage);
|
||||
if (pool instanceof LibvirtStoragePool) {
|
||||
((LibvirtStoragePool) pool).setType(type);
|
||||
}
|
||||
|
||||
// LibvirtStorageAdaptor-specific statement
|
||||
if (pool.isPoolSupportHA() && primaryStorage) {
|
||||
|
|
|
|||
|
|
@ -186,6 +186,8 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
|
||||
private int incrementalSnapshotTimeout;
|
||||
|
||||
private int incrementalSnapshotRetryRebaseWait;
|
||||
|
||||
private static final String CHECKPOINT_XML_TEMP_DIR = "/tmp/cloudstack/checkpointXMLs";
|
||||
|
||||
private static final String BACKUP_XML_TEMP_DIR = "/tmp/cloudstack/backupXMLs";
|
||||
|
|
@ -273,6 +275,7 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
_cmdsTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.CMDS_TIMEOUT) * 1000;
|
||||
|
||||
incrementalSnapshotTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_TIMEOUT) * 1000;
|
||||
incrementalSnapshotRetryRebaseWait = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.INCREMENTAL_SNAPSHOT_RETRY_REBASE_WAIT) * 1000;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -2311,8 +2314,25 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
QemuImg qemuImg = new QemuImg(wait);
|
||||
qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false);
|
||||
} catch (LibvirtException | QemuImgException e) {
|
||||
logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
if (!StringUtils.contains(e.getMessage(), "Is another process using the image")) {
|
||||
logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
retryRebase(snapshotName, wait, e, snapshotFile, parentSnapshotFile);
|
||||
}
|
||||
}
|
||||
|
||||
private void retryRebase(String snapshotName, int wait, Exception e, QemuImgFile snapshotFile, QemuImgFile parentSnapshotFile) {
|
||||
logger.warn("Libvirt still has not released the lock, will wait [{}] milliseconds and try again later.", incrementalSnapshotRetryRebaseWait);
|
||||
try {
|
||||
Thread.sleep(incrementalSnapshotRetryRebaseWait);
|
||||
QemuImg qemuImg = new QemuImg(wait);
|
||||
qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false);
|
||||
} catch (LibvirtException | QemuImgException | InterruptedException ex) {
|
||||
logger.error("Unable to rebase snapshot [{}].", snapshotName, ex);
|
||||
CloudRuntimeException cre = new CloudRuntimeException(ex);
|
||||
cre.addSuppressed(e);
|
||||
throw cre;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
|
|||
import com.cloud.agent.api.to.HostTO;
|
||||
import com.cloud.agent.properties.AgentProperties;
|
||||
import com.cloud.agent.properties.AgentPropertiesFileHandler;
|
||||
import com.cloud.ha.HighAvailabilityManager;
|
||||
import com.cloud.hypervisor.kvm.resource.KVMHABase.HAStoragePool;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
|
|
@ -324,13 +325,24 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
|||
|
||||
@Override
|
||||
public boolean isPoolSupportHA() {
|
||||
return type == StoragePoolType.NetworkFilesystem;
|
||||
return HighAvailabilityManager.LIBVIRT_STORAGE_POOL_TYPES_WITH_HA_SUPPORT.contains(type);
|
||||
}
|
||||
|
||||
public String getHearthBeatPath() {
|
||||
if (type == StoragePoolType.NetworkFilesystem) {
|
||||
if (StoragePoolType.NetworkFilesystem.equals(type)) {
|
||||
String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
|
||||
return Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
|
||||
String scriptPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh");
|
||||
if (scriptPath == null) {
|
||||
throw new CloudRuntimeException("Unable to find heartbeat script 'kvmheartbeat.sh' in directory: " + kvmScriptsDir);
|
||||
}
|
||||
return scriptPath;
|
||||
} else if (StoragePoolType.SharedMountPoint.equals(type)) {
|
||||
String kvmScriptsDir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_SCRIPTS_DIR);
|
||||
String scriptPath = Script.findScript(kvmScriptsDir, "kvmsmpheartbeat.sh");
|
||||
if (scriptPath == null) {
|
||||
throw new CloudRuntimeException("Unable to find heartbeat script 'kvmsmpheartbeat.sh' in directory: " + kvmScriptsDir);
|
||||
}
|
||||
return scriptPath;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
@ -414,4 +426,8 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
|||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Overrides this pool's storage pool type.
 * Used by the pool manager after pool creation (see the setType calls in
 * KVMStoragePoolManager) to normalize the pool's type.
 */
public void setType(StoragePoolType type) {
    this.type = type;
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -52,6 +52,7 @@ public class LibvirtCheckConvertInstanceCommandWrapperTest {
|
|||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_success() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(false);
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(true);
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
assertTrue(answer.getResult());
|
||||
|
|
@ -59,9 +60,33 @@ public class LibvirtCheckConvertInstanceCommandWrapperTest {
|
|||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_failure() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(false);
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(false);
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
assertFalse(answer.getResult());
|
||||
assertTrue(StringUtils.isNotBlank(answer.getDetails()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_vddkSuccess() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(true);
|
||||
Mockito.when(checkConvertInstanceCommandMock.getVddkLibDir()).thenReturn("/opt/vmware-vddk/vmware-vix-disklib-distrib");
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsVddk("/opt/vmware-vddk/vmware-vix-disklib-distrib")).thenReturn(true);
|
||||
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_vddkFailure() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(true);
|
||||
Mockito.when(checkConvertInstanceCommandMock.getVddkLibDir()).thenReturn("/opt/vmware-vddk/vmware-vix-disklib-distrib");
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsVddk("/opt/vmware-vddk/vmware-vix-disklib-distrib")).thenReturn(false);
|
||||
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertTrue(StringUtils.isNotBlank(answer.getDetails()));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,191 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.Domain;
|
||||
import org.libvirt.DomainInfo;
|
||||
import org.libvirt.DomainInfo.DomainState;
|
||||
import org.libvirt.LibvirtException;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.agent.api.CheckVirtualMachineAnswer;
|
||||
import com.cloud.agent.api.CheckVirtualMachineCommand;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.vm.VirtualMachine.PowerState;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class LibvirtCheckVirtualMachineCommandWrapperTest {
|
||||
|
||||
private static final String VM_NAME = "i-2-3-VM";
|
||||
|
||||
@Mock
|
||||
private LibvirtComputingResource libvirtComputingResource;
|
||||
@Mock
|
||||
private LibvirtUtilitiesHelper libvirtUtilitiesHelper;
|
||||
@Mock
|
||||
private Connect conn;
|
||||
@Mock
|
||||
private Domain domain;
|
||||
|
||||
private LibvirtCheckVirtualMachineCommandWrapper wrapper;
|
||||
private CheckVirtualMachineCommand command;
|
||||
|
||||
@Before
|
||||
public void setUp() throws LibvirtException {
|
||||
wrapper = new LibvirtCheckVirtualMachineCommandWrapper();
|
||||
command = new CheckVirtualMachineCommand(VM_NAME);
|
||||
|
||||
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
|
||||
when(libvirtUtilitiesHelper.getConnectionByVmName(VM_NAME)).thenReturn(conn);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteVmPoweredOnReturnsStateAndVncPort() throws LibvirtException {
|
||||
DomainInfo domainInfo = new DomainInfo();
|
||||
domainInfo.state = DomainState.VIR_DOMAIN_RUNNING;
|
||||
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
|
||||
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5900);
|
||||
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
|
||||
when(domain.getInfo()).thenReturn(domainInfo);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals(PowerState.PowerOn, answer.getState());
|
||||
assertEquals(Integer.valueOf(5900), answer.getVncPort());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteVmPausedReturnsPowerUnknown() throws LibvirtException {
|
||||
DomainInfo domainInfo = new DomainInfo();
|
||||
domainInfo.state = DomainState.VIR_DOMAIN_PAUSED;
|
||||
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
|
||||
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5901);
|
||||
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
|
||||
when(domain.getInfo()).thenReturn(domainInfo);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals(PowerState.PowerUnknown, answer.getState());
|
||||
assertEquals(Integer.valueOf(5901), answer.getVncPort());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteVmPoweredOffReturnsStateWithNullVncPort() throws LibvirtException {
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOff);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals(PowerState.PowerOff, answer.getState());
|
||||
assertNull(answer.getVncPort());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteVmStateUnknownReturnsStateWithNullVncPort() throws LibvirtException {
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerUnknown);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals(PowerState.PowerUnknown, answer.getState());
|
||||
assertNull(answer.getVncPort());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteVmPoweredOnWithNullVncPort() throws LibvirtException {
|
||||
DomainInfo domainInfo = new DomainInfo();
|
||||
domainInfo.state = DomainState.VIR_DOMAIN_RUNNING;
|
||||
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
|
||||
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(null);
|
||||
when(conn.domainLookupByName(VM_NAME)).thenReturn(domain);
|
||||
when(domain.getInfo()).thenReturn(domainInfo);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
assertEquals(PowerState.PowerOn, answer.getState());
|
||||
assertNull(answer.getVncPort());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteLibvirtExceptionOnGetConnectionReturnsFailure() throws LibvirtException {
|
||||
LibvirtException libvirtException = mock(LibvirtException.class);
|
||||
when(libvirtException.getMessage()).thenReturn("Connection refused");
|
||||
when(libvirtUtilitiesHelper.getConnectionByVmName(VM_NAME)).thenThrow(libvirtException);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertEquals("Connection refused", answer.getDetails());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteLibvirtExceptionOnGetVncPortReturnsFailure() throws LibvirtException {
|
||||
LibvirtException libvirtException = mock(LibvirtException.class);
|
||||
when(libvirtException.getMessage()).thenReturn("VNC port error");
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
|
||||
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenThrow(libvirtException);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertEquals("VNC port error", answer.getDetails());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteLibvirtExceptionOnDomainLookupReturnsFailure() throws LibvirtException {
|
||||
LibvirtException libvirtException = mock(LibvirtException.class);
|
||||
when(libvirtException.getMessage()).thenReturn("Domain not found");
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOn);
|
||||
when(libvirtComputingResource.getVncPort(conn, VM_NAME)).thenReturn(5900);
|
||||
when(conn.domainLookupByName(VM_NAME)).thenThrow(libvirtException);
|
||||
|
||||
CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer) wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertEquals("Domain not found", answer.getDetails());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExecuteCallsGetLibvirtUtilitiesHelper() throws LibvirtException {
|
||||
when(libvirtComputingResource.getVmState(conn, VM_NAME)).thenReturn(PowerState.PowerOff);
|
||||
|
||||
wrapper.execute(command, libvirtComputingResource);
|
||||
|
||||
verify(libvirtComputingResource).getLibvirtUtilitiesHelper();
|
||||
verify(libvirtUtilitiesHelper).getConnectionByVmName(VM_NAME);
|
||||
}
|
||||
}
|
||||
|
|
@ -18,6 +18,7 @@
|
|||
//
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
|
|
@ -189,4 +190,127 @@ public class LibvirtConvertInstanceCommandWrapperTest {
|
|||
Mockito.verify(script).add("-x");
|
||||
Mockito.verify(script).add("-v");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredLibguestfsBackend() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn("28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81")
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "libvirt", null, null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add("-c");
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("export LIBGUESTFS_BACKEND=libvirt &&"));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-ip /tmp/v2v.pass.cloud.vcenter.local."));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains(" -on tmp-uuid "));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-thumbprint=28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81 "));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredTransportsOrder() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn("28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81")
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", "nbd:nbdssl", null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-transports=nbd:nbdssl "));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkFailsWhenThumbprintUnavailable() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn(null)
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class)) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", null, null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredThumbprintFromAgentProperty() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", null,
|
||||
"AA:BB:CC:DD:EE", 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-thumbprint=AA:BB:CC:DD:EE "));
|
||||
Mockito.verify(convertInstanceCommandWrapper, Mockito.never())
|
||||
.getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -581,7 +581,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
|
|||
List<FirewallRuleVO> firewallRules = firewallRulesDao.listByIpPurposeProtocolAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall, NetUtils.TCP_PROTO);
|
||||
for (FirewallRuleVO firewallRule : firewallRules) {
|
||||
PortForwardingRuleVO pfRule = portForwardingRulesDao.findByNetworkAndPorts(networkId, firewallRule.getSourcePortStart(), firewallRule.getSourcePortEnd());
|
||||
if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT || (Objects.nonNull(pfRule) && pfRule.getDestinationPortStart() == DEFAULT_SSH_PORT) ) {
|
||||
if (Objects.equals(firewallRule.getSourcePortStart(), CLUSTER_NODES_DEFAULT_START_SSH_PORT) || (Objects.nonNull(pfRule) && pfRule.getDestinationPortStart() == DEFAULT_SSH_PORT) ) {
|
||||
rule = firewallRule;
|
||||
firewallService.revokeIngressFwRule(firewallRule.getId(), true);
|
||||
logger.debug("The SSH firewall rule {} with the id {} was revoked", firewallRule.getName(), firewallRule.getId());
|
||||
|
|
|
|||
|
|
@ -135,10 +135,14 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
|
|||
|
||||
// Remove existing SSH firewall rules
|
||||
FirewallRule firewallRule = removeSshFirewallRule(publicIp, network.getId());
|
||||
int existingFirewallRuleSourcePortEnd;
|
||||
if (firewallRule == null) {
|
||||
throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned");
|
||||
logger.warn("SSH firewall rule not found for Kubernetes cluster: {}. It may have been manually deleted or modified.", kubernetesCluster.getName());
|
||||
existingFirewallRuleSourcePortEnd = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1;
|
||||
} else {
|
||||
existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
|
||||
}
|
||||
int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
|
||||
|
||||
try {
|
||||
removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd);
|
||||
} catch (ResourceUnavailableException e) {
|
||||
|
|
|
|||
|
|
@ -214,7 +214,8 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast
|
|||
maxconn = offering.getConcurrentConnections().toString();
|
||||
}
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs, elbVm.getPublicIpAddress(), _nicDao.getIpAddress(guestNetworkId, elbVm.getId()),
|
||||
elbVm.getPrivateIpAddress(), null, null, maxconn, offering.isKeepAliveEnabled());
|
||||
elbVm.getPrivateIpAddress(), null, null, maxconn, offering.isKeepAliveEnabled(),
|
||||
NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, elbVm.getPrivateIpAddress());
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, elbVm.getInstanceName());
|
||||
//FIXME: why are we setting attributes directly? Ick!! There should be accessors and
|
||||
|
|
|
|||
|
|
@ -513,7 +513,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
|
|||
}
|
||||
final LoadBalancerConfigCommand cmd =
|
||||
new LoadBalancerConfigCommand(lbs, guestNic.getIPv4Address(), guestNic.getIPv4Address(), internalLbVm.getPrivateIpAddress(), _itMgr.toNicTO(guestNicProfile,
|
||||
internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn, offering.isKeepAliveEnabled());
|
||||
internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn, offering.isKeepAliveEnabled(),
|
||||
NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
|
||||
cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key());
|
||||
cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key());
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import com.vmware.nsx_policy.infra.tier_0s.LocaleServices;
|
|||
import com.vmware.nsx_policy.infra.tier_1s.nat.NatRules;
|
||||
import com.vmware.nsx_policy.model.ApiError;
|
||||
import com.vmware.nsx_policy.model.DhcpRelayConfig;
|
||||
import com.vmware.nsx_policy.model.EnforcementPoint;
|
||||
import com.vmware.nsx_policy.model.EnforcementPointListResult;
|
||||
import com.vmware.nsx_policy.model.Group;
|
||||
import com.vmware.nsx_policy.model.GroupListResult;
|
||||
|
|
@ -64,12 +65,13 @@ import com.vmware.nsx_policy.model.PathExpression;
|
|||
import com.vmware.nsx_policy.model.PolicyGroupMembersListResult;
|
||||
import com.vmware.nsx_policy.model.PolicyNatRule;
|
||||
import com.vmware.nsx_policy.model.PolicyNatRuleListResult;
|
||||
import com.vmware.nsx_policy.model.PolicyGroupMemberDetails;
|
||||
import com.vmware.nsx_policy.model.Rule;
|
||||
import com.vmware.nsx_policy.model.SecurityPolicy;
|
||||
import com.vmware.nsx_policy.model.Segment;
|
||||
import com.vmware.nsx_policy.model.SegmentSubnet;
|
||||
import com.vmware.nsx_policy.model.ServiceListResult;
|
||||
import com.vmware.nsx_policy.model.SiteListResult;
|
||||
import com.vmware.nsx_policy.model.Site;
|
||||
import com.vmware.nsx_policy.model.Tier1;
|
||||
import com.vmware.vapi.bindings.Service;
|
||||
import com.vmware.vapi.bindings.Structure;
|
||||
|
|
@ -83,6 +85,7 @@ import com.vmware.vapi.internal.protocol.RestProtocol;
|
|||
import com.vmware.vapi.internal.protocol.client.rest.authn.BasicAuthenticationAppender;
|
||||
import com.vmware.vapi.protocol.HttpConfiguration;
|
||||
import com.vmware.vapi.std.errors.Error;
|
||||
import com.vmware.vapi.std.errors.NotFound;
|
||||
import org.apache.cloudstack.resource.NsxLoadBalancerMember;
|
||||
import org.apache.cloudstack.resource.NsxNetworkRule;
|
||||
import org.apache.cloudstack.utils.NsxControllerUtils;
|
||||
|
|
@ -96,9 +99,12 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.stream.Collectors.toSet;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServerPoolMemberName;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServerPoolName;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServiceName;
|
||||
|
|
@ -282,16 +288,18 @@ public class NsxApiClient {
|
|||
Tier1s tier1service = (Tier1s) nsxService.apply(Tier1s.class);
|
||||
return tier1service.get(tier1GatewayId);
|
||||
} catch (Exception e) {
|
||||
logger.debug(String.format("NSX Tier-1 gateway with name: %s not found", tier1GatewayId));
|
||||
logger.debug("NSX Tier-1 gateway with name: {} not found", tier1GatewayId);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private List<com.vmware.nsx_policy.model.LocaleServices> getTier0LocalServices(String tier0Gateway) {
|
||||
private Optional<com.vmware.nsx_policy.model.LocaleServices> findTier0LocalServices(String tier0Gateway) {
|
||||
try {
|
||||
LocaleServices tier0LocaleServices = (LocaleServices) nsxService.apply(LocaleServices.class);
|
||||
LocaleServicesListResult result = tier0LocaleServices.list(tier0Gateway, null, false, null, null, null, null);
|
||||
return result.getResults();
|
||||
LocaleServicesListResult result = tier0LocaleServices.list(tier0Gateway, null, false, null, 1L, null, null);
|
||||
return Optional.ofNullable(result.getResults())
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch locale services for tier gateway %s due to %s", tier0Gateway, e.getMessage()));
|
||||
}
|
||||
|
|
@ -302,10 +310,13 @@ public class NsxApiClient {
|
|||
*/
|
||||
private void createTier1LocaleServices(String tier1Id, String edgeCluster, String tier0Gateway) {
|
||||
try {
|
||||
List<com.vmware.nsx_policy.model.LocaleServices> localeServices = getTier0LocalServices(tier0Gateway);
|
||||
Optional<com.vmware.nsx_policy.model.LocaleServices> localeServices = findTier0LocalServices(tier0Gateway);
|
||||
if (localeServices.isEmpty()) {
|
||||
throw new CloudRuntimeException(String.format("Failed to find locale services for tier-0 gateway %s", tier0Gateway));
|
||||
}
|
||||
com.vmware.nsx_policy.infra.tier_1s.LocaleServices tier1LocalService = (com.vmware.nsx_policy.infra.tier_1s.LocaleServices) nsxService.apply(com.vmware.nsx_policy.infra.tier_1s.LocaleServices.class);
|
||||
com.vmware.nsx_policy.model.LocaleServices localeService = new com.vmware.nsx_policy.model.LocaleServices.Builder()
|
||||
.setEdgeClusterPath(localeServices.get(0).getEdgeClusterPath()).build();
|
||||
.setEdgeClusterPath(localeServices.get().getEdgeClusterPath()).build();
|
||||
tier1LocalService.patch(tier1Id, TIER_1_LOCALE_SERVICE_ID, localeService);
|
||||
} catch (Error error) {
|
||||
throw new CloudRuntimeException(String.format("Failed to instantiate tier-1 gateway %s in edge cluster %s", tier1Id, edgeCluster));
|
||||
|
|
@ -327,7 +338,7 @@ public class NsxApiClient {
|
|||
String tier0GatewayPath = TIER_0_GATEWAY_PATH_PREFIX + tier0Gateway;
|
||||
Tier1 tier1 = getTier1Gateway(name);
|
||||
if (tier1 != null) {
|
||||
logger.info(String.format("VPC network with name %s exists in NSX zone", name));
|
||||
logger.info("VPC network with name {} exists in NSX zone", name);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -359,7 +370,7 @@ public class NsxApiClient {
|
|||
com.vmware.nsx_policy.infra.tier_1s.LocaleServices localeService = (com.vmware.nsx_policy.infra.tier_1s.LocaleServices)
|
||||
nsxService.apply(com.vmware.nsx_policy.infra.tier_1s.LocaleServices.class);
|
||||
if (getTier1Gateway(tier1Id) == null) {
|
||||
logger.warn(String.format("The Tier 1 Gateway %s does not exist, cannot be removed", tier1Id));
|
||||
logger.warn("The Tier 1 Gateway {} does not exist, cannot be removed", tier1Id);
|
||||
return;
|
||||
}
|
||||
removeTier1GatewayNatRules(tier1Id);
|
||||
|
|
@ -370,13 +381,21 @@ public class NsxApiClient {
|
|||
|
||||
private void removeTier1GatewayNatRules(String tier1Id) {
|
||||
NatRules natRulesService = (NatRules) nsxService.apply(NatRules.class);
|
||||
PolicyNatRuleListResult result = natRulesService.list(tier1Id, NAT_ID, null, false, null, null, null, null);
|
||||
List<PolicyNatRule> natRules = result.getResults();
|
||||
List<PolicyNatRule> natRules = PagedFetcher.<PolicyNatRuleListResult, PolicyNatRule>withPageFetcher(
|
||||
cursor -> natRulesService.list(tier1Id, NAT_ID, cursor, false, null, null, null, null)
|
||||
).cursorExtractor(PolicyNatRuleListResult::getCursor)
|
||||
.itemsExtractor(PolicyNatRuleListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll()
|
||||
.getResults();
|
||||
if (CollectionUtils.isEmpty(natRules)) {
|
||||
logger.debug(String.format("Didn't find any NAT rule to remove on the Tier 1 Gateway %s", tier1Id));
|
||||
logger.debug("Didn't find any NAT rule to remove on the Tier 1 Gateway {}", tier1Id);
|
||||
} else {
|
||||
for (PolicyNatRule natRule : natRules) {
|
||||
logger.debug(String.format("Removing NAT rule %s from Tier 1 Gateway %s", natRule.getId(), tier1Id));
|
||||
logger.debug("Removing NAT rule {} from Tier 1 Gateway {}", natRule.getId(), tier1Id);
|
||||
natRulesService.delete(tier1Id, NAT_ID, natRule.getId());
|
||||
}
|
||||
}
|
||||
|
|
@ -384,38 +403,45 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
public String getDefaultSiteId() {
|
||||
SiteListResult sites = getSites();
|
||||
if (CollectionUtils.isEmpty(sites.getResults())) {
|
||||
Optional<Site> site = findFirstSite();
|
||||
if (site.isEmpty()) {
|
||||
String errorMsg = "No sites are found in the linked NSX infrastructure";
|
||||
logger.error(errorMsg);
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
}
|
||||
return sites.getResults().get(0).getId();
|
||||
return site.get().getId();
|
||||
}
|
||||
|
||||
protected SiteListResult getSites() {
|
||||
protected Optional<Site> findFirstSite() {
|
||||
try {
|
||||
Sites sites = (Sites) nsxService.apply(Sites.class);
|
||||
return sites.list(null, false, null, null, null, null);
|
||||
List<Site> siteList = sites.list(null, false, null, 1L, null, null)
|
||||
.getResults();
|
||||
return Optional.ofNullable(siteList)
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch sites list due to %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
public String getDefaultEnforcementPointPath(String siteId) {
|
||||
EnforcementPointListResult epList = getEnforcementPoints(siteId);
|
||||
if (CollectionUtils.isEmpty(epList.getResults())) {
|
||||
Optional<EnforcementPoint> ep = findFirstEnforcementPoint(siteId);
|
||||
if (ep.isEmpty()) {
|
||||
String errorMsg = String.format("No enforcement points are found in the linked NSX infrastructure for site ID %s", siteId);
|
||||
logger.error(errorMsg);
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
}
|
||||
return epList.getResults().get(0).getPath();
|
||||
return ep.get().getPath();
|
||||
}
|
||||
|
||||
protected EnforcementPointListResult getEnforcementPoints(String siteId) {
|
||||
protected Optional<EnforcementPoint> findFirstEnforcementPoint(String siteId) {
|
||||
try {
|
||||
EnforcementPoints enforcementPoints = (EnforcementPoints) nsxService.apply(EnforcementPoints.class);
|
||||
return enforcementPoints.list(siteId, null, false, null, null, null, null);
|
||||
EnforcementPointListResult result = enforcementPoints.list(siteId, null, false, null, 1L, null, null);
|
||||
return Optional.ofNullable(result.getResults())
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch enforcement points due to %s", e.getMessage()));
|
||||
}
|
||||
|
|
@ -424,7 +450,15 @@ public class NsxApiClient {
|
|||
public TransportZoneListResult getTransportZones() {
|
||||
try {
|
||||
com.vmware.nsx.TransportZones transportZones = (com.vmware.nsx.TransportZones) nsxService.apply(com.vmware.nsx.TransportZones.class);
|
||||
return transportZones.list(null, null, true, null, null, null, null, null, TransportType.OVERLAY.name(), null);
|
||||
return PagedFetcher.<TransportZoneListResult, TransportZone>withPageFetcher(
|
||||
cursor -> transportZones.list(cursor, null, true, null, null, null, null, null, TransportType.OVERLAY.name(), null)
|
||||
).cursorExtractor(TransportZoneListResult::getCursor)
|
||||
.itemsExtractor(TransportZoneListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch transport zones due to %s", e.getMessage()));
|
||||
}
|
||||
|
|
@ -465,7 +499,7 @@ public class NsxApiClient {
|
|||
removeSegment(segmentName, zoneId);
|
||||
DhcpRelayConfigs dhcpRelayConfig = (DhcpRelayConfigs) nsxService.apply(DhcpRelayConfigs.class);
|
||||
String dhcpRelayConfigId = NsxControllerUtils.getNsxDhcpRelayConfigId(zoneId, domainId, accountId, vpcId, networkId);
|
||||
logger.debug(String.format("Removing the DHCP relay config with ID %s", dhcpRelayConfigId));
|
||||
logger.debug("Removing the DHCP relay config with ID {}", dhcpRelayConfigId);
|
||||
dhcpRelayConfig.delete(dhcpRelayConfigId);
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
|
|
@ -476,7 +510,7 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
protected void removeSegment(String segmentName, long zoneId) {
|
||||
logger.debug(String.format("Removing the segment with ID %s", segmentName));
|
||||
logger.debug("Removing the segment with ID {}", segmentName);
|
||||
Segments segmentService = (Segments) nsxService.apply(Segments.class);
|
||||
String errMsg = String.format("The segment with ID %s is not found, skipping removal", segmentName);
|
||||
try {
|
||||
|
|
@ -498,7 +532,7 @@ public class NsxApiClient {
|
|||
portCount = retrySegmentDeletion(segmentPortsService, segmentName, enforcementPointPath, zoneId);
|
||||
}
|
||||
if (portCount == 0L) {
|
||||
logger.debug(String.format("Removing the segment with ID %s", segmentName));
|
||||
logger.debug("Removing the segment with ID {}", segmentName);
|
||||
removeGroupForSegment(segmentName);
|
||||
segmentService.delete(segmentName);
|
||||
} else {
|
||||
|
|
@ -509,8 +543,18 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
private PolicyGroupMembersListResult getSegmentPortList(SegmentPorts segmentPortsService, String segmentName, String enforcementPointPath) {
|
||||
return segmentPortsService.list(DEFAULT_DOMAIN, segmentName, null, enforcementPointPath,
|
||||
false, null, 50L, false, null);
|
||||
return PagedFetcher.
|
||||
<PolicyGroupMembersListResult, PolicyGroupMemberDetails>withPageFetcher(
|
||||
cursor -> segmentPortsService.list(DEFAULT_DOMAIN, segmentName, cursor, enforcementPointPath,
|
||||
false, null, 50L, false, null)
|
||||
)
|
||||
.cursorExtractor(PolicyGroupMembersListResult::getCursor)
|
||||
.itemsExtractor(PolicyGroupMembersListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
}
|
||||
|
||||
private Long retrySegmentDeletion(SegmentPorts segmentPortsService, String segmentName, String enforcementPointPath, long zoneId) {
|
||||
|
|
@ -546,7 +590,7 @@ public class NsxApiClient {
|
|||
.setEnabled(true)
|
||||
.build();
|
||||
|
||||
logger.debug(String.format("Creating NSX static NAT rule %s for tier-1 gateway %s (VPC: %s)", ruleName, tier1GatewayName, vpcName));
|
||||
logger.debug("Creating NSX static NAT rule {} for tier-1 gateway {} (VPC: {})", ruleName, tier1GatewayName, vpcName);
|
||||
natService.patch(tier1GatewayName, NatId.USER.name(), ruleName, rule);
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
|
|
@ -582,8 +626,7 @@ public class NsxApiClient {
|
|||
natService.delete(tier1GatewayName, NatId.USER.name(), ruleName);
|
||||
}
|
||||
} catch (Error error) {
|
||||
String msg = String.format("Cannot find NAT rule with name %s: %s, skipping deletion", ruleName, error.getMessage());
|
||||
logger.debug(msg);
|
||||
logger.debug("Cannot find NAT rule with name {}: {}, skipping deletion", ruleName, error.getMessage());
|
||||
}
|
||||
|
||||
if (service == Network.Service.PortForwarding) {
|
||||
|
|
@ -595,7 +638,7 @@ public class NsxApiClient {
|
|||
String vmIp, String publicPort, String service) {
|
||||
try {
|
||||
NatRules natService = (NatRules) nsxService.apply(NatRules.class);
|
||||
logger.debug(String.format("Creating NSX Port-Forwarding NAT %s for network %s", ruleName, networkName));
|
||||
logger.debug("Creating NSX Port-Forwarding NAT {} for network {}", ruleName, networkName);
|
||||
PolicyNatRule rule = new PolicyNatRule.Builder()
|
||||
.setId(ruleName)
|
||||
.setDisplayName(ruleName)
|
||||
|
|
@ -656,9 +699,20 @@ public class NsxApiClient {
|
|||
public void createNsxLbServerPool(List<NsxLoadBalancerMember> memberList, String tier1GatewayName, String lbServerPoolName,
|
||||
String algorithm, String privatePort, String protocol) {
|
||||
try {
|
||||
String activeMonitorPath = getLbActiveMonitorPath(lbServerPoolName, privatePort, protocol);
|
||||
List<LBPoolMember> members = getLbPoolMembers(memberList, tier1GatewayName);
|
||||
LbPools lbPools = (LbPools) nsxService.apply(LbPools.class);
|
||||
Optional<LBPool> nsxLbServerPool = getNsxLbServerPool(lbPools, lbServerPoolName);
|
||||
// Skip if pool exists and members unchanged
|
||||
if (nsxLbServerPool.isPresent()) {
|
||||
List<LBPoolMember> existingMembers = nsxLbServerPool
|
||||
.map(LBPool::getMembers)
|
||||
.orElseGet(List::of);
|
||||
if (hasSamePoolMembers(existingMembers, members)) {
|
||||
logger.debug("Skipping patch for LB pool {} on Tier-1 {}: members unchanged", lbServerPoolName, tier1GatewayName);
|
||||
return;
|
||||
}
|
||||
}
|
||||
String activeMonitorPath = getLbActiveMonitorPath(lbServerPoolName, privatePort, protocol);
|
||||
LBPool lbPool = new LBPool.Builder()
|
||||
.setId(lbServerPoolName)
|
||||
.setDisplayName(lbServerPoolName)
|
||||
|
|
@ -676,9 +730,52 @@ public class NsxApiClient {
|
|||
}
|
||||
}
|
||||
|
||||
private Optional<LBPool> getNsxLbServerPool(LbPools lbPools, String lbServerPoolName) {
|
||||
try {
|
||||
return Optional.ofNullable(lbPools.get(lbServerPoolName));
|
||||
} catch (NotFound e) {
|
||||
logger.warn("Server Pool not found: {}", lbServerPoolName);
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean hasSamePoolMembers(List<LBPoolMember> existingMembers, List<LBPoolMember> membersUpdate) {
|
||||
Set<String> existingMembersSet = existingMembers.stream()
|
||||
.map(this::buildPoolMemberKey)
|
||||
.collect(toSet());
|
||||
Set<String> updateMembersSet = membersUpdate.stream()
|
||||
.map(this::buildPoolMemberKey)
|
||||
.collect(toSet());
|
||||
|
||||
return existingMembersSet.size() == updateMembersSet.size()
|
||||
&& existingMembersSet.containsAll(updateMembersSet);
|
||||
}
|
||||
|
||||
private String buildPoolMemberKey(LBPoolMember member) {
|
||||
return member.getIpAddress() + ':' + member.getPort() + ':' + member.getDisplayName();
|
||||
}
|
||||
|
||||
private String getLbActiveMonitorPath(String lbServerPoolName, String port, String protocol) {
|
||||
LbMonitorProfiles lbActiveMonitor = (LbMonitorProfiles) nsxService.apply(LbMonitorProfiles.class);
|
||||
String lbMonitorProfileId = getActiveMonitorProfileName(lbServerPoolName, port, protocol);
|
||||
Optional<Structure> monitorProfile = getMonitorProfile(lbActiveMonitor, lbMonitorProfileId);
|
||||
if (monitorProfile.isEmpty()) {
|
||||
patchMonitoringProfile(port, protocol, lbMonitorProfileId, lbActiveMonitor);
|
||||
monitorProfile = getMonitorProfile(lbActiveMonitor, lbMonitorProfileId);
|
||||
}
|
||||
return monitorProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
}
|
||||
|
||||
private Optional<Structure> getMonitorProfile(LbMonitorProfiles lbActiveMonitor, String lbMonitorProfileId) {
|
||||
try {
|
||||
return Optional.ofNullable(lbActiveMonitor.get(lbMonitorProfileId));
|
||||
} catch (NotFound e) {
|
||||
logger.warn("LB Monitor Profile not found: {}", lbMonitorProfileId);
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
private void patchMonitoringProfile(String port, String protocol, String lbMonitorProfileId, LbMonitorProfiles lbActiveMonitor) {
|
||||
if ("TCP".equals(protocol.toUpperCase(Locale.ROOT))) {
|
||||
LBTcpMonitorProfile lbTcpMonitorProfile = new LBTcpMonitorProfile.Builder(TCP_MONITOR_PROFILE)
|
||||
.setDisplayName(lbMonitorProfileId)
|
||||
|
|
@ -691,14 +788,18 @@ public class NsxApiClient {
|
|||
.build();
|
||||
lbActiveMonitor.patch(lbMonitorProfileId, icmpMonitorProfile);
|
||||
}
|
||||
|
||||
LBMonitorProfileListResult listResult = listLBActiveMonitors(lbActiveMonitor);
|
||||
Optional<Structure> monitorProfile = listResult.getResults().stream().filter(profile -> profile._getDataValue().getField("id").toString().equals(lbMonitorProfileId)).findFirst();
|
||||
return monitorProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
}
|
||||
|
||||
LBMonitorProfileListResult listLBActiveMonitors(LbMonitorProfiles lbActiveMonitor) {
|
||||
return lbActiveMonitor.list(null, false, null, null, null, null);
|
||||
return PagedFetcher.<LBMonitorProfileListResult, Structure>withPageFetcher(
|
||||
cursor -> lbActiveMonitor.list(cursor, false, null, null, null, null)
|
||||
).cursorExtractor(LBMonitorProfileListResult::getCursor)
|
||||
.itemsExtractor(LBMonitorProfileListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
}
|
||||
|
||||
public void createNsxLoadBalancer(String tier1GatewayName) {
|
||||
|
|
@ -735,7 +836,7 @@ public class NsxApiClient {
|
|||
String lbVirtualServerName = getVirtualServerName(tier1GatewayName, lbId);
|
||||
String lbServiceName = getLoadBalancerName(tier1GatewayName);
|
||||
LbVirtualServers lbVirtualServers = (LbVirtualServers) nsxService.apply(LbVirtualServers.class);
|
||||
if (Objects.nonNull(getLbVirtualServerService(lbVirtualServers, lbServiceName))) {
|
||||
if (Objects.nonNull(getLbVirtualServerService(lbVirtualServers, lbVirtualServerName))) {
|
||||
return;
|
||||
}
|
||||
LBVirtualServer lbVirtualServer = new LBVirtualServer.Builder()
|
||||
|
|
@ -763,7 +864,7 @@ public class NsxApiClient {
|
|||
return lbVirtualServer;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.debug(String.format("Found an LB virtual server named: %s on NSX", lbVSName));
|
||||
logger.debug("Found an LB virtual server named: {} on NSX", lbVSName);
|
||||
return null;
|
||||
}
|
||||
return null;
|
||||
|
|
@ -851,8 +952,15 @@ public class NsxApiClient {
|
|||
private String getLbProfileForProtocol(String protocol) {
|
||||
try {
|
||||
LbAppProfiles lbAppProfiles = (LbAppProfiles) nsxService.apply(LbAppProfiles.class);
|
||||
LBAppProfileListResult lbAppProfileListResults = lbAppProfiles.list(null, null,
|
||||
null, null, null, null);
|
||||
LBAppProfileListResult lbAppProfileListResults = PagedFetcher.<LBAppProfileListResult, Structure>withPageFetcher(
|
||||
cursor -> lbAppProfiles.list(cursor, null, null, null, null, null)
|
||||
).cursorExtractor(LBAppProfileListResult::getCursor)
|
||||
.itemsExtractor(LBAppProfileListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
Optional<Structure> appProfile = lbAppProfileListResults.getResults().stream().filter(profile -> profile._getDataValue().getField("path").toString().contains(protocol.toLowerCase(Locale.ROOT))).findFirst();
|
||||
return appProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
} catch (Error error) {
|
||||
|
|
@ -868,7 +976,15 @@ public class NsxApiClient {
|
|||
Services service = (Services) nsxService.apply(Services.class);
|
||||
|
||||
// Find default service if present
|
||||
ServiceListResult serviceList = service.list(null, true, false, null, null, null, null);
|
||||
ServiceListResult serviceList = PagedFetcher.<ServiceListResult, com.vmware.nsx_policy.model.Service>withPageFetcher(
|
||||
cursor -> service.list(cursor, true, false, null, null, null, null)
|
||||
).cursorExtractor(ServiceListResult::getCursor)
|
||||
.itemsExtractor(ServiceListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
|
||||
List<com.vmware.nsx_policy.model.Service> services = serviceList.getResults();
|
||||
List<String> matchedDefaultSvc = services.parallelStream().filter(svc ->
|
||||
|
|
@ -1095,9 +1211,17 @@ public class NsxApiClient {
|
|||
|
||||
private List<Group> listNsxGroups() {
|
||||
try {
|
||||
Groups groups = (Groups) nsxService.apply(Groups.class);
|
||||
GroupListResult result = groups.list(DEFAULT_DOMAIN, null, false, null, null, null, null, null);
|
||||
return result.getResults();
|
||||
Groups groups = (Groups) nsxService.apply(Groups.class);
|
||||
GroupListResult result = PagedFetcher.<GroupListResult, Group>withPageFetcher(
|
||||
cursor -> groups.list(DEFAULT_DOMAIN, cursor, false, null, null, null, null, null)
|
||||
).cursorExtractor(GroupListResult::getCursor)
|
||||
.itemsExtractor(GroupListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
return result.getResults();
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
String msg = String.format("Failed to list NSX groups, due to: %s", ae.getErrorMessage());
|
||||
|
|
|
|||
|
|
@ -0,0 +1,82 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.service;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
class PagedFetcher<R, T> {
|
||||
|
||||
private final Function<String, R> fetchPage;
|
||||
private Function<R, String> cursorExtractor;
|
||||
private Function<R, List<T>> itemsExtractor;
|
||||
private BiConsumer<R, List<T>> itemsSetter;
|
||||
|
||||
static <R, T> PagedFetcher<R, T> withPageFetcher(Function<String, R> pageFetcher) {
|
||||
return new PagedFetcher<>(pageFetcher);
|
||||
}
|
||||
|
||||
PagedFetcher<R, T> cursorExtractor(Function<R, String> cursorProvider) {
|
||||
this.cursorExtractor = cursorProvider;
|
||||
return this;
|
||||
}
|
||||
|
||||
PagedFetcher<R, T> itemsExtractor(Function<R, List<T>> resultsProvider) {
|
||||
this.itemsExtractor = resultsProvider;
|
||||
return this;
|
||||
}
|
||||
|
||||
PagedFetcher<R, T> itemsSetter(BiConsumer<R, List<T>> resultsSetter) {
|
||||
this.itemsSetter = resultsSetter;
|
||||
return this;
|
||||
}
|
||||
|
||||
private PagedFetcher(Function<String, R> pageFetcher) {
|
||||
this.fetchPage = pageFetcher;
|
||||
}
|
||||
|
||||
R fetchAll() {
|
||||
Objects.requireNonNull(cursorExtractor, "Cursor extractor must be set");
|
||||
Objects.requireNonNull(itemsExtractor, "Items extractor must be set");
|
||||
Objects.requireNonNull(itemsSetter, "Items setter must be set");
|
||||
|
||||
R firstPage = fetchPage.apply(null);
|
||||
String cursor = cursorExtractor.apply(firstPage);
|
||||
if (cursor == null || cursor.isEmpty()) {
|
||||
return firstPage;
|
||||
}
|
||||
|
||||
List<T> firstResults = itemsExtractor.apply(firstPage);
|
||||
List<T> allItems = firstResults != null
|
||||
? new ArrayList<>(firstResults)
|
||||
: new ArrayList<>();
|
||||
while (cursor != null && !cursor.isEmpty()) {
|
||||
R nextPage = fetchPage.apply(cursor);
|
||||
List<T> nextItems = itemsExtractor.apply(nextPage);
|
||||
if (nextItems != null && !nextItems.isEmpty()) {
|
||||
allItems.addAll(nextItems);
|
||||
}
|
||||
cursor = cursorExtractor.apply(nextPage);
|
||||
}
|
||||
|
||||
itemsSetter.accept(firstPage, allItems);
|
||||
return firstPage;
|
||||
}
|
||||
}
|
||||
|
|
@ -18,13 +18,32 @@ package org.apache.cloudstack.service;
|
|||
|
||||
import com.cloud.network.Network;
|
||||
import com.cloud.network.SDNProviderNetworkRule;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.vmware.nsx.cluster.Status;
|
||||
import com.vmware.nsx.model.ClusterStatus;
|
||||
import com.vmware.nsx.model.ControllerClusterStatus;
|
||||
import com.vmware.nsx_policy.infra.LbAppProfiles;
|
||||
import com.vmware.nsx_policy.infra.LbMonitorProfiles;
|
||||
import com.vmware.nsx_policy.infra.LbPools;
|
||||
import com.vmware.nsx_policy.infra.LbServices;
|
||||
import com.vmware.nsx_policy.infra.LbVirtualServers;
|
||||
import com.vmware.nsx_policy.infra.domains.Groups;
|
||||
import com.vmware.nsx_policy.model.ApiError;
|
||||
import com.vmware.nsx_policy.model.Group;
|
||||
import com.vmware.nsx_policy.model.LBAppProfileListResult;
|
||||
import com.vmware.nsx_policy.model.LBIcmpMonitorProfile;
|
||||
import com.vmware.nsx_policy.model.LBService;
|
||||
import com.vmware.nsx_policy.model.LBTcpMonitorProfile;
|
||||
import com.vmware.nsx_policy.model.LBPool;
|
||||
import com.vmware.nsx_policy.model.LBPoolMember;
|
||||
import com.vmware.nsx_policy.model.LBVirtualServer;
|
||||
import com.vmware.nsx_policy.model.PathExpression;
|
||||
import com.vmware.vapi.bindings.Service;
|
||||
import com.vmware.vapi.bindings.Structure;
|
||||
import com.vmware.vapi.std.errors.Error;
|
||||
import com.vmware.vapi.std.errors.NotFound;
|
||||
import org.apache.cloudstack.resource.NsxLoadBalancerMember;
|
||||
import org.apache.cloudstack.utils.NsxControllerUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
|
@ -36,8 +55,20 @@ import org.mockito.MockitoAnnotations;
|
|||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class NsxApiClientTest {
|
||||
|
||||
private static final String TIER_1_GATEWAY_NAME = "t1";
|
||||
|
||||
@Mock
|
||||
private Function<Class<? extends Service>, Service> nsxService;
|
||||
@Mock
|
||||
|
|
@ -108,4 +139,284 @@ public class NsxApiClientTest {
|
|||
Mockito.when(clusterStatus.getControlClusterStatus()).thenReturn(status);
|
||||
Assert.assertTrue(client.isNsxControllerActive());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolExistingMonitorProfileSkipsMonitorPatch() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LbMonitorProfiles lbMonitorProfiles = mockLbMonitorProfiles();
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(lbMonitorProfiles, never()).patch(anyString(), any(LBTcpMonitorProfile.class));
|
||||
verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolMissingMonitorTCPProfilePerformsPatch() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
|
||||
Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
|
||||
Mockito.when(lbMonitorProfiles.get(anyString())).thenThrow(new NotFound(null, null)).thenReturn(monitorStructure);
|
||||
Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(lbMonitorProfiles).patch(anyString(), any(LBTcpMonitorProfile.class));
|
||||
verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolMissingMonitorUDPProfilePerformsPatch() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
|
||||
Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
|
||||
Mockito.when(lbMonitorProfiles.get(anyString())).thenThrow(new NotFound(null, null)).thenReturn(monitorStructure);
|
||||
Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "UDP");
|
||||
|
||||
verify(lbMonitorProfiles).patch(anyString(), any(LBIcmpMonitorProfile.class));
|
||||
verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolPoolExistsWithSameMembersSkipsPatch() {
|
||||
long lbId = 1L;
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(
|
||||
new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
|
||||
new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
|
||||
);
|
||||
List<LBPoolMember> sameMembers = List.of(
|
||||
createPoolMember(2L, "10.0.0.2", 80),
|
||||
createPoolMember(1L, "10.0.0.1", 80)
|
||||
);
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LBPool existingPool = Mockito.mock(LBPool.class);
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
|
||||
Mockito.when(existingPool.getMembers()).thenReturn(sameMembers);
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(nsxService, never()).apply(LbMonitorProfiles.class);
|
||||
verify(lbPools, never()).patch(anyString(), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolPoolExistsWithoutMembersAndEmptyUpdateSkipsPatch() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LBPool existingPool = Mockito.mock(LBPool.class);
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
|
||||
Mockito.when(existingPool.getMembers()).thenReturn(null);
|
||||
|
||||
client.createNsxLbServerPool(List.of(), TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(nsxService, never()).apply(LbMonitorProfiles.class);
|
||||
verify(lbPools, never()).patch(anyString(), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolPoolExistsWithDuplicateMembersSkipsPatch() {
|
||||
long lbId = 1L;
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(
|
||||
new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
|
||||
new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
|
||||
);
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LBPool existingPool = Mockito.mock(LBPool.class);
|
||||
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
|
||||
Mockito.when(existingPool.getMembers()).thenReturn(List.of(
|
||||
createPoolMember(1L, "10.0.0.1", 80),
|
||||
createPoolMember(1L, "10.0.0.1", 80),
|
||||
createPoolMember(2L, "10.0.0.2", 80)
|
||||
));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(nsxService, never()).apply(LbMonitorProfiles.class);
|
||||
verify(lbPools, never()).patch(anyString(), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolPoolExistsWithDifferentMembersPerformsPatch() {
|
||||
long lbId = 1L;
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(
|
||||
new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
|
||||
new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
|
||||
);
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LBPool existingPool = Mockito.mock(LBPool.class);
|
||||
|
||||
mockLbMonitorProfiles();
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
|
||||
Mockito.when(existingPool.getMembers()).thenReturn(List.of(
|
||||
createPoolMember(1L, "10.0.0.10", 80)
|
||||
));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolPoolDoesNotExistPerformsPatch() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
|
||||
mockLbMonitorProfiles();
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
|
||||
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
|
||||
verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateAndAddNsxLbVirtualServerVirtualServerAlreadyExistsSkipsPatch() {
|
||||
long lbId = 1L;
|
||||
String lbVirtualServerName = NsxControllerUtils.getVirtualServerName(TIER_1_GATEWAY_NAME, lbId);
|
||||
String lbServiceName = NsxControllerUtils.getLoadBalancerName(TIER_1_GATEWAY_NAME);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LbServices lbServices = Mockito.mock(LbServices.class);
|
||||
LbVirtualServers lbVirtualServers = Mockito.mock(LbVirtualServers.class);
|
||||
LBVirtualServer existingVs = Mockito.mock(LBVirtualServer.class);
|
||||
|
||||
mockLbMonitorProfiles();
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(nsxService.apply(LbServices.class)).thenReturn(lbServices);
|
||||
Mockito.when(nsxService.apply(LbVirtualServers.class)).thenReturn(lbVirtualServers);
|
||||
Mockito.when(lbPools.get(anyString())).thenThrow(new NotFound(null, null));
|
||||
Mockito.when(lbServices.get(anyString())).thenReturn(null);
|
||||
Mockito.when(lbVirtualServers.get(lbVirtualServerName)).thenReturn(existingVs);
|
||||
|
||||
client.createAndAddNsxLbVirtualServer(TIER_1_GATEWAY_NAME, lbId, "192.168.1.1", "443",
|
||||
memberList, "roundrobin", "TCP", "80");
|
||||
|
||||
verify(lbVirtualServers).get(lbVirtualServerName);
|
||||
verify(lbVirtualServers, never()).get(lbServiceName);
|
||||
verify(lbVirtualServers, never()).patch(anyString(), any(LBVirtualServer.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateAndAddNsxLbVirtualServerVirtualServerNotFoundPerformsPatch() {
|
||||
long lbId = 1L;
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
|
||||
String lbVirtualServerName = NsxControllerUtils.getVirtualServerName(TIER_1_GATEWAY_NAME, lbId);
|
||||
String lbServiceName = NsxControllerUtils.getLoadBalancerName(TIER_1_GATEWAY_NAME);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
LBPool lbPool = Mockito.mock(LBPool.class);
|
||||
LbServices lbServices = Mockito.mock(LbServices.class);
|
||||
LBService lbService = Mockito.mock(LBService.class);
|
||||
LbVirtualServers lbVirtualServers = Mockito.mock(LbVirtualServers.class);
|
||||
|
||||
mockLbMonitorProfiles();
|
||||
mockLbAppProfiles();
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(nsxService.apply(LbServices.class)).thenReturn(lbServices);
|
||||
Mockito.when(nsxService.apply(LbVirtualServers.class)).thenReturn(lbVirtualServers);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null)).thenReturn(lbPool);
|
||||
Mockito.when(lbPool.getPath()).thenReturn("/infra/lb-pools/" + lbServerPoolName);
|
||||
Mockito.when(lbServices.get(lbServiceName)).thenReturn(lbService);
|
||||
Mockito.when(lbService.getPath()).thenReturn("/infra/lb-services/" + lbServiceName);
|
||||
Mockito.when(lbVirtualServers.get(lbVirtualServerName)).thenThrow(new NotFound(null, null));
|
||||
|
||||
client.createAndAddNsxLbVirtualServer(TIER_1_GATEWAY_NAME, lbId, "192.168.1.1", "443",
|
||||
memberList, "roundrobin", "TCP", "80");
|
||||
|
||||
verify(lbVirtualServers).get(lbVirtualServerName);
|
||||
verify(lbVirtualServers, never()).get(lbServiceName);
|
||||
verify(lbVirtualServers).patch(eq(lbVirtualServerName), any(LBVirtualServer.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreateNsxLbServerPoolThrowsExceptionOnPatchError() {
|
||||
String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
|
||||
List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));
|
||||
|
||||
LbPools lbPools = Mockito.mock(LbPools.class);
|
||||
Structure errorData = Mockito.mock(Structure.class);
|
||||
ApiError apiError = new ApiError();
|
||||
apiError.setErrorData(errorData);
|
||||
|
||||
mockLbMonitorProfiles();
|
||||
Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
|
||||
Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
|
||||
when(errorData._convertTo(ApiError.class)).thenReturn(apiError);
|
||||
doThrow(new Error(List.of(), errorData)).when(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
|
||||
|
||||
CloudRuntimeException thrownException = assertThrows(CloudRuntimeException.class, () -> {
|
||||
client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
|
||||
});
|
||||
assertTrue(thrownException.getMessage().startsWith("Failed to create NSX LB server pool, due to"));
|
||||
}
|
||||
|
||||
private LbMonitorProfiles mockLbMonitorProfiles() {
|
||||
LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
|
||||
Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);
|
||||
|
||||
Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
|
||||
Mockito.when(lbMonitorProfiles.get(anyString())).thenReturn(monitorStructure);
|
||||
Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
|
||||
return lbMonitorProfiles;
|
||||
}
|
||||
|
||||
private void mockLbAppProfiles() {
|
||||
LbAppProfiles lbAppProfiles = Mockito.mock(LbAppProfiles.class);
|
||||
LBAppProfileListResult appProfileListResult = Mockito.mock(LBAppProfileListResult.class);
|
||||
Structure appProfile = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);
|
||||
|
||||
Mockito.when(nsxService.apply(LbAppProfiles.class)).thenReturn(lbAppProfiles);
|
||||
Mockito.when(lbAppProfiles.list(null, null, null, null, null, null)).thenReturn(appProfileListResult);
|
||||
Mockito.when(appProfileListResult.getResults()).thenReturn(List.of(appProfile));
|
||||
Mockito.when(appProfile._getDataValue().getField("path").toString()).thenReturn("/infra/lb-app-profiles/default-tcp-profile");
|
||||
}
|
||||
|
||||
private LBPoolMember createPoolMember(long vmId, String ipAddress, int port) {
|
||||
return new LBPoolMember.Builder()
|
||||
.setDisplayName(NsxControllerUtils.getServerPoolMemberName(TIER_1_GATEWAY_NAME, vmId))
|
||||
.setIpAddress(ipAddress)
|
||||
.setPort(String.valueOf(port))
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,156 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.service;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
|
||||
public class PagedFetcherTest {
|
||||
|
||||
private static class Page {
|
||||
private String cursor;
|
||||
private List<String> items;
|
||||
|
||||
Page(String cursor, List<String> items) {
|
||||
this.cursor = cursor;
|
||||
this.items = items;
|
||||
}
|
||||
|
||||
String getCursor() {
|
||||
return cursor;
|
||||
}
|
||||
|
||||
List<String> getItems() {
|
||||
return items;
|
||||
}
|
||||
|
||||
void setItems(List<String> items) {
|
||||
this.items = items;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFetchAllWhenThereIsNoPagination() {
|
||||
// given
|
||||
Page firstPage = new Page(null, new ArrayList<>(List.of("a", "b")));
|
||||
AtomicBoolean itemsSetterCalled = new AtomicBoolean(false);
|
||||
PagedFetcher<Page, String> fetcher = PagedFetcher.<Page, String>withPageFetcher(
|
||||
cursor -> {
|
||||
assertNull(cursor);
|
||||
return firstPage;
|
||||
})
|
||||
.cursorExtractor(Page::getCursor)
|
||||
.itemsExtractor(Page::getItems)
|
||||
.itemsSetter((page, items) -> itemsSetterCalled.set(true));
|
||||
|
||||
// when
|
||||
Page result = fetcher.fetchAll();
|
||||
|
||||
// then
|
||||
assertSame(firstPage, result);
|
||||
assertEquals(List.of("a", "b"), result.getItems());
|
||||
assertFalse("itemsSetter must not be called when there is no next page", itemsSetterCalled.get());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFetchAllWhenThereIsNoPaginationAndEmptyCursor() {
|
||||
// given
|
||||
Page firstPage = new Page("", new ArrayList<>(List.of("x")));
|
||||
|
||||
AtomicBoolean itemsSetterCalled = new AtomicBoolean(false);
|
||||
|
||||
PagedFetcher<Page, String> fetcher = PagedFetcher
|
||||
.<Page, String>withPageFetcher(cursor -> {
|
||||
assertNull(cursor);
|
||||
return firstPage;
|
||||
})
|
||||
.cursorExtractor(Page::getCursor)
|
||||
.itemsExtractor(Page::getItems)
|
||||
.itemsSetter((page, items) -> itemsSetterCalled.set(true));
|
||||
|
||||
// when
|
||||
Page result = fetcher.fetchAll();
|
||||
|
||||
// then
|
||||
assertSame(firstPage, result);
|
||||
assertEquals(List.of("x"), result.getItems());
|
||||
assertFalse("itemsSetter must not be called when there is no next page", itemsSetterCalled.get());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFetchAllWhenMultiPages() {
|
||||
// given
|
||||
Page page1 = new Page("c1", new ArrayList<>(List.of("p1a", "p1b")));
|
||||
Page page2 = new Page("c2", new ArrayList<>(List.of("p2a")));
|
||||
Page page3 = new Page(null, new ArrayList<>(List.of("p3a", "p3b")));
|
||||
|
||||
Map<String, Page> pagesByCursor = new HashMap<>();
|
||||
pagesByCursor.put(null, page1);
|
||||
pagesByCursor.put("c1", page2);
|
||||
pagesByCursor.put("c2", page3);
|
||||
|
||||
PagedFetcher<Page, String> fetcher = PagedFetcher
|
||||
.<Page, String>withPageFetcher(pagesByCursor::get)
|
||||
.cursorExtractor(Page::getCursor)
|
||||
.itemsExtractor(Page::getItems)
|
||||
.itemsSetter((page, items) -> {
|
||||
assertSame(page1, page);
|
||||
page.setItems(items);
|
||||
});
|
||||
|
||||
// when
|
||||
Page result = fetcher.fetchAll();
|
||||
|
||||
// then
|
||||
assertSame("Result must be the first page object", page1, result);
|
||||
assertEquals(List.of("p1a", "p1b", "p2a", "p3a", "p3b"), result.getItems());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFetchAllFirstPageItemsNullSecondWithItems() {
|
||||
// given
|
||||
Page page1 = new Page("next", null);
|
||||
Page page2 = new Page(null, new ArrayList<>(List.of("x", "y")));
|
||||
|
||||
Map<String, Page> pages = new HashMap<>();
|
||||
pages.put(null, page1);
|
||||
pages.put("next", page2);
|
||||
|
||||
PagedFetcher<Page, String> fetcher = PagedFetcher
|
||||
.<Page, String>withPageFetcher(pages::get)
|
||||
.cursorExtractor(Page::getCursor)
|
||||
.itemsExtractor(Page::getItems)
|
||||
.itemsSetter(Page::setItems);
|
||||
|
||||
// when
|
||||
Page result = fetcher.fetchAll();
|
||||
|
||||
// then
|
||||
assertSame(page1, result);
|
||||
assertEquals(List.of("x", "y"), result.getItems());
|
||||
}
|
||||
}
|
||||
|
|
@ -24,6 +24,8 @@ import java.security.NoSuchAlgorithmException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import javax.crypto.KeyGenerator;
|
||||
import javax.crypto.SecretKey;
|
||||
|
|
@ -98,6 +100,51 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
|||
return String.format("%s-%s", ACS_PREFIX, account.getUuid());
|
||||
}
|
||||
|
||||
private void updateCannedPolicy(long storeId, Account account, String excludeBucket) {
|
||||
List<BucketVO> buckets = _bucketDao.listByObjectStoreIdAndAccountId(storeId, account.getId());
|
||||
|
||||
String resources = buckets.stream()
|
||||
.map(BucketVO::getName)
|
||||
.filter(name -> !Objects.equals(name, excludeBucket))
|
||||
.map(name -> "\"arn:aws:s3:::" + name + "/*\"")
|
||||
.collect(Collectors.joining(",\n"));
|
||||
String policy;
|
||||
if (resources.isEmpty()) {
|
||||
// Resource cannot be empty in a canned Policy so deny access to all resources if the user has no buckets
|
||||
policy = " {\n" +
|
||||
" \"Statement\": [\n" +
|
||||
" {\n" +
|
||||
" \"Action\": \"s3:*\",\n" +
|
||||
" \"Effect\": \"Deny\",\n" +
|
||||
" \"Resource\": [\"arn:aws:s3:::*\", \"arn:aws:s3:::*/*\"]\n" +
|
||||
" }\n" +
|
||||
" ],\n" +
|
||||
" \"Version\": \"2012-10-17\"\n" +
|
||||
" }";
|
||||
} else {
|
||||
policy = " {\n" +
|
||||
" \"Statement\": [\n" +
|
||||
" {\n" +
|
||||
" \"Action\": \"s3:*\",\n" +
|
||||
" \"Effect\": \"Allow\",\n" +
|
||||
" \"Resource\": [" + resources + "]\n" +
|
||||
" }\n" +
|
||||
" ],\n" +
|
||||
" \"Version\": \"2012-10-17\"\n" +
|
||||
" }";
|
||||
}
|
||||
|
||||
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
|
||||
String policyName = getUserOrAccessKeyForAccount(account) + "-policy";
|
||||
String userName = getUserOrAccessKeyForAccount(account);
|
||||
try {
|
||||
minioAdminClient.addCannedPolicy(policyName, policy);
|
||||
minioAdminClient.setPolicy(userName, false, policyName);
|
||||
} catch (NoSuchAlgorithmException | IOException | InvalidKeyException e) {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Bucket createBucket(Bucket bucket, boolean objectLock) {
|
||||
//ToDo Client pool mgmt
|
||||
|
|
@ -125,33 +172,8 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
|||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
List<BucketVO> buckets = _bucketDao.listByObjectStoreIdAndAccountId(storeId, accountId);
|
||||
StringBuilder resources_builder = new StringBuilder();
|
||||
for(BucketVO exitingBucket : buckets) {
|
||||
resources_builder.append("\"arn:aws:s3:::"+exitingBucket.getName()+"/*\",\n");
|
||||
}
|
||||
resources_builder.append("\"arn:aws:s3:::"+bucketName+"/*\"\n");
|
||||
updateCannedPolicy(storeId, account,null);
|
||||
|
||||
String policy = " {\n" +
|
||||
" \"Statement\": [\n" +
|
||||
" {\n" +
|
||||
" \"Action\": \"s3:*\",\n" +
|
||||
" \"Effect\": \"Allow\",\n" +
|
||||
" \"Principal\": \"*\",\n" +
|
||||
" \"Resource\": ["+resources_builder+"]" +
|
||||
" }\n" +
|
||||
" ],\n" +
|
||||
" \"Version\": \"2012-10-17\"\n" +
|
||||
" }";
|
||||
MinioAdminClient minioAdminClient = getMinIOAdminClient(storeId);
|
||||
String policyName = getUserOrAccessKeyForAccount(account) + "-policy";
|
||||
String userName = getUserOrAccessKeyForAccount(account);
|
||||
try {
|
||||
minioAdminClient.addCannedPolicy(policyName, policy);
|
||||
minioAdminClient.setPolicy(userName, false, policyName);
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
String accessKey = _accountDetailsDao.findDetail(accountId, MINIO_ACCESS_KEY).getValue();
|
||||
String secretKey = _accountDetailsDao.findDetail(accountId, MINIO_SECRET_KEY).getValue();
|
||||
ObjectStoreVO store = _storeDao.findById(storeId);
|
||||
|
|
@ -183,6 +205,8 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
|||
@Override
|
||||
public boolean deleteBucket(BucketTO bucket, long storeId) {
|
||||
String bucketName = bucket.getName();
|
||||
long accountId = bucket.getAccountId();
|
||||
Account account = _accountDao.findById(accountId);
|
||||
MinioClient minioClient = getMinIOClient(storeId);
|
||||
try {
|
||||
if(!minioClient.bucketExists(BucketExistsArgs.builder().bucket(bucketName).build())) {
|
||||
|
|
@ -197,6 +221,9 @@ public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
|
|||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
|
||||
updateCannedPolicy(storeId, account, bucketName);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue